Home | History | Annotate | Download | only in storaged
      1 /*
      2  * Copyright (C) 2016 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #define LOG_TAG "storaged"
     18 
#include <stdint.h>
#include <time.h>

#include <algorithm>
#include <string>
#include <unordered_map>

#include <android/content/pm/IPackageManagerNative.h>
#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/macros.h>
#include <android-base/parseint.h>
#include <android-base/strings.h>
#include <android-base/stringprintf.h>
#include <binder/IServiceManager.h>
#include <log/log_event_list.h>
     34 
     35 #include "storaged.h"
     36 #include "storaged_uid_monitor.h"
     37 
     38 using namespace android;
     39 using namespace android::base;
     40 using namespace android::content::pm;
     41 using namespace android::os::storaged;
     42 using namespace storaged_proto;
     43 
namespace {

// Set when a uid appears whose name is not in the cached history; cleared
// once get_uid_names() successfully refreshes names from package manager.
bool refresh_uid_names;
// Kernel interface exposing per-uid (and per-task) I/O counters.
const char* UID_IO_STATS_PATH = "/proc/uid_io/stats";

} // namespace
     50 
     51 std::unordered_map<uint32_t, uid_info> uid_monitor::get_uid_io_stats()
     52 {
     53     Mutex::Autolock _l(uidm_mutex);
     54     return get_uid_io_stats_locked();
     55 };
     56 
     57 /* return true on parse success and false on failure */
     58 bool uid_info::parse_uid_io_stats(std::string&& s)
     59 {
     60     std::vector<std::string> fields = Split(s, " ");
     61     if (fields.size() < 11 ||
     62         !ParseUint(fields[0],  &uid) ||
     63         !ParseUint(fields[1],  &io[FOREGROUND].rchar) ||
     64         !ParseUint(fields[2],  &io[FOREGROUND].wchar) ||
     65         !ParseUint(fields[3],  &io[FOREGROUND].read_bytes) ||
     66         !ParseUint(fields[4],  &io[FOREGROUND].write_bytes) ||
     67         !ParseUint(fields[5],  &io[BACKGROUND].rchar) ||
     68         !ParseUint(fields[6],  &io[BACKGROUND].wchar) ||
     69         !ParseUint(fields[7],  &io[BACKGROUND].read_bytes) ||
     70         !ParseUint(fields[8],  &io[BACKGROUND].write_bytes) ||
     71         !ParseUint(fields[9],  &io[FOREGROUND].fsync) ||
     72         !ParseUint(fields[10], &io[BACKGROUND].fsync)) {
     73         LOG_TO(SYSTEM, WARNING) << "Invalid uid I/O stats: \""
     74                                 << s << "\"";
     75         return false;
     76     }
     77     return true;
     78 }
     79 
     80 /* return true on parse success and false on failure */
/* return true on parse success and false on failure */
bool task_info::parse_task_io_stats(std::string&& s)
{
    // Line shape (comma separated): "task,<comm...>,<pid>,<10 counters>".
    // The comm string can itself contain commas, so the numeric fields are
    // addressed from the END of the split vector, not from the front.
    std::vector<std::string> fields = Split(s, ",");
    size_t size = fields.size();
    // Minimum 13 fields: leading tag + at least one comm fragment + pid +
    // 10 I/O counters.
    if (size < 13 ||
        !ParseInt(fields[size - 11],  &pid) ||
        !ParseUint(fields[size - 10],  &io[FOREGROUND].rchar) ||
        !ParseUint(fields[size - 9],  &io[FOREGROUND].wchar) ||
        !ParseUint(fields[size - 8],  &io[FOREGROUND].read_bytes) ||
        !ParseUint(fields[size - 7],  &io[FOREGROUND].write_bytes) ||
        !ParseUint(fields[size - 6],  &io[BACKGROUND].rchar) ||
        !ParseUint(fields[size - 5],  &io[BACKGROUND].wchar) ||
        !ParseUint(fields[size - 4],  &io[BACKGROUND].read_bytes) ||
        !ParseUint(fields[size - 3], &io[BACKGROUND].write_bytes) ||
        !ParseUint(fields[size - 2], &io[FOREGROUND].fsync) ||
        !ParseUint(fields[size - 1], &io[BACKGROUND].fsync)) {
        LOG_TO(SYSTEM, WARNING) << "Invalid task I/O stats: \""
                                << s << "\"";
        return false;
    }
    // Re-join everything between the leading tag and the trailing 11
    // numeric fields (pid + 10 counters), restoring any commas that were
    // split out of the comm string.
    comm = Join(std::vector<std::string>(
                fields.begin() + 1, fields.end() - 11), ',');
    return true;
}
    105 
    106 bool io_usage::is_zero() const
    107 {
    108     for (int i = 0; i < IO_TYPES; i++) {
    109         for (int j = 0; j < UID_STATS; j++) {
    110             for (int k = 0; k < CHARGER_STATS; k++) {
    111                 if (bytes[i][j][k])
    112                     return false;
    113             }
    114         }
    115     }
    116     return true;
    117 }
    118 
    119 namespace {
    120 
    121 void get_uid_names(const vector<int>& uids, const vector<std::string*>& uid_names)
    122 {
    123     sp<IServiceManager> sm = defaultServiceManager();
    124     if (sm == NULL) {
    125         LOG_TO(SYSTEM, ERROR) << "defaultServiceManager failed";
    126         return;
    127     }
    128 
    129     sp<IBinder> binder = sm->getService(String16("package_native"));
    130     if (binder == NULL) {
    131         LOG_TO(SYSTEM, ERROR) << "getService package_native failed";
    132         return;
    133     }
    134 
    135     sp<IPackageManagerNative> package_mgr = interface_cast<IPackageManagerNative>(binder);
    136     std::vector<std::string> names;
    137     binder::Status status = package_mgr->getNamesForUids(uids, &names);
    138     if (!status.isOk()) {
    139         LOG_TO(SYSTEM, ERROR) << "package_native::getNamesForUids failed: "
    140                               << status.exceptionMessage();
    141         return;
    142     }
    143 
    144     for (uint32_t i = 0; i < uid_names.size(); i++) {
    145         if (!names[i].empty()) {
    146             *uid_names[i] = names[i];
    147         }
    148     }
    149 
    150     refresh_uid_names = false;
    151 }
    152 
    153 } // namespace
    154 
/*
 * Parse /proc/uid_io/stats into a per-uid map.  Caller must hold uidm_mutex.
 *
 * The file interleaves uid lines with "task..." lines; a task line belongs
 * to the uid line most recently parsed (tracked in `u`).  Uid names default
 * to the numeric uid and are replaced from the cached history, or resolved
 * from package manager when a new uid is seen.
 */
std::unordered_map<uint32_t, uid_info> uid_monitor::get_uid_io_stats_locked()
{
    std::unordered_map<uint32_t, uid_info> uid_io_stats;
    std::string buffer;
    if (!ReadFileToString(UID_IO_STATS_PATH, &buffer)) {
        PLOG_TO(SYSTEM, ERROR) << UID_IO_STATS_PATH << ": ReadFileToString failed";
        return uid_io_stats;  // empty map signals failure to callers
    }

    std::vector<std::string> io_stats = Split(std::move(buffer), "\n");
    uid_info u;
    vector<int> uids;
    vector<std::string*> uid_names;

    for (uint32_t i = 0; i < io_stats.size(); i++) {
        if (io_stats[i].empty()) {
            continue;
        }

        // compare() != 0 means the line does NOT start with "task", i.e. it
        // is a uid line.
        if (io_stats[i].compare(0, 4, "task")) {
            if (!u.parse_uid_io_stats(std::move(io_stats[i])))
                continue;
            uid_io_stats[u.uid] = u;
            // Placeholder name; may be overwritten below or by get_uid_names().
            uid_io_stats[u.uid].name = std::to_string(u.uid);
            uids.push_back(u.uid);
            // Pointer into the map value stays valid: std::unordered_map
            // never invalidates references on insertion/rehash.
            uid_names.push_back(&uid_io_stats[u.uid].name);
            if (last_uid_io_stats.find(u.uid) == last_uid_io_stats.end()) {
                // First time we see this uid: name must be (re)resolved.
                refresh_uid_names = true;
            } else {
                uid_io_stats[u.uid].name = last_uid_io_stats[u.uid].name;
            }
        } else {
            // Task line: attach to the uid parsed just above (`u`).
            task_info t;
            if (!t.parse_task_io_stats(std::move(io_stats[i])))
                continue;
            uid_io_stats[u.uid].tasks[t.pid] = t;
        }
    }

    if (!uids.empty() && refresh_uid_names) {
        get_uid_names(uids, uid_names);
    }

    return uid_io_stats;
}
    200 
    201 namespace {
    202 
    203 const int MAX_UID_RECORDS_SIZE = 1000 * 48; // 1000 uids in 48 hours
    204 
    205 inline size_t history_size(
    206     const std::map<uint64_t, struct uid_records>& history)
    207 {
    208     size_t count = 0;
    209     for (auto const& it : history) {
    210         count += it.second.entries.size();
    211     }
    212     return count;
    213 }
    214 
    215 } // namespace
    216 
/*
 * Fold curr_io_stats into a new history snapshot ending at curr_ts.
 * Caller must hold uidm_mutex.
 *
 * Prunes snapshots older than 5 days, then evicts the oldest snapshots if
 * the total entry count would exceed MAX_UID_RECORDS_SIZE.
 */
void uid_monitor::add_records_locked(uint64_t curr_ts)
{
    // remove records more than 5 days old
    if (curr_ts > 5 * DAY_TO_SEC) {
        auto it = io_history.lower_bound(curr_ts - 5 * DAY_TO_SEC);
        io_history.erase(io_history.begin(), it);
    }

    struct uid_records new_records;
    for (const auto& p : curr_io_stats) {
        struct uid_record record = {};
        record.name = p.first;
        // Skip uids (and tasks) whose counters are all zero.
        if (!p.second.uid_ios.is_zero()) {
            record.ios.user_id = p.second.user_id;
            record.ios.uid_ios = p.second.uid_ios;
            for (const auto& p_task : p.second.task_ios) {
                if (!p_task.second.is_zero())
                    record.ios.task_ios[p_task.first] = p_task.second;
            }
            new_records.entries.push_back(record);
        }
    }

    // Advance the window even if nothing gets recorded below, so the next
    // snapshot's start_ts is correct.
    curr_io_stats.clear();
    new_records.start_ts = start_ts;
    start_ts = curr_ts;

    if (new_records.entries.empty())
      return;

    // make some room for new records
    ssize_t overflow = history_size(io_history) +
        new_records.entries.size() - MAX_UID_RECORDS_SIZE;
    // Evict whole snapshots, oldest first, until the new entries fit.
    while (overflow > 0 && io_history.size() > 0) {
        auto del_it = io_history.begin();
        overflow -= del_it->second.entries.size();
        io_history.erase(io_history.begin());
    }

    io_history[curr_ts] = new_records;
}
    258 
    259 std::map<uint64_t, struct uid_records> uid_monitor::dump(
    260     double hours, uint64_t threshold, bool force_report)
    261 {
    262     if (force_report) {
    263         report(nullptr);
    264     }
    265 
    266     Mutex::Autolock _l(uidm_mutex);
    267 
    268     std::map<uint64_t, struct uid_records> dump_records;
    269     uint64_t first_ts = 0;
    270 
    271     if (hours != 0) {
    272         first_ts = time(NULL) - hours * HOUR_TO_SEC;
    273     }
    274 
    275     for (auto it = io_history.lower_bound(first_ts); it != io_history.end(); ++it) {
    276         const std::vector<struct uid_record>& recs = it->second.entries;
    277         struct uid_records filtered;
    278 
    279         for (const auto& rec : recs) {
    280             const io_usage& uid_usage = rec.ios.uid_ios;
    281             if (uid_usage.bytes[READ][FOREGROUND][CHARGER_ON] +
    282                 uid_usage.bytes[READ][FOREGROUND][CHARGER_OFF] +
    283                 uid_usage.bytes[READ][BACKGROUND][CHARGER_ON] +
    284                 uid_usage.bytes[READ][BACKGROUND][CHARGER_OFF] +
    285                 uid_usage.bytes[WRITE][FOREGROUND][CHARGER_ON] +
    286                 uid_usage.bytes[WRITE][FOREGROUND][CHARGER_OFF] +
    287                 uid_usage.bytes[WRITE][BACKGROUND][CHARGER_ON] +
    288                 uid_usage.bytes[WRITE][BACKGROUND][CHARGER_OFF] > threshold) {
    289                 filtered.entries.push_back(rec);
    290             }
    291         }
    292 
    293         if (filtered.entries.empty())
    294             continue;
    295 
    296         filtered.start_ts = it->second.start_ts;
    297         dump_records.insert(
    298             std::pair<uint64_t, struct uid_records>(it->first, filtered));
    299     }
    300 
    301     return dump_records;
    302 }
    303 
/*
 * Accumulate I/O deltas since the last snapshot into curr_io_stats, bucketed
 * by the current charger state.  Caller must hold uidm_mutex.
 */
void uid_monitor::update_curr_io_stats_locked()
{
    std::unordered_map<uint32_t, uid_info> uid_io_stats =
        get_uid_io_stats_locked();
    if (uid_io_stats.empty()) {
        return;  // read failure — keep previous baseline untouched
    }

    for (const auto& it : uid_io_stats) {
        const uid_info& uid = it.second;
        if (curr_io_stats.find(uid.name) == curr_io_stats.end()) {
            curr_io_stats[uid.name] = {};
        }

        struct uid_io_usage& usage = curr_io_stats[uid.name];
        usage.user_id = multiuser_get_user_id(uid.uid);

        // NOTE: last_uid_io_stats[...] uses operator[], which default-inserts
        // a zeroed entry for a uid not seen before — making the delta equal
        // to the absolute counter for new uids.
        int64_t fg_rd_delta = uid.io[FOREGROUND].read_bytes -
            last_uid_io_stats[uid.uid].io[FOREGROUND].read_bytes;
        int64_t bg_rd_delta = uid.io[BACKGROUND].read_bytes -
            last_uid_io_stats[uid.uid].io[BACKGROUND].read_bytes;
        int64_t fg_wr_delta = uid.io[FOREGROUND].write_bytes -
            last_uid_io_stats[uid.uid].io[FOREGROUND].write_bytes;
        int64_t bg_wr_delta = uid.io[BACKGROUND].write_bytes -
            last_uid_io_stats[uid.uid].io[BACKGROUND].write_bytes;

        // Clamp negative deltas to zero — presumably the kernel counters can
        // move backwards (e.g. after a reset); TODO confirm exact cause.
        usage.uid_ios.bytes[READ][FOREGROUND][charger_stat] +=
            (fg_rd_delta < 0) ? 0 : fg_rd_delta;
        usage.uid_ios.bytes[READ][BACKGROUND][charger_stat] +=
            (bg_rd_delta < 0) ? 0 : bg_rd_delta;
        usage.uid_ios.bytes[WRITE][FOREGROUND][charger_stat] +=
            (fg_wr_delta < 0) ? 0 : fg_wr_delta;
        usage.uid_ios.bytes[WRITE][BACKGROUND][charger_stat] +=
            (bg_wr_delta < 0) ? 0 : bg_wr_delta;

        // Same delta computation per task, keyed by command name.
        for (const auto& task_it : uid.tasks) {
            const task_info& task = task_it.second;
            const pid_t pid = task_it.first;
            const std::string& comm = task_it.second.comm;
            int64_t task_fg_rd_delta = task.io[FOREGROUND].read_bytes -
                last_uid_io_stats[uid.uid].tasks[pid].io[FOREGROUND].read_bytes;
            int64_t task_bg_rd_delta = task.io[BACKGROUND].read_bytes -
                last_uid_io_stats[uid.uid].tasks[pid].io[BACKGROUND].read_bytes;
            int64_t task_fg_wr_delta = task.io[FOREGROUND].write_bytes -
                last_uid_io_stats[uid.uid].tasks[pid].io[FOREGROUND].write_bytes;
            int64_t task_bg_wr_delta = task.io[BACKGROUND].write_bytes -
                last_uid_io_stats[uid.uid].tasks[pid].io[BACKGROUND].write_bytes;

            io_usage& task_usage = usage.task_ios[comm];
            task_usage.bytes[READ][FOREGROUND][charger_stat] +=
                (task_fg_rd_delta < 0) ? 0 : task_fg_rd_delta;
            task_usage.bytes[READ][BACKGROUND][charger_stat] +=
                (task_bg_rd_delta < 0) ? 0 : task_bg_rd_delta;
            task_usage.bytes[WRITE][FOREGROUND][charger_stat] +=
                (task_fg_wr_delta < 0) ? 0 : task_fg_wr_delta;
            task_usage.bytes[WRITE][BACKGROUND][charger_stat] +=
                (task_bg_wr_delta < 0) ? 0 : task_bg_wr_delta;
        }
    }

    // New snapshot becomes the baseline for the next delta computation.
    last_uid_io_stats = uid_io_stats;
}
    366 
    367 void uid_monitor::report(unordered_map<int, StoragedProto>* protos)
    368 {
    369     if (!enabled()) return;
    370 
    371     Mutex::Autolock _l(uidm_mutex);
    372 
    373     update_curr_io_stats_locked();
    374     add_records_locked(time(NULL));
    375 
    376     if (protos) {
    377         update_uid_io_proto(protos);
    378     }
    379 }
    380 
    381 namespace {
    382 
    383 void set_io_usage_proto(IOUsage* usage_proto, const io_usage& usage)
    384 {
    385     usage_proto->set_rd_fg_chg_on(usage.bytes[READ][FOREGROUND][CHARGER_ON]);
    386     usage_proto->set_rd_fg_chg_off(usage.bytes[READ][FOREGROUND][CHARGER_OFF]);
    387     usage_proto->set_rd_bg_chg_on(usage.bytes[READ][BACKGROUND][CHARGER_ON]);
    388     usage_proto->set_rd_bg_chg_off(usage.bytes[READ][BACKGROUND][CHARGER_OFF]);
    389     usage_proto->set_wr_fg_chg_on(usage.bytes[WRITE][FOREGROUND][CHARGER_ON]);
    390     usage_proto->set_wr_fg_chg_off(usage.bytes[WRITE][FOREGROUND][CHARGER_OFF]);
    391     usage_proto->set_wr_bg_chg_on(usage.bytes[WRITE][BACKGROUND][CHARGER_ON]);
    392     usage_proto->set_wr_bg_chg_off(usage.bytes[WRITE][BACKGROUND][CHARGER_OFF]);
    393 }
    394 
    395 void get_io_usage_proto(io_usage* usage, const IOUsage& io_proto)
    396 {
    397     usage->bytes[READ][FOREGROUND][CHARGER_ON] = io_proto.rd_fg_chg_on();
    398     usage->bytes[READ][FOREGROUND][CHARGER_OFF] = io_proto.rd_fg_chg_off();
    399     usage->bytes[READ][BACKGROUND][CHARGER_ON] = io_proto.rd_bg_chg_on();
    400     usage->bytes[READ][BACKGROUND][CHARGER_OFF] = io_proto.rd_bg_chg_off();
    401     usage->bytes[WRITE][FOREGROUND][CHARGER_ON] = io_proto.wr_fg_chg_on();
    402     usage->bytes[WRITE][FOREGROUND][CHARGER_OFF] = io_proto.wr_fg_chg_off();
    403     usage->bytes[WRITE][BACKGROUND][CHARGER_ON] = io_proto.wr_bg_chg_on();
    404     usage->bytes[WRITE][BACKGROUND][CHARGER_OFF] = io_proto.wr_bg_chg_off();
    405 }
    406 
    407 } // namespace
    408 
/*
 * Serialize io_history into per-user StoragedProto objects.
 *
 * Records are grouped by Android user id: for each history snapshot, one
 * UidIOItem is created per user that appears in that snapshot (cached in
 * user_items so all of a user's records land in the same item).
 */
void uid_monitor::update_uid_io_proto(unordered_map<int, StoragedProto>* protos)
{
    for (const auto& item : io_history) {
        const uint64_t& end_ts = item.first;
        const struct uid_records& recs = item.second;
        // Per-snapshot cache: user id -> the item proto created for it.
        unordered_map<userid_t, UidIOItem*> user_items;

        for (const auto& entry : recs.entries) {
            userid_t user_id = entry.ios.user_id;
            UidIOItem* item_proto = user_items[user_id];
            if (item_proto == nullptr) {
                // First record for this user in this snapshot: add an item
                // to that user's proto.  (*protos)[user_id] default-creates
                // the StoragedProto if absent.
                item_proto = (*protos)[user_id].mutable_uid_io_usage()
                             ->add_uid_io_items();
                user_items[user_id] = item_proto;
            }
            item_proto->set_end_ts(end_ts);

            UidIORecords* recs_proto = item_proto->mutable_records();
            recs_proto->set_start_ts(recs.start_ts);

            UidRecord* rec_proto = recs_proto->add_entries();
            rec_proto->set_uid_name(entry.name);
            rec_proto->set_user_id(user_id);

            IOUsage* uid_io_proto = rec_proto->mutable_uid_io();
            const io_usage& uio_ios = entry.ios.uid_ios;
            set_io_usage_proto(uid_io_proto, uio_ios);

            // Per-task breakdown, keyed by command name.
            for (const auto& task_io : entry.ios.task_ios) {
                const std::string& task_name = task_io.first;
                const io_usage& task_ios = task_io.second;

                TaskIOUsage* task_io_proto = rec_proto->add_task_io();
                task_io_proto->set_task_name(task_name);
                set_io_usage_proto(task_io_proto->mutable_ios(), task_ios);
            }
        }
    }
}
    448 
    449 void uid_monitor::clear_user_history(userid_t user_id)
    450 {
    451     Mutex::Autolock _l(uidm_mutex);
    452 
    453     for (auto& item : io_history) {
    454         vector<uid_record>* entries = &item.second.entries;
    455         entries->erase(
    456             remove_if(entries->begin(), entries->end(),
    457                 [user_id](const uid_record& rec) {
    458                     return rec.ios.user_id == user_id;}),
    459             entries->end());
    460     }
    461 
    462     for (auto it = io_history.begin(); it != io_history.end(); ) {
    463         if (it->second.entries.empty()) {
    464             it = io_history.erase(it);
    465         } else {
    466             it++;
    467         }
    468     }
    469 }
    470 
    471 void uid_monitor::load_uid_io_proto(const UidIOUsage& uid_io_proto)
    472 {
    473     if (!enabled()) return;
    474 
    475     Mutex::Autolock _l(uidm_mutex);
    476 
    477     for (const auto& item_proto : uid_io_proto.uid_io_items()) {
    478         const UidIORecords& records_proto = item_proto.records();
    479         struct uid_records* recs = &io_history[item_proto.end_ts()];
    480 
    481         recs->start_ts = records_proto.start_ts();
    482         for (const auto& rec_proto : records_proto.entries()) {
    483             struct uid_record record;
    484             record.name = rec_proto.uid_name();
    485             record.ios.user_id = rec_proto.user_id();
    486             get_io_usage_proto(&record.ios.uid_ios, rec_proto.uid_io());
    487 
    488             for (const auto& task_io_proto : rec_proto.task_io()) {
    489                 get_io_usage_proto(
    490                     &record.ios.task_ios[task_io_proto.task_name()],
    491                     task_io_proto.ios());
    492             }
    493             recs->entries.push_back(record);
    494         }
    495     }
    496 }
    497 
    498 void uid_monitor::set_charger_state(charger_stat_t stat)
    499 {
    500     Mutex::Autolock _l(uidm_mutex);
    501 
    502     if (charger_stat == stat) {
    503         return;
    504     }
    505 
    506     update_curr_io_stats_locked();
    507     charger_stat = stat;
    508 }
    509 
    510 void uid_monitor::init(charger_stat_t stat)
    511 {
    512     charger_stat = stat;
    513 
    514     start_ts = time(NULL);
    515     last_uid_io_stats = get_uid_io_stats();
    516 }
    517 
    518 uid_monitor::uid_monitor()
    519     : enable(!access(UID_IO_STATS_PATH, R_OK)) {
    520 }
    521