/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "BpfUtils"

#include "bpf/BpfUtils.h"

#include <elf.h>
#include <inttypes.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/pfkeyv2.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/utsname.h>
#include <sstream>
#include <string>

#include <android-base/properties.h>
#include <android-base/unique_fd.h>
#include <log/log.h>
#include <netdutils/MemBlock.h>
#include <netdutils/Slice.h>
#include <processgroup/processgroup.h>

using android::base::GetUintProperty;
using android::base::unique_fd;
using android::netdutils::MemBlock;
using android::netdutils::Slice;

// The size of the buffer that records program loading logs; it needs to be large enough for the
// largest kernel program.

namespace android {
namespace bpf {

/* bpf_attr is a union which may be much larger than the struct of fields we actually use, and an
 * inline initializer only resets the fields we set, leaving the rest of the memory as is. The bpf
 * kernel code performs a much stricter check and requires every unused field to be 0, so these
 * syscalls will normally fail with E2BIG if we don't memset the whole bpf_attr first.
 */
bool operator==(const StatsKey& lhs, const StatsKey& rhs) {
    return ((lhs.uid == rhs.uid) && (lhs.tag == rhs.tag) && (lhs.counterSet == rhs.counterSet) &&
            (lhs.ifaceIndex == rhs.ifaceIndex));
}

bool operator==(const UidTag& lhs, const UidTag& rhs) {
    return ((lhs.uid == rhs.uid) && (lhs.tag == rhs.tag));
}

bool operator==(const StatsValue& lhs, const StatsValue& rhs) {
    return ((lhs.rxBytes == rhs.rxBytes) && (lhs.txBytes == rhs.txBytes) &&
            (lhs.rxPackets == rhs.rxPackets) && (lhs.txPackets == rhs.txPackets));
}

int bpf(int cmd, Slice bpfAttr) {
    return syscall(__NR_bpf, cmd, bpfAttr.base(), bpfAttr.size());
}

int createMap(bpf_map_type map_type, uint32_t key_size, uint32_t value_size, uint32_t max_entries,
              uint32_t map_flags) {
    bpf_attr attr;
    memset(&attr, 0, sizeof(attr));
    attr.map_type = map_type;
    attr.key_size = key_size;
    attr.value_size = value_size;
    attr.max_entries = max_entries;
    attr.map_flags = map_flags;

    return bpf(BPF_MAP_CREATE, Slice(&attr, sizeof(attr)));
}

int writeToMapEntry(const base::unique_fd& map_fd, void* key, void* value, uint64_t flags) {
    bpf_attr attr;
    memset(&attr, 0, sizeof(attr));
    attr.map_fd = map_fd.get();
    attr.key = ptr_to_u64(key);
    attr.value = ptr_to_u64(value);
    attr.flags = flags;

    return bpf(BPF_MAP_UPDATE_ELEM, Slice(&attr, sizeof(attr)));
}

int findMapEntry(const base::unique_fd& map_fd, void* key, void* value) {
    bpf_attr attr;
    memset(&attr, 0, sizeof(attr));
    attr.map_fd = map_fd.get();
    attr.key = ptr_to_u64(key);
    attr.value = ptr_to_u64(value);

    return bpf(BPF_MAP_LOOKUP_ELEM, Slice(&attr, sizeof(attr)));
}

int deleteMapEntry(const base::unique_fd& map_fd, void* key) {
    bpf_attr attr;
    memset(&attr, 0, sizeof(attr));
    attr.map_fd = map_fd.get();
    attr.key = ptr_to_u64(key);

    return bpf(BPF_MAP_DELETE_ELEM, Slice(&attr, sizeof(attr)));
}

int getNextMapKey(const base::unique_fd& map_fd, void* key, void* next_key) {
    bpf_attr attr;
    memset(&attr, 0, sizeof(attr));
    attr.map_fd = map_fd.get();
    attr.key = ptr_to_u64(key);
    attr.next_key = ptr_to_u64(next_key);

    return bpf(BPF_MAP_GET_NEXT_KEY, Slice(&attr, sizeof(attr)));
}

int getFirstMapKey(const base::unique_fd& map_fd, void* firstKey) {
    bpf_attr attr;
    memset(&attr, 0, sizeof(attr));
    attr.map_fd = map_fd.get();
    // A NULL key asks the kernel to return the first key in the map.
    attr.key = 0;
    attr.next_key = ptr_to_u64(firstKey);

    return bpf(BPF_MAP_GET_NEXT_KEY, Slice(&attr, sizeof(attr)));
}
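
// Illustrative sketch of how the map wrappers above compose. The key/value types, sizes and the
// iteration loop are assumptions for this example only, and error handling is omitted:
//
//   unique_fd mapFd(createMap(BPF_MAP_TYPE_HASH, sizeof(uint32_t), sizeof(uint64_t),
//                             /*max_entries=*/100, /*map_flags=*/0));
//   uint32_t key = 123;
//   uint64_t value = 1;
//   writeToMapEntry(mapFd, &key, &value, BPF_ANY);
//
//   // Iterate: fetch the first key, then walk with getNextMapKey() until it fails (errno is
//   // ENOENT once the last key has been visited).
//   uint32_t curKey;
//   if (getFirstMapKey(mapFd, &curKey) == 0) {
//       while (true) {
//           uint64_t curValue;
//           findMapEntry(mapFd, &curKey, &curValue);
//           uint32_t nextKey;
//           if (getNextMapKey(mapFd, &curKey, &nextKey)) break;
//           curKey = nextKey;
//       }
//   }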

int bpfProgLoad(bpf_prog_type prog_type, Slice bpf_insns, const char* license,
                uint32_t kern_version, Slice bpf_log) {
    bpf_attr attr;
    memset(&attr, 0, sizeof(attr));
    attr.prog_type = prog_type;
    attr.insns = ptr_to_u64(bpf_insns.base());
    attr.insn_cnt = bpf_insns.size() / sizeof(struct bpf_insn);
    attr.license = ptr_to_u64((void*)license);
    attr.log_buf = ptr_to_u64(bpf_log.base());
    attr.log_size = bpf_log.size();
    attr.log_level = DEFAULT_LOG_LEVEL;
    attr.kern_version = kern_version;
    int ret = bpf(BPF_PROG_LOAD, Slice(&attr, sizeof(attr)));

    if (ret < 0) {
        std::string prog_log = netdutils::toString(bpf_log);
        std::istringstream iss(prog_log);
        for (std::string line; std::getline(iss, line);) {
            ALOGE("%s", line.c_str());
        }
    }
    return ret;
}

int bpfFdPin(const base::unique_fd& map_fd, const char* pathname) {
    bpf_attr attr;
    memset(&attr, 0, sizeof(attr));
    attr.pathname = ptr_to_u64((void*)pathname);
    attr.bpf_fd = map_fd.get();

    return bpf(BPF_OBJ_PIN, Slice(&attr, sizeof(attr)));
}

int bpfFdGet(const char* pathname, uint32_t flag) {
    bpf_attr attr;
    memset(&attr, 0, sizeof(attr));
    attr.pathname = ptr_to_u64((void*)pathname);
    attr.file_flags = flag;
    return bpf(BPF_OBJ_GET, Slice(&attr, sizeof(attr)));
}

int mapRetrieve(const char* pathname, uint32_t flag) {
    return bpfFdGet(pathname, flag);
}

int attachProgram(bpf_attach_type type, uint32_t prog_fd, uint32_t cg_fd) {
    bpf_attr attr;
    memset(&attr, 0, sizeof(attr));
    attr.target_fd = cg_fd;
    attr.attach_bpf_fd = prog_fd;
    attr.attach_type = type;

    return bpf(BPF_PROG_ATTACH, Slice(&attr, sizeof(attr)));
}

int detachProgram(bpf_attach_type type, uint32_t cg_fd) {
    bpf_attr attr;
    memset(&attr, 0, sizeof(attr));
    attr.target_fd = cg_fd;
    attr.attach_type = type;

    return bpf(BPF_PROG_DETACH, Slice(&attr, sizeof(attr)));
}
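
// Illustrative sketch of the usual program lifecycle built from the wrappers above: load, pin,
// then attach to a cgroup. The instruction array, license string, pin path and cgroup path are
// placeholders for this example, and error handling is omitted:
//
//   char logBuf[65536] = {};
//   bpf_insn insns[] = { /* BPF instructions */ };
//   unique_fd progFd(bpfProgLoad(BPF_PROG_TYPE_CGROUP_SKB, Slice(insns, sizeof(insns)),
//                                "Apache 2.0", /*kern_version=*/0, Slice(logBuf, sizeof(logBuf))));
//
//   // Pin the program so other processes can retrieve it by path, then attach it to the fd of a
//   // cgroup-v2 directory.
//   bpfFdPin(progFd, "/sys/fs/bpf/example_prog");
//   int cgFd = open("/path/to/cgroup2/dir", O_DIRECTORY | O_RDONLY | O_CLOEXEC);
//   attachProgram(BPF_CGROUP_INET_INGRESS, progFd.get(), cgFd);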

uint64_t getSocketCookie(int sockFd) {
    uint64_t sock_cookie;
    socklen_t cookie_len = sizeof(sock_cookie);
    int res = getsockopt(sockFd, SOL_SOCKET, SO_COOKIE, &sock_cookie, &cookie_len);
    if (res < 0) {
        res = -errno;
        ALOGE("Failed to get socket cookie: %s\n", strerror(errno));
        errno = -res;
        // 0 is an invalid cookie. See sock_gen_cookie.
        return NONEXISTENT_COOKIE;
    }
    return sock_cookie;
}
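
// Illustrative sketch of how the cookie returned by getSocketCookie() above is typically used: it
// serves as the key of a per-socket map. The map path and value type here are hypothetical
// placeholders, and error handling is omitted:
//
//   uint64_t cookie = getSocketCookie(sockFd);
//   if (cookie != NONEXISTENT_COOKIE) {
//       unique_fd mapFd(mapRetrieve("/sys/fs/bpf/example_cookie_map", 0));
//       UidTag entry;
//       findMapEntry(mapFd, &cookie, &entry);
//   }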

int synchronizeKernelRCU() {
    // This is a temporary hack for the network stats map swap on devices running 4.9 kernels. The
    // kernel code that releases a pf_key socket explicitly calls synchronize_rcu(), which is
    // exactly what we need.
    int pfSocket = socket(AF_KEY, SOCK_RAW | SOCK_CLOEXEC, PF_KEY_V2);

    if (pfSocket < 0) {
        int ret = -errno;
        ALOGE("create PF_KEY socket failed: %s", strerror(errno));
        return ret;
    }

    // When the socket is closed, synchronize_rcu() gets called in sock_release().
    if (close(pfSocket)) {
        int ret = -errno;
        ALOGE("failed to close the PF_KEY socket: %s", strerror(errno));
        return ret;
    }
    return 0;
}

int setrlimitForTest() {
    // Raise the MEMLOCK rlimit for the test process in case the default rlimit is not enough.
    struct rlimit limit = {
            .rlim_cur = TEST_LIMIT,
            .rlim_max = TEST_LIMIT,
    };
    int res = setrlimit(RLIMIT_MEMLOCK, &limit);
    if (res) {
        ALOGE("Failed to set the default MEMLOCK rlimit: %s", strerror(errno));
    }
    return res;
}

std::string BpfLevelToString(BpfLevel bpfLevel) {
    switch (bpfLevel) {
        case BpfLevel::NONE:
            return "NONE_SUPPORT";
        case BpfLevel::BASIC:
            return "BPF_LEVEL_BASIC";
        case BpfLevel::EXTENDED:
            return "BPF_LEVEL_EXTENDED";
            // No default statement. We want to see errors of the form:
            // "enumeration value 'BPF_LEVEL_xxx' not handled in switch [-Werror,-Wswitch]".
    }
}

BpfLevel getBpfSupportLevel() {
    struct utsname buf;
    int kernel_version_major;
    int kernel_version_minor;

    uint64_t api_level = GetUintProperty<uint64_t>("ro.product.first_api_level", 0);
    if (api_level == 0) {
        ALOGE("Cannot determine initial API level of the device");
        api_level = GetUintProperty<uint64_t>("ro.build.version.sdk", 0);
    }

    // Check if the device originally shipped with Android P or later.
    if (api_level < MINIMUM_API_REQUIRED) return BpfLevel::NONE;

    int ret = uname(&buf);
    if (ret) {
        return BpfLevel::NONE;
    }
    char dummy;
    ret = sscanf(buf.release, "%d.%d%c", &kernel_version_major, &kernel_version_minor, &dummy);
    // Check the device kernel version: 4.14+ gets extended support, 4.9+ gets basic support.
    if (ret < 2) return BpfLevel::NONE;
    if (kernel_version_major > 4 || (kernel_version_major == 4 && kernel_version_minor >= 14))
        return BpfLevel::EXTENDED;
    if (kernel_version_major == 4 && kernel_version_minor >= 9) return BpfLevel::BASIC;

    return BpfLevel::NONE;
}

}  // namespace bpf
}  // namespace android