/packages/modules/Connectivity/bpf_progs/ |
D | netd.c |
    184  static __always_inline inline void update_##the_stats_map(const struct __sk_buff* const skb, \    in DEFINE_BPF_MAP_RO_NETD()
    197  uint64_t bytes = skb->len; \    in DEFINE_BPF_MAP_RO_NETD()
    199  bool is_ipv6 = (skb->protocol == htons(ETH_P_IPV6)); \    in DEFINE_BPF_MAP_RO_NETD()
    223  static __always_inline inline int bpf_skb_load_bytes_net(const struct __sk_buff* const skb,
    245  ? bpf_skb_load_bytes_relative(skb, L3_off, to, len, BPF_HDR_START_NET)
    246  : bpf_skb_load_bytes(skb, L3_off, to, len);
    250  const struct __sk_buff* const skb, const struct egress_bool egress, const uint32_t uid,    in do_packet_tracing() argument
    270  if (skb->protocol == htons(ETH_P_IP)) {    in do_packet_tracing()
    271  (void)bpf_skb_load_bytes_net(skb, IP4_OFFSET(protocol), &proto, sizeof(proto), kver);    in do_packet_tracing()
    272  (void)bpf_skb_load_bytes_net(skb, IPPROTO_IHL_OFF, &L4_off, sizeof(L4_off), kver);    in do_packet_tracing()
    [all …]
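The netd.c matches center on bpf_skb_load_bytes_net(), which picks between the relative and absolute byte-load helpers depending on kernel support. Below is a minimal, self-contained sketch of that dispatch, not the module's code: helper signatures follow the standard <linux/bpf.h> UAPI, the boolean parameter stands in for the module's compile-time kernel-version gate, and load_ip4_protocol is a hypothetical wrapper.

#include <linux/bpf.h>
#include <linux/ip.h>
#include <stdbool.h>
#include <stddef.h>

#ifndef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif

static int (*bpf_skb_load_bytes)(const struct __sk_buff* skb, int off, void* to, int len) =
        (void*)BPF_FUNC_skb_load_bytes;
static int (*bpf_skb_load_bytes_relative)(const struct __sk_buff* skb, int off, void* to, int len,
                                          __u32 start_hdr) = (void*)BPF_FUNC_skb_load_bytes_relative;

// On kernels with BPF_FUNC_skb_load_bytes_relative, read at an offset from the
// network header, which works on both raw-IP and ethernet devices; otherwise
// fall back to an absolute skb offset.
static __always_inline int load_l3_bytes(const struct __sk_buff* skb, int l3_off, void* to,
                                         int len, bool have_relative_load) {
    return have_relative_load
                   ? bpf_skb_load_bytes_relative(skb, l3_off, to, len, BPF_HDR_START_NET)
                   : bpf_skb_load_bytes(skb, l3_off, to, len);
}

// Hypothetical usage: fetch the IPv4 protocol field, as do_packet_tracing() does.
static __always_inline __u8 load_ip4_protocol(const struct __sk_buff* skb, bool relative) {
    __u8 proto = 0;
    (void)load_l3_bytes(skb, offsetof(struct iphdr, protocol), &proto, sizeof(proto), relative);
    return proto;
}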
|
D | offload.c |
    127  static inline __always_inline int do_forward6(struct __sk_buff* skb,
    134  if (skb->protocol != htons(ETH_P_IPV6)) return TC_ACT_PIPE;
    137  if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_PIPE;
    145  try_make_writable(skb, l2_header_size + IP6_HLEN + TCP_HLEN);
    147  void* data = (void*)(long)skb->data;
    148  const void* data_end = (void*)(long)skb->data_end;
    195  .iif = skb->ifindex,
    200  .iif = skb->ifindex,
    212  uint32_t stat_and_limit_k = stream.down ? skb->ifindex : v->oif;
    235  uint64_t L3_bytes = skb->len - l2_header_size;
    [all …]
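offload.c's do_forward6() opens with a run of cheap guards before touching the packet. The sketch below is a reconstruction under standard UAPI headers, not the module's code: try_make_writable is approximated with a bare bpf_skb_pull_data call, and htons is a little-endian shorthand for the macro the module's headers provide. It shows why data/data_end are re-read only after the pull.

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
#include <stdbool.h>

#ifndef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif
#define htons(x) __builtin_bswap16(x)  // LE-only shorthand for the module's htons

static int (*bpf_skb_pull_data)(struct __sk_buff* skb, __u32 len) = (void*)BPF_FUNC_skb_pull_data;

static __always_inline int forward6_prologue(struct __sk_buff* skb, bool is_ethernet) {
    const int l2_header_size = is_ethernet ? sizeof(struct ethhdr) : 0;

    // Only IPv6 unicast destined for this host is eligible for offload.
    if (skb->protocol != htons(ETH_P_IPV6)) return TC_ACT_PIPE;
    if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_PIPE;

    // Linearize enough of the packet for the headers we will read and write;
    // failure is caught by the bounds check below, so the result is ignored.
    (void)bpf_skb_pull_data(skb, l2_header_size + sizeof(struct ipv6hdr) + sizeof(struct tcphdr));

    // The pull may relocate packet memory, so data/data_end must be re-derived.
    void* data = (void*)(long)skb->data;
    const void* data_end = (void*)(long)skb->data_end;
    if (data + l2_header_size + sizeof(struct ipv6hdr) > data_end) return TC_ACT_PIPE;

    return TC_ACT_OK;  // headers are present; the real program keeps going
}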
|
D | offload@mainline.c |
    127  static inline __always_inline int do_forward6(struct __sk_buff* skb,
    134  if (skb->protocol != htons(ETH_P_IPV6)) return TC_ACT_PIPE;
    137  if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_PIPE;
    145  try_make_writable(skb, l2_header_size + IP6_HLEN + TCP_HLEN);
    147  void* data = (void*)(long)skb->data;
    148  const void* data_end = (void*)(long)skb->data_end;
    195  .iif = skb->ifindex,
    200  .iif = skb->ifindex,
    212  uint32_t stat_and_limit_k = stream.down ? skb->ifindex : v->oif;
    235  uint64_t L3_bytes = skb->len - l2_header_size;
    [all …]
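offload@mainline.c is the mainline-kernel build of the same source, so its matches mirror offload.c exactly. One step worth unpacking is the accounting visible at lines 212/235: the stats/limit key is the interface the traffic is charged against, and only L3 bytes are counted. A hedged sketch follows; TetherStatsValue's field names are assumed from context, and the map lookups the real code performs are elided.

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <stdbool.h>

#ifndef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif

// Assumed shape of the per-interface counters; the real struct lives in the
// module's tethering headers.
struct TetherStatsValue {
    __u64 rxPackets, rxBytes, txPackets, txBytes;
};

static __always_inline void account_l3_bytes(const struct __sk_buff* skb, bool stream_down,
                                             __u32 oif, bool is_ethernet,
                                             struct TetherStatsValue* stats) {
    const int l2_header_size = is_ethernet ? sizeof(struct ethhdr) : 0;

    // Downstream traffic is charged to the ingress interface, upstream to the
    // tethered egress interface; in the real code this keys the map lookups.
    __u32 stat_and_limit_k = stream_down ? skb->ifindex : oif;
    (void)stat_and_limit_k;

    // Charge L3 bytes only: ethernet framing does not count against the quota.
    __u64 L3_bytes = skb->len - l2_header_size;

    if (stream_down) {
        __sync_fetch_and_add(&stats->rxPackets, 1);
        __sync_fetch_and_add(&stats->rxBytes, L3_bytes);
    } else {
        __sync_fetch_and_add(&stats->txPackets, 1);
        __sync_fetch_and_add(&stats->txBytes, L3_bytes);
    }
}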
|
D | clatd.c |
    57   static inline __always_inline int nat64(struct __sk_buff* skb,    in nat64() argument
    63   if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_PIPE;    in nat64()
    66   if (skb->protocol != htons(ETH_P_IPV6)) return TC_ACT_PIPE;    in nat64()
    73   try_make_writable(skb, l2_header_size + sizeof(struct ipv6hdr));    in nat64()
    75   void* data = (void*)(long)skb->data;    in nat64()
    76   const void* data_end = (void*)(long)skb->data_end;    in nat64()
    99   .iif = skb->ifindex,    in nat64()
    154  skb->mark = CLAT_MARK;    in nat64()
    200  if (bpf_skb_change_proto(skb, htons(ETH_P_IP), 0)) {    in nat64()
    204  skb->mark = CLAT_MARK;    in nat64()
    [all …]
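clatd.c's nat64() shares the guard sequence above and then performs the 464XLAT translation itself; the pivotal helper is bpf_skb_change_proto(), which resizes the header room from IPv6 to IPv4 before the replacement header is written. A minimal sketch, with CLAT_MARK's value assumed and the failure-path comment paraphrasing the intent visible at lines 200/204:

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>

#ifndef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif
#define htons(x) __builtin_bswap16(x)  // LE-only shorthand for the module's htons
#define CLAT_MARK 0xDEADC1A7u          // assumption: stand-in for the module's constant

static int (*bpf_skb_change_proto)(struct __sk_buff* skb, __be16 proto, __u64 flags) =
        (void*)BPF_FUNC_skb_change_proto;

static __always_inline int shrink_v6_to_v4(struct __sk_buff* skb) {
    // Convert the 40-byte IPv6 header slot into a 20-byte IPv4 one; the helper
    // adjusts the skb geometry, and the program must then write the new header.
    if (bpf_skb_change_proto(skb, htons(ETH_P_IP), 0)) {
        // Resize failed: mark the packet and let it continue up the stack so
        // the userspace clat daemon can translate it instead.
        skb->mark = CLAT_MARK;
        return TC_ACT_PIPE;
    }
    skb->mark = CLAT_MARK;  // tag translated traffic for later bpf/netd hooks
    return TC_ACT_PIPE;     // real code continues with the IPv4 header store
}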
|
D | bpf_net_helpers.h |
    37   static uint64_t (*bpf_get_socket_cookie)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_cookie;
    40   static uint32_t (*bpf_get_socket_uid)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_uid;
    42   static int (*bpf_skb_pull_data)(struct __sk_buff* skb, __u32 len) = (void*)BPF_FUNC_skb_pull_data;
    44   static int (*bpf_skb_load_bytes)(const struct __sk_buff* skb, int off, void* to,
    47   static int (*bpf_skb_load_bytes_relative)(const struct __sk_buff* skb, int off, void* to, int len,
    50   static int (*bpf_skb_store_bytes)(struct __sk_buff* skb, __u32 offset, const void* from, __u32 len,
    56   static int64_t (*bpf_csum_update)(struct __sk_buff* skb, __wsum csum) = (void*)BPF_FUNC_csum_update;
    58   static int (*bpf_skb_change_proto)(struct __sk_buff* skb, __be16 proto,
    60   static int (*bpf_l3_csum_replace)(struct __sk_buff* skb, __u32 offset, __u64 from, __u64 to,
    62   static int (*bpf_l4_csum_replace)(struct __sk_buff* skb, __u32 offset, __u64 from, __u64 to,
    [all …]
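Every declaration in bpf_net_helpers.h follows the same idiom: the kernel helper's integer ID from <linux/bpf.h> is cast to a function pointer with the helper's prototype, and the verifier resolves the call by ID at program load. A self-contained example of the pattern, with a hypothetical use:

#include <linux/bpf.h>

#ifndef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif

// Function-pointer-by-ID idiom: no body exists; the compiler emits a BPF
// 'call <helper id>' instruction for each invocation.
static __u64 (*bpf_get_socket_cookie)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_cookie;
static __u32 (*bpf_get_socket_uid)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_uid;

// Hypothetical use: fold uid and socket cookie into one 64-bit key.
static __always_inline __u64 socket_key(struct __sk_buff* skb) {
    return bpf_get_socket_cookie(skb) ^ ((__u64)bpf_get_socket_uid(skb) << 32);
}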
|
D | dscpPolicy.c |
    45   static inline __always_inline void match_policy(struct __sk_buff* skb, bool ipv4) {    in DEFINE_BPF_MAP_GRW()
    46   void* data = (void*)(long)skb->data;    in DEFINE_BPF_MAP_GRW()
    47   const void* data_end = (void*)(long)skb->data_end;    in DEFINE_BPF_MAP_GRW()
    57   uint64_t cookie = bpf_get_socket_cookie(skb);    in DEFINE_BPF_MAP_GRW()
    127  skb->ifindex == existing_rule->ifindex &&    in DEFINE_BPF_MAP_GRW()
    134  bpf_l3_csum_replace(skb, IP4_OFFSET(check, l2_header_size), htons(tos), htons(newTos),    in DEFINE_BPF_MAP_GRW()
    136  bpf_skb_store_bytes(skb, IP4_OFFSET(tos, l2_header_size), &newTos, sizeof(newTos), 0);    in DEFINE_BPF_MAP_GRW()
    140  bpf_skb_store_bytes(skb, l2_header_size, &new_first_be32, sizeof(__be32),    in DEFINE_BPF_MAP_GRW()
    166  if (policy->ifindex != skb->ifindex) continue;    in DEFINE_BPF_MAP_GRW()
    199  .ifindex = skb->ifindex,    in DEFINE_BPF_MAP_GRW()
    [all …]
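The dscpPolicy.c matches at lines 134/136 are the IPv4 rewrite itself: patch the TOS byte in place and fix the header checksum incrementally instead of recomputing it. A sketch under standard UAPI headers; IP4_OFFSET here is a stand-in for the module's macro of the same name:

#include <linux/bpf.h>
#include <linux/ip.h>
#include <stddef.h>

#ifndef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif
#define htons(x) __builtin_bswap16(x)  // LE-only shorthand for the module's htons

static int (*bpf_skb_store_bytes)(struct __sk_buff* skb, __u32 offset, const void* from,
                                  __u32 len, __u64 flags) = (void*)BPF_FUNC_skb_store_bytes;
static int (*bpf_l3_csum_replace)(struct __sk_buff* skb, __u32 offset, __u64 from, __u64 to,
                                  __u64 flags) = (void*)BPF_FUNC_l3_csum_replace;

// Stand-in for the module's IP4_OFFSET(field, l2_header_size) macro.
#define IP4_OFFSET(field, l2) ((l2) + offsetof(struct iphdr, field))

static __always_inline void remark_ipv4_tos(struct __sk_buff* skb, int l2_header_size,
                                            __u8 tos, __u8 newTos) {
    // Incrementally fold the old->new TOS delta into the IPv4 header checksum;
    // the final argument is the width (2 bytes) of the replaced quantity.
    bpf_l3_csum_replace(skb, IP4_OFFSET(check, l2_header_size), htons(tos), htons(newTos),
                        sizeof(__u16));
    // Then overwrite the TOS byte itself.
    bpf_skb_store_bytes(skb, IP4_OFFSET(tos, l2_header_size), &newTos, sizeof(newTos), 0);
}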
|
D | dscpPolicy.h |
    42   static uint64_t (*bpf_get_socket_cookie)(struct __sk_buff* skb) =
    44   static int (*bpf_skb_store_bytes)(struct __sk_buff* skb, __u32 offset, const void* from, __u32 len,
    46   static int (*bpf_l3_csum_replace)(struct __sk_buff* skb, __u32 offset, __u64 from, __u64 to,
    48   static long (*bpf_skb_ecn_set_ce)(struct __sk_buff* skb) =
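dscpPolicy.h re-declares the small helper subset the program needs; the distinctive entry is bpf_skb_ecn_set_ce() at line 48, which sets the ECN CE codepoint on ECT-capable packets and handles the checksum update internally. A hedged sketch of how such a helper is typically driven from a TC hook (generic "tc" section name, not the module's DEFINE_BPF_PROG convention):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>

static long (*bpf_skb_ecn_set_ce)(struct __sk_buff* skb) = (void*)BPF_FUNC_skb_ecn_set_ce;

__attribute__((section("tc"), used))
int mark_congestion(struct __sk_buff* skb) {
    // Returns 1 if CE was set (the packet was ECT-marked), 0 otherwise;
    // either way the packet continues down the qdisc chain.
    (void)bpf_skb_ecn_set_ce(skb);
    return TC_ACT_PIPE;
}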
|
D | gentle.c |
    25   (struct __sk_buff *skb) {
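gentle.c's single match is just the tail of a wrapped signature: like every entry above, the program takes a struct __sk_buff*. For reference, the smallest complete TC-style program of that shape (generic section annotation and name; the module itself declares its programs through DEFINE_BPF_PROG macros):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>

__attribute__((section("tc"), used))
int gentle_noop(struct __sk_buff* skb) {
    (void)skb->len;      // skb fields like len/protocol/mark are readable here
    return TC_ACT_PIPE;  // pass the packet to the next classifier/action
}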
|
/packages/modules/Connectivity/staticlibs/native/bpf_headers/include/bpf/ |
D | bpf_helpers.h |
    383  unsigned long long load_byte(void* skb, unsigned long long off) asm("llvm.bpf.load.byte");
    384  unsigned long long load_half(void* skb, unsigned long long off) asm("llvm.bpf.load.half");
    385  unsigned long long load_word(void* skb, unsigned long long off) asm("llvm.bpf.load.word");
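Unlike the BPF_FUNC_* pointers above, these three declarations bind to clang's legacy packet-load intrinsics, which compile to LD_ABS/LD_IND instructions: they read at absolute packet offsets, perform the network-to-host byte swap implicitly, and are only legal in program types that allow LD_ABS (socket filters being the classic case). An assumed usage sketch, with the "socket" section name borrowed from libbpf convention:

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <stddef.h>

unsigned long long load_half(void* skb, unsigned long long off) asm("llvm.bpf.load.half");

__attribute__((section("socket"), used))
int keep_only_ipv4(struct __sk_buff* skb) {
    // LD_ABS half-word at the ethertype offset; the result is already in
    // host order, so it compares directly against ETH_P_IP (0x0800).
    if (load_half(skb, offsetof(struct ethhdr, h_proto)) == ETH_P_IP)
        return skb->len;  // socket filters return the number of bytes to keep
    return 0;             // drop everything else
}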
|
/packages/modules/AdServices/adservices/tests/unittest/service-core/assets/classifier/ |
D | precomputed_test_app_list.csv |
    9096  com.skb.btvmobile 10055,10283
|
/packages/modules/AdServices/adservices/apk/assets/classifier/ |
D | precomputed_app_list.csv |
    9096  com.skb.btvmobile 10055,10283
|