1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  */
8 #ifndef _UAPI__LINUX_BPF_H__
9 #define _UAPI__LINUX_BPF_H__
10 
11 #include <linux/types.h>
12 #include "bpf_common.h"
13 
14 /* Extended instruction set based on top of classic BPF */
15 
16 /* instruction classes */
17 #define BPF_ALU64	0x07	/* alu mode in double word width */
18 
19 /* ld/ldx fields */
20 #define BPF_DW		0x18	/* double word (64-bit) */
21 #define BPF_XADD	0xc0	/* exclusive add */
22 
23 /* alu/jmp fields */
24 #define BPF_MOV		0xb0	/* mov reg to reg */
25 #define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */
26 
27 /* change endianness of a register */
28 #define BPF_END		0xd0	/* flags for endianness conversion: */
29 #define BPF_TO_LE	0x00	/* convert to little-endian */
30 #define BPF_TO_BE	0x08	/* convert to big-endian */
31 #define BPF_FROM_LE	BPF_TO_LE
32 #define BPF_FROM_BE	BPF_TO_BE
33 
34 /* jmp encodings */
35 #define BPF_JNE		0x50	/* jump != */
36 #define BPF_JLT		0xa0	/* LT is unsigned, '<' */
37 #define BPF_JLE		0xb0	/* LE is unsigned, '<=' */
38 #define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
39 #define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
40 #define BPF_JSLT	0xc0	/* SLT is signed, '<' */
41 #define BPF_JSLE	0xd0	/* SLE is signed, '<=' */
42 #define BPF_CALL	0x80	/* function call */
43 #define BPF_EXIT	0x90	/* function return */
44 
45 /* Register numbers */
46 enum {
47 	BPF_REG_0 = 0,
48 	BPF_REG_1,
49 	BPF_REG_2,
50 	BPF_REG_3,
51 	BPF_REG_4,
52 	BPF_REG_5,
53 	BPF_REG_6,
54 	BPF_REG_7,
55 	BPF_REG_8,
56 	BPF_REG_9,
57 	BPF_REG_10,
58 	__MAX_BPF_REG,
59 };
60 
61 /* BPF has 10 general purpose 64-bit registers and stack frame. */
62 #define MAX_BPF_REG	__MAX_BPF_REG
63 
64 struct bpf_insn {
65 	__u8	code;		/* opcode */
66 	__u8	dst_reg:4;	/* dest register */
67 	__u8	src_reg:4;	/* source register */
68 	__s16	off;		/* signed offset */
69 	__s32	imm;		/* signed immediate constant */
70 };
71 
72 /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
73 struct bpf_lpm_trie_key {
74 	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
75 	__u8	data[0];	/* Arbitrary size */
76 };
77 
78 struct bpf_cgroup_storage_key {
79 	__u64	cgroup_inode_id;	/* cgroup inode id */
80 	__u32	attach_type;		/* program attach type */
81 };
82 
83 /* BPF syscall commands, see bpf(2) man-page for details. */
84 enum bpf_cmd {
85 	BPF_MAP_CREATE,
86 	BPF_MAP_LOOKUP_ELEM,
87 	BPF_MAP_UPDATE_ELEM,
88 	BPF_MAP_DELETE_ELEM,
89 	BPF_MAP_GET_NEXT_KEY,
90 	BPF_PROG_LOAD,
91 	BPF_OBJ_PIN,
92 	BPF_OBJ_GET,
93 	BPF_PROG_ATTACH,
94 	BPF_PROG_DETACH,
95 	BPF_PROG_TEST_RUN,
96 	BPF_PROG_GET_NEXT_ID,
97 	BPF_MAP_GET_NEXT_ID,
98 	BPF_PROG_GET_FD_BY_ID,
99 	BPF_MAP_GET_FD_BY_ID,
100 	BPF_OBJ_GET_INFO_BY_FD,
101 	BPF_PROG_QUERY,
102 	BPF_RAW_TRACEPOINT_OPEN,
103 	BPF_BTF_LOAD,
104 	BPF_BTF_GET_FD_BY_ID,
105 	BPF_TASK_FD_QUERY,
106 	BPF_MAP_LOOKUP_AND_DELETE_ELEM,
107 };
108 
109 enum bpf_map_type {
110 	BPF_MAP_TYPE_UNSPEC,
111 	BPF_MAP_TYPE_HASH,
112 	BPF_MAP_TYPE_ARRAY,
113 	BPF_MAP_TYPE_PROG_ARRAY,
114 	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
115 	BPF_MAP_TYPE_PERCPU_HASH,
116 	BPF_MAP_TYPE_PERCPU_ARRAY,
117 	BPF_MAP_TYPE_STACK_TRACE,
118 	BPF_MAP_TYPE_CGROUP_ARRAY,
119 	BPF_MAP_TYPE_LRU_HASH,
120 	BPF_MAP_TYPE_LRU_PERCPU_HASH,
121 	BPF_MAP_TYPE_LPM_TRIE,
122 	BPF_MAP_TYPE_ARRAY_OF_MAPS,
123 	BPF_MAP_TYPE_HASH_OF_MAPS,
124 	BPF_MAP_TYPE_DEVMAP,
125 	BPF_MAP_TYPE_SOCKMAP,
126 	BPF_MAP_TYPE_CPUMAP,
127 	BPF_MAP_TYPE_XSKMAP,
128 	BPF_MAP_TYPE_SOCKHASH,
129 	BPF_MAP_TYPE_CGROUP_STORAGE,
130 	BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
131 	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
132 	BPF_MAP_TYPE_QUEUE,
133 	BPF_MAP_TYPE_STACK,
134 };
135 
136 enum bpf_prog_type {
137 	BPF_PROG_TYPE_UNSPEC,
138 	BPF_PROG_TYPE_SOCKET_FILTER,
139 	BPF_PROG_TYPE_KPROBE,
140 	BPF_PROG_TYPE_SCHED_CLS,
141 	BPF_PROG_TYPE_SCHED_ACT,
142 	BPF_PROG_TYPE_TRACEPOINT,
143 	BPF_PROG_TYPE_XDP,
144 	BPF_PROG_TYPE_PERF_EVENT,
145 	BPF_PROG_TYPE_CGROUP_SKB,
146 	BPF_PROG_TYPE_CGROUP_SOCK,
147 	BPF_PROG_TYPE_LWT_IN,
148 	BPF_PROG_TYPE_LWT_OUT,
149 	BPF_PROG_TYPE_LWT_XMIT,
150 	BPF_PROG_TYPE_SOCK_OPS,
151 	BPF_PROG_TYPE_SK_SKB,
152 	BPF_PROG_TYPE_CGROUP_DEVICE,
153 	BPF_PROG_TYPE_SK_MSG,
154 	BPF_PROG_TYPE_RAW_TRACEPOINT,
155 	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
156 	BPF_PROG_TYPE_LWT_SEG6LOCAL,
157 	BPF_PROG_TYPE_LIRC_MODE2,
158 	BPF_PROG_TYPE_SK_REUSEPORT,
159 	BPF_PROG_TYPE_FLOW_DISSECTOR,
160 };
161 
162 enum bpf_attach_type {
163 	BPF_CGROUP_INET_INGRESS,
164 	BPF_CGROUP_INET_EGRESS,
165 	BPF_CGROUP_INET_SOCK_CREATE,
166 	BPF_CGROUP_SOCK_OPS,
167 	BPF_SK_SKB_STREAM_PARSER,
168 	BPF_SK_SKB_STREAM_VERDICT,
169 	BPF_CGROUP_DEVICE,
170 	BPF_SK_MSG_VERDICT,
171 	BPF_CGROUP_INET4_BIND,
172 	BPF_CGROUP_INET6_BIND,
173 	BPF_CGROUP_INET4_CONNECT,
174 	BPF_CGROUP_INET6_CONNECT,
175 	BPF_CGROUP_INET4_POST_BIND,
176 	BPF_CGROUP_INET6_POST_BIND,
177 	BPF_CGROUP_UDP4_SENDMSG,
178 	BPF_CGROUP_UDP6_SENDMSG,
179 	BPF_LIRC_MODE2,
180 	BPF_FLOW_DISSECTOR,
181 	__MAX_BPF_ATTACH_TYPE
182 };
183 
184 #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
185 
186 /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
187  *
188  * NONE(default): No further bpf programs allowed in the subtree.
189  *
190  * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
191  * the program in this cgroup yields to sub-cgroup program.
192  *
193  * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
194  * that cgroup program gets run in addition to the program in this cgroup.
195  *
196  * Only one program is allowed to be attached to a cgroup with
197  * NONE or BPF_F_ALLOW_OVERRIDE flag.
198  * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
199  * release old program and attach the new one. Attach flags has to match.
200  *
201  * Multiple programs are allowed to be attached to a cgroup with
202  * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
203  * (those that were attached first, run first)
204  * The programs of sub-cgroup are executed first, then programs of
205  * this cgroup and then programs of parent cgroup.
206  * When children program makes decision (like picking TCP CA or sock bind)
207  * parent program has a chance to override it.
208  *
209  * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
210  * A cgroup with NONE doesn't allow any programs in sub-cgroups.
211  * Ex1:
212  * cgrp1 (MULTI progs A, B) ->
213  *    cgrp2 (OVERRIDE prog C) ->
214  *      cgrp3 (MULTI prog D) ->
215  *        cgrp4 (OVERRIDE prog E) ->
216  *          cgrp5 (NONE prog F)
217  * the event in cgrp5 triggers execution of F,D,A,B in that order.
218  * if prog F is detached, the execution is E,D,A,B
219  * if prog F and D are detached, the execution is E,A,B
220  * if prog F, E and D are detached, the execution is C,A,B
221  *
222  * All eligible programs are executed regardless of return code from
223  * earlier programs.
224  */
225 #define BPF_F_ALLOW_OVERRIDE	(1U << 0)
226 #define BPF_F_ALLOW_MULTI	(1U << 1)
227 
228 /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
229  * verifier will perform strict alignment checking as if the kernel
230  * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
231  * and NET_IP_ALIGN defined to 2.
232  */
233 #define BPF_F_STRICT_ALIGNMENT	(1U << 0)
234 
235 /* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
236 #define BPF_PSEUDO_MAP_FD	1
237 
238 /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
239  * offset to another bpf function
240  */
241 #define BPF_PSEUDO_CALL		1
242 
243 /* flags for BPF_MAP_UPDATE_ELEM command */
244 #define BPF_ANY		0 /* create new element or update existing */
245 #define BPF_NOEXIST	1 /* create new element if it didn't exist */
246 #define BPF_EXIST	2 /* update existing element */
247 
248 /* flags for BPF_MAP_CREATE command */
249 #define BPF_F_NO_PREALLOC	(1U << 0)
250 /* Instead of having one common LRU list in the
251  * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
252  * which can scale and perform better.
253  * Note, the LRU nodes (including free nodes) cannot be moved
254  * across different LRU lists.
255  */
256 #define BPF_F_NO_COMMON_LRU	(1U << 1)
257 /* Specify numa node during map creation */
258 #define BPF_F_NUMA_NODE		(1U << 2)
259 
260 /* flags for BPF_PROG_QUERY */
261 #define BPF_F_QUERY_EFFECTIVE	(1U << 0)
262 
263 #define BPF_OBJ_NAME_LEN 16U
264 
265 /* Flags for accessing BPF object */
266 #define BPF_F_RDONLY		(1U << 3)
267 #define BPF_F_WRONLY		(1U << 4)
268 
269 /* Flag for stack_map, store build_id+offset instead of pointer */
270 #define BPF_F_STACK_BUILD_ID	(1U << 5)
271 
272 enum bpf_stack_build_id_status {
273 	/* user space need an empty entry to identify end of a trace */
274 	BPF_STACK_BUILD_ID_EMPTY = 0,
275 	/* with valid build_id and offset */
276 	BPF_STACK_BUILD_ID_VALID = 1,
277 	/* couldn't get build_id, fallback to ip */
278 	BPF_STACK_BUILD_ID_IP = 2,
279 };
280 
281 #define BPF_BUILD_ID_SIZE 20
282 struct bpf_stack_build_id {
283 	__s32		status;
284 	unsigned char	build_id[BPF_BUILD_ID_SIZE];
285 	union {
286 		__u64	offset;
287 		__u64	ip;
288 	};
289 };
290 
291 union bpf_attr {
292 	struct { /* anonymous struct used by BPF_MAP_CREATE command */
293 		__u32	map_type;	/* one of enum bpf_map_type */
294 		__u32	key_size;	/* size of key in bytes */
295 		__u32	value_size;	/* size of value in bytes */
296 		__u32	max_entries;	/* max number of entries in a map */
297 		__u32	map_flags;	/* BPF_MAP_CREATE related
298 					 * flags defined above.
299 					 */
300 		__u32	inner_map_fd;	/* fd pointing to the inner map */
301 		__u32	numa_node;	/* numa node (effective only if
302 					 * BPF_F_NUMA_NODE is set).
303 					 */
304 		char	map_name[BPF_OBJ_NAME_LEN];
305 		__u32	map_ifindex;	/* ifindex of netdev to create on */
306 		__u32	btf_fd;		/* fd pointing to a BTF type data */
307 		__u32	btf_key_type_id;	/* BTF type_id of the key */
308 		__u32	btf_value_type_id;	/* BTF type_id of the value */
309 	};
310 
311 	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
312 		__u32		map_fd;
313 		__aligned_u64	key;
314 		union {
315 			__aligned_u64 value;
316 			__aligned_u64 next_key;
317 		};
318 		__u64		flags;
319 	};
320 
321 	struct { /* anonymous struct used by BPF_PROG_LOAD command */
322 		__u32		prog_type;	/* one of enum bpf_prog_type */
323 		__u32		insn_cnt;
324 		__aligned_u64	insns;
325 		__aligned_u64	license;
326 		__u32		log_level;	/* verbosity level of verifier */
327 		__u32		log_size;	/* size of user buffer */
328 		__aligned_u64	log_buf;	/* user supplied buffer */
329 		__u32		kern_version;	/* checked when prog_type=kprobe */
330 		__u32		prog_flags;
331 		char		prog_name[BPF_OBJ_NAME_LEN];
332 		__u32		prog_ifindex;	/* ifindex of netdev to prep for */
333 		/* For some prog types expected attach type must be known at
334 		 * load time to verify attach type specific parts of prog
335 		 * (context accesses, allowed helpers, etc).
336 		 */
337 		__u32		expected_attach_type;
338 	};
339 
340 	struct { /* anonymous struct used by BPF_OBJ_* commands */
341 		__aligned_u64	pathname;
342 		__u32		bpf_fd;
343 		__u32		file_flags;
344 	};
345 
346 	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
347 		__u32		target_fd;	/* container object to attach to */
348 		__u32		attach_bpf_fd;	/* eBPF program to attach */
349 		__u32		attach_type;
350 		__u32		attach_flags;
351 	};
352 
353 	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
354 		__u32		prog_fd;
355 		__u32		retval;
356 		__u32		data_size_in;
357 		__u32		data_size_out;
358 		__aligned_u64	data_in;
359 		__aligned_u64	data_out;
360 		__u32		repeat;
361 		__u32		duration;
362 	} test;
363 
364 	struct { /* anonymous struct used by BPF_*_GET_*_ID */
365 		union {
366 			__u32		start_id;
367 			__u32		prog_id;
368 			__u32		map_id;
369 			__u32		btf_id;
370 		};
371 		__u32		next_id;
372 		__u32		open_flags;
373 	};
374 
375 	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
376 		__u32		bpf_fd;
377 		__u32		info_len;
378 		__aligned_u64	info;
379 	} info;
380 
381 	struct { /* anonymous struct used by BPF_PROG_QUERY command */
382 		__u32		target_fd;	/* container object to query */
383 		__u32		attach_type;
384 		__u32		query_flags;
385 		__u32		attach_flags;
386 		__aligned_u64	prog_ids;
387 		__u32		prog_cnt;
388 	} query;
389 
390 	struct {
391 		__u64 name;
392 		__u32 prog_fd;
393 	} raw_tracepoint;
394 
395 	struct { /* anonymous struct for BPF_BTF_LOAD */
396 		__aligned_u64	btf;
397 		__aligned_u64	btf_log_buf;
398 		__u32		btf_size;
399 		__u32		btf_log_size;
400 		__u32		btf_log_level;
401 	};
402 
403 	struct {
404 		__u32		pid;		/* input: pid */
405 		__u32		fd;		/* input: fd */
406 		__u32		flags;		/* input: flags */
407 		__u32		buf_len;	/* input/output: buf len */
408 		__aligned_u64	buf;		/* input/output:
409 						 *   tp_name for tracepoint
410 						 *   symbol for kprobe
411 						 *   filename for uprobe
412 						 */
413 		__u32		prog_id;	/* output: prod_id */
414 		__u32		fd_type;	/* output: BPF_FD_TYPE_* */
415 		__u64		probe_offset;	/* output: probe_offset */
416 		__u64		probe_addr;	/* output: probe_addr */
417 	} task_fd_query;
418 } __attribute__((aligned(8)));
419 
420 /* The description below is an attempt at providing documentation to eBPF
421  * developers about the multiple available eBPF helper functions. It can be
422  * parsed and used to produce a manual page. The workflow is the following,
423  * and requires the rst2man utility:
424  *
425  *     $ ./scripts/bpf_helpers_doc.py \
426  *             --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst
427  *     $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7
428  *     $ man /tmp/bpf-helpers.7
429  *
430  * Note that in order to produce this external documentation, some RST
431  * formatting is used in the descriptions to get "bold" and "italics" in
432  * manual pages. Also note that the few trailing white spaces are
433  * intentional, removing them would break paragraphs for rst2man.
434  *
435  * Start of BPF helper function descriptions:
436  *
437  * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)
438  * 	Description
439  * 		Perform a lookup in *map* for an entry associated to *key*.
440  * 	Return
441  * 		Map value associated to *key*, or **NULL** if no entry was
442  * 		found.
443  *
444  * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
445  * 	Description
446  * 		Add or update the value of the entry associated to *key* in
447  * 		*map* with *value*. *flags* is one of:
448  *
449  * 		**BPF_NOEXIST**
450  * 			The entry for *key* must not exist in the map.
451  * 		**BPF_EXIST**
452  * 			The entry for *key* must already exist in the map.
453  * 		**BPF_ANY**
454  * 			No condition on the existence of the entry for *key*.
455  *
456  * 		Flag value **BPF_NOEXIST** cannot be used for maps of types
457  * 		**BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY**  (all
458  * 		elements always exist), the helper would return an error.
459  * 	Return
460  * 		0 on success, or a negative error in case of failure.
461  *
462  * int bpf_map_delete_elem(struct bpf_map *map, const void *key)
463  * 	Description
464  * 		Delete entry with *key* from *map*.
465  * 	Return
466  * 		0 on success, or a negative error in case of failure.
467  *
468  * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
469  * 	Description
470  * 		Push an element *value* in *map*. *flags* is one of:
471  *
472  * 		**BPF_EXIST**
473  * 		If the queue/stack is full, the oldest element is removed to
474  * 		make room for this.
475  * 	Return
476  * 		0 on success, or a negative error in case of failure.
477  *
478  * int bpf_map_pop_elem(struct bpf_map *map, void *value)
479  * 	Description
480  * 		Pop an element from *map*.
481  * Return
482  * 		0 on success, or a negative error in case of failure.
483  *
484  * int bpf_map_peek_elem(struct bpf_map *map, void *value)
485  * 	Description
486  * 		Get an element from *map* without removing it.
487  * Return
488  * 		0 on success, or a negative error in case of failure.
489  *
490  * int bpf_probe_read(void *dst, u32 size, const void *src)
491  * 	Description
492  * 		For tracing programs, safely attempt to read *size* bytes from
493  * 		address *src* and store the data in *dst*.
494  * 	Return
495  * 		0 on success, or a negative error in case of failure.
496  *
497  * u64 bpf_ktime_get_ns(void)
498  * 	Description
499  * 		Return the time elapsed since system boot, in nanoseconds.
500  * 	Return
501  * 		Current *ktime*.
502  *
503  * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
504  * 	Description
505  * 		This helper is a "printk()-like" facility for debugging. It
506  * 		prints a message defined by format *fmt* (of size *fmt_size*)
507  * 		to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if
508  * 		available. It can take up to three additional **u64**
509  * 		arguments (as an eBPF helpers, the total number of arguments is
510  * 		limited to five).
511  *
512  * 		Each time the helper is called, it appends a line to the trace.
513  * 		The format of the trace is customizable, and the exact output
514  * 		one will get depends on the options set in
515  * 		*\/sys/kernel/debug/tracing/trace_options* (see also the
516  * 		*README* file under the same directory). However, it usually
517  * 		defaults to something like:
518  *
519  * 		::
520  *
521  * 			telnet-470   [001] .N.. 419421.045894: 0x00000001: <formatted msg>
522  *
523  * 		In the above:
524  *
525  * 			* ``telnet`` is the name of the current task.
526  * 			* ``470`` is the PID of the current task.
527  * 			* ``001`` is the CPU number on which the task is
528  * 			  running.
529  * 			* In ``.N..``, each character refers to a set of
530  * 			  options (whether irqs are enabled, scheduling
531  * 			  options, whether hard/softirqs are running, level of
532  * 			  preempt_disabled respectively). **N** means that
533  * 			  **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED**
534  * 			  are set.
535  * 			* ``419421.045894`` is a timestamp.
536  * 			* ``0x00000001`` is a fake value used by BPF for the
537  * 			  instruction pointer register.
538  * 			* ``<formatted msg>`` is the message formatted with
539  * 			  *fmt*.
540  *
541  * 		The conversion specifiers supported by *fmt* are similar, but
542  * 		more limited than for printk(). They are **%d**, **%i**,
543  * 		**%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**,
544  * 		**%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size
545  * 		of field, padding with zeroes, etc.) is available, and the
546  * 		helper will return **-EINVAL** (but print nothing) if it
547  * 		encounters an unknown specifier.
548  *
549  * 		Also, note that **bpf_trace_printk**\ () is slow, and should
550  * 		only be used for debugging purposes. For this reason, a notice
551  * 		bloc (spanning several lines) is printed to kernel logs and
552  * 		states that the helper should not be used "for production use"
553  * 		the first time this helper is used (or more precisely, when
554  * 		**trace_printk**\ () buffers are allocated). For passing values
555  * 		to user space, perf events should be preferred.
556  * 	Return
557  * 		The number of bytes written to the buffer, or a negative error
558  * 		in case of failure.
559  *
560  * u32 bpf_get_prandom_u32(void)
561  * 	Description
562  * 		Get a pseudo-random number.
563  *
564  * 		From a security point of view, this helper uses its own
565  * 		pseudo-random internal state, and cannot be used to infer the
566  * 		seed of other random functions in the kernel. However, it is
567  * 		essential to note that the generator used by the helper is not
568  * 		cryptographically secure.
569  * 	Return
570  * 		A random 32-bit unsigned value.
571  *
572  * u32 bpf_get_smp_processor_id(void)
573  * 	Description
574  * 		Get the SMP (symmetric multiprocessing) processor id. Note that
575  * 		all programs run with preemption disabled, which means that the
576  * 		SMP processor id is stable during all the execution of the
577  * 		program.
578  * 	Return
579  * 		The SMP id of the processor running the program.
580  *
581  * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
582  * 	Description
583  * 		Store *len* bytes from address *from* into the packet
584  * 		associated to *skb*, at *offset*. *flags* are a combination of
585  * 		**BPF_F_RECOMPUTE_CSUM** (automatically recompute the
586  * 		checksum for the packet after storing the bytes) and
587  * 		**BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
588  * 		**->swhash** and *skb*\ **->l4hash** to 0).
589  *
590  * 		A call to this helper is susceptible to change the underlaying
591  * 		packet buffer. Therefore, at load time, all checks on pointers
592  * 		previously done by the verifier are invalidated and must be
593  * 		performed again, if the helper is used in combination with
594  * 		direct packet access.
595  * 	Return
596  * 		0 on success, or a negative error in case of failure.
597  *
598  * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
599  * 	Description
600  * 		Recompute the layer 3 (e.g. IP) checksum for the packet
601  * 		associated to *skb*. Computation is incremental, so the helper
602  * 		must know the former value of the header field that was
603  * 		modified (*from*), the new value of this field (*to*), and the
604  * 		number of bytes (2 or 4) for this field, stored in *size*.
605  * 		Alternatively, it is possible to store the difference between
606  * 		the previous and the new values of the header field in *to*, by
607  * 		setting *from* and *size* to 0. For both methods, *offset*
608  * 		indicates the location of the IP checksum within the packet.
609  *
610  * 		This helper works in combination with **bpf_csum_diff**\ (),
611  * 		which does not update the checksum in-place, but offers more
612  * 		flexibility and can handle sizes larger than 2 or 4 for the
613  * 		checksum to update.
614  *
615  * 		A call to this helper is susceptible to change the underlaying
616  * 		packet buffer. Therefore, at load time, all checks on pointers
617  * 		previously done by the verifier are invalidated and must be
618  * 		performed again, if the helper is used in combination with
619  * 		direct packet access.
620  * 	Return
621  * 		0 on success, or a negative error in case of failure.
622  *
623  * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
624  * 	Description
625  * 		Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
626  * 		packet associated to *skb*. Computation is incremental, so the
627  * 		helper must know the former value of the header field that was
628  * 		modified (*from*), the new value of this field (*to*), and the
629  * 		number of bytes (2 or 4) for this field, stored on the lowest
630  * 		four bits of *flags*. Alternatively, it is possible to store
631  * 		the difference between the previous and the new values of the
632  * 		header field in *to*, by setting *from* and the four lowest
633  * 		bits of *flags* to 0. For both methods, *offset* indicates the
634  * 		location of the IP checksum within the packet. In addition to
635  * 		the size of the field, *flags* can be added (bitwise OR) actual
636  * 		flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left
637  * 		untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and
638  * 		for updates resulting in a null checksum the value is set to
639  * 		**CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
640  * 		the checksum is to be computed against a pseudo-header.
641  *
642  * 		This helper works in combination with **bpf_csum_diff**\ (),
643  * 		which does not update the checksum in-place, but offers more
644  * 		flexibility and can handle sizes larger than 2 or 4 for the
645  * 		checksum to update.
646  *
647  * 		A call to this helper is susceptible to change the underlaying
648  * 		packet buffer. Therefore, at load time, all checks on pointers
649  * 		previously done by the verifier are invalidated and must be
650  * 		performed again, if the helper is used in combination with
651  * 		direct packet access.
652  * 	Return
653  * 		0 on success, or a negative error in case of failure.
654  *
655  * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
656  * 	Description
657  * 		This special helper is used to trigger a "tail call", or in
658  * 		other words, to jump into another eBPF program. The same stack
659  * 		frame is used (but values on stack and in registers for the
660  * 		caller are not accessible to the callee). This mechanism allows
661  * 		for program chaining, either for raising the maximum number of
662  * 		available eBPF instructions, or to execute given programs in
663  * 		conditional blocks. For security reasons, there is an upper
664  * 		limit to the number of successive tail calls that can be
665  * 		performed.
666  *
667  * 		Upon call of this helper, the program attempts to jump into a
668  * 		program referenced at index *index* in *prog_array_map*, a
669  * 		special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes
670  * 		*ctx*, a pointer to the context.
671  *
672  * 		If the call succeeds, the kernel immediately runs the first
673  * 		instruction of the new program. This is not a function call,
674  * 		and it never returns to the previous program. If the call
675  * 		fails, then the helper has no effect, and the caller continues
676  * 		to run its subsequent instructions. A call can fail if the
677  * 		destination program for the jump does not exist (i.e. *index*
678  * 		is superior to the number of entries in *prog_array_map*), or
679  * 		if the maximum number of tail calls has been reached for this
680  * 		chain of programs. This limit is defined in the kernel by the
681  * 		macro **MAX_TAIL_CALL_CNT** (not accessible to user space),
682  * 		which is currently set to 32.
683  * 	Return
684  * 		0 on success, or a negative error in case of failure.
685  *
686  * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
687  * 	Description
688  * 		Clone and redirect the packet associated to *skb* to another
689  * 		net device of index *ifindex*. Both ingress and egress
690  * 		interfaces can be used for redirection. The **BPF_F_INGRESS**
691  * 		value in *flags* is used to make the distinction (ingress path
692  * 		is selected if the flag is present, egress path otherwise).
693  * 		This is the only flag supported for now.
694  *
695  * 		In comparison with **bpf_redirect**\ () helper,
696  * 		**bpf_clone_redirect**\ () has the associated cost of
697  * 		duplicating the packet buffer, but this can be executed out of
698  * 		the eBPF program. Conversely, **bpf_redirect**\ () is more
699  * 		efficient, but it is handled through an action code where the
700  * 		redirection happens only after the eBPF program has returned.
701  *
702  * 		A call to this helper is susceptible to change the underlaying
703  * 		packet buffer. Therefore, at load time, all checks on pointers
704  * 		previously done by the verifier are invalidated and must be
705  * 		performed again, if the helper is used in combination with
706  * 		direct packet access.
707  * 	Return
708  * 		0 on success, or a negative error in case of failure.
709  *
710  * u64 bpf_get_current_pid_tgid(void)
711  * 	Return
712  * 		A 64-bit integer containing the current tgid and pid, and
713  * 		created as such:
714  * 		*current_task*\ **->tgid << 32 \|**
715  * 		*current_task*\ **->pid**.
716  *
717  * u64 bpf_get_current_uid_gid(void)
718  * 	Return
719  * 		A 64-bit integer containing the current GID and UID, and
720  * 		created as such: *current_gid* **<< 32 \|** *current_uid*.
721  *
722  * int bpf_get_current_comm(char *buf, u32 size_of_buf)
723  * 	Description
724  * 		Copy the **comm** attribute of the current task into *buf* of
725  * 		*size_of_buf*. The **comm** attribute contains the name of
726  * 		the executable (excluding the path) for the current task. The
727  * 		*size_of_buf* must be strictly positive. On success, the
728  * 		helper makes sure that the *buf* is NUL-terminated. On failure,
729  * 		it is filled with zeroes.
730  * 	Return
731  * 		0 on success, or a negative error in case of failure.
732  *
733  * u32 bpf_get_cgroup_classid(struct sk_buff *skb)
734  * 	Description
735  * 		Retrieve the classid for the current task, i.e. for the net_cls
736  * 		cgroup to which *skb* belongs.
737  *
738  * 		This helper can be used on TC egress path, but not on ingress.
739  *
740  * 		The net_cls cgroup provides an interface to tag network packets
741  * 		based on a user-provided identifier for all traffic coming from
742  * 		the tasks belonging to the related cgroup. See also the related
743  * 		kernel documentation, available from the Linux sources in file
744  * 		*Documentation/cgroup-v1/net_cls.txt*.
745  *
746  * 		The Linux kernel has two versions for cgroups: there are
747  * 		cgroups v1 and cgroups v2. Both are available to users, who can
748  * 		use a mixture of them, but note that the net_cls cgroup is for
749  * 		cgroup v1 only. This makes it incompatible with BPF programs
750  * 		run on cgroups, which is a cgroup-v2-only feature (a socket can
751  * 		only hold data for one version of cgroups at a time).
752  *
753  * 		This helper is only available is the kernel was compiled with
754  * 		the **CONFIG_CGROUP_NET_CLASSID** configuration option set to
755  * 		"**y**" or to "**m**".
756  * 	Return
757  * 		The classid, or 0 for the default unconfigured classid.
758  *
759  * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
760  * 	Description
761  * 		Push a *vlan_tci* (VLAN tag control information) of protocol
762  * 		*vlan_proto* to the packet associated to *skb*, then update
763  * 		the checksum. Note that if *vlan_proto* is different from
764  * 		**ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
765  * 		be **ETH_P_8021Q**.
766  *
767  * 		A call to this helper is susceptible to change the underlaying
768  * 		packet buffer. Therefore, at load time, all checks on pointers
769  * 		previously done by the verifier are invalidated and must be
770  * 		performed again, if the helper is used in combination with
771  * 		direct packet access.
772  * 	Return
773  * 		0 on success, or a negative error in case of failure.
774  *
775  * int bpf_skb_vlan_pop(struct sk_buff *skb)
776  * 	Description
777  * 		Pop a VLAN header from the packet associated to *skb*.
778  *
779  * 		A call to this helper is susceptible to change the underlaying
780  * 		packet buffer. Therefore, at load time, all checks on pointers
781  * 		previously done by the verifier are invalidated and must be
782  * 		performed again, if the helper is used in combination with
783  * 		direct packet access.
784  * 	Return
785  * 		0 on success, or a negative error in case of failure.
786  *
787  * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
788  * 	Description
789  * 		Get tunnel metadata. This helper takes a pointer *key* to an
790  * 		empty **struct bpf_tunnel_key** of **size**, that will be
791  * 		filled with tunnel metadata for the packet associated to *skb*.
792  * 		The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which
793  * 		indicates that the tunnel is based on IPv6 protocol instead of
794  * 		IPv4.
795  *
796  * 		The **struct bpf_tunnel_key** is an object that generalizes the
797  * 		principal parameters used by various tunneling protocols into a
798  * 		single struct. This way, it can be used to easily make a
799  * 		decision based on the contents of the encapsulation header,
800  * 		"summarized" in this struct. In particular, it holds the IP
801  * 		address of the remote end (IPv4 or IPv6, depending on the case)
802  * 		in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also,
803  * 		this struct exposes the *key*\ **->tunnel_id**, which is
804  * 		generally mapped to a VNI (Virtual Network Identifier), making
805  * 		it programmable together with the **bpf_skb_set_tunnel_key**\
806  * 		() helper.
807  *
808  * 		Let's imagine that the following code is part of a program
809  * 		attached to the TC ingress interface, on one end of a GRE
810  * 		tunnel, and is supposed to filter out all messages coming from
811  * 		remote ends with IPv4 address other than 10.0.0.1:
812  *
813  * 		::
814  *
815  * 			int ret;
816  * 			struct bpf_tunnel_key key = {};
817  *
818  * 			ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
819  * 			if (ret < 0)
820  * 				return TC_ACT_SHOT;	// drop packet
821  *
822  * 			if (key.remote_ipv4 != 0x0a000001)
823  * 				return TC_ACT_SHOT;	// drop packet
824  *
825  * 			return TC_ACT_OK;		// accept packet
826  *
827  * 		This interface can also be used with all encapsulation devices
828  * 		that can operate in "collect metadata" mode: instead of having
829  * 		one network device per specific configuration, the "collect
830  * 		metadata" mode only requires a single device where the
831  * 		configuration can be extracted from this helper.
832  *
833  * 		This can be used together with various tunnels such as VXLan,
834  * 		Geneve, GRE or IP in IP (IPIP).
835  * 	Return
836  * 		0 on success, or a negative error in case of failure.
837  *
838  * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
839  * 	Description
840  * 		Populate tunnel metadata for packet associated to *skb.* The
841  * 		tunnel metadata is set to the contents of *key*, of *size*. The
842  * 		*flags* can be set to a combination of the following values:
843  *
844  * 		**BPF_F_TUNINFO_IPV6**
845  * 			Indicate that the tunnel is based on IPv6 protocol
846  * 			instead of IPv4.
847  * 		**BPF_F_ZERO_CSUM_TX**
848  * 			For IPv4 packets, add a flag to tunnel metadata
849  * 			indicating that checksum computation should be skipped
850  * 			and checksum set to zeroes.
851  * 		**BPF_F_DONT_FRAGMENT**
852  * 			Add a flag to tunnel metadata indicating that the
853  * 			packet should not be fragmented.
854  * 		**BPF_F_SEQ_NUMBER**
855  * 			Add a flag to tunnel metadata indicating that a
856  * 			sequence number should be added to tunnel header before
857  * 			sending the packet. This flag was added for GRE
858  * 			encapsulation, but might be used with other protocols
859  * 			as well in the future.
860  *
861  * 		Here is a typical usage on the transmit path:
862  *
863  * 		::
864  *
865  * 			struct bpf_tunnel_key key;
866  * 			     populate key ...
867  * 			bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
868  * 			bpf_clone_redirect(skb, vxlan_dev_ifindex, 0);
869  *
870  * 		See also the description of the **bpf_skb_get_tunnel_key**\ ()
871  * 		helper for additional information.
872  * 	Return
873  * 		0 on success, or a negative error in case of failure.
874  *
875  * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags)
876  * 	Description
877  * 		Read the value of a perf event counter. This helper relies on a
878  * 		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of
879  * 		the perf event counter is selected when *map* is updated with
880  * 		perf event file descriptors. The *map* is an array whose size
881  * 		is the number of available CPUs, and each cell contains a value
882  * 		relative to one CPU. The value to retrieve is indicated by
883  * 		*flags*, that contains the index of the CPU to look up, masked
884  * 		with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
885  * 		**BPF_F_CURRENT_CPU** to indicate that the value for the
886  * 		current CPU should be retrieved.
887  *
888  * 		Note that before Linux 4.13, only hardware perf event can be
889  * 		retrieved.
890  *
891  * 		Also, be aware that the newer helper
892  * 		**bpf_perf_event_read_value**\ () is recommended over
893  * 		**bpf_perf_event_read**\ () in general. The latter has some ABI
894  * 		quirks where error and counter value are used as a return code
895  * 		(which is wrong to do since ranges may overlap). This issue is
896  * 		fixed with **bpf_perf_event_read_value**\ (), which at the same
897  * 		time provides more features over the **bpf_perf_event_read**\
898  * 		() interface. Please refer to the description of
899  * 		**bpf_perf_event_read_value**\ () for details.
900  * 	Return
901  * 		The value of the perf event counter read from the map, or a
902  * 		negative error code in case of failure.
903  *
904  * int bpf_redirect(u32 ifindex, u64 flags)
905  * 	Description
906  * 		Redirect the packet to another net device of index *ifindex*.
907  * 		This helper is somewhat similar to **bpf_clone_redirect**\
908  * 		(), except that the packet is not cloned, which provides
909  * 		increased performance.
910  *
911  * 		Except for XDP, both ingress and egress interfaces can be used
912  * 		for redirection. The **BPF_F_INGRESS** value in *flags* is used
913  * 		to make the distinction (ingress path is selected if the flag
914  * 		is present, egress path otherwise). Currently, XDP only
915  * 		supports redirection to the egress interface, and accepts no
916  * 		flag at all.
917  *
918  * 		The same effect can be attained with the more generic
919  * 		**bpf_redirect_map**\ (), which requires specific maps to be
920  * 		used but offers better performance.
921  * 	Return
922  * 		For XDP, the helper returns **XDP_REDIRECT** on success or
923  * 		**XDP_ABORTED** on error. For other program types, the values
924  * 		are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on
925  * 		error.
926  *
927  * u32 bpf_get_route_realm(struct sk_buff *skb)
928  * 	Description
929  * 		Retrieve the realm or the route, that is to say the
930  * 		**tclassid** field of the destination for the *skb*. The
931  * 		indentifier retrieved is a user-provided tag, similar to the
932  * 		one used with the net_cls cgroup (see description for
933  * 		**bpf_get_cgroup_classid**\ () helper), but here this tag is
934  * 		held by a route (a destination entry), not by a task.
935  *
936  * 		Retrieving this identifier works with the clsact TC egress hook
937  * 		(see also **tc-bpf(8)**), or alternatively on conventional
938  * 		classful egress qdiscs, but not on TC ingress path. In case of
939  * 		clsact TC egress hook, this has the advantage that, internally,
940  * 		the destination entry has not been dropped yet in the transmit
941  * 		path. Therefore, the destination entry does not need to be
942  * 		artificially held via **netif_keep_dst**\ () for a classful
943  * 		qdisc until the *skb* is freed.
944  *
945  * 		This helper is available only if the kernel was compiled with
946  * 		**CONFIG_IP_ROUTE_CLASSID** configuration option.
947  * 	Return
948  * 		The realm of the route for the packet associated to *skb*, or 0
949  * 		if none was found.
950  *
951  * int bpf_perf_event_output(struct pt_reg *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
952  * 	Description
953  * 		Write raw *data* blob into a special BPF perf event held by
954  * 		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
955  * 		event must have the following attributes: **PERF_SAMPLE_RAW**
956  * 		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
957  * 		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
958  *
959  * 		The *flags* are used to indicate the index in *map* for which
960  * 		the value must be put, masked with **BPF_F_INDEX_MASK**.
961  * 		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
962  * 		to indicate that the index of the current CPU core should be
963  * 		used.
964  *
965  * 		The value to write, of *size*, is passed through eBPF stack and
966  * 		pointed by *data*.
967  *
968  * 		The context of the program *ctx* needs also be passed to the
969  * 		helper.
970  *
971  * 		On user space, a program willing to read the values needs to
972  * 		call **perf_event_open**\ () on the perf event (either for
973  * 		one or for all CPUs) and to store the file descriptor into the
974  * 		*map*. This must be done before the eBPF program can send data
975  * 		into it. An example is available in file
976  * 		*samples/bpf/trace_output_user.c* in the Linux kernel source
977  * 		tree (the eBPF program counterpart is in
978  * 		*samples/bpf/trace_output_kern.c*).
979  *
980  * 		**bpf_perf_event_output**\ () achieves better performance
981  * 		than **bpf_trace_printk**\ () for sharing data with user
982  * 		space, and is much better suitable for streaming data from eBPF
983  * 		programs.
984  *
985  * 		Note that this helper is not restricted to tracing use cases
986  * 		and can be used with programs attached to TC or XDP as well,
987  * 		where it allows for passing data to user space listeners. Data
988  * 		can be:
989  *
990  * 		* Only custom structs,
991  * 		* Only the packet payload, or
992  * 		* A combination of both.
993  * 	Return
994  * 		0 on success, or a negative error in case of failure.
995  *
996  * int bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len)
997  * 	Description
998  * 		This helper was provided as an easy way to load data from a
999  * 		packet. It can be used to load *len* bytes from *offset* from
1000  * 		the packet associated to *skb*, into the buffer pointed by
1001  * 		*to*.
1002  *
1003  * 		Since Linux 4.7, usage of this helper has mostly been replaced
1004  * 		by "direct packet access", enabling packet data to be
1005  * 		manipulated with *skb*\ **->data** and *skb*\ **->data_end**
1006  * 		pointing respectively to the first byte of packet data and to
1007  * 		the byte after the last byte of packet data. However, it
1008  * 		remains useful if one wishes to read large quantities of data
1009  * 		at once from a packet into the eBPF stack.
1010  * 	Return
1011  * 		0 on success, or a negative error in case of failure.
1012  *
1013  * int bpf_get_stackid(struct pt_reg *ctx, struct bpf_map *map, u64 flags)
1014  * 	Description
1015  * 		Walk a user or a kernel stack and return its id. To achieve
1016  * 		this, the helper needs *ctx*, which is a pointer to the context
1017  * 		on which the tracing program is executed, and a pointer to a
1018  * 		*map* of type **BPF_MAP_TYPE_STACK_TRACE**.
1019  *
1020  * 		The last argument, *flags*, holds the number of stack frames to
1021  * 		skip (from 0 to 255), masked with
1022  * 		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
1023  * 		a combination of the following flags:
1024  *
1025  * 		**BPF_F_USER_STACK**
1026  * 			Collect a user space stack instead of a kernel stack.
1027  * 		**BPF_F_FAST_STACK_CMP**
1028  * 			Compare stacks by hash only.
1029  * 		**BPF_F_REUSE_STACKID**
1030  * 			If two different stacks hash into the same *stackid*,
1031  * 			discard the old one.
1032  *
1033  * 		The stack id retrieved is a 32 bit long integer handle which
1034  * 		can be further combined with other data (including other stack
1035  * 		ids) and used as a key into maps. This can be useful for
1036  * 		generating a variety of graphs (such as flame graphs or off-cpu
1037  * 		graphs).
1038  *
1039  * 		For walking a stack, this helper is an improvement over
1040  * 		**bpf_probe_read**\ (), which can be used with unrolled loops
1041  * 		but is not efficient and consumes a lot of eBPF instructions.
1042  * 		Instead, **bpf_get_stackid**\ () can collect up to
1043  * 		**PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that
1044  * 		this limit can be controlled with the **sysctl** program, and
1045  * 		that it should be manually increased in order to profile long
1046  * 		user stacks (such as stacks for Java programs). To do so, use:
1047  *
1048  * 		::
1049  *
1050  * 			# sysctl kernel.perf_event_max_stack=<new value>
1051  * 	Return
1052  * 		The positive or null stack id on success, or a negative error
1053  * 		in case of failure.
1054  *
1055  * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed)
1056  * 	Description
1057  * 		Compute a checksum difference, from the raw buffer pointed by
1058  * 		*from*, of length *from_size* (that must be a multiple of 4),
1059  * 		towards the raw buffer pointed by *to*, of size *to_size*
1060  * 		(same remark). An optional *seed* can be added to the value
1061  * 		(this can be cascaded, the seed may come from a previous call
1062  * 		to the helper).
1063  *
1064  * 		This is flexible enough to be used in several ways:
1065  *
1066  * 		* With *from_size* == 0, *to_size* > 0 and *seed* set to
1067  * 		  checksum, it can be used when pushing new data.
1068  * 		* With *from_size* > 0, *to_size* == 0 and *seed* set to
1069  * 		  checksum, it can be used when removing data from a packet.
1070  * 		* With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it
1071  * 		  can be used to compute a diff. Note that *from_size* and
1072  * 		  *to_size* do not need to be equal.
1073  *
1074  * 		This helper can be used in combination with
1075  * 		**bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to
1076  * 		which one can feed in the difference computed with
1077  * 		**bpf_csum_diff**\ ().
1078  * 	Return
1079  * 		The checksum result, or a negative error code in case of
1080  * 		failure.
1081  *
1082  * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
1083  * 	Description
1084  * 		Retrieve tunnel options metadata for the packet associated to
1085  * 		*skb*, and store the raw tunnel option data to the buffer *opt*
1086  * 		of *size*.
1087  *
1088  * 		This helper can be used with encapsulation devices that can
1089  * 		operate in "collect metadata" mode (please refer to the related
1090  * 		note in the description of **bpf_skb_get_tunnel_key**\ () for
1091  * 		more details). A particular example where this can be used is
1092  * 		in combination with the Geneve encapsulation protocol, where it
1093  * 		allows for pushing (with **bpf_skb_get_tunnel_opt**\ () helper)
1094  * 		and retrieving arbitrary TLVs (Type-Length-Value headers) from
1095  * 		the eBPF program. This allows for full customization of these
1096  * 		headers.
1097  * 	Return
1098  * 		The size of the option data retrieved.
1099  *
1100  * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
1101  * 	Description
1102  * 		Set tunnel options metadata for the packet associated to *skb*
1103  * 		to the option data contained in the raw buffer *opt* of *size*.
1104  *
1105  * 		See also the description of the **bpf_skb_get_tunnel_opt**\ ()
1106  * 		helper for additional information.
1107  * 	Return
1108  * 		0 on success, or a negative error in case of failure.
1109  *
1110  * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
1111  * 	Description
1112  * 		Change the protocol of the *skb* to *proto*. Currently
1113  * 		supported are transition from IPv4 to IPv6, and from IPv6 to
1114  * 		IPv4. The helper takes care of the groundwork for the
1115  * 		transition, including resizing the socket buffer. The eBPF
1116  * 		program is expected to fill the new headers, if any, via
1117  * 		**skb_store_bytes**\ () and to recompute the checksums with
1118  * 		**bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\
1119  * 		(). The main case for this helper is to perform NAT64
1120  * 		operations out of an eBPF program.
1121  *
1122  * 		Internally, the GSO type is marked as dodgy so that headers are
1123  * 		checked and segments are recalculated by the GSO/GRO engine.
1124  * 		The size for GSO target is adapted as well.
1125  *
1126  * 		All values for *flags* are reserved for future usage, and must
1127  * 		be left at zero.
1128  *
1129  * 		A call to this helper is susceptible to change the underlaying
1130  * 		packet buffer. Therefore, at load time, all checks on pointers
1131  * 		previously done by the verifier are invalidated and must be
1132  * 		performed again, if the helper is used in combination with
1133  * 		direct packet access.
1134  * 	Return
1135  * 		0 on success, or a negative error in case of failure.
1136  *
1137  * int bpf_skb_change_type(struct sk_buff *skb, u32 type)
1138  * 	Description
1139  * 		Change the packet type for the packet associated to *skb*. This
1140  * 		comes down to setting *skb*\ **->pkt_type** to *type*, except
1141  * 		the eBPF program does not have a write access to *skb*\
1142  * 		**->pkt_type** beside this helper. Using a helper here allows
1143  * 		for graceful handling of errors.
1144  *
1145  * 		The major use case is to change incoming *skb*s to
1146  * 		**PACKET_HOST** in a programmatic way instead of having to
1147  * 		recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for
1148  * 		example.
1149  *
1150  * 		Note that *type* only allows certain values. At this time, they
1151  * 		are:
1152  *
1153  * 		**PACKET_HOST**
1154  * 			Packet is for us.
1155  * 		**PACKET_BROADCAST**
1156  * 			Send packet to all.
1157  * 		**PACKET_MULTICAST**
1158  * 			Send packet to group.
1159  * 		**PACKET_OTHERHOST**
1160  * 			Send packet to someone else.
1161  * 	Return
1162  * 		0 on success, or a negative error in case of failure.
1163  *
1164  * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
1165  * 	Description
1166  * 		Check whether *skb* is a descendant of the cgroup2 held by
1167  * 		*map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
1168  * 	Return
1169  * 		The return value depends on the result of the test, and can be:
1170  *
1171  * 		* 0, if the *skb* failed the cgroup2 descendant test.
1172  * 		* 1, if the *skb* succeeded the cgroup2 descendant test.
1173  * 		* A negative error code, if an error occurred.
1174  *
1175  * u32 bpf_get_hash_recalc(struct sk_buff *skb)
1176  * 	Description
1177  * 		Retrieve the hash of the packet, *skb*\ **->hash**. If it is
1178  * 		not set, in particular if the hash was cleared due to mangling,
1179  * 		recompute this hash. Later accesses to the hash can be done
1180  * 		directly with *skb*\ **->hash**.
1181  *
1182  * 		Calling **bpf_set_hash_invalid**\ (), changing a packet
1183  * 		prototype with **bpf_skb_change_proto**\ (), or calling
1184  * 		**bpf_skb_store_bytes**\ () with the
1185  * 		**BPF_F_INVALIDATE_HASH** are actions susceptible to clear
1186  * 		the hash and to trigger a new computation for the next call to
1187  * 		**bpf_get_hash_recalc**\ ().
1188  * 	Return
1189  * 		The 32-bit hash.
1190  *
1191  * u64 bpf_get_current_task(void)
1192  * 	Return
1193  * 		A pointer to the current task struct.
1194  *
1195  * int bpf_probe_write_user(void *dst, const void *src, u32 len)
1196  * 	Description
1197  * 		Attempt in a safe way to write *len* bytes from the buffer
1198  * 		*src* to *dst* in memory. It only works for threads that are in
1199  * 		user context, and *dst* must be a valid user space address.
1200  *
1201  * 		This helper should not be used to implement any kind of
1202  * 		security mechanism because of TOC-TOU attacks, but rather to
1203  * 		debug, divert, and manipulate execution of semi-cooperative
1204  * 		processes.
1205  *
1206  * 		Keep in mind that this feature is meant for experiments, and it
1207  * 		has a risk of crashing the system and running programs.
1208  * 		Therefore, when an eBPF program using this helper is attached,
1209  * 		a warning including PID and process name is printed to kernel
1210  * 		logs.
1211  * 	Return
1212  * 		0 on success, or a negative error in case of failure.
1213  *
1214  * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
1215  * 	Description
1216  * 		Check whether the probe is being run is the context of a given
1217  * 		subset of the cgroup2 hierarchy. The cgroup2 to test is held by
1218  * 		*map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
1219  * 	Return
1220  * 		The return value depends on the result of the test, and can be:
1221  *
1222  * 		* 0, if the *skb* task belongs to the cgroup2.
1223  * 		* 1, if the *skb* task does not belong to the cgroup2.
1224  * 		* A negative error code, if an error occurred.
1225  *
1226  * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
1227  * 	Description
1228  * 		Resize (trim or grow) the packet associated to *skb* to the
1229  * 		new *len*. The *flags* are reserved for future usage, and must
1230  * 		be left at zero.
1231  *
1232  * 		The basic idea is that the helper performs the needed work to
1233  * 		change the size of the packet, then the eBPF program rewrites
1234  * 		the rest via helpers like **bpf_skb_store_bytes**\ (),
1235  * 		**bpf_l3_csum_replace**\ (), **bpf_l3_csum_replace**\ ()
1236  * 		and others. This helper is a slow path utility intended for
1237  * 		replies with control messages. And because it is targeted for
1238  * 		slow path, the helper itself can afford to be slow: it
1239  * 		implicitly linearizes, unclones and drops offloads from the
1240  * 		*skb*.
1241  *
1242  * 		A call to this helper is susceptible to change the underlaying
1243  * 		packet buffer. Therefore, at load time, all checks on pointers
1244  * 		previously done by the verifier are invalidated and must be
1245  * 		performed again, if the helper is used in combination with
1246  * 		direct packet access.
1247  * 	Return
1248  * 		0 on success, or a negative error in case of failure.
1249  *
1250  * int bpf_skb_pull_data(struct sk_buff *skb, u32 len)
1251  * 	Description
1252  * 		Pull in non-linear data in case the *skb* is non-linear and not
1253  * 		all of *len* are part of the linear section. Make *len* bytes
1254  * 		from *skb* readable and writable. If a zero value is passed for
1255  * 		*len*, then the whole length of the *skb* is pulled.
1256  *
1257  * 		This helper is only needed for reading and writing with direct
1258  * 		packet access.
1259  *
1260  * 		For direct packet access, testing that offsets to access
1261  * 		are within packet boundaries (test on *skb*\ **->data_end**) is
1262  * 		susceptible to fail if offsets are invalid, or if the requested
1263  * 		data is in non-linear parts of the *skb*. On failure the
1264  * 		program can just bail out, or in the case of a non-linear
1265  * 		buffer, use a helper to make the data available. The
1266  * 		**bpf_skb_load_bytes**\ () helper is a first solution to access
1267  * 		the data. Another one consists in using **bpf_skb_pull_data**
1268  * 		to pull in once the non-linear parts, then retesting and
1269  * 		eventually access the data.
1270  *
1271  * 		At the same time, this also makes sure the *skb* is uncloned,
1272  * 		which is a necessary condition for direct write. As this needs
1273  * 		to be an invariant for the write part only, the verifier
1274  * 		detects writes and adds a prologue that is calling
1275  * 		**bpf_skb_pull_data()** to effectively unclone the *skb* from
1276  * 		the very beginning in case it is indeed cloned.
1277  *
1278  * 		A call to this helper is susceptible to change the underlaying
1279  * 		packet buffer. Therefore, at load time, all checks on pointers
1280  * 		previously done by the verifier are invalidated and must be
1281  * 		performed again, if the helper is used in combination with
1282  * 		direct packet access.
1283  * 	Return
1284  * 		0 on success, or a negative error in case of failure.
1285  *
1286  * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum)
1287  * 	Description
1288  * 		Add the checksum *csum* into *skb*\ **->csum** in case the
1289  * 		driver has supplied a checksum for the entire packet into that
1290  * 		field. Return an error otherwise. This helper is intended to be
1291  * 		used in combination with **bpf_csum_diff**\ (), in particular
1292  * 		when the checksum needs to be updated after data has been
1293  * 		written into the packet through direct packet access.
1294  * 	Return
1295  * 		The checksum on success, or a negative error code in case of
1296  * 		failure.
1297  *
1298  * void bpf_set_hash_invalid(struct sk_buff *skb)
1299  * 	Description
1300  * 		Invalidate the current *skb*\ **->hash**. It can be used after
1301  * 		mangling on headers through direct packet access, in order to
1302  * 		indicate that the hash is outdated and to trigger a
1303  * 		recalculation the next time the kernel tries to access this
1304  * 		hash or when the **bpf_get_hash_recalc**\ () helper is called.
1305  *
1306  * int bpf_get_numa_node_id(void)
1307  * 	Description
1308  * 		Return the id of the current NUMA node. The primary use case
1309  * 		for this helper is the selection of sockets for the local NUMA
1310  * 		node, when the program is attached to sockets using the
1311  * 		**SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**),
1312  * 		but the helper is also available to other eBPF program types,
1313  * 		similarly to **bpf_get_smp_processor_id**\ ().
1314  * 	Return
1315  * 		The id of current NUMA node.
1316  *
1317  * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
1318  * 	Description
1319  * 		Grows headroom of packet associated to *skb* and adjusts the
1320  * 		offset of the MAC header accordingly, adding *len* bytes of
1321  * 		space. It automatically extends and reallocates memory as
1322  * 		required.
1323  *
1324  * 		This helper can be used on a layer 3 *skb* to push a MAC header
1325  * 		for redirection into a layer 2 device.
1326  *
1327  * 		All values for *flags* are reserved for future usage, and must
1328  * 		be left at zero.
1329  *
1330  * 		A call to this helper is susceptible to change the underlaying
1331  * 		packet buffer. Therefore, at load time, all checks on pointers
1332  * 		previously done by the verifier are invalidated and must be
1333  * 		performed again, if the helper is used in combination with
1334  * 		direct packet access.
1335  * 	Return
1336  * 		0 on success, or a negative error in case of failure.
1337  *
1338  * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
1339  * 	Description
1340  * 		Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
1341  * 		it is possible to use a negative value for *delta*. This helper
1342  * 		can be used to prepare the packet for pushing or popping
1343  * 		headers.
1344  *
1345  * 		A call to this helper is susceptible to change the underlaying
1346  * 		packet buffer. Therefore, at load time, all checks on pointers
1347  * 		previously done by the verifier are invalidated and must be
1348  * 		performed again, if the helper is used in combination with
1349  * 		direct packet access.
1350  * 	Return
1351  * 		0 on success, or a negative error in case of failure.
1352  *
1353  * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
1354  * 	Description
1355  * 		Copy a NUL terminated string from an unsafe address
1356  * 		*unsafe_ptr* to *dst*. The *size* should include the
1357  * 		terminating NUL byte. In case the string length is smaller than
1358  * 		*size*, the target is not padded with further NUL bytes. If the
1359  * 		string length is larger than *size*, just *size*-1 bytes are
1360  * 		copied and the last byte is set to NUL.
1361  *
1362  * 		On success, the length of the copied string is returned. This
1363  * 		makes this helper useful in tracing programs for reading
1364  * 		strings, and more importantly to get its length at runtime. See
1365  * 		the following snippet:
1366  *
1367  * 		::
1368  *
1369  * 			SEC("kprobe/sys_open")
1370  * 			void bpf_sys_open(struct pt_regs *ctx)
1371  * 			{
1372  * 			        char buf[PATHLEN]; // PATHLEN is defined to 256
1373  * 			        int res = bpf_probe_read_str(buf, sizeof(buf),
1374  * 				                             ctx->di);
1375  *
1376  * 				// Consume buf, for example push it to
1377  * 				// userspace via bpf_perf_event_output(); we
1378  * 				// can use res (the string length) as event
1379  * 				// size, after checking its boundaries.
1380  * 			}
1381  *
1382  * 		In comparison, using **bpf_probe_read()** helper here instead
1383  * 		to read the string would require to estimate the length at
1384  * 		compile time, and would often result in copying more memory
1385  * 		than necessary.
1386  *
1387  * 		Another useful use case is when parsing individual process
1388  * 		arguments or individual environment variables navigating
1389  * 		*current*\ **->mm->arg_start** and *current*\
1390  * 		**->mm->env_start**: using this helper and the return value,
1391  * 		one can quickly iterate at the right offset of the memory area.
1392  * 	Return
1393  * 		On success, the strictly positive length of the string,
1394  * 		including the trailing NUL character. On error, a negative
1395  * 		value.
1396  *
1397  * u64 bpf_get_socket_cookie(struct sk_buff *skb)
1398  * 	Description
1399  * 		If the **struct sk_buff** pointed by *skb* has a known socket,
1400  * 		retrieve the cookie (generated by the kernel) of this socket.
1401  * 		If no cookie has been set yet, generate a new cookie. Once
1402  * 		generated, the socket cookie remains stable for the life of the
1403  * 		socket. This helper can be useful for monitoring per socket
1404  * 		networking traffic statistics as it provides a unique socket
1405  * 		identifier per namespace.
1406  * 	Return
1407  * 		A 8-byte long non-decreasing number on success, or 0 if the
1408  * 		socket field is missing inside *skb*.
1409  *
1410  * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
1411  * 	Description
1412  * 		Equivalent to bpf_get_socket_cookie() helper that accepts
1413  * 		*skb*, but gets socket from **struct bpf_sock_addr** contex.
1414  * 	Return
1415  * 		A 8-byte long non-decreasing number.
1416  *
1417  * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
1418  * 	Description
1419  * 		Equivalent to bpf_get_socket_cookie() helper that accepts
1420  * 		*skb*, but gets socket from **struct bpf_sock_ops** contex.
1421  * 	Return
1422  * 		A 8-byte long non-decreasing number.
1423  *
1424  * u32 bpf_get_socket_uid(struct sk_buff *skb)
1425  * 	Return
1426  * 		The owner UID of the socket associated to *skb*. If the socket
1427  * 		is **NULL**, or if it is not a full socket (i.e. if it is a
1428  * 		time-wait or a request socket instead), **overflowuid** value
1429  * 		is returned (note that **overflowuid** might also be the actual
1430  * 		UID value for the socket).
1431  *
1432  * u32 bpf_set_hash(struct sk_buff *skb, u32 hash)
1433  * 	Description
1434  * 		Set the full hash for *skb* (set the field *skb*\ **->hash**)
1435  * 		to value *hash*.
1436  * 	Return
1437  * 		0
1438  *
1439  * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen)
1440  * 	Description
1441  * 		Emulate a call to **setsockopt()** on the socket associated to
1442  * 		*bpf_socket*, which must be a full socket. The *level* at
1443  * 		which the option resides and the name *optname* of the option
1444  * 		must be specified, see **setsockopt(2)** for more information.
1445  * 		The option value of length *optlen* is pointed by *optval*.
1446  *
1447  * 		This helper actually implements a subset of **setsockopt()**.
1448  * 		It supports the following *level*\ s:
1449  *
1450  * 		* **SOL_SOCKET**, which supports the following *optname*\ s:
1451  * 		  **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
1452  * 		  **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**.
1453  * 		* **IPPROTO_TCP**, which supports the following *optname*\ s:
1454  * 		  **TCP_CONGESTION**, **TCP_BPF_IW**,
1455  * 		  **TCP_BPF_SNDCWND_CLAMP**.
1456  * 		* **IPPROTO_IP**, which supports *optname* **IP_TOS**.
1457  * 		* **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
1458  * 	Return
1459  * 		0 on success, or a negative error in case of failure.
1460  *
1461  * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
1462  * 	Description
1463  * 		Grow or shrink the room for data in the packet associated to
1464  * 		*skb* by *len_diff*, and according to the selected *mode*.
1465  *
1466  * 		There is a single supported mode at this time:
1467  *
1468  * 		* **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
1469  * 		  (room space is added or removed below the layer 3 header).
1470  *
1471  * 		All values for *flags* are reserved for future usage, and must
1472  * 		be left at zero.
1473  *
1474  * 		A call to this helper is susceptible to change the underlaying
1475  * 		packet buffer. Therefore, at load time, all checks on pointers
1476  * 		previously done by the verifier are invalidated and must be
1477  * 		performed again, if the helper is used in combination with
1478  * 		direct packet access.
1479  * 	Return
1480  * 		0 on success, or a negative error in case of failure.
1481  *
1482  * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
1483  * 	Description
1484  * 		Redirect the packet to the endpoint referenced by *map* at
1485  * 		index *key*. Depending on its type, this *map* can contain
1486  * 		references to net devices (for forwarding packets through other
1487  * 		ports), or to CPUs (for redirecting XDP frames to another CPU;
1488  * 		but this is only implemented for native XDP (with driver
1489  * 		support) as of this writing).
1490  *
1491  * 		All values for *flags* are reserved for future usage, and must
1492  * 		be left at zero.
1493  *
1494  * 		When used to redirect packets to net devices, this helper
1495  * 		provides a high performance increase over **bpf_redirect**\ ().
1496  * 		This is due to various implementation details of the underlying
1497  * 		mechanisms, one of which is the fact that **bpf_redirect_map**\
1498  * 		() tries to send packet as a "bulk" to the device.
1499  * 	Return
1500  * 		**XDP_REDIRECT** on success, or **XDP_ABORTED** on error.
1501  *
1502  * int bpf_sk_redirect_map(struct bpf_map *map, u32 key, u64 flags)
1503  * 	Description
1504  * 		Redirect the packet to the socket referenced by *map* (of type
1505  * 		**BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
1506  * 		egress interfaces can be used for redirection. The
1507  * 		**BPF_F_INGRESS** value in *flags* is used to make the
1508  * 		distinction (ingress path is selected if the flag is present,
1509  * 		egress path otherwise). This is the only flag supported for now.
1510  * 	Return
1511  * 		**SK_PASS** on success, or **SK_DROP** on error.
1512  *
1513  * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
1514  * 	Description
1515  * 		Add an entry to, or update a *map* referencing sockets. The
1516  * 		*skops* is used as a new value for the entry associated to
1517  * 		*key*. *flags* is one of:
1518  *
1519  * 		**BPF_NOEXIST**
1520  * 			The entry for *key* must not exist in the map.
1521  * 		**BPF_EXIST**
1522  * 			The entry for *key* must already exist in the map.
1523  * 		**BPF_ANY**
1524  * 			No condition on the existence of the entry for *key*.
1525  *
1526  * 		If the *map* has eBPF programs (parser and verdict), those will
1527  * 		be inherited by the socket being added. If the socket is
1528  * 		already attached to eBPF programs, this results in an error.
1529  * 	Return
1530  * 		0 on success, or a negative error in case of failure.
1531  *
1532  * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
1533  * 	Description
1534  * 		Adjust the address pointed by *xdp_md*\ **->data_meta** by
1535  * 		*delta* (which can be positive or negative). Note that this
1536  * 		operation modifies the address stored in *xdp_md*\ **->data**,
1537  * 		so the latter must be loaded only after the helper has been
1538  * 		called.
1539  *
1540  * 		The use of *xdp_md*\ **->data_meta** is optional and programs
1541  * 		are not required to use it. The rationale is that when the
1542  * 		packet is processed with XDP (e.g. as DoS filter), it is
1543  * 		possible to push further meta data along with it before passing
1544  * 		to the stack, and to give the guarantee that an ingress eBPF
1545  * 		program attached as a TC classifier on the same device can pick
1546  * 		this up for further post-processing. Since TC works with socket
1547  * 		buffers, it remains possible to set from XDP the **mark** or
1548  * 		**priority** pointers, or other pointers for the socket buffer.
1549  * 		Having this scratch space generic and programmable allows for
1550  * 		more flexibility as the user is free to store whatever meta
1551  * 		data they need.
1552  *
1553  * 		A call to this helper is susceptible to change the underlaying
1554  * 		packet buffer. Therefore, at load time, all checks on pointers
1555  * 		previously done by the verifier are invalidated and must be
1556  * 		performed again, if the helper is used in combination with
1557  * 		direct packet access.
1558  * 	Return
1559  * 		0 on success, or a negative error in case of failure.
1560  *
1561  * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
1562  * 	Description
1563  * 		Read the value of a perf event counter, and store it into *buf*
1564  * 		of size *buf_size*. This helper relies on a *map* of type
1565  * 		**BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event
1566  * 		counter is selected when *map* is updated with perf event file
1567  * 		descriptors. The *map* is an array whose size is the number of
1568  * 		available CPUs, and each cell contains a value relative to one
1569  * 		CPU. The value to retrieve is indicated by *flags*, that
1570  * 		contains the index of the CPU to look up, masked with
1571  * 		**BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
1572  * 		**BPF_F_CURRENT_CPU** to indicate that the value for the
1573  * 		current CPU should be retrieved.
1574  *
1575  * 		This helper behaves in a way close to
1576  * 		**bpf_perf_event_read**\ () helper, save that instead of
1577  * 		just returning the value observed, it fills the *buf*
1578  * 		structure. This allows for additional data to be retrieved: in
1579  * 		particular, the enabled and running times (in *buf*\
1580  * 		**->enabled** and *buf*\ **->running**, respectively) are
1581  * 		copied. In general, **bpf_perf_event_read_value**\ () is
1582  * 		recommended over **bpf_perf_event_read**\ (), which has some
1583  * 		ABI issues and provides fewer functionalities.
1584  *
1585  * 		These values are interesting, because hardware PMU (Performance
1586  * 		Monitoring Unit) counters are limited resources. When there are
1587  * 		more PMU based perf events opened than available counters,
1588  * 		kernel will multiplex these events so each event gets certain
1589  * 		percentage (but not all) of the PMU time. In case that
1590  * 		multiplexing happens, the number of samples or counter value
1591  * 		will not reflect the case compared to when no multiplexing
1592  * 		occurs. This makes comparison between different runs difficult.
1593  * 		Typically, the counter value should be normalized before
1594  * 		comparing to other experiments. The usual normalization is done
1595  * 		as follows.
1596  *
1597  * 		::
1598  *
1599  * 			normalized_counter = counter * t_enabled / t_running
1600  *
1601  * 		Where t_enabled is the time enabled for event and t_running is
1602  * 		the time running for event since last normalization. The
1603  * 		enabled and running times are accumulated since the perf event
1604  * 		open. To achieve scaling factor between two invocations of an
1605  * 		eBPF program, users can can use CPU id as the key (which is
1606  * 		typical for perf array usage model) to remember the previous
1607  * 		value and do the calculation inside the eBPF program.
1608  * 	Return
1609  * 		0 on success, or a negative error in case of failure.
1610  *
1611  * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
1612  * 	Description
1613  * 		For en eBPF program attached to a perf event, retrieve the
1614  * 		value of the event counter associated to *ctx* and store it in
1615  * 		the structure pointed by *buf* and of size *buf_size*. Enabled
1616  * 		and running times are also stored in the structure (see
1617  * 		description of helper **bpf_perf_event_read_value**\ () for
1618  * 		more details).
1619  * 	Return
1620  * 		0 on success, or a negative error in case of failure.
1621  *
1622  * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen)
1623  * 	Description
1624  * 		Emulate a call to **getsockopt()** on the socket associated to
1625  * 		*bpf_socket*, which must be a full socket. The *level* at
1626  * 		which the option resides and the name *optname* of the option
1627  * 		must be specified, see **getsockopt(2)** for more information.
1628  * 		The retrieved value is stored in the structure pointed by
1629  * 		*opval* and of length *optlen*.
1630  *
1631  * 		This helper actually implements a subset of **getsockopt()**.
1632  * 		It supports the following *level*\ s:
1633  *
1634  * 		* **IPPROTO_TCP**, which supports *optname*
1635  * 		  **TCP_CONGESTION**.
1636  * 		* **IPPROTO_IP**, which supports *optname* **IP_TOS**.
1637  * 		* **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
1638  * 	Return
1639  * 		0 on success, or a negative error in case of failure.
1640  *
1641  * int bpf_override_return(struct pt_reg *regs, u64 rc)
1642  * 	Description
1643  * 		Used for error injection, this helper uses kprobes to override
1644  * 		the return value of the probed function, and to set it to *rc*.
1645  * 		The first argument is the context *regs* on which the kprobe
1646  * 		works.
1647  *
1648  * 		This helper works by setting setting the PC (program counter)
1649  * 		to an override function which is run in place of the original
1650  * 		probed function. This means the probed function is not run at
1651  * 		all. The replacement function just returns with the required
1652  * 		value.
1653  *
1654  * 		This helper has security implications, and thus is subject to
1655  * 		restrictions. It is only available if the kernel was compiled
1656  * 		with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
1657  * 		option, and in this case it only works on functions tagged with
1658  * 		**ALLOW_ERROR_INJECTION** in the kernel code.
1659  *
1660  * 		Also, the helper is only available for the architectures having
1661  * 		the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing,
1662  * 		x86 architecture is the only one to support this feature.
1663  * 	Return
1664  * 		0
1665  *
1666  * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
1667  * 	Description
1668  * 		Attempt to set the value of the **bpf_sock_ops_cb_flags** field
1669  * 		for the full TCP socket associated to *bpf_sock_ops* to
1670  * 		*argval*.
1671  *
1672  * 		The primary use of this field is to determine if there should
1673  * 		be calls to eBPF programs of type
1674  * 		**BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP
1675  * 		code. A program of the same type can change its value, per
1676  * 		connection and as necessary, when the connection is
1677  * 		established. This field is directly accessible for reading, but
1678  * 		this helper must be used for updates in order to return an
1679  * 		error if an eBPF program tries to set a callback that is not
1680  * 		supported in the current kernel.
1681  *
1682  * 		The supported callback values that *argval* can combine are:
1683  *
1684  * 		* **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
1685  * 		* **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
1686  * 		* **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
1687  *
1688  * 		Here are some examples of where one could call such eBPF
1689  * 		program:
1690  *
1691  * 		* When RTO fires.
1692  * 		* When a packet is retransmitted.
1693  * 		* When the connection terminates.
1694  * 		* When a packet is sent.
1695  * 		* When a packet is received.
1696  * 	Return
1697  * 		Code **-EINVAL** if the socket is not a full TCP socket;
1698  * 		otherwise, a positive number containing the bits that could not
1699  * 		be set is returned (which comes down to 0 if all bits were set
1700  * 		as required).
1701  *
1702  * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
1703  * 	Description
1704  * 		This helper is used in programs implementing policies at the
1705  * 		socket level. If the message *msg* is allowed to pass (i.e. if
1706  * 		the verdict eBPF program returns **SK_PASS**), redirect it to
1707  * 		the socket referenced by *map* (of type
1708  * 		**BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
1709  * 		egress interfaces can be used for redirection. The
1710  * 		**BPF_F_INGRESS** value in *flags* is used to make the
1711  * 		distinction (ingress path is selected if the flag is present,
1712  * 		egress path otherwise). This is the only flag supported for now.
1713  * 	Return
1714  * 		**SK_PASS** on success, or **SK_DROP** on error.
1715  *
1716  * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
1717  * 	Description
1718  * 		For socket policies, apply the verdict of the eBPF program to
1719  * 		the next *bytes* (number of bytes) of message *msg*.
1720  *
1721  * 		For example, this helper can be used in the following cases:
1722  *
1723  * 		* A single **sendmsg**\ () or **sendfile**\ () system call
1724  * 		  contains multiple logical messages that the eBPF program is
1725  * 		  supposed to read and for which it should apply a verdict.
1726  * 		* An eBPF program only cares to read the first *bytes* of a
1727  * 		  *msg*. If the message has a large payload, then setting up
1728  * 		  and calling the eBPF program repeatedly for all bytes, even
1729  * 		  though the verdict is already known, would create unnecessary
1730  * 		  overhead.
1731  *
1732  * 		When called from within an eBPF program, the helper sets a
1733  * 		counter internal to the BPF infrastructure, that is used to
1734  * 		apply the last verdict to the next *bytes*. If *bytes* is
1735  * 		smaller than the current data being processed from a
1736  * 		**sendmsg**\ () or **sendfile**\ () system call, the first
1737  * 		*bytes* will be sent and the eBPF program will be re-run with
1738  * 		the pointer for start of data pointing to byte number *bytes*
1739  * 		**+ 1**. If *bytes* is larger than the current data being
1740  * 		processed, then the eBPF verdict will be applied to multiple
1741  * 		**sendmsg**\ () or **sendfile**\ () calls until *bytes* are
1742  * 		consumed.
1743  *
1744  * 		Note that if a socket closes with the internal counter holding
1745  * 		a non-zero value, this is not a problem because data is not
1746  * 		being buffered for *bytes* and is sent as it is received.
1747  * 	Return
1748  * 		0
1749  *
1750  * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
1751  * 	Description
1752  * 		For socket policies, prevent the execution of the verdict eBPF
1753  * 		program for message *msg* until *bytes* (byte number) have been
1754  * 		accumulated.
1755  *
1756  * 		This can be used when one needs a specific number of bytes
1757  * 		before a verdict can be assigned, even if the data spans
1758  * 		multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme
1759  * 		case would be a user calling **sendmsg**\ () repeatedly with
1760  * 		1-byte long message segments. Obviously, this is bad for
1761  * 		performance, but it is still valid. If the eBPF program needs
1762  * 		*bytes* bytes to validate a header, this helper can be used to
1763  * 		prevent the eBPF program to be called again until *bytes* have
1764  * 		been accumulated.
1765  * 	Return
1766  * 		0
1767  *
1768  * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
1769  * 	Description
1770  * 		For socket policies, pull in non-linear data from user space
1771  * 		for *msg* and set pointers *msg*\ **->data** and *msg*\
1772  * 		**->data_end** to *start* and *end* bytes offsets into *msg*,
1773  * 		respectively.
1774  *
1775  * 		If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
1776  * 		*msg* it can only parse data that the (**data**, **data_end**)
1777  * 		pointers have already consumed. For **sendmsg**\ () hooks this
1778  * 		is likely the first scatterlist element. But for calls relying
1779  * 		on the **sendpage** handler (e.g. **sendfile**\ ()) this will
1780  * 		be the range (**0**, **0**) because the data is shared with
1781  * 		user space and by default the objective is to avoid allowing
1782  * 		user space to modify data while (or after) eBPF verdict is
1783  * 		being decided. This helper can be used to pull in data and to
1784  * 		set the start and end pointer to given values. Data will be
1785  * 		copied if necessary (i.e. if data was not linear and if start
1786  * 		and end pointers do not point to the same chunk).
1787  *
1788  * 		A call to this helper is susceptible to change the underlaying
1789  * 		packet buffer. Therefore, at load time, all checks on pointers
1790  * 		previously done by the verifier are invalidated and must be
1791  * 		performed again, if the helper is used in combination with
1792  * 		direct packet access.
1793  *
1794  * 		All values for *flags* are reserved for future usage, and must
1795  * 		be left at zero.
1796  * 	Return
1797  * 		0 on success, or a negative error in case of failure.
1798  *
1799  * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
1800  * 	Description
1801  * 		Bind the socket associated to *ctx* to the address pointed by
1802  * 		*addr*, of length *addr_len*. This allows for making outgoing
1803  * 		connection from the desired IP address, which can be useful for
1804  * 		example when all processes inside a cgroup should use one
1805  * 		single IP address on a host that has multiple IP configured.
1806  *
1807  * 		This helper works for IPv4 and IPv6, TCP and UDP sockets. The
1808  * 		domain (*addr*\ **->sa_family**) must be **AF_INET** (or
1809  * 		**AF_INET6**). Looking for a free port to bind to can be
1810  * 		expensive, therefore binding to port is not permitted by the
1811  * 		helper: *addr*\ **->sin_port** (or **sin6_port**, respectively)
1812  * 		must be set to zero.
1813  * 	Return
1814  * 		0 on success, or a negative error in case of failure.
1815  *
1816  * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
1817  * 	Description
1818  * 		Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
1819  * 		only possible to shrink the packet as of this writing,
1820  * 		therefore *delta* must be a negative integer.
1821  *
1822  * 		A call to this helper is susceptible to change the underlaying
1823  * 		packet buffer. Therefore, at load time, all checks on pointers
1824  * 		previously done by the verifier are invalidated and must be
1825  * 		performed again, if the helper is used in combination with
1826  * 		direct packet access.
1827  * 	Return
1828  * 		0 on success, or a negative error in case of failure.
1829  *
1830  * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
1831  * 	Description
1832  * 		Retrieve the XFRM state (IP transform framework, see also
1833  * 		**ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*.
1834  *
1835  * 		The retrieved value is stored in the **struct bpf_xfrm_state**
1836  * 		pointed by *xfrm_state* and of length *size*.
1837  *
1838  * 		All values for *flags* are reserved for future usage, and must
1839  * 		be left at zero.
1840  *
1841  * 		This helper is available only if the kernel was compiled with
1842  * 		**CONFIG_XFRM** configuration option.
1843  * 	Return
1844  * 		0 on success, or a negative error in case of failure.
1845  *
1846  * int bpf_get_stack(struct pt_regs *regs, void *buf, u32 size, u64 flags)
1847  * 	Description
1848  * 		Return a user or a kernel stack in bpf program provided buffer.
1849  * 		To achieve this, the helper needs *ctx*, which is a pointer
1850  * 		to the context on which the tracing program is executed.
1851  * 		To store the stacktrace, the bpf program provides *buf* with
1852  * 		a nonnegative *size*.
1853  *
1854  * 		The last argument, *flags*, holds the number of stack frames to
1855  * 		skip (from 0 to 255), masked with
1856  * 		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
1857  * 		the following flags:
1858  *
1859  * 		**BPF_F_USER_STACK**
1860  * 			Collect a user space stack instead of a kernel stack.
1861  * 		**BPF_F_USER_BUILD_ID**
1862  * 			Collect buildid+offset instead of ips for user stack,
1863  * 			only valid if **BPF_F_USER_STACK** is also specified.
1864  *
1865  * 		**bpf_get_stack**\ () can collect up to
1866  * 		**PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
1867  * 		to sufficient large buffer size. Note that
1868  * 		this limit can be controlled with the **sysctl** program, and
1869  * 		that it should be manually increased in order to profile long
1870  * 		user stacks (such as stacks for Java programs). To do so, use:
1871  *
1872  * 		::
1873  *
1874  * 			# sysctl kernel.perf_event_max_stack=<new value>
1875  * 	Return
1876  * 		A non-negative value equal to or less than *size* on success,
1877  * 		or a negative error in case of failure.
1878  *
1879  * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
1880  * 	Description
1881  * 		This helper is similar to **bpf_skb_load_bytes**\ () in that
1882  * 		it provides an easy way to load *len* bytes from *offset*
1883  * 		from the packet associated to *skb*, into the buffer pointed
1884  * 		by *to*. The difference to **bpf_skb_load_bytes**\ () is that
1885  * 		a fifth argument *start_header* exists in order to select a
1886  * 		base offset to start from. *start_header* can be one of:
1887  *
1888  * 		**BPF_HDR_START_MAC**
1889  * 			Base offset to load data from is *skb*'s mac header.
1890  * 		**BPF_HDR_START_NET**
1891  * 			Base offset to load data from is *skb*'s network header.
1892  *
1893  * 		In general, "direct packet access" is the preferred method to
1894  * 		access packet data, however, this helper is in particular useful
1895  * 		in socket filters where *skb*\ **->data** does not always point
1896  * 		to the start of the mac header and where "direct packet access"
1897  * 		is not available.
1898  * 	Return
1899  * 		0 on success, or a negative error in case of failure.
1900  *
1901  * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
1902  *	Description
1903  *		Do FIB lookup in kernel tables using parameters in *params*.
1904  *		If lookup is successful and result shows packet is to be
1905  *		forwarded, the neighbor tables are searched for the nexthop.
1906  *		If successful (ie., FIB lookup shows forwarding and nexthop
1907  *		is resolved), the nexthop address is returned in ipv4_dst
1908  *		or ipv6_dst based on family, smac is set to mac address of
1909  *		egress device, dmac is set to nexthop mac address, rt_metric
1910  *		is set to metric from route (IPv4/IPv6 only), and ifindex
1911  *		is set to the device index of the nexthop from the FIB lookup.
1912  *
1913  *             *plen* argument is the size of the passed in struct.
1914  *             *flags* argument can be a combination of one or more of the
1915  *             following values:
1916  *
1917  *		**BPF_FIB_LOOKUP_DIRECT**
1918  *			Do a direct table lookup vs full lookup using FIB
1919  *			rules.
1920  *		**BPF_FIB_LOOKUP_OUTPUT**
1921  *			Perform lookup from an egress perspective (default is
1922  *			ingress).
1923  *
1924  *             *ctx* is either **struct xdp_md** for XDP programs or
1925  *             **struct sk_buff** tc cls_act programs.
1926  *     Return
1927  *		* < 0 if any input argument is invalid
1928  *		*   0 on success (packet is forwarded, nexthop neighbor exists)
1929  *		* > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
1930  *		  packet is not forwarded or needs assist from full stack
1931  *
1932  * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
1933  *	Description
1934  *		Add an entry to, or update a sockhash *map* referencing sockets.
1935  *		The *skops* is used as a new value for the entry associated to
1936  *		*key*. *flags* is one of:
1937  *
1938  *		**BPF_NOEXIST**
1939  *			The entry for *key* must not exist in the map.
1940  *		**BPF_EXIST**
1941  *			The entry for *key* must already exist in the map.
1942  *		**BPF_ANY**
1943  *			No condition on the existence of the entry for *key*.
1944  *
1945  *		If the *map* has eBPF programs (parser and verdict), those will
1946  *		be inherited by the socket being added. If the socket is
1947  *		already attached to eBPF programs, this results in an error.
1948  *	Return
1949  *		0 on success, or a negative error in case of failure.
1950  *
1951  * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
1952  *	Description
1953  *		This helper is used in programs implementing policies at the
1954  *		socket level. If the message *msg* is allowed to pass (i.e. if
1955  *		the verdict eBPF program returns **SK_PASS**), redirect it to
1956  *		the socket referenced by *map* (of type
1957  *		**BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
1958  *		egress interfaces can be used for redirection. The
1959  *		**BPF_F_INGRESS** value in *flags* is used to make the
1960  *		distinction (ingress path is selected if the flag is present,
1961  *		egress path otherwise). This is the only flag supported for now.
1962  *	Return
1963  *		**SK_PASS** on success, or **SK_DROP** on error.
1964  *
1965  * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
1966  *	Description
1967  *		This helper is used in programs implementing policies at the
1968  *		skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
1969  *		if the verdeict eBPF program returns **SK_PASS**), redirect it
1970  *		to the socket referenced by *map* (of type
1971  *		**BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
1972  *		egress interfaces can be used for redirection. The
1973  *		**BPF_F_INGRESS** value in *flags* is used to make the
1974  *		distinction (ingress path is selected if the flag is present,
1975  *		egress otherwise). This is the only flag supported for now.
1976  *	Return
1977  *		**SK_PASS** on success, or **SK_DROP** on error.
1978  *
1979  * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
1980  *	Description
1981  *		Encapsulate the packet associated to *skb* within a Layer 3
1982  *		protocol header. This header is provided in the buffer at
1983  *		address *hdr*, with *len* its size in bytes. *type* indicates
1984  *		the protocol of the header and can be one of:
1985  *
1986  *		**BPF_LWT_ENCAP_SEG6**
1987  *			IPv6 encapsulation with Segment Routing Header
1988  *			(**struct ipv6_sr_hdr**). *hdr* only contains the SRH,
1989  *			the IPv6 header is computed by the kernel.
1990  *		**BPF_LWT_ENCAP_SEG6_INLINE**
1991  *			Only works if *skb* contains an IPv6 packet. Insert a
1992  *			Segment Routing Header (**struct ipv6_sr_hdr**) inside
1993  *			the IPv6 header.
1994  *
1995  * 		A call to this helper is susceptible to change the underlaying
1996  * 		packet buffer. Therefore, at load time, all checks on pointers
1997  * 		previously done by the verifier are invalidated and must be
1998  * 		performed again, if the helper is used in combination with
1999  * 		direct packet access.
2000  *	Return
2001  * 		0 on success, or a negative error in case of failure.
2002  *
2003  * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
2004  *	Description
2005  *		Store *len* bytes from address *from* into the packet
2006  *		associated to *skb*, at *offset*. Only the flags, tag and TLVs
2007  *		inside the outermost IPv6 Segment Routing Header can be
2008  *		modified through this helper.
2009  *
2010  * 		A call to this helper is susceptible to change the underlaying
2011  * 		packet buffer. Therefore, at load time, all checks on pointers
2012  * 		previously done by the verifier are invalidated and must be
2013  * 		performed again, if the helper is used in combination with
2014  * 		direct packet access.
2015  *	Return
2016  * 		0 on success, or a negative error in case of failure.
2017  *
2018  * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
2019  *	Description
2020  *		Adjust the size allocated to TLVs in the outermost IPv6
2021  *		Segment Routing Header contained in the packet associated to
2022  *		*skb*, at position *offset* by *delta* bytes. Only offsets
2023  *		after the segments are accepted. *delta* can be as well
2024  *		positive (growing) as negative (shrinking).
2025  *
2026  * 		A call to this helper is susceptible to change the underlaying
2027  * 		packet buffer. Therefore, at load time, all checks on pointers
2028  * 		previously done by the verifier are invalidated and must be
2029  * 		performed again, if the helper is used in combination with
2030  * 		direct packet access.
2031  *	Return
2032  * 		0 on success, or a negative error in case of failure.
2033  *
2034  * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
2035  *	Description
2036  *		Apply an IPv6 Segment Routing action of type *action* to the
2037  *		packet associated to *skb*. Each action takes a parameter
2038  *		contained at address *param*, and of length *param_len* bytes.
2039  *		*action* can be one of:
2040  *
2041  *		**SEG6_LOCAL_ACTION_END_X**
2042  *			End.X action: Endpoint with Layer-3 cross-connect.
2043  *			Type of *param*: **struct in6_addr**.
2044  *		**SEG6_LOCAL_ACTION_END_T**
2045  *			End.T action: Endpoint with specific IPv6 table lookup.
2046  *			Type of *param*: **int**.
2047  *		**SEG6_LOCAL_ACTION_END_B6**
2048  *			End.B6 action: Endpoint bound to an SRv6 policy.
2049  *			Type of param: **struct ipv6_sr_hdr**.
2050  *		**SEG6_LOCAL_ACTION_END_B6_ENCAP**
2051  *			End.B6.Encap action: Endpoint bound to an SRv6
2052  *			encapsulation policy.
2053  *			Type of param: **struct ipv6_sr_hdr**.
2054  *
2055  * 		A call to this helper is susceptible to change the underlaying
2056  * 		packet buffer. Therefore, at load time, all checks on pointers
2057  * 		previously done by the verifier are invalidated and must be
2058  * 		performed again, if the helper is used in combination with
2059  * 		direct packet access.
2060  *	Return
2061  * 		0 on success, or a negative error in case of failure.
2062  *
2063  * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
2064  *	Description
2065  *		This helper is used in programs implementing IR decoding, to
2066  *		report a successfully decoded key press with *scancode*,
2067  *		*toggle* value in the given *protocol*. The scancode will be
2068  *		translated to a keycode using the rc keymap, and reported as
2069  *		an input key down event. After a period a key up event is
2070  *		generated. This period can be extended by calling either
2071  *		**bpf_rc_keydown** () again with the same values, or calling
2072  *		**bpf_rc_repeat** ().
2073  *
2074  *		Some protocols include a toggle bit, in case the button	was
2075  *		released and pressed again between consecutive scancodes.
2076  *
2077  *		The *ctx* should point to the lirc sample as passed into
2078  *		the program.
2079  *
2080  *		The *protocol* is the decoded protocol number (see
2081  *		**enum rc_proto** for some predefined values).
2082  *
2083  *		This helper is only available is the kernel was compiled with
2084  *		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
2085  *		"**y**".
2086  *	Return
2087  *		0
2088  *
2089  * int bpf_rc_repeat(void *ctx)
2090  *	Description
2091  *		This helper is used in programs implementing IR decoding, to
2092  *		report a successfully decoded repeat key message. This delays
2093  *		the generation of a key up event for previously generated
2094  *		key down event.
2095  *
2096  *		Some IR protocols like NEC have a special IR message for
2097  *		repeating last button, for when a button is held down.
2098  *
2099  *		The *ctx* should point to the lirc sample as passed into
2100  *		the program.
2101  *
2102  *		This helper is only available is the kernel was compiled with
2103  *		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
2104  *		"**y**".
2105  *	Return
2106  *		0
2107  *
2108  * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb)
2109  * 	Description
2110  * 		Return the cgroup v2 id of the socket associated with the *skb*.
2111  * 		This is roughly similar to the **bpf_get_cgroup_classid**\ ()
2112  * 		helper for cgroup v1 by providing a tag resp. identifier that
2113  * 		can be matched on or used for map lookups e.g. to implement
2114  * 		policy. The cgroup v2 id of a given path in the hierarchy is
2115  * 		exposed in user space through the f_handle API in order to get
2116  * 		to the same 64-bit id.
2117  *
2118  * 		This helper can be used on TC egress path, but not on ingress,
2119  * 		and is available only if the kernel was compiled with the
2120  * 		**CONFIG_SOCK_CGROUP_DATA** configuration option.
2121  * 	Return
2122  * 		The id is returned or 0 in case the id could not be retrieved.
2123  *
2124  * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
2125  *	Description
2126  *		Return id of cgroup v2 that is ancestor of cgroup associated
2127  *		with the *skb* at the *ancestor_level*.  The root cgroup is at
2128  *		*ancestor_level* zero and each step down the hierarchy
2129  *		increments the level. If *ancestor_level* == level of cgroup
2130  *		associated with *skb*, then return value will be same as that
2131  *		of **bpf_skb_cgroup_id**\ ().
2132  *
2133  *		The helper is useful to implement policies based on cgroups
2134  *		that are upper in hierarchy than immediate cgroup associated
2135  *		with *skb*.
2136  *
2137  *		The format of returned id and helper limitations are same as in
2138  *		**bpf_skb_cgroup_id**\ ().
2139  *	Return
2140  *		The id is returned or 0 in case the id could not be retrieved.
2141  *
2142  * u64 bpf_get_current_cgroup_id(void)
2143  * 	Return
2144  * 		A 64-bit integer containing the current cgroup id based
2145  * 		on the cgroup within which the current task is running.
2146  *
2147  * void* get_local_storage(void *map, u64 flags)
2148  *	Description
2149  *		Get the pointer to the local storage area.
2150  *		The type and the size of the local storage is defined
2151  *		by the *map* argument.
2152  *		The *flags* meaning is specific for each map type,
2153  *		and has to be 0 for cgroup local storage.
2154  *
2155  *		Depending on the bpf program type, a local storage area
2156  *		can be shared between multiple instances of the bpf program,
2157  *		running simultaneously.
2158  *
2159  *		A user should care about the synchronization by himself.
2160  *		For example, by using the BPF_STX_XADD instruction to alter
2161  *		the shared data.
2162  *	Return
2163  *		Pointer to the local storage area.
2164  *
2165  * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
2166  *	Description
2167  *		Select a SO_REUSEPORT sk from a	BPF_MAP_TYPE_REUSEPORT_ARRAY map
2168  *		It checks the selected sk is matching the incoming
2169  *		request in the skb.
2170  *	Return
2171  *		0 on success, or a negative error in case of failure.
2172  *
2173  * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
2174  *	Description
2175  *		Look for TCP socket matching *tuple*, optionally in a child
2176  *		network namespace *netns*. The return value must be checked,
2177  *		and if non-NULL, released via **bpf_sk_release**\ ().
2178  *
2179  *		The *ctx* should point to the context of the program, such as
2180  *		the skb or socket (depending on the hook in use). This is used
2181  *		to determine the base network namespace for the lookup.
2182  *
2183  *		*tuple_size* must be one of:
2184  *
2185  *		**sizeof**\ (*tuple*\ **->ipv4**)
2186  *			Look for an IPv4 socket.
2187  *		**sizeof**\ (*tuple*\ **->ipv6**)
2188  *			Look for an IPv6 socket.
2189  *
2190  *		If the *netns* is zero, then the socket lookup table in the
2191  *		netns associated with the *ctx* will be used. For the TC hooks,
2192  *		this in the netns of the device in the skb. For socket hooks,
2193  *		this in the netns of the socket. If *netns* is non-zero, then
2194  *		it specifies the ID of the netns relative to the netns
2195  *		associated with the *ctx*.
2196  *
2197  *		All values for *flags* are reserved for future usage, and must
2198  *		be left at zero.
2199  *
2200  *		This helper is available only if the kernel was compiled with
2201  *		**CONFIG_NET** configuration option.
2202  *	Return
2203  *		Pointer to *struct bpf_sock*, or NULL in case of failure.
2204  *
2205  * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
2206  *	Description
2207  *		Look for UDP socket matching *tuple*, optionally in a child
2208  *		network namespace *netns*. The return value must be checked,
2209  *		and if non-NULL, released via **bpf_sk_release**\ ().
2210  *
2211  *		The *ctx* should point to the context of the program, such as
2212  *		the skb or socket (depending on the hook in use). This is used
2213  *		to determine the base network namespace for the lookup.
2214  *
2215  *		*tuple_size* must be one of:
2216  *
2217  *		**sizeof**\ (*tuple*\ **->ipv4**)
2218  *			Look for an IPv4 socket.
2219  *		**sizeof**\ (*tuple*\ **->ipv6**)
2220  *			Look for an IPv6 socket.
2221  *
2222  *		If the *netns* is zero, then the socket lookup table in the
2223  *		netns associated with the *ctx* will be used. For the TC hooks,
2224  *		this in the netns of the device in the skb. For socket hooks,
2225  *		this in the netns of the socket. If *netns* is non-zero, then
2226  *		it specifies the ID of the netns relative to the netns
2227  *		associated with the *ctx*.
2228  *
2229  *		All values for *flags* are reserved for future usage, and must
2230  *		be left at zero.
2231  *
2232  *		This helper is available only if the kernel was compiled with
2233  *		**CONFIG_NET** configuration option.
2234  *	Return
2235  *		Pointer to *struct bpf_sock*, or NULL in case of failure.
2236  *
2237  * int bpf_sk_release(struct bpf_sock *sk)
2238  *	Description
2239  *		Release the reference held by *sock*. *sock* must be a non-NULL
2240  *		pointer that was returned from bpf_sk_lookup_xxx\ ().
2241  *	Return
2242  *		0 on success, or a negative error in case of failure.
2243  *
2244  * int bpf_msg_push_data(struct sk_buff *skb, u32 start, u32 len, u64 flags)
2245  *	Description
2246  *		For socket policies, insert *len* bytes into msg at offset
2247  *		*start*.
2248  *
2249  *		If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
2250  *		*msg* it may want to insert metadata or options into the msg.
2251  *		This can later be read and used by any of the lower layer BPF
2252  *		hooks.
2253  *
2254  *		This helper may fail if under memory pressure (a malloc
2255  *		fails) in these cases BPF programs will get an appropriate
2256  *		error and BPF programs will need to handle them.
2257  *
2258  *	Return
2259  *		0 on success, or a negative error in case of failure.
2260  */
2261 #define __BPF_FUNC_MAPPER(FN)		\
2262 	FN(unspec),			\
2263 	FN(map_lookup_elem),		\
2264 	FN(map_update_elem),		\
2265 	FN(map_delete_elem),		\
2266 	FN(probe_read),			\
2267 	FN(ktime_get_ns),		\
2268 	FN(trace_printk),		\
2269 	FN(get_prandom_u32),		\
2270 	FN(get_smp_processor_id),	\
2271 	FN(skb_store_bytes),		\
2272 	FN(l3_csum_replace),		\
2273 	FN(l4_csum_replace),		\
2274 	FN(tail_call),			\
2275 	FN(clone_redirect),		\
2276 	FN(get_current_pid_tgid),	\
2277 	FN(get_current_uid_gid),	\
2278 	FN(get_current_comm),		\
2279 	FN(get_cgroup_classid),		\
2280 	FN(skb_vlan_push),		\
2281 	FN(skb_vlan_pop),		\
2282 	FN(skb_get_tunnel_key),		\
2283 	FN(skb_set_tunnel_key),		\
2284 	FN(perf_event_read),		\
2285 	FN(redirect),			\
2286 	FN(get_route_realm),		\
2287 	FN(perf_event_output),		\
2288 	FN(skb_load_bytes),		\
2289 	FN(get_stackid),		\
2290 	FN(csum_diff),			\
2291 	FN(skb_get_tunnel_opt),		\
2292 	FN(skb_set_tunnel_opt),		\
2293 	FN(skb_change_proto),		\
2294 	FN(skb_change_type),		\
2295 	FN(skb_under_cgroup),		\
2296 	FN(get_hash_recalc),		\
2297 	FN(get_current_task),		\
2298 	FN(probe_write_user),		\
2299 	FN(current_task_under_cgroup),	\
2300 	FN(skb_change_tail),		\
2301 	FN(skb_pull_data),		\
2302 	FN(csum_update),		\
2303 	FN(set_hash_invalid),		\
2304 	FN(get_numa_node_id),		\
2305 	FN(skb_change_head),		\
2306 	FN(xdp_adjust_head),		\
2307 	FN(probe_read_str),		\
2308 	FN(get_socket_cookie),		\
2309 	FN(get_socket_uid),		\
2310 	FN(set_hash),			\
2311 	FN(setsockopt),			\
2312 	FN(skb_adjust_room),		\
2313 	FN(redirect_map),		\
2314 	FN(sk_redirect_map),		\
2315 	FN(sock_map_update),		\
2316 	FN(xdp_adjust_meta),		\
2317 	FN(perf_event_read_value),	\
2318 	FN(perf_prog_read_value),	\
2319 	FN(getsockopt),			\
2320 	FN(override_return),		\
2321 	FN(sock_ops_cb_flags_set),	\
2322 	FN(msg_redirect_map),		\
2323 	FN(msg_apply_bytes),		\
2324 	FN(msg_cork_bytes),		\
2325 	FN(msg_pull_data),		\
2326 	FN(bind),			\
2327 	FN(xdp_adjust_tail),		\
2328 	FN(skb_get_xfrm_state),		\
2329 	FN(get_stack),			\
2330 	FN(skb_load_bytes_relative),	\
2331 	FN(fib_lookup),			\
2332 	FN(sock_hash_update),		\
2333 	FN(msg_redirect_hash),		\
2334 	FN(sk_redirect_hash),		\
2335 	FN(lwt_push_encap),		\
2336 	FN(lwt_seg6_store_bytes),	\
2337 	FN(lwt_seg6_adjust_srh),	\
2338 	FN(lwt_seg6_action),		\
2339 	FN(rc_repeat),			\
2340 	FN(rc_keydown),			\
2341 	FN(skb_cgroup_id),		\
2342 	FN(get_current_cgroup_id),	\
2343 	FN(get_local_storage),		\
2344 	FN(sk_select_reuseport),	\
2345 	FN(skb_ancestor_cgroup_id),	\
2346 	FN(sk_lookup_tcp),		\
2347 	FN(sk_lookup_udp),		\
2348 	FN(sk_release),			\
2349 	FN(map_push_elem),		\
2350 	FN(map_pop_elem),		\
2351 	FN(map_peek_elem),		\
2352 	FN(msg_push_data),
2353 
2354 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
2355  * function eBPF program intends to call
2356  */
2357 #define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
2358 enum bpf_func_id {
2359 	__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
2360 	__BPF_FUNC_MAX_ID,
2361 };
2362 #undef __BPF_ENUM_FN
2363 
2364 /* All flags used by eBPF helper functions, placed here. */
2365 
2366 /* BPF_FUNC_skb_store_bytes flags. */
2367 #define BPF_F_RECOMPUTE_CSUM		(1ULL << 0)
2368 #define BPF_F_INVALIDATE_HASH		(1ULL << 1)
2369 
2370 /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
2371  * First 4 bits are for passing the header field size.
2372  */
2373 #define BPF_F_HDR_FIELD_MASK		0xfULL
2374 
2375 /* BPF_FUNC_l4_csum_replace flags. */
2376 #define BPF_F_PSEUDO_HDR		(1ULL << 4)
2377 #define BPF_F_MARK_MANGLED_0		(1ULL << 5)
2378 #define BPF_F_MARK_ENFORCE		(1ULL << 6)
2379 
2380 /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
2381 #define BPF_F_INGRESS			(1ULL << 0)
2382 
2383 /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
2384 #define BPF_F_TUNINFO_IPV6		(1ULL << 0)
2385 
2386 /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */
2387 #define BPF_F_SKIP_FIELD_MASK		0xffULL
2388 #define BPF_F_USER_STACK		(1ULL << 8)
2389 /* flags used by BPF_FUNC_get_stackid only. */
2390 #define BPF_F_FAST_STACK_CMP		(1ULL << 9)
2391 #define BPF_F_REUSE_STACKID		(1ULL << 10)
2392 /* flags used by BPF_FUNC_get_stack only. */
2393 #define BPF_F_USER_BUILD_ID		(1ULL << 11)
2394 
2395 /* BPF_FUNC_skb_set_tunnel_key flags. */
2396 #define BPF_F_ZERO_CSUM_TX		(1ULL << 1)
2397 #define BPF_F_DONT_FRAGMENT		(1ULL << 2)
2398 #define BPF_F_SEQ_NUMBER		(1ULL << 3)
2399 
2400 /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
2401  * BPF_FUNC_perf_event_read_value flags.
2402  */
2403 #define BPF_F_INDEX_MASK		0xffffffffULL
2404 #define BPF_F_CURRENT_CPU		BPF_F_INDEX_MASK
2405 /* BPF_FUNC_perf_event_output for sk_buff input context. */
2406 #define BPF_F_CTXLEN_MASK		(0xfffffULL << 32)
2407 
2408 /* Mode for BPF_FUNC_skb_adjust_room helper. */
2409 enum bpf_adj_room_mode {
2410 	BPF_ADJ_ROOM_NET,
2411 };
2412 
2413 /* Mode for BPF_FUNC_skb_load_bytes_relative helper. */
2414 enum bpf_hdr_start_off {
2415 	BPF_HDR_START_MAC,
2416 	BPF_HDR_START_NET,
2417 };
2418 
2419 /* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */
2420 enum bpf_lwt_encap_mode {
2421 	BPF_LWT_ENCAP_SEG6,
2422 	BPF_LWT_ENCAP_SEG6_INLINE
2423 };
2424 
2425 /* user accessible mirror of in-kernel sk_buff.
2426  * new fields can only be added to the end of this structure
2427  */
2428 struct __sk_buff {
2429 	__u32 len;
2430 	__u32 pkt_type;
2431 	__u32 mark;
2432 	__u32 queue_mapping;
2433 	__u32 protocol;
2434 	__u32 vlan_present;
2435 	__u32 vlan_tci;
2436 	__u32 vlan_proto;
2437 	__u32 priority;
2438 	__u32 ingress_ifindex;
2439 	__u32 ifindex;
2440 	__u32 tc_index;
2441 	__u32 cb[5];
2442 	__u32 hash;
2443 	__u32 tc_classid;
2444 	__u32 data;
2445 	__u32 data_end;
2446 	__u32 napi_id;
2447 
2448 	/* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */
2449 	__u32 family;
2450 	__u32 remote_ip4;	/* Stored in network byte order */
2451 	__u32 local_ip4;	/* Stored in network byte order */
2452 	__u32 remote_ip6[4];	/* Stored in network byte order */
2453 	__u32 local_ip6[4];	/* Stored in network byte order */
2454 	__u32 remote_port;	/* Stored in network byte order */
2455 	__u32 local_port;	/* stored in host byte order */
2456 	/* ... here. */
2457 
2458 	__u32 data_meta;
2459 	struct bpf_flow_keys *flow_keys;
2460 };
2461 
2462 struct bpf_tunnel_key {
2463 	__u32 tunnel_id;
2464 	union {
2465 		__u32 remote_ipv4;
2466 		__u32 remote_ipv6[4];
2467 	};
2468 	__u8 tunnel_tos;
2469 	__u8 tunnel_ttl;
2470 	__u16 tunnel_ext;	/* Padding, future use. */
2471 	__u32 tunnel_label;
2472 };
2473 
2474 /* user accessible mirror of in-kernel xfrm_state.
2475  * new fields can only be added to the end of this structure
2476  */
2477 struct bpf_xfrm_state {
2478 	__u32 reqid;
2479 	__u32 spi;	/* Stored in network byte order */
2480 	__u16 family;
2481 	__u16 ext;	/* Padding, future use. */
2482 	union {
2483 		__u32 remote_ipv4;	/* Stored in network byte order */
2484 		__u32 remote_ipv6[4];	/* Stored in network byte order */
2485 	};
2486 };
2487 
2488 /* Generic BPF return codes which all BPF program types may support.
2489  * The values are binary compatible with their TC_ACT_* counter-part to
2490  * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
2491  * programs.
2492  *
2493  * XDP is handled seprately, see XDP_*.
2494  */
2495 enum bpf_ret_code {
2496 	BPF_OK = 0,
2497 	/* 1 reserved */
2498 	BPF_DROP = 2,
2499 	/* 3-6 reserved */
2500 	BPF_REDIRECT = 7,
2501 	/* >127 are reserved for prog type specific return codes */
2502 };
2503 
2504 struct bpf_sock {
2505 	__u32 bound_dev_if;
2506 	__u32 family;
2507 	__u32 type;
2508 	__u32 protocol;
2509 	__u32 mark;
2510 	__u32 priority;
2511 	__u32 src_ip4;		/* Allows 1,2,4-byte read.
2512 				 * Stored in network byte order.
2513 				 */
2514 	__u32 src_ip6[4];	/* Allows 1,2,4-byte read.
2515 				 * Stored in network byte order.
2516 				 */
2517 	__u32 src_port;		/* Allows 4-byte read.
2518 				 * Stored in host byte order
2519 				 */
2520 };
2521 
2522 struct bpf_sock_tuple {
2523 	union {
2524 		struct {
2525 			__be32 saddr;
2526 			__be32 daddr;
2527 			__be16 sport;
2528 			__be16 dport;
2529 		} ipv4;
2530 		struct {
2531 			__be32 saddr[4];
2532 			__be32 daddr[4];
2533 			__be16 sport;
2534 			__be16 dport;
2535 		} ipv6;
2536 	};
2537 };
2538 
2539 #define XDP_PACKET_HEADROOM 256
2540 
2541 /* User return codes for XDP prog type.
2542  * A valid XDP program must return one of these defined values. All other
2543  * return codes are reserved for future use. Unknown return codes will
2544  * result in packet drops and a warning via bpf_warn_invalid_xdp_action().
2545  */
2546 enum xdp_action {
2547 	XDP_ABORTED = 0,
2548 	XDP_DROP,
2549 	XDP_PASS,
2550 	XDP_TX,
2551 	XDP_REDIRECT,
2552 };
2553 
2554 /* user accessible metadata for XDP packet hook
2555  * new fields must be added to the end of this structure
2556  */
2557 struct xdp_md {
2558 	__u32 data;
2559 	__u32 data_end;
2560 	__u32 data_meta;
2561 	/* Below access go through struct xdp_rxq_info */
2562 	__u32 ingress_ifindex; /* rxq->dev->ifindex */
2563 	__u32 rx_queue_index;  /* rxq->queue_index  */
2564 };
2565 
2566 enum sk_action {
2567 	SK_DROP = 0,
2568 	SK_PASS,
2569 };
2570 
2571 /* user accessible metadata for SK_MSG packet hook, new fields must
2572  * be added to the end of this structure
2573  */
2574 struct sk_msg_md {
2575 	void *data;
2576 	void *data_end;
2577 
2578 	__u32 family;
2579 	__u32 remote_ip4;	/* Stored in network byte order */
2580 	__u32 local_ip4;	/* Stored in network byte order */
2581 	__u32 remote_ip6[4];	/* Stored in network byte order */
2582 	__u32 local_ip6[4];	/* Stored in network byte order */
2583 	__u32 remote_port;	/* Stored in network byte order */
2584 	__u32 local_port;	/* stored in host byte order */
2585 };
2586 
2587 struct sk_reuseport_md {
2588 	/*
2589 	 * Start of directly accessible data. It begins from
2590 	 * the tcp/udp header.
2591 	 */
2592 	void *data;
2593 	void *data_end;		/* End of directly accessible data */
2594 	/*
2595 	 * Total length of packet (starting from the tcp/udp header).
2596 	 * Note that the directly accessible bytes (data_end - data)
2597 	 * could be less than this "len".  Those bytes could be
2598 	 * indirectly read by a helper "bpf_skb_load_bytes()".
2599 	 */
2600 	__u32 len;
2601 	/*
2602 	 * Eth protocol in the mac header (network byte order). e.g.
2603 	 * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD)
2604 	 */
2605 	__u32 eth_protocol;
2606 	__u32 ip_protocol;	/* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
2607 	__u32 bind_inany;	/* Is sock bound to an INANY address? */
2608 	__u32 hash;		/* A hash of the packet 4 tuples */
2609 };
2610 
2611 #define BPF_TAG_SIZE	8
2612 
2613 struct bpf_prog_info {
2614 	__u32 type;
2615 	__u32 id;
2616 	__u8  tag[BPF_TAG_SIZE];
2617 	__u32 jited_prog_len;
2618 	__u32 xlated_prog_len;
2619 	__aligned_u64 jited_prog_insns;
2620 	__aligned_u64 xlated_prog_insns;
2621 	__u64 load_time;	/* ns since boottime */
2622 	__u32 created_by_uid;
2623 	__u32 nr_map_ids;
2624 	__aligned_u64 map_ids;
2625 	char name[BPF_OBJ_NAME_LEN];
2626 	__u32 ifindex;
2627 	__u32 gpl_compatible:1;
2628 	__u64 netns_dev;
2629 	__u64 netns_ino;
2630 	__u32 nr_jited_ksyms;
2631 	__u32 nr_jited_func_lens;
2632 	__aligned_u64 jited_ksyms;
2633 	__aligned_u64 jited_func_lens;
2634 } __attribute__((aligned(8)));
2635 
2636 struct bpf_map_info {
2637 	__u32 type;
2638 	__u32 id;
2639 	__u32 key_size;
2640 	__u32 value_size;
2641 	__u32 max_entries;
2642 	__u32 map_flags;
2643 	char  name[BPF_OBJ_NAME_LEN];
2644 	__u32 ifindex;
2645 	__u32 :32;
2646 	__u64 netns_dev;
2647 	__u64 netns_ino;
2648 	__u32 btf_id;
2649 	__u32 btf_key_type_id;
2650 	__u32 btf_value_type_id;
2651 } __attribute__((aligned(8)));
2652 
2653 struct bpf_btf_info {
2654 	__aligned_u64 btf;
2655 	__u32 btf_size;
2656 	__u32 id;
2657 } __attribute__((aligned(8)));
2658 
2659 /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
2660  * by user and intended to be used by socket (e.g. to bind to, depends on
2661  * attach attach type).
2662  */
2663 struct bpf_sock_addr {
2664 	__u32 user_family;	/* Allows 4-byte read, but no write. */
2665 	__u32 user_ip4;		/* Allows 1,2,4-byte read and 4-byte write.
2666 				 * Stored in network byte order.
2667 				 */
2668 	__u32 user_ip6[4];	/* Allows 1,2,4-byte read an 4-byte write.
2669 				 * Stored in network byte order.
2670 				 */
2671 	__u32 user_port;	/* Allows 4-byte read and write.
2672 				 * Stored in network byte order
2673 				 */
2674 	__u32 family;		/* Allows 4-byte read, but no write */
2675 	__u32 type;		/* Allows 4-byte read, but no write */
2676 	__u32 protocol;		/* Allows 4-byte read, but no write */
2677 	__u32 msg_src_ip4;	/* Allows 1,2,4-byte read an 4-byte write.
2678 				 * Stored in network byte order.
2679 				 */
2680 	__u32 msg_src_ip6[4];	/* Allows 1,2,4-byte read an 4-byte write.
2681 				 * Stored in network byte order.
2682 				 */
2683 };
2684 
2685 /* User bpf_sock_ops struct to access socket values and specify request ops
2686  * and their replies.
2687  * Some of this fields are in network (bigendian) byte order and may need
2688  * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
2689  * New fields can only be added at the end of this structure
2690  */
2691 struct bpf_sock_ops {
2692 	__u32 op;
2693 	union {
2694 		__u32 args[4];		/* Optionally passed to bpf program */
2695 		__u32 reply;		/* Returned by bpf program	    */
2696 		__u32 replylong[4];	/* Optionally returned by bpf prog  */
2697 	};
2698 	__u32 family;
2699 	__u32 remote_ip4;	/* Stored in network byte order */
2700 	__u32 local_ip4;	/* Stored in network byte order */
2701 	__u32 remote_ip6[4];	/* Stored in network byte order */
2702 	__u32 local_ip6[4];	/* Stored in network byte order */
2703 	__u32 remote_port;	/* Stored in network byte order */
2704 	__u32 local_port;	/* stored in host byte order */
2705 	__u32 is_fullsock;	/* Some TCP fields are only valid if
2706 				 * there is a full socket. If not, the
2707 				 * fields read as zero.
2708 				 */
2709 	__u32 snd_cwnd;
2710 	__u32 srtt_us;		/* Averaged RTT << 3 in usecs */
2711 	__u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
2712 	__u32 state;
2713 	__u32 rtt_min;
2714 	__u32 snd_ssthresh;
2715 	__u32 rcv_nxt;
2716 	__u32 snd_nxt;
2717 	__u32 snd_una;
2718 	__u32 mss_cache;
2719 	__u32 ecn_flags;
2720 	__u32 rate_delivered;
2721 	__u32 rate_interval_us;
2722 	__u32 packets_out;
2723 	__u32 retrans_out;
2724 	__u32 total_retrans;
2725 	__u32 segs_in;
2726 	__u32 data_segs_in;
2727 	__u32 segs_out;
2728 	__u32 data_segs_out;
2729 	__u32 lost_out;
2730 	__u32 sacked_out;
2731 	__u32 sk_txhash;
2732 	__u64 bytes_received;
2733 	__u64 bytes_acked;
2734 };
2735 
2736 /* Definitions for bpf_sock_ops_cb_flags */
2737 #define BPF_SOCK_OPS_RTO_CB_FLAG	(1<<0)
2738 #define BPF_SOCK_OPS_RETRANS_CB_FLAG	(1<<1)
2739 #define BPF_SOCK_OPS_STATE_CB_FLAG	(1<<2)
2740 #define BPF_SOCK_OPS_ALL_CB_FLAGS       0x7		/* Mask of all currently
2741 							 * supported cb flags
2742 							 */
2743 
2744 /* List of known BPF sock_ops operators.
2745  * New entries can only be added at the end
2746  */
2747 enum {
2748 	BPF_SOCK_OPS_VOID,
2749 	BPF_SOCK_OPS_TIMEOUT_INIT,	/* Should return SYN-RTO value to use or
2750 					 * -1 if default value should be used
2751 					 */
2752 	BPF_SOCK_OPS_RWND_INIT,		/* Should return initial advertized
2753 					 * window (in packets) or -1 if default
2754 					 * value should be used
2755 					 */
2756 	BPF_SOCK_OPS_TCP_CONNECT_CB,	/* Calls BPF program right before an
2757 					 * active connection is initialized
2758 					 */
2759 	BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB,	/* Calls BPF program when an
2760 						 * active connection is
2761 						 * established
2762 						 */
2763 	BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB,	/* Calls BPF program when a
2764 						 * passive connection is
2765 						 * established
2766 						 */
2767 	BPF_SOCK_OPS_NEEDS_ECN,		/* If connection's congestion control
2768 					 * needs ECN
2769 					 */
2770 	BPF_SOCK_OPS_BASE_RTT,		/* Get base RTT. The correct value is
2771 					 * based on the path and may be
2772 					 * dependent on the congestion control
2773 					 * algorithm. In general it indicates
2774 					 * a congestion threshold. RTTs above
2775 					 * this indicate congestion
2776 					 */
2777 	BPF_SOCK_OPS_RTO_CB,		/* Called when an RTO has triggered.
2778 					 * Arg1: value of icsk_retransmits
2779 					 * Arg2: value of icsk_rto
2780 					 * Arg3: whether RTO has expired
2781 					 */
2782 	BPF_SOCK_OPS_RETRANS_CB,	/* Called when skb is retransmitted.
2783 					 * Arg1: sequence number of 1st byte
2784 					 * Arg2: # segments
2785 					 * Arg3: return value of
2786 					 *       tcp_transmit_skb (0 => success)
2787 					 */
2788 	BPF_SOCK_OPS_STATE_CB,		/* Called when TCP changes state.
2789 					 * Arg1: old_state
2790 					 * Arg2: new_state
2791 					 */
2792 	BPF_SOCK_OPS_TCP_LISTEN_CB,	/* Called on listen(2), right after
2793 					 * socket transition to LISTEN state.
2794 					 */
2795 };
2796 
2797 /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
2798  * changes between the TCP and BPF versions. Ideally this should never happen.
2799  * If it does, we need to add code to convert them before calling
2800  * the BPF sock_ops function.
2801  */
2802 enum {
2803 	BPF_TCP_ESTABLISHED = 1,
2804 	BPF_TCP_SYN_SENT,
2805 	BPF_TCP_SYN_RECV,
2806 	BPF_TCP_FIN_WAIT1,
2807 	BPF_TCP_FIN_WAIT2,
2808 	BPF_TCP_TIME_WAIT,
2809 	BPF_TCP_CLOSE,
2810 	BPF_TCP_CLOSE_WAIT,
2811 	BPF_TCP_LAST_ACK,
2812 	BPF_TCP_LISTEN,
2813 	BPF_TCP_CLOSING,	/* Now a valid state */
2814 	BPF_TCP_NEW_SYN_RECV,
2815 
2816 	BPF_TCP_MAX_STATES	/* Leave at the end! */
2817 };
2818 
2819 #define TCP_BPF_IW		1001	/* Set TCP initial congestion window */
2820 #define TCP_BPF_SNDCWND_CLAMP	1002	/* Set sndcwnd_clamp */
2821 
2822 struct bpf_perf_event_value {
2823 	__u64 counter;
2824 	__u64 enabled;
2825 	__u64 running;
2826 };
2827 
2828 #define BPF_DEVCG_ACC_MKNOD	(1ULL << 0)
2829 #define BPF_DEVCG_ACC_READ	(1ULL << 1)
2830 #define BPF_DEVCG_ACC_WRITE	(1ULL << 2)
2831 
2832 #define BPF_DEVCG_DEV_BLOCK	(1ULL << 0)
2833 #define BPF_DEVCG_DEV_CHAR	(1ULL << 1)
2834 
2835 struct bpf_cgroup_dev_ctx {
2836 	/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
2837 	__u32 access_type;
2838 	__u32 major;
2839 	__u32 minor;
2840 };
2841 
2842 struct bpf_raw_tracepoint_args {
2843 	__u64 args[0];
2844 };
2845 
2846 /* DIRECT:  Skip the FIB rules and go to FIB table associated with device
2847  * OUTPUT:  Do lookup from egress perspective; default is ingress
2848  */
2849 #define BPF_FIB_LOOKUP_DIRECT  BIT(0)
2850 #define BPF_FIB_LOOKUP_OUTPUT  BIT(1)
2851 
2852 enum {
2853 	BPF_FIB_LKUP_RET_SUCCESS,      /* lookup successful */
2854 	BPF_FIB_LKUP_RET_BLACKHOLE,    /* dest is blackholed; can be dropped */
2855 	BPF_FIB_LKUP_RET_UNREACHABLE,  /* dest is unreachable; can be dropped */
2856 	BPF_FIB_LKUP_RET_PROHIBIT,     /* dest not allowed; can be dropped */
2857 	BPF_FIB_LKUP_RET_NOT_FWDED,    /* packet is not forwarded */
2858 	BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
2859 	BPF_FIB_LKUP_RET_UNSUPP_LWT,   /* fwd requires encapsulation */
2860 	BPF_FIB_LKUP_RET_NO_NEIGH,     /* no neighbor entry for nh */
2861 	BPF_FIB_LKUP_RET_FRAG_NEEDED,  /* fragmentation required to fwd */
2862 };
2863 
2864 struct bpf_fib_lookup {
2865 	/* input:  network family for lookup (AF_INET, AF_INET6)
2866 	 * output: network family of egress nexthop
2867 	 */
2868 	__u8	family;
2869 
2870 	/* set if lookup is to consider L4 data - e.g., FIB rules */
2871 	__u8	l4_protocol;
2872 	__be16	sport;
2873 	__be16	dport;
2874 
2875 	/* total length of packet from network header - used for MTU check */
2876 	__u16	tot_len;
2877 
2878 	/* input: L3 device index for lookup
2879 	 * output: device index from FIB lookup
2880 	 */
2881 	__u32	ifindex;
2882 
2883 	union {
2884 		/* inputs to lookup */
2885 		__u8	tos;		/* AF_INET  */
2886 		__be32	flowinfo;	/* AF_INET6, flow_label + priority */
2887 
2888 		/* output: metric of fib result (IPv4/IPv6 only) */
2889 		__u32	rt_metric;
2890 	};
2891 
2892 	union {
2893 		__be32		ipv4_src;
2894 		__u32		ipv6_src[4];  /* in6_addr; network order */
2895 	};
2896 
2897 	/* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in
2898 	 * network header. output: bpf_fib_lookup sets to gateway address
2899 	 * if FIB lookup returns gateway route
2900 	 */
2901 	union {
2902 		__be32		ipv4_dst;
2903 		__u32		ipv6_dst[4];  /* in6_addr; network order */
2904 	};
2905 
2906 	/* output */
2907 	__be16	h_vlan_proto;
2908 	__be16	h_vlan_TCI;
2909 	__u8	smac[6];     /* ETH_ALEN */
2910 	__u8	dmac[6];     /* ETH_ALEN */
2911 };
2912 
2913 enum bpf_task_fd_type {
2914 	BPF_FD_TYPE_RAW_TRACEPOINT,	/* tp name */
2915 	BPF_FD_TYPE_TRACEPOINT,		/* tp name */
2916 	BPF_FD_TYPE_KPROBE,		/* (symbol + offset) or addr */
2917 	BPF_FD_TYPE_KRETPROBE,		/* (symbol + offset) or addr */
2918 	BPF_FD_TYPE_UPROBE,		/* filename + offset */
2919 	BPF_FD_TYPE_URETPROBE,		/* filename + offset */
2920 };
2921 
2922 struct bpf_flow_keys {
2923 	__u16	nhoff;
2924 	__u16	thoff;
2925 	__u16	addr_proto;			/* ETH_P_* of valid addrs */
2926 	__u8	is_frag;
2927 	__u8	is_first_frag;
2928 	__u8	is_encap;
2929 	__u8	ip_proto;
2930 	__be16	n_proto;
2931 	__be16	sport;
2932 	__be16	dport;
2933 	union {
2934 		struct {
2935 			__be32	ipv4_src;
2936 			__be32	ipv4_dst;
2937 		};
2938 		struct {
2939 			__u32	ipv6_src[4];	/* in6_addr; network order */
2940 			__u32	ipv6_dst[4];	/* in6_addr; network order */
2941 		};
2942 	};
2943 };
2944 
2945 #endif /* _UAPI__LINUX_BPF_H__ */
2946