--[[
Copyright 2016 Marek Vavrusa <mvavrusa@cloudflare.com>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
]]
local ffi = require('ffi')
local bit = require('bit')
local cdef = require('bpf.cdef')

local BPF, HELPER = ffi.typeof('struct bpf'), ffi.typeof('struct bpf_func_id')
local const_width = {
	[1] = BPF.B, [2] = BPF.H, [4] = BPF.W, [8] = BPF.DW,
}
local const_width_type = {
	[1] = ffi.typeof('uint8_t'), [2] = ffi.typeof('uint16_t'), [4] = ffi.typeof('uint32_t'), [8] = ffi.typeof('uint64_t'),
}
-- Built-ins that will be translated into BPF instructions,
-- e.g. bit.bor(0xf0, 0x0f) becomes {'alu64, or, k', reg(0xf0), reg(0x0f), 0, 0}
local builtins = {
	[bit.lshift]  = 'LSH',
	[bit.rshift]  = 'RSH',
	[bit.band]    = 'AND',
	[bit.bnot]    = 'NEG',
	[bit.bor]     = 'OR',
	[bit.bxor]    = 'XOR',
	[bit.arshift] = 'ARSH',
	-- Extensions and intrinsics
}
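-- A minimal sketch (not part of the module) of how this table is used: when
-- the compiler sees one of these bit operations in traced code, it emits the
-- corresponding BPF ALU instruction instead of calling into LuaJIT, e.g.
--   local masked = bit.band(x, 0xff) -- roughly an ALU64 AND with an immediate (K) operand
--   local merged = bit.bor(a, b)     -- roughly an ALU64 OR on two registers (X)
-- Variable names above are illustrative only.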

local function width_type(w)
	-- Note: ffi.typeof doesn't accept '?' as template
	return const_width_type[w] or ffi.typeof(string.format('uint8_t [%d]', w))
end
builtins.width_type = width_type
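-- For illustration (not executed): widths with a native integer type map to
-- it, anything else falls back to a byte array of that length, e.g.
--   width_type(4) --> ctype<uint32_t>
--   width_type(6) --> ctype<uint8_t [6]>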

-- Return struct member size/type (requires LuaJIT 2.1+)
-- I am ashamed that there's no easier way around it.
local function sizeofattr(ct, name)
	if not ffi.typeinfo then error('LuaJIT 2.1+ is required for ffi.typeinfo') end
	local cinfo = ffi.typeinfo(ct)
	while true do
		cinfo = ffi.typeinfo(cinfo.sib)
		if not cinfo then return end
		if cinfo.name == name then break end
	end
	local size = math.max(1, ffi.typeinfo(cinfo.sib or ct).size - cinfo.size)
	-- Guess type name
	return size, builtins.width_type(size)
end
builtins.sizeofattr = sizeofattr
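-- A rough usage sketch (assuming LuaJIT 2.1+ with ffi.typeinfo); the member
-- size is derived from the offset difference to the next sibling member, e.g.
--   ffi.cdef 'struct example { uint32_t a; uint64_t b; };' -- hypothetical type
--   local size, ct = sizeofattr(ffi.typeof('struct example'), 'b')
--   -- size == 8, ct == ctype<uint64_t>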

-- Byte-order conversions for little endian
local function ntoh(x, w)
	if w then x = ffi.cast(const_width_type[w/8], x) end
	return bit.bswap(x)
end
local function hton(x, w) return ntoh(x, w) end
builtins.ntoh = ntoh
builtins.hton = hton
builtins[ntoh] = function (e, dst, a, w)
	-- This is trickery, but TO_LE means cpu_to_le(), and we want exactly
	-- the opposite here, as network byte order is always big endian ('be')
	w = w or ffi.sizeof(e.V[a].type)*8
	if w == 8 then return end -- NOOP
	assert(w <= 64, 'NYI: ntoh(a[, width]) - operand larger than register width')
	-- Allocate registers and execute
	e.vcopy(dst, a)
	e.emit(BPF.ALU + BPF.END + BPF.TO_BE, e.vreg(dst), 0, 0, w)
end
builtins[hton] = function (e, dst, a, w)
	w = w or ffi.sizeof(e.V[a].type)*8
	if w == 8 then return end -- NOOP
	assert(w <= 64, 'NYI: hton(a[, width]) - operand larger than register width')
	-- Allocate registers and execute
	e.vcopy(dst, a)
	e.emit(BPF.ALU + BPF.END + BPF.TO_LE, e.vreg(dst), 0, 0, w)
end
-- Byte-order conversions for big endian are no-ops
if ffi.abi('be') then
	ntoh = function (x, w)
		return w and ffi.cast(const_width_type[w/8], x) or x
	end
	hton = ntoh
	builtins[ntoh] = function(_, _, _) return end
	builtins[hton] = function(_, _, _) return end
end
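-- Usage sketch (illustrative only; field names are hypothetical): in traced
-- code, ntoh()/hton() compile down to the single BPF_END byte-swap
-- instruction emitted above, with an optional explicit bit width, e.g.
--   local port = ntoh(udp.dst_port, 16) -- 16-bit network-order field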
-- Other built-ins
local function xadd() error('NYI') end
builtins.xadd = xadd
builtins[xadd] = function (e, ret, a, b, off)
	local vinfo = e.V[a].const
	assert(vinfo and vinfo.__dissector, 'xadd(a, b[, offset]) called on non-pointer')
	local w = ffi.sizeof(vinfo.__dissector)
	-- Calculate structure attribute offsets
	if e.V[off] and type(e.V[off].const) == 'string' then
		local ct, field = vinfo.__dissector, e.V[off].const
		off = ffi.offsetof(ct, field)
		assert(off, 'xadd(a, b, offset) - offset is not valid in given structure')
		w = sizeofattr(ct, field)
	end
	assert(w == 4 or w == 8, 'NYI: xadd() - 1 and 2 byte atomic increments are not supported')
	-- Allocate registers and execute
	local src_reg = e.vreg(b)
	local dst_reg = e.vreg(a)
	-- Set variable for return value and call
	e.vset(ret)
	e.vreg(ret, 0, true, ffi.typeof('int32_t'))
	-- Optimize the NULL check away if provably not NULL
	if not e.V[a].source or e.V[a].source:find('_or_null', 1, true) then
		e.emit(BPF.JMP + BPF.JEQ + BPF.K, dst_reg, 0, 1, 0) -- if (dst != NULL)
	end
	e.emit(BPF.XADD + BPF.STX + const_width[w], dst_reg, src_reg, off or 0, 0)
end
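-- Usage sketch (illustrative; map and field names are hypothetical): xadd()
-- emits a BPF_XADD atomic add, roughly __sync_fetch_and_add() in C, so a
-- counter can be bumped without a read-modify-write sequence, e.g.
--   xadd(map[key], 1)            -- atomically add 1 to a 4/8-byte map value
--   xadd(stats, 1, 'rx_packets') -- add to a named struct member (resolved to an offset)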

local function probe_read() error('NYI') end
builtins.probe_read = probe_read
builtins[probe_read] = function (e, ret, dst, src, vtype, ofs)
	e.reg_alloc(e.tmpvar, 1)
	-- Load stack pointer to dst; since loads are only supported into stack
	-- memory, we have to reuse an existing stack allocation or create a new
	-- one and convert it to a pointer type
	e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 1, 10, 0, 0)
	if not e.V[dst].const or not (e.V[dst].const.__base and e.V[dst].const.__base > 0) then
		builtins[ffi.new](e, dst, vtype) -- Allocate stack memory
	end
	e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 1, 0, 0, -e.V[dst].const.__base)
	-- Set stack memory maximum size bound
	e.reg_alloc(e.tmpvar, 2)
	if not vtype then
		vtype = cdef.typename(e.V[dst].type)
		-- Dereference pointer type to pointed type for size calculation
		if vtype:sub(-1) == '*' then vtype = vtype:sub(0, -2) end
	end
	local w = ffi.sizeof(vtype)
	e.emit(BPF.ALU64 + BPF.MOV + BPF.K, 2, 0, 0, w)
	-- Set source pointer
	if e.V[src].reg then
		e.reg_alloc(e.tmpvar, 3) -- Copy from original register
		e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 3, e.V[src].reg, 0, 0)
	else
		e.vreg(src, 3)
		e.reg_spill(src) -- Spill to avoid overwriting
	end
	if ofs and ofs > 0 then
		e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 3, 0, 0, ofs)
	end
	-- Call probe read helper
	ret = ret or e.tmpvar
	e.vset(ret)
	e.vreg(ret, 0, true, ffi.typeof('int32_t'))
	e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, HELPER.probe_read)
	e.V[e.tmpvar].reg = nil  -- Free temporary registers
end
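-- For reference (a sketch of the generated call, not executed here): the
-- instructions above set up R1..R3 and call the kernel helper
--   bpf_probe_read(stack_dst, sizeof(vtype), src + ofs)
-- which is how dereferencing kernel pointers from probes is implemented.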

builtins[ffi.cast] = function (e, dst, ct, x)
	assert(e.V[ct].const, 'ffi.cast(ctype, x) called with bad ctype')
	e.vcopy(dst, x)
	if e.V[x].const and type(e.V[x].const) == 'table' then
		e.V[dst].const.__dissector = ffi.typeof(e.V[ct].const)
	end
	e.V[dst].type = ffi.typeof(e.V[ct].const)
	-- Specific types also encode source of the data
	-- This is because BPF has different helpers for reading
	-- different data sources, so variables must track origins.
	-- struct pt_regs - source of the data is probe
	-- struct skb     - source of the data is socket buffer
	-- struct X       - source of the data is probe/tracepoint
	if ffi.typeof(e.V[ct].const) == ffi.typeof('struct pt_regs') then
		e.V[dst].source = 'ptr_to_probe'
	end
end

builtins[ffi.new] = function (e, dst, ct, x)
	if type(ct) == 'number' then
		ct = ffi.typeof(e.V[ct].const) -- Get ctype from variable
	end
	assert(not x, 'NYI: ffi.new(ctype, ...) - initializer is not supported')
	assert(not cdef.isptr(ct, true), 'NYI: ffi.new(ctype, ...) - ctype MUST NOT be a pointer')
	e.vset(dst, nil, ct)
	e.V[dst].source = 'ptr_to_stack'
	e.V[dst].const = {__base = e.valloc(ffi.sizeof(ct), true), __dissector = ct}
	-- Set array dissector if created an array
	-- e.g. if ct is 'char [2]', then dissector is 'char'
	local elem_type = tostring(ct):match('ctype<(.+)%s%[(%d+)%]>')
	if elem_type then
		e.V[dst].const.__dissector = ffi.typeof(elem_type)
	end
end
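-- Usage sketch (illustrative, inside a traced function): ffi.new() here does
-- not allocate from the LuaJIT heap at runtime, it reserves BPF stack space
-- at compile time, e.g.
--   local buf = ffi.new('char [16]') -- 16 bytes of BPF stack, dissected as char
--   local cnt = ffi.new('uint64_t')  -- 8-byte scratch slot on the stack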

builtins[ffi.copy] = function (e, ret, dst, src)
	assert(cdef.isptr(e.V[dst].type), 'ffi.copy(dst, src) - dst MUST be a pointer type')
	assert(cdef.isptr(e.V[src].type), 'ffi.copy(dst, src) - src MUST be a pointer type')
	-- Specific types also encode source of the data
	-- struct pt_regs - source of the data is probe
	-- struct skb     - source of the data is socket buffer
	if e.V[src].source and e.V[src].source:find('ptr_to_probe', 1, true) then
		e.reg_alloc(e.tmpvar, 1)
		-- Load stack pointer to dst; since loads are only supported into stack
		-- memory, we have to use either a spilled variable or an allocated stack memory offset
		e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 1, 10, 0, 0)
		if e.V[dst].spill then
			e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 1, 0, 0, -e.V[dst].spill)
		elseif e.V[dst].const.__base then
			e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 1, 0, 0, -e.V[dst].const.__base)
		else error('ffi.copy(dst, src) - can\'t get stack offset of dst') end
		-- Set stack memory maximum size bound
		local dst_tname = cdef.typename(e.V[dst].type)
		if dst_tname:sub(-1) == '*' then dst_tname = dst_tname:sub(0, -2) end
		e.reg_alloc(e.tmpvar, 2)
		e.emit(BPF.ALU64 + BPF.MOV + BPF.K, 2, 0, 0, ffi.sizeof(dst_tname))
		-- Set source pointer
		if e.V[src].reg then
			e.reg_alloc(e.tmpvar, 3) -- Copy from original register
			e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 3, e.V[src].reg, 0, 0)
		else
			e.vreg(src, 3)
			e.reg_spill(src) -- Spill to avoid overwriting
		end
		-- Call probe read helper
		e.vset(ret)
		e.vreg(ret, 0, true, ffi.typeof('int32_t'))
		e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, HELPER.probe_read)
		e.V[e.tmpvar].reg = nil  -- Free temporary registers
	elseif e.V[src].const and e.V[src].const.__map then
		error('NYI: ffi.copy(dst, src) - src is backed by BPF map')
	elseif e.V[src].const and e.V[src].const.__dissector then
		error('NYI: ffi.copy(dst, src) - src is backed by socket buffer')
	else
		-- TODO: identify cheap register move
		-- TODO: identify copy to/from stack
		error('NYI: ffi.copy(dst, src) - src is neither BPF map/socket buffer or probe')
	end
end
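-- For reference (a sketch, not executed here): when src originates from a
-- probe (e.g. it was ffi.cast to 'struct pt_regs' and is therefore marked
-- 'ptr_to_probe'), ffi.copy(dst, src) compiles into roughly
--   bpf_probe_read(dst_stack_or_spill, sizeof(*dst), src)
-- Copies from BPF maps or socket buffers are not implemented yet.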
-- print(format, ...) builtin changes semantics from Lua print(...):
-- the first parameter has to be a format string, and only a reduced set of
-- conversion specifiers is allowed: %d %u %x %ld %lu %lx %lld %llu %llx %p %s
builtins[print] = function (e, ret, fmt, a1, a2, a3)
	-- Load format string and length
	e.reg_alloc(e.V[e.tmpvar], 1)
	e.reg_alloc(e.V[e.tmpvar+1], 1)
	if type(e.V[fmt].const) == 'string' then
		local src = e.V[fmt].const
		local len = #src + 1
		local dst = e.valloc(len, src)
		-- TODO: this is materialize step
		e.V[fmt].const = {__base=dst}
		e.V[fmt].type = ffi.typeof('char ['..len..']')
	elseif e.V[fmt].const.__base then -- luacheck: ignore
		-- NOP
	else error('NYI: print(fmt, ...) - format variable is not literal/stack memory') end
	-- Prepare helper call
	e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 1, 10, 0, 0)
	e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 1, 0, 0, -e.V[fmt].const.__base)
	e.emit(BPF.ALU64 + BPF.MOV + BPF.K, 2, 0, 0, ffi.sizeof(e.V[fmt].type))
	if a1 then
		local args = {a1, a2, a3}
		assert(#args <= 3, 'print(fmt, ...) - maximum of 3 arguments supported')
		for i, arg in ipairs(args) do
			e.vcopy(e.tmpvar, arg)  -- Copy variable
			e.vreg(e.tmpvar, 3+i-1) -- Materialize it in arg register
		end
	end
	-- Call helper
	e.vset(ret)
	e.vreg(ret, 0, true, ffi.typeof('int32_t')) -- Return is integer
	e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, HELPER.trace_printk)
	e.V[e.tmpvar].reg = nil  -- Free temporary registers
end
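-- Usage sketch (illustrative; variable names are hypothetical): print()
-- compiles into bpf_trace_printk(fmt, fmt_len, a1, a2, a3), so the output
-- lands in /sys/kernel/debug/tracing/trace_pipe, e.g.
--   print('write: pid %d wrote %d bytes\n', pid, count)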

-- Implements bpf_perf_event_output(ctx, map, flags, var, vlen) on perf event map
local function perf_submit(e, dst, map_var, src)
	-- Set R2 = map fd (indirect load)
	local map = e.V[map_var].const
	e.vcopy(e.tmpvar, map_var)
	e.vreg(e.tmpvar, 2, true, ffi.typeof('uint64_t'))
	e.LD_IMM_X(2, BPF.PSEUDO_MAP_FD, map.fd, ffi.sizeof('uint64_t'))
	-- Set R1 = ctx
	e.reg_alloc(e.tmpvar, 1) -- Spill anything in R1 (unnamed tmp variable)
	e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 1, 6, 0, 0) -- CTX is always in R6, copy
	-- Set R3 = flags
	e.vset(e.tmpvar, nil, 0) -- BPF_F_CURRENT_CPU
	e.vreg(e.tmpvar, 3, false, ffi.typeof('uint64_t'))
	-- Set R4 = pointer to src on stack
	assert(e.V[src].const.__base, 'NYI: submit(map, var) - variable is not on stack')
	e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 4, 10, 0, 0)
	e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 4, 0, 0, -e.V[src].const.__base)
	-- Set R5 = src length
	e.emit(BPF.ALU64 + BPF.MOV + BPF.K, 5, 0, 0, ffi.sizeof(e.V[src].type))
	-- Set R0 = ret and call
	e.vset(dst)
	e.vreg(dst, 0, true, ffi.typeof('int32_t')) -- Return is integer
	e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, HELPER.perf_event_output)
	e.V[e.tmpvar].reg = nil  -- Free temporary registers
end
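-- Usage sketch (illustrative; the `events` map and `struct event` ctype are
-- assumed to be defined elsewhere): a stack-allocated record can be pushed
-- to user space either directly or via the table.insert() alias below, e.g.
--   local evt = ffi.new('struct event')
--   perf_submit(events, evt)  -- or: table.insert(events, evt)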

-- Implements bpf_skb_load_bytes(ctx, off, var, vlen) on skb->data
local function load_bytes(e, dst, off, var)
	-- Set R2 = offset
	e.vset(e.tmpvar, nil, off)
	e.vreg(e.tmpvar, 2, false, ffi.typeof('uint64_t'))
	-- Set R1 = ctx
	e.reg_alloc(e.tmpvar, 1) -- Spill anything in R1 (unnamed tmp variable)
	e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 1, 6, 0, 0) -- CTX is always in R6, copy
	-- Set R3 = pointer to var on stack
	assert(e.V[var].const.__base, 'NYI: load_bytes(off, var, len) - variable is not on stack')
	e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 3, 10, 0, 0)
	e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 3, 0, 0, -e.V[var].const.__base)
	-- Set R4 = var length
	e.emit(BPF.ALU64 + BPF.MOV + BPF.K, 4, 0, 0, ffi.sizeof(e.V[var].type))
	-- Set R0 = ret and call
	e.vset(dst)
	e.vreg(dst, 0, true, ffi.typeof('int32_t')) -- Return is integer
	e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, HELPER.skb_load_bytes)
	e.V[e.tmpvar].reg = nil  -- Free temporary registers
end
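-- Usage sketch (illustrative; buffer name is hypothetical): in a socket
-- filter program this copies packet bytes starting at `off` into a stack
-- buffer, with the length taken from the buffer's ctype, e.g.
--   local hdr = ffi.new('char [14]') -- Ethernet header
--   load_bytes(0, hdr)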

-- Implements bpf_get_stackid(ctx, map, flags)
local function stack_id(e, ret, map_var, key)
	-- Set R2 = map fd (indirect load)
	local map = e.V[map_var].const
	e.vcopy(e.tmpvar, map_var)
	e.vreg(e.tmpvar, 2, true, ffi.typeof('uint64_t'))
	e.LD_IMM_X(2, BPF.PSEUDO_MAP_FD, map.fd, ffi.sizeof('uint64_t'))
	-- Set R1 = ctx
	e.reg_alloc(e.tmpvar, 1) -- Spill anything in R1 (unnamed tmp variable)
	e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 1, 6, 0, 0) -- CTX is always in R6, copy
	-- Load flags in R3 (immediate value or key)
	local imm = e.V[key].const
	assert(tonumber(imm), 'NYI: stack_id(map, var), var must be constant number')
	e.reg_alloc(e.tmpvar, 3) -- Spill anything in R3 (unnamed tmp variable)
	e.LD_IMM_X(3, 0, imm, 8)
	-- Return R0 as signed integer
	e.vset(ret)
	e.vreg(ret, 0, true, ffi.typeof('int32_t'))
	e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, HELPER.get_stackid)
	e.V[e.tmpvar].reg = nil  -- Free temporary registers
end

-- table.insert(table, value) keeps Lua semantics, with the exception of BPF maps:
-- inserting into a `perf_event` map submits the value to user space
builtins[table.insert] = function (e, dst, map_var, value)
	assert(e.V[map_var].const.__map, 'NYI: table.insert() supported only on BPF maps')
	return perf_submit(e, dst, map_var, value)
end

-- bpf_get_current_comm(buffer) - write current process name to byte buffer
local function comm() error('NYI') end
builtins[comm] = function (e, ret, dst)
	-- Set R1 = buffer
	assert(e.V[dst].const.__base, 'NYI: comm(buffer) - buffer variable is not on stack')
	e.reg_alloc(e.tmpvar, 1) -- Spill
	e.emit(BPF.ALU64 + BPF.MOV + BPF.X, 1, 10, 0, 0)
	e.emit(BPF.ALU64 + BPF.ADD + BPF.K, 1, 0, 0, -e.V[dst].const.__base)
	-- Set R2 = length
	e.reg_alloc(e.tmpvar, 2) -- Spill
	e.emit(BPF.ALU64 + BPF.MOV + BPF.K, 2, 0, 0, ffi.sizeof(e.V[dst].type))
	-- Return is integer
	e.vset(ret)
	e.vreg(ret, 0, true, ffi.typeof('int32_t'))
	e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, HELPER.get_current_comm)
	e.V[e.tmpvar].reg = nil  -- Free temporary registers
end
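-- Usage sketch (illustrative): the buffer must live on the BPF stack and its
-- ctype size is passed as the length, e.g.
--   local name = ffi.new('char [16]') -- TASK_COMM_LEN
--   comm(name)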

-- Math library built-ins
math.log2 = function () error('NYI') end
builtins[math.log2] = function (e, dst, x)
	-- Classic integer bit subdivision algorithm to find the position
	-- of the highest bit set, adapted for BPF bytecode-friendly operations.
	-- https://graphics.stanford.edu/~seander/bithacks.html
	-- r = 0
	local r = e.vreg(dst, nil, true)
	e.emit(BPF.ALU64 + BPF.MOV + BPF.K, r, 0, 0, 0)
	-- v = x
	e.vcopy(e.tmpvar, x)
	local v = e.vreg(e.tmpvar, 2)
	if cdef.isptr(e.V[x].const) then -- No pointer arithmetics, dereference
		e.vderef(v, v, {const = {__dissector=ffi.typeof('uint64_t')}})
	end
	-- Invert value to invert all tests, otherwise we would need and+jnz
	e.emit(BPF.ALU64 + BPF.NEG + BPF.K, v, 0, 0, 0)        -- v = ~v
	-- Unrolled test cases, converted masking to arithmetic as we don't have "if !(a & b)"
	-- As we're testing inverted value, we have to use arithmetic shift to copy MSB
	for i=4,0,-1 do
		local k = bit.lshift(1, i)
		e.emit(BPF.JMP + BPF.JGT + BPF.K, v, 0, 2, bit.bnot(bit.lshift(1, k))) -- if !upper_half(x)
		e.emit(BPF.ALU64 + BPF.ARSH + BPF.K, v, 0, 0, k)                       --     v >>= k
		e.emit(BPF.ALU64 + BPF.OR + BPF.K, r, 0, 0, k)                         --     r |= k
	end
	-- No longer constant, cleanup tmpvars
	e.V[dst].const = nil
	e.V[e.tmpvar].reg = nil
end
builtins[math.log10] = function (e, dst, x)
	-- Compute log2(x) and transform
	builtins[math.log2](e, dst, x)
	-- Relationship: log10(v) = log2(v) / log2(10)
	local r = e.V[dst].reg
	e.emit(BPF.ALU64 + BPF.ADD + BPF.K, r, 0, 0, 1)    -- Compensate round-down
	e.emit(BPF.ALU64 + BPF.MUL + BPF.K, r, 0, 0, 1233) -- 1/log2(10) ~ 1233/4096
	e.emit(BPF.ALU64 + BPF.RSH + BPF.K, r, 0, 0, 12)
end
builtins[math.log] = function (e, dst, x)
	-- Compute log2(x) and transform
	builtins[math.log2](e, dst, x)
	-- Relationship: ln(v) = log2(v) / log2(e)
	local r = e.V[dst].reg
	e.emit(BPF.ALU64 + BPF.ADD + BPF.K, r, 0, 0, 1)    -- Compensate round-down
	e.emit(BPF.ALU64 + BPF.MUL + BPF.K, r, 0, 0, 2839) -- 1/log2(e) ~ 2839/4096
	e.emit(BPF.ALU64 + BPF.RSH + BPF.K, r, 0, 0, 12)
end
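-- Reference sketch (plain Lua, not emitted code) of the computation the
-- unrolled BPF instructions above perform, shown only to clarify the
-- algorithm (the emitted code uses the inverted-value trick instead):
--   local function ref_log2(v)
--      local r = 0
--      for i = 4, 0, -1 do
--         local k = 2^i
--         if v >= 2^k then              -- upper half still has bits set
--            v = math.floor(v / 2^k)    -- v >>= k
--            r = r + k                  -- r |= k
--         end
--      end
--      return r                         -- floor(log2(v))
--   end
--   -- log10(v) ~ ((ref_log2(v) + 1) * 1233) / 4096
--   -- ln(v)    ~ ((ref_log2(v) + 1) * 2839) / 4096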

-- Call-type helpers
local function call_helper(e, dst, h, vtype)
	e.vset(dst)
	e.vreg(dst, 0, true, vtype or ffi.typeof('uint64_t'))
	e.emit(BPF.JMP + BPF.CALL, 0, 0, 0, h)
	e.V[dst].const = nil -- Target is not a function anymore
end
local function cpu() error('NYI') end
local function rand() error('NYI') end
local function time() error('NYI') end
local function pid_tgid() error('NYI') end
local function uid_gid() error('NYI') end

-- Export helpers and builtin variants
builtins.cpu = cpu
builtins.rand = rand
builtins.time = time
builtins.pid_tgid = pid_tgid
builtins.uid_gid = uid_gid
builtins.comm = comm
builtins.perf_submit = perf_submit
builtins.stack_id = stack_id
builtins.load_bytes = load_bytes
builtins[cpu] = function (e, dst) return call_helper(e, dst, HELPER.get_smp_processor_id) end
builtins[rand] = function (e, dst) return call_helper(e, dst, HELPER.get_prandom_u32, ffi.typeof('uint32_t')) end
builtins[time] = function (e, dst) return call_helper(e, dst, HELPER.ktime_get_ns) end
builtins[pid_tgid] = function (e, dst) return call_helper(e, dst, HELPER.get_current_pid_tgid) end
builtins[uid_gid] = function (e, dst) return call_helper(e, dst, HELPER.get_current_uid_gid) end
builtins[perf_submit] = function (e, dst, map, value) return perf_submit(e, dst, map, value) end
builtins[stack_id] = function (e, dst, map, key) return stack_id(e, dst, map, key) end
builtins[load_bytes] = function (e, dst, off, var, len) return load_bytes(e, dst, off, var, len) end

return builtins