/* Verifier spill/fill test cases.
 * tools/testing/selftests/bpf/verifier/spill_fill.c
 */
{
	"check valid spill/fill",
	.insns = {
	/* spill R1(ctx) into stack */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	/* fill it back into R2 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
	/* should be able to access R0 = *(R2 + 8) */
	/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R0 leaks addr",
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.retval = POINTER_VALUE,
},
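/* A spilled ctx pointer must keep its register type across the fill:
 * the filled pointer can still be used to read __sk_buff fields.
 */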
{
	"check valid spill/fill, skb mark",
	.insns = {
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = ACCEPT,
},
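/* PTR_TO_MEM returned by bpf_ringbuf_reserve() must likewise survive a
 * spill/fill round trip and remain dereferenceable afterwards.
 */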
{
	"check valid spill/fill, ptr to mem",
	.insns = {
	/* reserve 8 byte ringbuf memory */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_2, 8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
	/* store a pointer to the reserved memory in R6 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	/* check whether the reservation was successful */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	/* spill R6(mem) into the stack */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
	/* fill it back into R7 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8),
	/* should be able to access *(R7) = 0 */
	BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0),
	/* submit the reserved ringbuf memory */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_ringbuf = { 1 },
	.result = ACCEPT,
	.result_unpriv = ACCEPT,
},
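/* Pointer arithmetic on the not-yet-NULL-checked return value of
 * bpf_ringbuf_reserve() (alloc_mem_or_null) must be rejected.
 */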
{
	"check with invalid reg offset 0",
	.insns = {
	/* reserve 8 byte ringbuf memory */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_2, 8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
	/* store a pointer to the reserved memory in R6 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	/* add invalid offset to memory or NULL */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
	/* check whether the reservation was successful */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	/* should not be able to access *(R6) = 0 */
	BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0),
	/* submit the reserved ringbuf memory */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_ringbuf = { 1 },
	.result = REJECT,
	.errstr = "R0 pointer arithmetic on alloc_mem_or_null prohibited",
},
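/* Overwriting any byte of a spilled pointer invalidates the spill: the
 * fill then produces a SCALAR_VALUE that cannot be dereferenced.
 */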
{
	"check corrupted spill/fill",
	.insns = {
	/* spill R1(ctx) into stack */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	/* corrupt the spilled R1 pointer on the stack */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
	/* filling it back into R0 is fine for priv.
	 * R0 now becomes SCALAR_VALUE.
	 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	/* Load from R0 should fail. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "attempt to corrupt spilled",
	.errstr = "R0 invalid mem access 'scalar'",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
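/* The next two tests corrupt the least/most significant bytes of the
 * spilled pointer.  Privileged programs may still fill and return the
 * (now scalar) value; unprivileged programs are rejected.
 */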
{
	"check corrupted spill/fill, LSB",
	.insns = {
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "attempt to corrupt spilled",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = POINTER_VALUE,
},
{
	"check corrupted spill/fill, MSB",
	.insns = {
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "attempt to corrupt spilled",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = POINTER_VALUE,
},
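/* The tests below exercise spill/fill of scalars narrower than 8 bytes.
 * A u32 spill refilled with matching size and offset keeps its known
 * value (20), so the bounds check below proves 20 readable packet bytes
 * and the access is accepted.
 */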
{
	"Spill and refill a u32 const scalar.  Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u32 *)(r10 -8) */
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
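/* Reading the never-written half of the stack slot next to a u32 spill
 * must be rejected as an invalid stack read.
 */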
{
	"Spill a u32 const, refill from the uninit other half of the stack slot",
	.insns = {
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u32 *)(r10 -4) fp-8=????rrrr */
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid read from stack off -4+0 size 4",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
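/* A fill narrower than the spill loses the known value: r4 becomes an
 * unknown scalar with umax=65535, so the verifier can no longer prove
 * the packet bound and rejects the access.
 */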
{
	"Spill a u32 const scalar.  Refill as u16.  Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u16 *)(r10 -8) */
	BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
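/* A u64 fill spanning two separate u32 spills yields a completely
 * unknown scalar; adding it to a packet pointer is rejected at the ALU
 * because its minimum value is unbounded.
 */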
{
	"Spill u32 const scalars.  Refill as u64.  Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r6 = 0 */
	BPF_MOV32_IMM(BPF_REG_6, 0),
	/* r7 = 20 */
	BPF_MOV32_IMM(BPF_REG_7, 20),
	/* *(u32 *)(r10 -4) = r6 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
	/* *(u32 *)(r10 -8) = r7 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
	/* r4 = *(u64 *)(r10 -8) */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=scalar(unbounded), rejected */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3), never verified: the add above is rejected */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "math between pkt pointer and register with unbounded min value is not allowed",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
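/* A fill whose offset does not match the spill (fp-6 vs. fp-8) also
 * loses the known value and is treated as an unknown scalar.
 */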
{
	"Spill a u32 const scalar.  Refill as u16 from fp-6.  Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u16 *)(r10 -6) */
	BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -6),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
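/* Spills at stack addresses that are not 8-byte aligned are not tracked
 * precisely either: the refilled value is an unknown u32.
 */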
{
	"Spill and refill a u32 const scalar at a non-8-byte-aligned stack addr.  Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* *(u32 *)(r10 -4) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4),
	/* r4 = *(u32 *)(r10 -4) */
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=umax=U32_MAX */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=umax=U32_MAX */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
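/* Bounds of a non-constant scalar survive the spill/fill round trip:
 * umax=40 is retained, so the packet access can be proven safe.
 */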
{
	"Spill and refill a umax=40 bounded scalar.  Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1,
		    offsetof(struct __sk_buff, tstamp)),
	BPF_JMP_IMM(BPF_JLE, BPF_REG_4, 40, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* *(u32 *)(r10 -8) = r4 R4=umax=40 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u32 *)(r10 -8) */
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
	/* r2 += r4 R2=pkt R4=umax=40 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_4),
	/* r0 = r2 R2=pkt,umax=40 R4=umax=40 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 20),
	/* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 1),
	/* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
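/* Two adjacent u32 spills cover all 8 bytes of the slot, so a u64 fill
 * is a valid stack read even though the result is an unknown scalar.
 */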
{
	"Spill a u32 scalar at fp-4 and then at fp-8",
	.insns = {
	/* r4 = 4321 */
	BPF_MOV32_IMM(BPF_REG_4, 4321),
	/* *(u32 *)(r10 -4) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u64 *)(r10 -8) */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},