/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Copyright (C) 2008 Google, Inc.
 *
 * Based on, but no longer compatible with, the original
 * OpenBinder.org binder driver interface, which is:
 *
 * Copyright (c) 2005 Palmsource, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef _UAPI_LINUX_BINDER_H
#define _UAPI_LINUX_BINDER_H

#include <linux/types.h>
#include <linux/ioctl.h>

#define B_PACK_CHARS(c1, c2, c3, c4) \
	((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
#define B_TYPE_LARGE 0x85

enum {
	BINDER_TYPE_BINDER	= B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
	BINDER_TYPE_WEAK_BINDER	= B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
	BINDER_TYPE_HANDLE	= B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
	BINDER_TYPE_WEAK_HANDLE	= B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
	BINDER_TYPE_FD		= B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
	BINDER_TYPE_FDA		= B_PACK_CHARS('f', 'd', 'a', B_TYPE_LARGE),
	BINDER_TYPE_PTR		= B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
};

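/*
 * Illustrative note (not part of the UAPI): B_PACK_CHARS() packs four
 * 8-bit character codes into a single 32-bit object type tag.  With
 * ASCII values 's' = 0x73, 'b' = 0x62 and '*' = 0x2a:
 *
 *	B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE) == 0x73622a85
 *
 * which is the value of BINDER_TYPE_BINDER above.
 */
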
/**
 * enum flat_binder_object_shifts - shift values for flat_binder_object_flags
 * @FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT: shift for getting scheduler policy.
 */
enum flat_binder_object_shifts {
	FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT = 9,
};

/**
 * enum flat_binder_object_flags - flags for use in flat_binder_object.flags
 */
enum flat_binder_object_flags {
	/**
	 * @FLAT_BINDER_FLAG_PRIORITY_MASK: bit-mask for min scheduler priority
	 *
	 * These bits can be used to set the minimum scheduler priority
	 * at which transactions into this node should run. Valid values
	 * in these bits depend on the scheduler policy encoded in
	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK.
	 *
	 * For SCHED_NORMAL/SCHED_BATCH, the valid range is [-20..19].
	 * For SCHED_FIFO/SCHED_RR, the valid range is [1..99].
	 */
	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
	/**
	 * @FLAT_BINDER_FLAG_ACCEPTS_FDS: whether the node accepts fds.
	 */
	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,

	/**
	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK: bit-mask for scheduling policy
	 *
	 * These two bits can be used to set the minimum scheduling policy at
	 * which transactions on this node should run. They match the UAPI
	 * scheduler policy values, e.g.:
	 * 00b: SCHED_NORMAL
	 * 01b: SCHED_FIFO
	 * 10b: SCHED_RR
	 * 11b: SCHED_BATCH
	 */
	FLAT_BINDER_FLAG_SCHED_POLICY_MASK =
		3U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT,

	/**
	 * @FLAT_BINDER_FLAG_INHERIT_RT: whether the node inherits RT policy
	 *
	 * Only when set, calls into this node will inherit a real-time
	 * scheduling policy from the caller (for synchronous transactions).
	 */
	FLAT_BINDER_FLAG_INHERIT_RT = 0x800,

	/**
	 * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
	 *
	 * Only when set, causes senders to include their security
	 * context.
	 */
	FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000,
};

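/*
 * Illustrative sketch (not part of the UAPI): composing a flags value
 * for flat_binder_object.flags.  The node below accepts file
 * descriptors and asks for at least SCHED_FIFO priority 10; the policy
 * encoding (01b = SCHED_FIFO) follows the table above.
 *
 *	__u32 flags = FLAT_BINDER_FLAG_ACCEPTS_FDS |
 *		      (1U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT) |
 *		      (10 & FLAT_BINDER_FLAG_PRIORITY_MASK);
 */
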
#ifdef BINDER_IPC_32BIT
typedef __u32 binder_size_t;
typedef __u32 binder_uintptr_t;
#else
typedef __u64 binder_size_t;
typedef __u64 binder_uintptr_t;
#endif

/**
 * struct binder_object_header - header shared by all binder metadata objects.
 * @type:	type of the object
 */
struct binder_object_header {
	__u32        type;
};

/*
 * This is the flattened representation of a Binder object for transfer
 * between processes.  The 'offsets' supplied as part of a binder transaction
 * contains offsets into the data where these structures occur.  The Binder
 * driver takes care of re-writing the structure type and data as it moves
 * between processes.
 */
struct flat_binder_object {
	struct binder_object_header	hdr;
	__u32				flags;

	/* 8 bytes of data. */
	union {
		binder_uintptr_t	binder;	/* local object */
		__u32			handle;	/* remote object */
	};

	/* extra data associated with local object */
	binder_uintptr_t	cookie;
};

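/*
 * Illustrative sketch (not part of the UAPI): flattening a local binder
 * node for transfer.  local_object_ptr and local_cookie are hypothetical
 * userspace values; the driver rewrites the object into a handle for the
 * receiving process.
 *
 *	struct flat_binder_object obj = {
 *		.hdr.type = BINDER_TYPE_BINDER,
 *		.flags    = FLAT_BINDER_FLAG_ACCEPTS_FDS,
 *		.binder   = (binder_uintptr_t)local_object_ptr,
 *		.cookie   = (binder_uintptr_t)local_cookie,
 *	};
 */
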
/**
 * struct binder_fd_object - describes a file descriptor to be fixed up.
 * @hdr:	common header structure
 * @pad_flags:	padding to remain compatible with old userspace code
 * @pad_binder:	padding to remain compatible with old userspace code
 * @fd:		file descriptor
 * @cookie:	opaque data, used by user-space
 */
struct binder_fd_object {
	struct binder_object_header	hdr;
	__u32				pad_flags;
	union {
		binder_uintptr_t	pad_binder;
		__u32			fd;
	};

	binder_uintptr_t		cookie;
};

/* struct binder_buffer_object - object describing a userspace buffer
 * @hdr:		common header structure
 * @flags:		one or more BINDER_BUFFER_* flags
 * @buffer:		address of the buffer
 * @length:		length of the buffer
 * @parent:		index in offset array pointing to parent buffer
 * @parent_offset:	offset in @parent pointing to this buffer
 *
 * A binder_buffer object represents an object that the
 * binder kernel driver can copy verbatim to the target
 * address space. A buffer itself may be pointed to from
 * within another buffer, meaning that the pointer inside
 * that other buffer needs to be fixed up as well. This
 * can be done by setting the BINDER_BUFFER_FLAG_HAS_PARENT
 * flag in @flags, by setting @parent to the index
 * in the offset array pointing to the parent binder_buffer_object,
 * and by setting @parent_offset to the offset in the parent buffer
 * at which the pointer to this buffer is located.
 */
struct binder_buffer_object {
	struct binder_object_header	hdr;
	__u32				flags;
	binder_uintptr_t		buffer;
	binder_size_t			length;
	binder_size_t			parent;
	binder_size_t			parent_offset;
};

enum {
	BINDER_BUFFER_FLAG_HAS_PARENT = 0x01,
};

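/*
 * Illustrative sketch (not part of the UAPI): a child buffer whose
 * address is embedded in a parent buffer.  Here the parent is assumed
 * to be the object at index 0 of the offsets array, and the pointer to
 * the child sits 16 bytes into the parent's payload; child_data and
 * child_len are hypothetical userspace values.
 *
 *	struct binder_buffer_object child = {
 *		.hdr.type      = BINDER_TYPE_PTR,
 *		.flags         = BINDER_BUFFER_FLAG_HAS_PARENT,
 *		.buffer        = (binder_uintptr_t)child_data,
 *		.length        = child_len,
 *		.parent        = 0,
 *		.parent_offset = 16,
 *	};
 */
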
/* struct binder_fd_array_object - object describing an array of fds in a buffer
 * @hdr:		common header structure
 * @pad:		padding to ensure correct alignment
 * @num_fds:		number of file descriptors in the buffer
 * @parent:		index in offset array to buffer holding the fd array
 * @parent_offset:	start offset of fd array in the buffer
 *
 * A binder_fd_array object represents an array of file
 * descriptors embedded in a binder_buffer_object. It is
 * different from a regular binder_buffer_object because it
 * describes a list of file descriptors to fix up, not an opaque
 * blob of memory, and hence the kernel needs to treat it differently.
 *
 * An example of how this would be used is with Android's
 * native_handle_t object, which is a struct with a list of integers
 * and a list of file descriptors. The native_handle_t struct itself
 * will be represented by a struct binder_buffer_object, whereas the
 * embedded list of file descriptors is represented by a
 * struct binder_fd_array_object with that binder_buffer_object as
 * a parent.
 */
struct binder_fd_array_object {
	struct binder_object_header	hdr;
	__u32				pad;
	binder_size_t			num_fds;
	binder_size_t			parent;
	binder_size_t			parent_offset;
};

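/*
 * Illustrative sketch (not part of the UAPI): describing two file
 * descriptors embedded in the buffer object at index 1 of the offsets
 * array, starting 8 bytes into that buffer (roughly the native_handle_t
 * layout described above).
 *
 *	struct binder_fd_array_object fda = {
 *		.hdr.type      = BINDER_TYPE_FDA,
 *		.num_fds       = 2,
 *		.parent        = 1,
 *		.parent_offset = 8,
 *	};
 */
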
/*
 * On 64-bit platforms where user code may run in 32-bit mode, the driver
 * must translate the buffer (and local binder) addresses appropriately.
 */

struct binder_write_read {
	binder_size_t		write_size;	/* bytes to write */
	binder_size_t		write_consumed;	/* bytes consumed by driver */
	binder_uintptr_t	write_buffer;
	binder_size_t		read_size;	/* bytes to read */
	binder_size_t		read_consumed;	/* bytes consumed by driver */
	binder_uintptr_t	read_buffer;
};

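/*
 * Illustrative sketch (not part of the UAPI): one BINDER_WRITE_READ
 * round trip.  binder_fd, cmds, cmds_len and readbuf are hypothetical
 * userspace variables; the driver fills in the *_consumed fields.
 *
 *	struct binder_write_read bwr = {
 *		.write_size   = cmds_len,
 *		.write_buffer = (binder_uintptr_t)cmds,
 *		.read_size    = sizeof(readbuf),
 *		.read_buffer  = (binder_uintptr_t)readbuf,
 *	};
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
 *		return -1;	// see the note on EINTR/ECONNREFUSED below
 */
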
/* Use with BINDER_VERSION, driver fills in fields. */
struct binder_version {
	/* driver protocol version -- increment with incompatible change */
	__s32       protocol_version;
};

/* This is the current protocol version. */
#ifdef BINDER_IPC_32BIT
#define BINDER_CURRENT_PROTOCOL_VERSION 7
#else
#define BINDER_CURRENT_PROTOCOL_VERSION 8
#endif

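/*
 * Illustrative sketch (not part of the UAPI): checking that the running
 * driver speaks the protocol this header was built against; binder_fd
 * is a hypothetical open file descriptor for the binder device.
 *
 *	struct binder_version vers;
 *	if (ioctl(binder_fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		return -1;	// protocol mismatch
 */
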
/*
 * Use with BINDER_GET_NODE_DEBUG_INFO, driver reads ptr, writes to all fields.
 * Set ptr to NULL for the first call to get the info for the first node, and
 * then repeat the call passing the previously returned value to get the next
 * nodes.  ptr will be 0 when there are no more nodes.
 */
struct binder_node_debug_info {
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	__u32            has_strong_ref;
	__u32            has_weak_ref;
};

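/*
 * Illustrative sketch (not part of the UAPI): walking the calling
 * process's nodes with BINDER_GET_NODE_DEBUG_INFO, following the
 * start-at-0/repeat-until-0 convention described above; binder_fd and
 * dump_node() are hypothetical.
 *
 *	struct binder_node_debug_info info = { .ptr = 0 };
 *	for (;;) {
 *		if (ioctl(binder_fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
 *			break;
 *		if (info.ptr == 0)
 *			break;		// no more nodes
 *		dump_node(&info);
 *	}
 */
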
struct binder_node_info_for_ref {
	__u32            handle;
	__u32            strong_count;
	__u32            weak_count;
	__u32            reserved1;
	__u32            reserved2;
	__u32            reserved3;
};

struct binder_freeze_info {
	__u32            pid;
	__u32            enable;
	__u32            timeout_ms;
};

struct binder_frozen_status_info {
	__u32            pid;

	/* process received sync transactions since last frozen
	 * bit 0: received sync transaction after being frozen
	 * bit 1: new pending sync transaction during freezing
	 */
	__u32            sync_recv;

	/* process received async transactions since last frozen */
	__u32            async_recv;
};

/* struct binder_extended_error - extended error information
 * @id:		identifier for the failed operation
 * @command:	command as defined by binder_driver_return_protocol
 * @param:	parameter holding a negative errno value
 *
 * Used with BINDER_GET_EXTENDED_ERROR. This extends the error information
 * returned by the driver upon a failed operation. Userspace can pull this
 * data to properly handle specific error scenarios.
 */
struct binder_extended_error {
	__u32	id;
	__u32	command;
	__s32	param;
};

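/*
 * Illustrative sketch (not part of the UAPI): pulling extended error
 * details after a failed operation; binder_fd is hypothetical.
 *
 *	struct binder_extended_error ee;
 *	if (ioctl(binder_fd, BINDER_GET_EXTENDED_ERROR, &ee) == 0)
 *		fprintf(stderr, "binder op %u failed: cmd 0x%x, err %d\n",
 *			ee.id, ee.command, ee.param);
 */
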
enum {
	BINDER_WRITE_READ		= _IOWR('b', 1, struct binder_write_read),
	BINDER_SET_IDLE_TIMEOUT		= _IOW('b', 3, __s64),
	BINDER_SET_MAX_THREADS		= _IOW('b', 5, __u32),
	BINDER_SET_IDLE_PRIORITY	= _IOW('b', 6, __s32),
	BINDER_SET_CONTEXT_MGR		= _IOW('b', 7, __s32),
	BINDER_THREAD_EXIT		= _IOW('b', 8, __s32),
	BINDER_VERSION			= _IOWR('b', 9, struct binder_version),
	BINDER_GET_NODE_DEBUG_INFO	= _IOWR('b', 11, struct binder_node_debug_info),
	BINDER_GET_NODE_INFO_FOR_REF	= _IOWR('b', 12, struct binder_node_info_for_ref),
	BINDER_SET_CONTEXT_MGR_EXT	= _IOW('b', 13, struct flat_binder_object),
	BINDER_FREEZE			= _IOW('b', 14, struct binder_freeze_info),
	BINDER_GET_FROZEN_INFO		= _IOWR('b', 15, struct binder_frozen_status_info),
	BINDER_ENABLE_ONEWAY_SPAM_DETECTION	= _IOW('b', 16, __u32),
	BINDER_GET_EXTENDED_ERROR	= _IOWR('b', 17, struct binder_extended_error),
};

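/*
 * Illustrative sketch (not part of the UAPI, and not a complete setup
 * recipe): a typical way a process opens the driver and bounds its
 * looper pool before issuing the ioctls above.  The device path,
 * mapping size and thread count are illustrative assumptions.
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
 *	__u32 max_threads = 15;
 *	ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
 */
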
/*
 * NOTE: Two special error codes you should check for when calling
 * in to the driver are:
 *
 * EINTR -- The operation has been interrupted.  This should be
 * handled by retrying the ioctl() until a different error code
 * is returned.
 *
 * ECONNREFUSED -- The driver is no longer accepting operations
 * from your process.  That is, the process is being destroyed.
 * You should handle this by exiting from your process.  Note
 * that once this error code is returned, all further calls to
 * the driver from any thread will return this same code.
 */

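/*
 * Illustrative sketch (not part of the UAPI): the retry pattern the note
 * above calls for; binder_fd and bwr are hypothetical.
 *
 *	int ret;
 *	do {
 *		ret = ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	} while (ret < 0 && errno == EINTR);
 *	if (ret < 0 && errno == ECONNREFUSED)
 *		exit(0);	// driver refuses this process; shut down
 */
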
enum transaction_flags {
	TF_ONE_WAY	= 0x01,	/* this is a one-way call: async, no return */
	TF_ROOT_OBJECT	= 0x04,	/* contents are the component's root object */
	TF_STATUS_CODE	= 0x08,	/* contents are a 32-bit status code */
	TF_ACCEPT_FDS	= 0x10,	/* allow replies with file descriptors */
	TF_CLEAR_BUF	= 0x20,	/* clear buffer on txn complete */
	TF_UPDATE_TXN	= 0x40,	/* update the outdated pending async txn */
};

struct binder_transaction_data {
	/* The first two are only used for bcTRANSACTION and brTRANSACTION,
	 * identifying the target and contents of the transaction.
	 */
	union {
		/* target descriptor of command transaction */
		__u32	handle;
		/* target descriptor of return transaction */
		binder_uintptr_t ptr;
	} target;
	binder_uintptr_t	cookie;	/* target object cookie */
	__u32		code;		/* transaction command */

	/* General information about the transaction. */
	__u32	        flags;
	__kernel_pid_t	sender_pid;
	__kernel_uid32_t	sender_euid;
	binder_size_t	data_size;	/* number of bytes of data */
	binder_size_t	offsets_size;	/* number of bytes of offsets */

	/* If this transaction is inline, the data immediately
	 * follows here; otherwise, it ends with a pointer to
	 * the data buffer.
	 */
	union {
		struct {
			/* transaction data */
			binder_uintptr_t	buffer;
			/* offsets from buffer to flat_binder_object structs */
			binder_uintptr_t	offsets;
		} ptr;
		__u8	buf[8];
	} data;
};

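/*
 * Illustrative sketch (not part of the UAPI): queueing a BC_TRANSACTION
 * through the write buffer of a BINDER_WRITE_READ call.  In the write
 * stream each 32-bit command word is immediately followed by its
 * payload, hence the packed pair below; handle, code, data, offsets and
 * the two lengths are hypothetical userspace values.
 *
 *	struct {
 *		__u32				cmd;
 *		struct binder_transaction_data	tr;
 *	} __attribute__((packed)) writebuf = {
 *		.cmd = BC_TRANSACTION,
 *		.tr = {
 *			.target.handle	  = handle,
 *			.code		  = code,
 *			.flags		  = TF_ACCEPT_FDS,
 *			.data_size	  = data_len,
 *			.offsets_size	  = offsets_len,
 *			.data.ptr.buffer  = (binder_uintptr_t)data,
 *			.data.ptr.offsets = (binder_uintptr_t)offsets,
 *		},
 *	};
 *	// point bwr.write_buffer at &writebuf, write_size = sizeof(writebuf)
 */
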
struct binder_transaction_data_secctx {
	struct binder_transaction_data transaction_data;
	binder_uintptr_t secctx;
};

struct binder_transaction_data_sg {
	struct binder_transaction_data transaction_data;
	binder_size_t buffers_size;
};

struct binder_ptr_cookie {
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
};

struct binder_handle_cookie {
	__u32 handle;
	binder_uintptr_t cookie;
} __packed;

struct binder_pri_desc {
	__s32 priority;
	__u32 desc;
};

struct binder_pri_ptr_cookie {
	__s32 priority;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
};

enum binder_driver_return_protocol {
	BR_ERROR = _IOR('r', 0, __s32),
	/*
	 * int: error code
	 */

	BR_OK = _IO('r', 1),
	/* No parameters! */

	BR_TRANSACTION_SEC_CTX = _IOR('r', 2,
				      struct binder_transaction_data_secctx),
	/*
	 * binder_transaction_data_secctx: the received command.
	 */
	BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
	BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
	/*
	 * binder_transaction_data: the received command.
	 */

	BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
	/*
	 * not currently supported
	 * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
	 * Else the remote object has acquired a primary reference.
	 */

	BR_DEAD_REPLY = _IO('r', 5),
	/*
	 * The target of the last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) is no longer with us.  No parameters.
	 */

	BR_TRANSACTION_COMPLETE = _IO('r', 6),
	/*
	 * No parameters... always refers to the last transaction requested
	 * (including replies).  Note that this will be sent even for
	 * asynchronous transactions.
	 */

	BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
	BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
	BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
	BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
	/*
	 * void *:	ptr to binder
	 * void *: cookie for binder
	 */

	BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
	/*
	 * not currently supported
	 * int:	priority
	 * void *: ptr to binder
	 * void *: cookie for binder
	 */

	BR_NOOP = _IO('r', 12),
	/*
	 * No parameters.  Do nothing and examine the next command.  It exists
	 * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
	 */

	BR_SPAWN_LOOPER = _IO('r', 13),
	/*
	 * No parameters.  The driver has determined that a process has no
	 * threads waiting to service incoming transactions.  When a process
	 * receives this command, it must spawn a new service thread and
	 * register it via bcENTER_LOOPER.
	 */

	BR_FINISHED = _IO('r', 14),
	/*
	 * not currently supported
	 * stop threadpool thread
	 */

	BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t),
	/*
	 * void *: cookie
	 */
	BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t),
	/*
	 * void *: cookie
	 */

	BR_FAILED_REPLY = _IO('r', 17),
	/*
	 * The last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters.
	 */

	BR_FROZEN_REPLY = _IO('r', 18),
	/*
	 * The target of the last sync transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) is frozen.  No parameters.
	 */

	BR_ONEWAY_SPAM_SUSPECT = _IO('r', 19),
	/*
	 * Current process sent too many oneway calls to target, and the last
	 * asynchronous transaction makes the allocated async buffer size
	 * exceed the detection threshold.  No parameters.
	 */

	BR_TRANSACTION_PENDING_FROZEN = _IO('r', 20),
	/*
	 * The target of the last async transaction is frozen.  No parameters.
	 */
};

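/*
 * Illustrative sketch (not part of the UAPI): draining the read buffer
 * returned by BINDER_WRITE_READ.  Every return code is an _IO()-encoded
 * value, so _IOC_SIZE() gives the size of its payload; readbuf, bwr and
 * handle_transaction() are hypothetical.
 *
 *	__u8 *p   = readbuf;
 *	__u8 *end = readbuf + bwr.read_consumed;
 *	while (p < end) {
 *		__u32 cmd = *(__u32 *)p;
 *		p += sizeof(__u32);
 *		switch (cmd) {
 *		case BR_NOOP:
 *			break;
 *		case BR_SPAWN_LOOPER:
 *			// spawn a thread that registers with BC_REGISTER_LOOPER
 *			break;
 *		case BR_TRANSACTION: {
 *			struct binder_transaction_data *tr = (void *)p;
 *			p += sizeof(*tr);
 *			handle_transaction(tr);	// then BC_FREE_BUFFER the data
 *			break;
 *		}
 *		default:
 *			p += _IOC_SIZE(cmd);
 *			break;
 *		}
 *	}
 */
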
enum binder_driver_command_protocol {
	BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
	BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
	/*
	 * binder_transaction_data: the sent command.
	 */

	BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
	/*
	 * not currently supported
	 * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful.
	 * Else you have acquired a primary reference on the object.
	 */

	BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t),
	/*
	 * void *: ptr to transaction data received on a read
	 */

	BC_INCREFS = _IOW('c', 4, __u32),
	BC_ACQUIRE = _IOW('c', 5, __u32),
	BC_RELEASE = _IOW('c', 6, __u32),
	BC_DECREFS = _IOW('c', 7, __u32),
	/*
	 * int:	descriptor
	 */

	BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
	BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
	/*
	 * void *: ptr to binder
	 * void *: cookie for binder
	 */

	BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
	/*
	 * not currently supported
	 * int: priority
	 * int: descriptor
	 */

	BC_REGISTER_LOOPER = _IO('c', 11),
	/*
	 * No parameters.
	 * Register a spawned looper thread with the device.
	 */

	BC_ENTER_LOOPER = _IO('c', 12),
	BC_EXIT_LOOPER = _IO('c', 13),
	/*
	 * No parameters.
	 * These two commands are sent as an application-level thread
	 * enters and exits the binder loop, respectively.  They are
	 * used so the binder can have an accurate count of the number
	 * of looping threads it has available.
	 */

	BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14,
						struct binder_handle_cookie),
	/*
	 * int: handle
	 * void *: cookie
	 */

	BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15,
						struct binder_handle_cookie),
	/*
	 * int: handle
	 * void *: cookie
	 */

	BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t),
	/*
	 * void *: cookie
	 */

	BC_TRANSACTION_SG = _IOW('c', 17, struct binder_transaction_data_sg),
	BC_REPLY_SG = _IOW('c', 18, struct binder_transaction_data_sg),
	/*
	 * binder_transaction_data_sg: the sent command.
	 */
};

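/*
 * Illustrative sketch (not part of the UAPI): the death-notification
 * handshake built from the commands above.  handle and death_cookie
 * are hypothetical userspace values.
 *
 *	struct binder_handle_cookie hc = {
 *		.handle = handle,
 *		.cookie = (binder_uintptr_t)death_cookie,
 *	};
 *	// 1. queue BC_REQUEST_DEATH_NOTIFICATION followed by hc in the
 *	//    write buffer;
 *	// 2. when the remote process dies, the driver returns
 *	//    BR_DEAD_BINDER followed by the cookie in the read buffer;
 *	// 3. acknowledge it by writing BC_DEAD_BINDER_DONE with the same
 *	//    cookie.
 */
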
#endif /* _UAPI_LINUX_BINDER_H */