#ifndef __VMLINUX_H__
#define __VMLINUX_H__

#ifndef BPF_NO_PRESERVE_ACCESS_INDEX
#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record)
#endif

typedef struct { int counter; } atomic_t; struct jump_entry; struct static_key_mod; struct static_key { atomic_t enabled; union { unsigned long type; struct jump_entry *entries; struct static_key_mod *next; }; }; struct static_call_key; struct tracepoint_func; struct tracepoint { const char *name; struct static_key key; struct static_call_key *static_call_key; void *static_call_tramp; void *iterator; void *probestub; int (*regfunc)(); void (*unregfunc)(); struct tracepoint_func __attribute__((btf_type_tag("rcu"))) *funcs; }; typedef int __s32; typedef __s32 s32; struct jump_entry { s32 code; s32 target; long key; }; struct static_call_key { void *func; }; struct tracepoint_func { void *func; void *data; int prio; }; enum trace_reg { TRACE_REG_REGISTER = 0, TRACE_REG_UNREGISTER = 1, TRACE_REG_PERF_REGISTER = 2, TRACE_REG_PERF_UNREGISTER = 3, TRACE_REG_PERF_OPEN = 4, TRACE_REG_PERF_CLOSE = 5, TRACE_REG_PERF_ADD = 6, TRACE_REG_PERF_DEL = 7, }; struct list_head { struct list_head *next; struct list_head *prev; }; struct trace_event_call; struct trace_event_fields; struct trace_event_class { const char *system; void *probe; void *perf_probe; int (*reg)(struct trace_event_call *, enum trace_reg, void *); struct trace_event_fields *fields_array; struct list_head * (*get_fields)(struct trace_event_call *); struct list_head fields; int (*raw_init)(struct trace_event_call *); }; struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; }; struct trace_event_functions; struct trace_event { struct hlist_node node; int type; struct trace_event_functions *funcs; }; struct event_filter; struct hlist_head; struct bpf_prog_array; struct perf_event; struct trace_event_call { struct list_head list; struct trace_event_class *class; union { char *name; struct tracepoint *tp; }; struct trace_event event; char *print_fmt; struct event_filter *filter; union { void *module; atomic_t refcnt; }; void *data; int flags; int perf_refcount; struct hlist_head __attribute__((btf_type_tag("percpu"))) *perf_events; struct bpf_prog_array __attribute__((btf_type_tag("rcu"))) *prog_array; int (*perf_perm)(struct trace_event_call *, struct perf_event *); }; enum print_line_t { TRACE_TYPE_PARTIAL_LINE = 0, TRACE_TYPE_HANDLED = 1, TRACE_TYPE_UNHANDLED = 2, TRACE_TYPE_NO_CONSUME = 3, }; struct trace_iterator; typedef enum print_line_t (*trace_print_func)(struct trace_iterator *, int, struct trace_event *); struct trace_event_functions { trace_print_func trace; trace_print_func raw; trace_print_func hex; trace_print_func binary; }; typedef long long __s64; typedef __s64 s64; typedef struct { s64 counter; } atomic64_t; typedef atomic64_t atomic_long_t; typedef unsigned char __u8; typedef __u8 u8; typedef unsigned short __u16; typedef __u16 u16; struct qspinlock { union { atomic_t val; struct { u8 locked; u8 pending; }; struct { u16 locked_pending; u16 tail; }; }; }; typedef struct qspinlock arch_spinlock_t; struct raw_spinlock { arch_spinlock_t raw_lock; }; typedef struct raw_spinlock raw_spinlock_t; struct optimistic_spin_queue { atomic_t tail; }; typedef unsigned long long __u64; typedef __u64 u64; struct mutex { atomic_long_t owner; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct list_head wait_list; u64 android_oem_data1[2]; }; typedef unsigned long __kernel_ulong_t; typedef __kernel_ulong_t __kernel_size_t; typedef
__kernel_size_t size_t; typedef long long __kernel_loff_t; typedef __kernel_loff_t loff_t; struct seq_buf { char *buffer; size_t size; size_t len; loff_t readpos; }; struct trace_seq { char buffer[4096]; struct seq_buf seq; int full; }; struct cpumask { unsigned long bits[1]; }; typedef struct cpumask cpumask_var_t[1]; typedef _Bool bool; struct trace_array; struct tracer; struct array_buffer; struct ring_buffer_iter; struct trace_entry; struct trace_iterator { struct trace_array *tr; struct tracer *trace; struct array_buffer *array_buffer; void *private; int cpu_file; struct mutex mutex; struct ring_buffer_iter **buffer_iter; unsigned long iter_flags; void *temp; unsigned int temp_size; char *fmt; unsigned int fmt_size; long wait_index; struct trace_seq tmp_seq; cpumask_var_t started; bool snapshot; struct trace_seq seq; struct trace_entry *ent; unsigned long lost_events; int leftover; int ent_size; int cpu; u64 ts; loff_t pos; long idx; }; struct trace_entry { unsigned short type; unsigned char flags; unsigned char preempt_count; int pid; }; struct hlist_head { struct hlist_node *first; }; struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); }; struct bpf_prog; struct bpf_cgroup_storage; struct bpf_prog_array_item { struct bpf_prog *prog; union { struct bpf_cgroup_storage *cgroup_storage[2]; u64 bpf_cookie; }; }; struct bpf_prog_array { struct callback_head rcu; struct bpf_prog_array_item items[0]; }; struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; }; enum perf_event_state { PERF_EVENT_STATE_DEAD = -4, PERF_EVENT_STATE_EXIT = -3, PERF_EVENT_STATE_ERROR = -2, PERF_EVENT_STATE_OFF = -1, PERF_EVENT_STATE_INACTIVE = 0, PERF_EVENT_STATE_ACTIVE = 1, }; typedef struct { atomic_long_t a; } local_t; typedef struct { local_t a; } local64_t; typedef unsigned int __u32; struct perf_event_attr { __u32 type; __u32 size; __u64 config; union { __u64 sample_period; __u64 sample_freq; }; __u64 sample_type; __u64 read_format; __u64 disabled: 1; __u64 inherit: 1; __u64 pinned: 1; __u64 exclusive: 1; __u64 exclude_user: 1; __u64 exclude_kernel: 1; __u64 exclude_hv: 1; __u64 exclude_idle: 1; __u64 mmap: 1; __u64 comm: 1; __u64 freq: 1; __u64 inherit_stat: 1; __u64 enable_on_exec: 1; __u64 task: 1; __u64 watermark: 1; __u64 precise_ip: 2; __u64 mmap_data: 1; __u64 sample_id_all: 1; __u64 exclude_host: 1; __u64 exclude_guest: 1; __u64 exclude_callchain_kernel: 1; __u64 exclude_callchain_user: 1; __u64 mmap2: 1; __u64 comm_exec: 1; __u64 use_clockid: 1; __u64 context_switch: 1; __u64 write_backward: 1; __u64 namespaces: 1; __u64 ksymbol: 1; __u64 bpf_event: 1; __u64 aux_output: 1; __u64 cgroup: 1; __u64 text_poke: 1; __u64 build_id: 1; __u64 inherit_thread: 1; __u64 remove_on_exec: 1; __u64 sigtrap: 1; __u64 __reserved_1: 26; union { __u32 wakeup_events; __u32 wakeup_watermark; }; __u32 bp_type; union { __u64 bp_addr; __u64 kprobe_func; __u64 uprobe_path; __u64 config1; }; union { __u64 bp_len; __u64 kprobe_addr; __u64 probe_offset; __u64 config2; }; __u64 branch_sample_type; __u64 sample_regs_user; __u32 sample_stack_user; __s32 clockid; __u64 sample_regs_intr; __u32 aux_watermark; __u16 sample_max_stack; __u16 __reserved_2; __u32 aux_sample_size; __u32 __reserved_3; __u64 sig_data; __u64 config3; }; struct hw_perf_event_extra { u64 config; unsigned int reg; int alloc; int idx; }; typedef s64 ktime_t; struct timerqueue_node { struct rb_node node; ktime_t expires; }; enum hrtimer_restart { HRTIMER_NORESTART = 0, 
HRTIMER_RESTART = 1, }; struct hrtimer_clock_base; struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; u8 state; u8 is_rel; u8 is_soft; u8 is_hard; u64 android_kabi_reserved1; }; typedef __u32 u32; struct arch_hw_breakpoint_ctrl { u32 __reserved: 19; u32 len: 8; u32 type: 2; u32 privilege: 2; u32 enabled: 1; }; struct arch_hw_breakpoint { u64 address; u64 trigger; struct arch_hw_breakpoint_ctrl ctrl; }; struct rhash_head { struct rhash_head __attribute__((btf_type_tag("rcu"))) *next; }; struct rhlist_head { struct rhash_head rhead; struct rhlist_head __attribute__((btf_type_tag("rcu"))) *next; }; struct task_struct; struct hw_perf_event { union { struct { u64 config; u64 last_tag; unsigned long config_base; unsigned long event_base; int event_base_rdpmc; int idx; int last_cpu; int flags; struct hw_perf_event_extra extra_reg; struct hw_perf_event_extra branch_reg; }; struct { struct hrtimer hrtimer; }; struct { struct list_head tp_list; }; struct { u64 pwr_acc; u64 ptsc; }; struct { struct arch_hw_breakpoint info; struct rhlist_head bp_list; }; struct { u8 iommu_bank; u8 iommu_cntr; u16 padding; u64 conf; u64 conf1; }; }; struct task_struct *target; void *addr_filters; unsigned long addr_filters_gen; int state; local64_t prev_count; u64 sample_period; union { struct { u64 last_period; local64_t period_left; }; struct { u64 saved_metric; u64 saved_slots; }; }; u64 interrupts_seq; u64 interrupts; u64 freq_time_stamp; u64 freq_count_stamp; }; struct spinlock { union { struct raw_spinlock rlock; }; }; typedef struct spinlock spinlock_t; struct wait_queue_head { spinlock_t lock; struct list_head head; }; typedef struct wait_queue_head wait_queue_head_t; struct llist_node { struct llist_node *next; }; struct __call_single_node { struct llist_node llist; union { unsigned int u_flags; atomic_t a_flags; }; u16 src; u16 dst; }; struct rcuwait { struct task_struct __attribute__((btf_type_tag("rcu"))) *task; }; struct irq_work { struct __call_single_node node; void (*func)(struct irq_work *); struct rcuwait irqwait; }; struct perf_addr_filters_head { struct list_head list; raw_spinlock_t lock; unsigned int nr_file_filters; }; struct perf_sample_data; struct pt_regs; typedef void (*perf_overflow_handler_t)(struct perf_event *, struct perf_sample_data *, struct pt_regs *); struct pmu; struct perf_event_context; struct perf_event_pmu_context; struct perf_buffer; struct fasync_struct; struct perf_addr_filter_range; struct pid_namespace; struct perf_event { struct list_head event_entry; struct list_head sibling_list; struct list_head active_list; struct rb_node group_node; u64 group_index; struct list_head migrate_entry; struct hlist_node hlist_entry; struct list_head active_entry; int nr_siblings; int event_caps; int group_caps; unsigned int group_generation; struct perf_event *group_leader; struct pmu *pmu; void *pmu_private; enum perf_event_state state; unsigned int attach_state; local64_t count; atomic64_t child_count; u64 total_time_enabled; u64 total_time_running; u64 tstamp; struct perf_event_attr attr; u16 header_size; u16 id_header_size; u16 read_size; struct hw_perf_event hw; struct perf_event_context *ctx; struct perf_event_pmu_context *pmu_ctx; atomic_long_t refcount; atomic64_t child_total_time_enabled; atomic64_t child_total_time_running; struct mutex child_mutex; struct list_head child_list; struct perf_event *parent; int oncpu; int cpu; struct list_head owner_entry; struct task_struct *owner; 
struct mutex mmap_mutex; atomic_t mmap_count; struct perf_buffer *rb; struct list_head rb_entry; unsigned long rcu_batches; int rcu_pending; wait_queue_head_t waitq; struct fasync_struct *fasync; unsigned int pending_wakeup; unsigned int pending_kill; unsigned int pending_disable; unsigned int pending_sigtrap; unsigned long pending_addr; struct irq_work pending_irq; struct callback_head pending_task; unsigned int pending_work; atomic_t event_limit; struct perf_addr_filters_head addr_filters; struct perf_addr_filter_range *addr_filter_ranges; unsigned long addr_filters_gen; struct perf_event *aux_event; void (*destroy)(struct perf_event *); struct callback_head callback_head; struct pid_namespace *ns; u64 id; atomic64_t lost_samples; u64 (*clock)(); perf_overflow_handler_t overflow_handler; void *overflow_handler_context; perf_overflow_handler_t orig_overflow_handler; struct bpf_prog *prog; u64 bpf_cookie; struct trace_event_call *tp_event; struct event_filter *filter; void *security; struct list_head sb_list; __u32 orig_type; }; struct module; struct device; struct attribute_group; struct perf_cpu_pmu_context; struct mm_struct; struct kmem_cache; struct perf_output_handle; struct pmu { struct list_head entry; struct module *module; struct device *dev; struct device *parent; const struct attribute_group **attr_groups; const struct attribute_group **attr_update; const char *name; int type; int capabilities; int __attribute__((btf_type_tag("percpu"))) *pmu_disable_count; struct perf_cpu_pmu_context __attribute__((btf_type_tag("percpu"))) *cpu_pmu_context; atomic_t exclusive_cnt; int task_ctx_nr; int hrtimer_interval_ms; unsigned int nr_addr_filters; void (*pmu_enable)(struct pmu *); void (*pmu_disable)(struct pmu *); int (*event_init)(struct perf_event *); void (*event_mapped)(struct perf_event *, struct mm_struct *); void (*event_unmapped)(struct perf_event *, struct mm_struct *); int (*add)(struct perf_event *, int); void (*del)(struct perf_event *, int); void (*start)(struct perf_event *, int); void (*stop)(struct perf_event *, int); void (*read)(struct perf_event *); void (*start_txn)(struct pmu *, unsigned int); int (*commit_txn)(struct pmu *); void (*cancel_txn)(struct pmu *); int (*event_idx)(struct perf_event *); void (*sched_task)(struct perf_event_pmu_context *, bool); struct kmem_cache *task_ctx_cache; void (*swap_task_ctx)(struct perf_event_pmu_context *, struct perf_event_pmu_context *); void * (*setup_aux)(struct perf_event *, void **, int, bool); void (*free_aux)(void *); long (*snapshot_aux)(struct perf_event *, struct perf_output_handle *, unsigned long); int (*addr_filters_validate)(struct list_head *); void (*addr_filters_sync)(struct perf_event *); int (*aux_output_match)(struct perf_event *); bool (*filter)(struct pmu *, int); int (*check_period)(struct perf_event *, u64); }; enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3, }; struct refcount_struct { atomic_t refs; }; typedef struct refcount_struct refcount_t; struct kref { refcount_t refcount; }; struct kset; struct kobj_type; struct kernfs_node; struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; const struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; unsigned int state_initialized: 1; unsigned int state_in_sysfs: 1; unsigned int state_add_uevent_sent: 1; unsigned int state_remove_uevent_sent: 1; unsigned int uevent_suppress: 1; u64 android_kabi_reserved1; u64 android_kabi_reserved2; 
u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct module_param_attrs; struct completion; struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; }; struct latch_tree_node { struct rb_node node[2]; }; struct mod_tree_node { struct module *mod; struct latch_tree_node node; }; struct module_memory { void *base; unsigned int size; struct mod_tree_node mtn; }; struct mod_plt_sec { int plt_shndx; int plt_num_entries; int plt_max_entries; }; struct pkvm_module_section { void *start; void *end; }; typedef s32 kvm_nvhe_reloc_t; struct hyp_event; struct pkvm_module_ops; struct pkvm_el2_module { struct pkvm_module_section text; struct pkvm_module_section bss; struct pkvm_module_section rodata; struct pkvm_module_section data; struct pkvm_module_section event_ids; struct pkvm_module_section sections; struct hyp_event *hyp_events; unsigned int nr_hyp_events; kvm_nvhe_reloc_t *relocs; struct list_head node; unsigned long token; unsigned int nr_relocs; int (*init)(const struct pkvm_module_ops *); }; struct plt_entry; struct mod_arch_specific { struct mod_plt_sec core; struct mod_plt_sec init; struct plt_entry *ftrace_trampolines; struct pkvm_el2_module hyp; }; struct elf64_sym; typedef struct elf64_sym Elf64_Sym; struct mod_kallsyms { Elf64_Sym *symtab; unsigned int num_symtab; char *strtab; char *typetab; }; typedef const int tracepoint_ptr_t; struct _ddebug; struct ddebug_class_map; struct _ddebug_info { struct _ddebug *descs; struct ddebug_class_map *classes; unsigned int num_descs; unsigned int num_classes; }; struct module_attribute; struct kernel_symbol; struct kernel_param; struct exception_table_entry; struct bug_entry; struct module_sect_attrs; struct module_notes_attrs; struct srcu_struct; struct bpf_raw_event_map; struct trace_eval_map; struct kunit_suite; struct module { enum module_state state; struct list_head list; char name[56]; unsigned char build_id[20]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; const char *scmversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const s32 *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const s32 *gpl_crcs; bool using_gplonly_symbols; bool sig_ok; bool async_probe_requested; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct module_memory mem[7]; struct mod_arch_specific arch; unsigned long taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; struct mod_kallsyms __attribute__((btf_type_tag("rcu"))) *kallsyms; struct mod_kallsyms core_kallsyms; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void __attribute__((btf_type_tag("percpu"))) *percpu; unsigned int percpu_size; void *noinstr_text_start; unsigned int noinstr_text_size; unsigned int num_tracepoints; tracepoint_ptr_t *tracepoints_ptrs; unsigned int num_srcu_structs; struct srcu_struct **srcu_struct_ptrs; unsigned int num_bpf_raw_events; struct bpf_raw_event_map *bpf_raw_events; unsigned int btf_data_size; void *btf_data; struct jump_entry *jump_entries; unsigned int num_jump_entries; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned 
int num_trace_events; struct trace_eval_map **trace_evals; unsigned int num_trace_evals; void *kprobes_text_start; unsigned int kprobes_text_size; unsigned long *kprobe_blacklist; unsigned int num_kprobe_blacklist; int num_kunit_suites; struct kunit_suite **kunit_suites; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; struct _ddebug_info dyndbg_info; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct kset_uevent_ops; struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct kobj_uevent_env; struct kset_uevent_ops { int (* const filter)(const struct kobject *); const char * (* const name)(const struct kobject *); int (* const uevent)(const struct kobject *, struct kobj_uevent_env *); }; struct kobj_uevent_env { char *argv[3]; char *envp[64]; int envp_idx; char buf[2048]; int buflen; }; typedef unsigned int __kernel_uid32_t; typedef __kernel_uid32_t uid_t; typedef struct { uid_t val; } kuid_t; typedef unsigned int __kernel_gid32_t; typedef __kernel_gid32_t gid_t; typedef struct { gid_t val; } kgid_t; struct sysfs_ops; struct kobj_ns_type_operations; struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; const struct attribute_group **default_groups; const struct kobj_ns_type_operations * (*child_ns_type)(const struct kobject *); const void * (*namespace)(const struct kobject *); void (*get_ownership)(const struct kobject *, kuid_t *, kgid_t *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; typedef long __kernel_long_t; typedef __kernel_long_t __kernel_ssize_t; typedef __kernel_ssize_t ssize_t; struct attribute; struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); }; typedef unsigned short umode_t; struct attribute { const char *name; umode_t mode; }; struct bin_attribute; struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; }; struct address_space; struct file; struct vm_area_struct; struct bin_attribute { struct attribute attr; size_t size; void *private; struct address_space * (*f_mapping)(); ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); }; typedef unsigned int gfp_t; struct xarray { spinlock_t xa_lock; gfp_t xa_flags; void __attribute__((btf_type_tag("rcu"))) *xa_head; }; struct rw_semaphore { atomic_long_t count; atomic_long_t owner; struct optimistic_spin_queue osq; raw_spinlock_t wait_lock; struct list_head wait_list; u64 android_vendor_data1; u64 android_oem_data1[2]; }; struct rb_root { struct rb_node *rb_node; }; struct rb_root_cached { struct rb_root rb_root; struct rb_node *rb_leftmost; }; typedef u32 errseq_t; struct inode; struct address_space_operations; struct address_space { struct inode *host; struct xarray i_pages; struct rw_semaphore 
invalidate_lock; gfp_t gfp_mask; atomic_t i_mmap_writable; struct rb_root_cached i_mmap; unsigned long nrpages; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; struct rw_semaphore i_mmap_rwsem; errseq_t wb_err; spinlock_t private_lock; struct list_head private_list; void *private_data; }; typedef u32 __kernel_dev_t; typedef __kernel_dev_t dev_t; typedef __s64 time64_t; struct timespec64 { time64_t tv_sec; long tv_nsec; }; enum rw_hint { WRITE_LIFE_NOT_SET = 0, WRITE_LIFE_NONE = 1, WRITE_LIFE_SHORT = 2, WRITE_LIFE_MEDIUM = 3, WRITE_LIFE_LONG = 4, WRITE_LIFE_EXTREME = 5, } __attribute__((mode(byte))); typedef u64 blkcnt_t; struct posix_acl; struct inode_operations; struct super_block; struct bdi_writeback; struct file_operations; struct file_lock_context; struct pipe_inode_info; struct cdev; struct fsnotify_mark_connector; struct fscrypt_inode_info; struct fsverity_info; struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union { const unsigned int i_nlink; unsigned int __i_nlink; }; dev_t i_rdev; loff_t i_size; struct timespec64 i_atime; struct timespec64 i_mtime; struct timespec64 __i_ctime; spinlock_t i_lock; unsigned short i_bytes; u8 i_blkbits; enum rw_hint i_write_hint; blkcnt_t i_blocks; unsigned long i_state; struct rw_semaphore i_rwsem; unsigned long dirtied_when; unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; struct list_head i_wb_list; union { struct hlist_head i_dentry; struct callback_head i_rcu; }; atomic64_t i_version; atomic64_t i_sequence; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; union { const struct file_operations *i_fop; void (*free_inode)(struct inode *); }; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union { struct pipe_inode_info *i_pipe; struct cdev *i_cdev; char *i_link; unsigned int i_dir_seq; }; __u32 i_generation; __u32 i_fsnotify_mask; struct fsnotify_mark_connector __attribute__((btf_type_tag("rcu"))) *i_fsnotify_marks; struct fscrypt_inode_info *i_crypt_info; struct fsverity_info *i_verity_info; void *i_private; }; struct dentry; struct delayed_call; struct mnt_idmap; struct iattr; struct path; struct kstat; struct fiemap_extent_info; struct fileattr; struct offset_ctx; struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); int (*permission)(struct mnt_idmap *, struct inode *, int); struct posix_acl * (*get_inode_acl)(struct inode *, int, bool); int (*readlink)(struct dentry *, char __attribute__((btf_type_tag("user"))) *, int); int (*create)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t, bool); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct mnt_idmap *, struct inode *, struct dentry *, const char *); int (*mkdir)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct mnt_idmap *, struct inode *, 
struct dentry *, umode_t, dev_t); int (*rename)(struct mnt_idmap *, struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct mnt_idmap *, struct dentry *, struct iattr *); int (*getattr)(struct mnt_idmap *, const struct path *, struct kstat *, u32, unsigned int); ssize_t (*listxattr)(struct dentry *, char *, size_t); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64, u64); int (*update_time)(struct inode *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t); int (*tmpfile)(struct mnt_idmap *, struct inode *, struct file *, umode_t); struct posix_acl * (*get_acl)(struct mnt_idmap *, struct dentry *, int); int (*set_acl)(struct mnt_idmap *, struct dentry *, struct posix_acl *, int); int (*fileattr_set)(struct mnt_idmap *, struct dentry *, struct fileattr *); int (*fileattr_get)(struct dentry *, struct fileattr *); struct offset_ctx * (*get_offset_ctx)(struct inode *); long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; }; struct seqcount { unsigned int sequence; }; typedef struct seqcount seqcount_t; struct seqcount_spinlock { seqcount_t seqcount; }; typedef struct seqcount_spinlock seqcount_spinlock_t; struct qstr { union { struct { u32 hash; u32 len; }; u64 hash_len; }; const unsigned char *name; }; struct lockref { union { __u64 lock_count; struct { spinlock_t lock; int count; }; }; }; struct dentry_operations; struct dentry { unsigned int d_flags; seqcount_spinlock_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; union { struct list_head d_lru; wait_queue_head_t *d_wait; }; struct list_head d_child; struct list_head d_subdirs; union { struct hlist_node d_alias; struct hlist_bl_node d_in_lookup_hash; struct callback_head d_rcu; } d_u; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct vfsmount; struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); int (*d_init)(struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(const struct path *, bool); struct dentry * (*d_real)(struct dentry *, const struct inode *); int (*d_canonical_path)(const struct path *, struct path *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct vfsmount { struct dentry *mnt_root; struct super_block *mnt_sb; int mnt_flags; struct mnt_idmap *mnt_idmap; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct hlist_bl_head { struct hlist_bl_node *first; }; struct mtd_info; typedef long long qsize_t; struct quota_format_type; struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long 
dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; }; struct quota_format_ops; struct quota_info { unsigned int flags; struct rw_semaphore dqio_sem; struct inode *files[3]; struct mem_dqinfo info[3]; const struct quota_format_ops *ops[3]; }; struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; struct callback_head cb_head; }; struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int __attribute__((btf_type_tag("percpu"))) *read_count; struct rcuwait writer; wait_queue_head_t waiters; atomic_t block; u64 android_oem_data1; }; struct sb_writers { unsigned short frozen; unsigned short freeze_holders; struct percpu_rw_semaphore rw_sem[3]; }; typedef struct { __u8 b[16]; } uuid_t; struct shrink_control; struct shrinker { unsigned long (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long (*scan_objects)(struct shrinker *, struct shrink_control *); long batch; int seeks; unsigned int flags; struct list_head list; int id; atomic_long_t *nr_deferred; }; struct list_lru_node; struct list_lru { struct list_lru_node *node; struct list_head list; int shrinker_id; bool memcg_aware; struct xarray xa; }; struct work_struct; typedef void (*work_func_t)(struct work_struct *); struct work_struct { atomic_long_t data; struct list_head entry; work_func_t func; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct file_system_type; struct super_operations; struct dquot_operations; struct quotactl_ops; struct export_operations; struct xattr_handler; struct fscrypt_operations; struct fscrypt_keyring; struct fsverity_operations; struct unicode_map; struct block_device; struct backing_dev_info; struct workqueue_struct; struct user_namespace; struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; const struct fscrypt_operations *s_cop; struct fscrypt_keyring *s_master_keys; const struct fsverity_operations *s_vop; struct unicode_map *s_encoding; __u16 s_encoding_flags; struct hlist_bl_head s_roots; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; void *s_fs_info; u32 s_time_gran; time64_t s_time_min; time64_t s_time_max; __u32 s_fsnotify_mask; struct fsnotify_mark_connector __attribute__((btf_type_tag("rcu"))) *s_fsnotify_marks; char s_id[32]; uuid_t s_uuid; unsigned int s_max_links; struct mutex s_vfs_rename_mutex; const char *s_subtype; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; atomic_long_t s_fsnotify_connectors; int s_readonly_remount; errseq_t s_wb_err; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct user_namespace *s_user_ns; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; long: 0; spinlock_t s_inode_list_lock; 
struct list_head s_inodes; spinlock_t s_inode_wblist_lock; struct list_head s_inodes_wb; long: 64; long: 64; }; struct lock_class_key {}; struct fs_context; struct fs_parameter_spec; struct file_system_type { const char *name; int fs_flags; int (*init_fs_context)(struct fs_context *); const struct fs_parameter_spec *parameters; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key invalidate_lock_key; struct lock_class_key i_mutex_dir_key; }; struct fc_log; struct p_log { const char *prefix; struct fc_log *log; }; enum fs_context_purpose { FS_CONTEXT_FOR_MOUNT = 0, FS_CONTEXT_FOR_SUBMOUNT = 1, FS_CONTEXT_FOR_RECONFIGURE = 2, }; enum fs_context_phase { FS_CONTEXT_CREATE_PARAMS = 0, FS_CONTEXT_CREATING = 1, FS_CONTEXT_AWAITING_MOUNT = 2, FS_CONTEXT_AWAITING_RECONF = 3, FS_CONTEXT_RECONF_PARAMS = 4, FS_CONTEXT_RECONFIGURING = 5, FS_CONTEXT_FAILED = 6, }; struct fs_context_operations; struct net; struct cred; struct fs_context { const struct fs_context_operations *ops; struct mutex uapi_mutex; struct file_system_type *fs_type; void *fs_private; void *sget_key; struct dentry *root; struct user_namespace *user_ns; struct net *net_ns; const struct cred *cred; struct p_log log; const char *source; void *security; void *s_fs_info; unsigned int sb_flags; unsigned int sb_flags_mask; unsigned int s_iflags; enum fs_context_purpose purpose: 8; enum fs_context_phase phase: 8; bool need_free: 1; bool global: 1; bool oldapi: 1; bool exclusive: 1; }; struct fs_parameter; struct fs_context_operations { void (*free)(struct fs_context *); int (*dup)(struct fs_context *, struct fs_context *); int (*parse_param)(struct fs_context *, struct fs_parameter *); int (*parse_monolithic)(struct fs_context *, void *); int (*get_tree)(struct fs_context *); int (*reconfigure)(struct fs_context *); }; enum fs_value_type { fs_value_is_undefined = 0, fs_value_is_flag = 1, fs_value_is_string = 2, fs_value_is_blob = 3, fs_value_is_filename = 4, fs_value_is_file = 5, }; struct filename; struct fs_parameter { const char *key; enum fs_value_type type: 8; union { char *string; void *blob; struct filename *name; struct file *file; }; size_t size; int dirfd; }; struct audit_names; struct filename { const char *name; const char __attribute__((btf_type_tag("user"))) *uptr; atomic_t refcnt; struct audit_names *aname; const char iname[0]; }; typedef unsigned int fmode_t; struct qrwlock { union { atomic_t cnts; struct { u8 wlocked; u8 __lstate[3]; }; }; arch_spinlock_t wait_lock; }; typedef struct qrwlock arch_rwlock_t; typedef struct { arch_rwlock_t raw_lock; } rwlock_t; enum pid_type { PIDTYPE_PID = 0, PIDTYPE_TGID = 1, PIDTYPE_PGID = 2, PIDTYPE_SID = 3, PIDTYPE_MAX = 4, }; struct pid; struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; }; struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; }; struct path { struct vfsmount *mnt; struct dentry *dentry; }; struct file { union { struct llist_node f_llist; struct callback_head f_rcuhead; unsigned int f_iocb_flags; }; spinlock_t f_lock; fmode_t f_mode; 
atomic_long_t f_count; struct mutex f_pos_lock; loff_t f_pos; unsigned int f_flags; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; u64 f_version; void *f_security; void *private_data; struct hlist_head *f_ep; struct address_space *f_mapping; errseq_t f_wb_err; errseq_t f_sb_err; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct upid { int nr; struct pid_namespace *ns; }; struct pid { refcount_t count; unsigned int level; spinlock_t lock; struct hlist_head tasks[4]; struct hlist_head inodes; wait_queue_head_t wait_pidfd; struct callback_head rcu; struct upid numbers[0]; }; struct idr { struct xarray idr_rt; unsigned int idr_base; unsigned int idr_next; }; struct proc_ns_operations; struct ns_common { atomic_long_t stashed; const struct proc_ns_operations *ops; unsigned int inum; refcount_t count; }; struct ucounts; struct pid_namespace { struct idr idr; struct callback_head rcu; unsigned int pid_allocated; struct task_struct *child_reaper; struct kmem_cache *pid_cachep; unsigned int level; struct pid_namespace *parent; struct user_namespace *user_ns; struct ucounts *ucounts; int reboot; struct ns_common ns; int memfd_noexec_scope; }; struct thread_info { unsigned long flags; u64 ttbr0; union { u64 preempt_count; struct { u32 count; u32 need_resched; } preempt; }; void *scs_base; void *scs_sp; u32 cpu; }; struct load_weight { unsigned long weight; u32 inv_weight; }; struct sched_avg { u64 last_update_time; u64 load_sum; u64 runnable_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; unsigned long runnable_avg; unsigned long util_avg; unsigned int util_est; u32 reserved; }; struct cfs_rq; struct sched_entity { struct load_weight load; struct rb_node run_node; u64 deadline; u64 min_vruntime; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 prev_sum_exec_runtime; u64 vruntime; s64 vlag; u64 slice; u64 nr_migrations; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; unsigned long runnable_weight; long: 64; long: 64; struct sched_avg avg; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; long: 64; long: 64; long: 64; long: 64; }; struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; unsigned short on_rq; unsigned short on_list; struct sched_rt_entity *back; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; u64 dl_density; s64 runtime; u64 deadline; unsigned int flags; unsigned int dl_throttled: 1; unsigned int dl_yielded: 1; unsigned int dl_non_contending: 1; unsigned int dl_overrun: 1; struct hrtimer dl_timer; struct hrtimer inactive_timer; struct sched_dl_entity *pi_se; }; struct uclamp_se { unsigned int value: 11; unsigned int bucket_id: 5; unsigned int active: 1; unsigned int user_defined: 1; }; struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; s64 sum_block_runtime; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 
nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; long: 64; long: 64; long: 64; long: 64; }; typedef struct cpumask cpumask_t; union rcu_special { struct { u8 blocked; u8 need_qs; u8 exp_hint; u8 need_mb; } b; u32 s; }; struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; }; struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; }; typedef int __kernel_clockid_t; typedef __kernel_clockid_t clockid_t; enum timespec_type { TT_NONE = 0, TT_NATIVE = 1, TT_COMPAT = 2, }; struct __kernel_timespec; struct old_timespec32; struct pollfd; struct restart_block { unsigned long arch_data; long (*fn)(struct restart_block *); union { struct { u32 __attribute__((btf_type_tag("user"))) *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 __attribute__((btf_type_tag("user"))) *uaddr2; } futex; struct { clockid_t clockid; enum timespec_type type; union { struct __kernel_timespec __attribute__((btf_type_tag("user"))) *rmtp; struct old_timespec32 __attribute__((btf_type_tag("user"))) *compat_rmtp; }; u64 expires; } nanosleep; struct { struct pollfd __attribute__((btf_type_tag("user"))) *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } poll; }; }; typedef int __kernel_pid_t; typedef __kernel_pid_t pid_t; struct prev_cputime { u64 utime; u64 stime; raw_spinlock_t lock; }; struct timerqueue_head { struct rb_root_cached rb_root; }; struct posix_cputimer_base { u64 nextevt; struct timerqueue_head tqhead; }; struct posix_cputimers { struct posix_cputimer_base bases[3]; unsigned int timers_active; unsigned int expiry_active; }; struct posix_cputimers_work { struct callback_head work; struct mutex mutex; unsigned int scheduled; }; typedef struct { unsigned long sig[1]; } sigset_t; struct sigpending { struct list_head list; sigset_t signal; }; struct seccomp_filter; struct seccomp { int mode; atomic_t filter_count; struct seccomp_filter *filter; }; struct syscall_user_dispatch {}; struct wake_q_node { struct wake_q_node *next; }; struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 syscfs; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; }; typedef struct { unsigned long bits[1]; } nodemask_t; struct arch_tlbflush_unmap_batch {}; struct tlbflush_unmap_batch { struct arch_tlbflush_unmap_batch arch; bool flush_required; bool writable; }; struct page; struct page_frag { struct page *page; __u32 offset; __u32 size; }; struct kmap_ctrl {}; struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(struct timer_list *); u32 flags; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct llist_head { struct llist_node *first; }; struct cpu_context { unsigned long x19; unsigned long x20; unsigned long x21; unsigned long x22; unsigned long x23; unsigned long x24; unsigned long x25; unsigned long x26; unsigned long x27; unsigned long x28; unsigned long fp; unsigned long sp; unsigned long pc; }; typedef unsigned __int128 __uint128_t; struct user_fpsimd_state { __uint128_t vregs[32]; __u32 fpsr; __u32 fpcr; __u32 __reserved[2]; }; enum fp_type { FP_STATE_CURRENT = 0, FP_STATE_FPSIMD = 1, FP_STATE_SVE = 2, }; struct debug_info { int suspended_step; int bps_disabled; int wps_disabled; struct perf_event *hbp_break[16]; struct perf_event 
*hbp_watch[16]; }; struct ptrauth_key { unsigned long lo; unsigned long hi; }; struct ptrauth_keys_user { struct ptrauth_key apia; struct ptrauth_key apib; struct ptrauth_key apda; struct ptrauth_key apdb; struct ptrauth_key apga; }; struct ptrauth_keys_kernel { struct ptrauth_key apia; }; struct thread_struct { struct cpu_context cpu_context; long: 64; struct { unsigned long tp_value; unsigned long tp2_value; struct user_fpsimd_state fpsimd_state; } uw; enum fp_type fp_type; unsigned int fpsimd_cpu; void *sve_state; void *sme_state; unsigned int vl[2]; unsigned int vl_onexec[2]; unsigned long fault_address; unsigned long fault_code; struct debug_info debug; struct ptrauth_keys_user keys_user; struct ptrauth_keys_kernel keys_kernel; u64 mte_ctrl; u64 sctlr_user; u64 svcr; u64 tpidr2_el0; u64 android_vendor_data1; }; struct sched_class; struct task_group; struct rcu_node; struct key; struct nameidata; struct fs_struct; struct files_struct; struct io_uring_task; struct nsproxy; struct signal_struct; struct sighand_struct; struct audit_context; struct rt_mutex_waiter; struct bio_list; struct blk_plug; struct reclaim_state; struct io_context; struct capture_control; struct kernel_siginfo; typedef struct kernel_siginfo kernel_siginfo_t; struct css_set; struct robust_list_head; struct compat_robust_list_head; struct futex_pi_state; struct task_delay_info; struct kunit; struct mem_cgroup; struct gendisk; struct uprobe_task; struct vm_struct; struct bpf_local_storage; struct bpf_run_ctx; struct task_struct { struct thread_info thread_info; unsigned int __state; unsigned int saved_state; void *stack; refcount_t usage; unsigned int flags; unsigned int ptrace; int on_cpu; struct __call_single_node wake_entry; unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int recent_used_cpu; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; long: 64; long: 64; long: 64; long: 64; long: 64; struct sched_entity se; struct sched_rt_entity rt; struct sched_dl_entity dl; const struct sched_class *sched_class; struct task_group *sched_task_group; struct uclamp_se uclamp_req[2]; struct uclamp_se uclamp[2]; long: 64; long: 64; long: 64; long: 64; struct sched_statistics stats; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; const cpumask_t *cpus_ptr; cpumask_t *user_cpus_ptr; cpumask_t cpus_mask; void *migration_pending; unsigned short migration_disabled; unsigned short migration_flags; int rcu_read_lock_nesting; union rcu_special rcu_read_unlock_special; struct list_head rcu_node_entry; struct rcu_node *rcu_blocked_node; unsigned long rcu_tasks_nvcsw; u8 rcu_tasks_holdout; u8 rcu_tasks_idx; int rcu_tasks_idle_cpu; struct list_head rcu_tasks_holdout_list; int trc_reader_nesting; int trc_ipi_to_cpu; union rcu_special trc_reader_special; struct list_head trc_holdout_list; struct list_head trc_blkd_node; int trc_blkd_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned int sched_reset_on_fork: 1; unsigned int sched_contributes_to_load: 1; unsigned int sched_migrated: 1; long: 29; unsigned int sched_remote_wakeup: 1; unsigned int in_execve: 1; unsigned int in_iowait: 1; unsigned int in_user_fault: 1; unsigned int in_lru_fault: 1; unsigned int no_cgroup_migration: 
1; unsigned int frozen: 1; unsigned int use_memdelay: 1; unsigned int in_memstall: 1; unsigned int in_page_owner: 1; unsigned int in_eventfd: 1; unsigned int in_thrashing: 1; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; unsigned long stack_canary; struct task_struct __attribute__((btf_type_tag("rcu"))) *real_parent; struct task_struct __attribute__((btf_type_tag("rcu"))) *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid *thread_pid; struct hlist_node pid_links[4]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int __attribute__((btf_type_tag("user"))) *set_child_tid; int __attribute__((btf_type_tag("user"))) *clear_child_tid; void *worker_private; u64 utime; u64 stime; u64 gtime; u64 *time_in_state; unsigned int max_state; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 start_boottime; unsigned long min_flt; unsigned long maj_flt; struct posix_cputimers posix_cputimers; struct posix_cputimers_work posix_cputimers_work; const struct cred __attribute__((btf_type_tag("rcu"))) *ptracer_cred; const struct cred __attribute__((btf_type_tag("rcu"))) *real_cred; const struct cred __attribute__((btf_type_tag("rcu"))) *cred; struct key *cached_requested_key; char comm[16]; struct nameidata *nameidata; unsigned long last_switch_count; unsigned long last_switch_time; struct fs_struct *fs; struct files_struct *files; struct io_uring_task *io_uring; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct __attribute__((btf_type_tag("rcu"))) *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; struct syscall_user_dispatch syscall_dispatch; u64 parent_exec_id; u64 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; int wake_q_count; struct rb_root_cached pi_waiters; struct task_struct *pi_top_task; struct rt_mutex_waiter *pi_blocked_on; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct io_context *io_context; struct capture_control *capture_control; unsigned long ptrace_message; kernel_siginfo_t *last_siginfo; struct task_io_accounting ioac; unsigned int psi_flags; u64 acct_rss_mem1; u64 acct_vm_mem1; u64 acct_timexpd; nodemask_t mems_allowed; seqcount_spinlock_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set __attribute__((btf_type_tag("rcu"))) *cgroups; struct list_head cg_list; struct robust_list_head __attribute__((btf_type_tag("user"))) *robust_list; struct compat_robust_list_head __attribute__((btf_type_tag("user"))) *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct mutex futex_exit_mutex; unsigned int futex_state; struct perf_event_context *perf_event_ctxp; struct mutex perf_event_mutex; struct list_head perf_event_list; struct tlbflush_unmap_batch tlb_ubc; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; u64 timer_slack_ns; u64 default_timer_slack_ns; struct kunit 
*kunit_test; unsigned long trace_recursion; struct mem_cgroup *memcg_in_oom; gfp_t memcg_oom_gfp_mask; int memcg_oom_order; unsigned int memcg_nr_pages_over_high; struct mem_cgroup *active_memcg; struct gendisk *throttle_disk; struct uprobe_task *utask; struct kmap_ctrl kmap_ctrl; struct callback_head rcu; refcount_t rcu_users; int pagefault_disabled; struct task_struct *oom_reaper_list; struct timer_list oom_reaper_timer; struct vm_struct *stack_vm_area; refcount_t stack_refcount; void *security; struct bpf_local_storage __attribute__((btf_type_tag("rcu"))) *bpf_storage; struct bpf_run_ctx *bpf_ctx; u64 android_vendor_data1[64]; u64 android_oem_data1[6]; struct llist_head kretprobe_instances; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; u64 android_kabi_reserved5; u64 android_kabi_reserved6; u64 android_kabi_reserved7; u64 android_kabi_reserved8; long: 64; struct thread_struct thread; long: 64; long: 64; long: 64; long: 64; }; struct seqcount_raw_spinlock { seqcount_t seqcount; }; typedef struct seqcount_raw_spinlock seqcount_raw_spinlock_t; struct hrtimer_cpu_base; struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; unsigned int index; clockid_t clockid; seqcount_raw_spinlock_t seq; struct hrtimer *running; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; }; struct hrtimer_cpu_base { raw_spinlock_t lock; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; unsigned int hres_active: 1; unsigned int in_hrtirq: 1; unsigned int hang_detected: 1; unsigned int softirq_activated: 1; unsigned int online: 1; unsigned int nr_events; unsigned short nr_retries; unsigned short nr_hangs; unsigned int max_hang_time; ktime_t expires_next; struct hrtimer *next_timer; ktime_t softirq_expires_next; struct hrtimer *softirq_next_timer; struct hrtimer_clock_base clock_base[8]; }; struct rq; struct rq_flags; struct affinity_context; struct sched_class { int uclamp_enabled; void (*enqueue_task)(struct rq *, struct task_struct *, int); void (*dequeue_task)(struct rq *, struct task_struct *, int); void (*yield_task)(struct rq *); bool (*yield_to_task)(struct rq *, struct task_struct *); void (*check_preempt_curr)(struct rq *, struct task_struct *, int); struct task_struct * (*pick_next_task)(struct rq *); void (*put_prev_task)(struct rq *, struct task_struct *); void (*set_next_task)(struct rq *, struct task_struct *, bool); int (*balance)(struct rq *, struct task_struct *, struct rq_flags *); int (*select_task_rq)(struct task_struct *, int, int); struct task_struct * (*pick_task)(struct rq *); void (*migrate_task_rq)(struct task_struct *, int); void (*task_woken)(struct rq *, struct task_struct *); void (*set_cpus_allowed)(struct task_struct *, struct affinity_context *); void (*rq_online)(struct rq *); void (*rq_offline)(struct rq *); struct rq * (*find_lock_rq)(struct task_struct *, struct rq *); void (*task_tick)(struct rq *, struct task_struct *, int); void (*task_fork)(struct task_struct *); void (*task_dead)(struct task_struct *); void (*switched_from)(struct rq *, struct task_struct *); void (*switched_to)(struct rq *, struct task_struct *); void (*prio_changed)(struct rq *, struct task_struct *, int); unsigned int (*get_rr_interval)(struct rq *, struct task_struct *); void (*update_curr)(struct rq *); void (*task_change_group)(struct task_struct *); }; typedef struct {} lockdep_map_p; struct maple_tree { union { spinlock_t ma_lock; lockdep_map_p ma_external_lock; }; unsigned int 
ma_flags; void __attribute__((btf_type_tag("rcu"))) *ma_root; }; typedef u64 pgdval_t; typedef struct { pgdval_t pgd; } pgd_t; struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 __attribute__((btf_type_tag("percpu"))) *counters; }; typedef struct { atomic64_t id; void *sigpage; refcount_t pinned; void *vdso; unsigned long flags; } mm_context_t; struct xol_area; struct uprobes_state { struct xol_area *xol_area; }; struct linux_binfmt; struct kioctx_table; struct mmu_notifier_subscriptions; struct mm_struct { struct { struct { atomic_t mm_count; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct maple_tree mm_mt; unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; pgd_t *pgd; atomic_t membarrier_state; atomic_t mm_users; atomic_long_t pgtables_bytes; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_lock; struct list_head mmlist; int mm_lock_seq; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; atomic64_t pinned_vm; unsigned long data_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; seqcount_t write_protect_seq; spinlock_t arg_lock; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[50]; struct percpu_counter rss_stat[4]; struct linux_binfmt *binfmt; mm_context_t context; unsigned long flags; spinlock_t ioctx_lock; struct kioctx_table __attribute__((btf_type_tag("rcu"))) *ioctx_table; struct task_struct __attribute__((btf_type_tag("rcu"))) *owner; struct user_namespace *user_ns; struct file __attribute__((btf_type_tag("rcu"))) *exe_file; struct mmu_notifier_subscriptions *notifier_subscriptions; atomic_t tlb_flush_pending; atomic_t tlb_flush_batched; struct uprobes_state uprobes_state; struct work_struct async_put_work; struct { struct list_head list; unsigned long bitmap; struct mem_cgroup *memcg; } lru_gen; u64 android_kabi_reserved1; u64 android_backport_reserved1; long: 64; long: 64; long: 64; long: 64; long: 64; }; unsigned long cpu_bitmap[0]; }; struct linux_binprm; struct coredump_params; struct linux_binfmt { struct list_head lh; struct module *module; int (*load_binary)(struct linux_binprm *); int (*load_shlib)(struct file *); int (*core_dump)(struct coredump_params *); unsigned long min_coredump; }; struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; }; struct linux_binprm { struct vm_area_struct *vma; unsigned long vma_pages; struct mm_struct *mm; unsigned long p; unsigned long argmin; unsigned int have_execfd: 1; unsigned int execfd_creds: 1; unsigned int secureexec: 1; unsigned int point_of_no_return: 1; struct file *executable; struct file *interpreter; struct file *file; struct cred *cred; int unsafe; unsigned int per_clear; int argc; int envc; const char *filename; const char *interp; const char *fdpath; unsigned int interp_flags; int execfd; unsigned long loader; unsigned long exec; struct rlimit rlim_stack; char buf[256]; }; typedef unsigned long vm_flags_t; typedef u64 pteval_t; typedef struct { pteval_t pgprot; } pgprot_t; struct userfaultfd_ctx; struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; }; struct vma_lock; 
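/*
 * Usage sketch, as a hedged illustration only: a BPF program typically
 * includes this dump so the struct layouts above carry
 * preserve_access_index, letting field accesses be relocated against the
 * running kernel's BTF (CO-RE). The example assumes libbpf's helper
 * headers (<bpf/bpf_helpers.h>, <bpf/bpf_tracing.h>, <bpf/bpf_core_read.h>)
 * and a hypothetical program name; it is not part of the generated types.
 *
 *     #include "vmlinux.h"
 *     #include <bpf/bpf_helpers.h>
 *     #include <bpf/bpf_tracing.h>
 *     #include <bpf/bpf_core_read.h>
 *
 *     SEC("tp_btf/sched_switch")
 *     int BPF_PROG(handle_switch, bool preempt,
 *                  struct task_struct *prev, struct task_struct *next)
 *     {
 *         pid_t pid = BPF_CORE_READ(next, pid);   // CO-RE relocated field read
 *         bpf_printk("switching to pid %d", pid);
 *         return 0;
 *     }
 *
 *     char LICENSE[] SEC("license") = "GPL";
 */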
struct anon_vma; struct vm_operations_struct; struct anon_vma_name; struct vm_area_struct { union { struct { unsigned long vm_start; unsigned long vm_end; }; struct callback_head vm_rcu; }; struct mm_struct *vm_mm; pgprot_t vm_page_prot; union { const vm_flags_t vm_flags; vm_flags_t __vm_flags; }; bool detached; int vm_lock_seq; struct vma_lock *vm_lock; struct { struct rb_node rb; unsigned long rb_subtree_last; } shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct anon_vma_name *anon_name; atomic_long_t swap_readahead_info; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct vma_lock { struct rw_semaphore lock; }; struct anon_vma { struct anon_vma *root; struct rw_semaphore rwsem; atomic_t refcount; unsigned long num_children; unsigned long num_active_vmas; struct anon_vma *parent; struct rb_root_cached rb_root; }; typedef unsigned int vm_fault_t; struct vm_fault; struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*may_split)(struct vm_area_struct *, unsigned long); int (*mremap)(struct vm_area_struct *); int (*mprotect)(struct vm_area_struct *, unsigned long, unsigned long, unsigned long); vm_fault_t (*fault)(struct vm_fault *); vm_fault_t (*huge_fault)(struct vm_fault *, unsigned int); vm_fault_t (*map_pages)(struct vm_fault *, unsigned long, unsigned long); unsigned long (*pagesize)(struct vm_area_struct *); vm_fault_t (*page_mkwrite)(struct vm_fault *); vm_fault_t (*pfn_mkwrite)(struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; typedef struct { pteval_t pte; } pte_t; typedef u64 pmdval_t; typedef struct { pmdval_t pmd; } pmd_t; enum fault_flag { FAULT_FLAG_WRITE = 1, FAULT_FLAG_MKWRITE = 2, FAULT_FLAG_ALLOW_RETRY = 4, FAULT_FLAG_RETRY_NOWAIT = 8, FAULT_FLAG_KILLABLE = 16, FAULT_FLAG_TRIED = 32, FAULT_FLAG_USER = 64, FAULT_FLAG_REMOTE = 128, FAULT_FLAG_INSTRUCTION = 256, FAULT_FLAG_INTERRUPTIBLE = 512, FAULT_FLAG_UNSHARE = 1024, FAULT_FLAG_ORIG_PTE_VALID = 2048, FAULT_FLAG_VMA_LOCK = 4096, }; typedef struct { pgd_t pgd; } p4d_t; typedef struct { p4d_t p4d; } pud_t; typedef struct page *pgtable_t; struct vm_fault { struct { struct vm_area_struct *vma; gfp_t gfp_mask; unsigned long pgoff; unsigned long address; unsigned long real_address; }; enum fault_flag flags; pmd_t *pmd; pud_t *pud; union { pte_t orig_pte; pmd_t orig_pmd; }; struct page *cow_page; struct page *page; pte_t *pte; spinlock_t *ptl; pgtable_t prealloc_pte; }; struct page_pool; struct dev_pagemap; struct page { unsigned long flags; union { struct { union { struct list_head lru; struct { void *__filler; unsigned int mlock_count; }; struct list_head buddy_list; struct list_head pcp_list; }; struct address_space *mapping; union { unsigned long index; unsigned long share; }; unsigned long private; }; struct { unsigned long pp_magic; struct page_pool *pp; unsigned long _pp_mapping_pad; unsigned long dma_addr; union { unsigned long dma_addr_upper; atomic_long_t pp_frag_count; }; }; struct { unsigned long compound_head; }; struct { struct 
dev_pagemap *pgmap; void *zone_device_data; }; struct callback_head callback_head; }; union { atomic_t _mapcount; unsigned int page_type; }; atomic_t _refcount; unsigned long memcg_data; }; struct range { u64 start; u64 end; }; struct vmem_altmap { unsigned long base_pfn; const unsigned long end_pfn; const unsigned long reserve; unsigned long free; unsigned long align; unsigned long alloc; }; struct percpu_ref_data; struct percpu_ref { unsigned long percpu_count_ptr; struct percpu_ref_data *data; }; struct swait_queue_head { raw_spinlock_t lock; struct list_head task_list; }; struct completion { unsigned int done; struct swait_queue_head wait; }; enum memory_type { MEMORY_DEVICE_PRIVATE = 1, MEMORY_DEVICE_COHERENT = 2, MEMORY_DEVICE_FS_DAX = 3, MEMORY_DEVICE_GENERIC = 4, MEMORY_DEVICE_PCI_P2PDMA = 5, }; struct dev_pagemap_ops; struct dev_pagemap { struct vmem_altmap altmap; struct percpu_ref ref; struct completion done; enum memory_type type; unsigned int flags; unsigned long vmemmap_shift; const struct dev_pagemap_ops *ops; void *owner; int nr_range; union { struct range range; struct { struct {} __empty_ranges; struct range ranges[0]; }; }; }; typedef void percpu_ref_func_t(struct percpu_ref *); struct percpu_ref_data { atomic_long_t count; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic: 1; bool allow_reinit: 1; struct callback_head rcu; struct percpu_ref *ref; }; struct dev_pagemap_ops { void (*page_free)(struct page *); vm_fault_t (*migrate_to_ram)(struct vm_fault *); int (*memory_failure)(struct dev_pagemap *, unsigned long, unsigned long, int); }; struct anon_vma_name { struct kref kref; char name[0]; }; struct userfaultfd_ctx { wait_queue_head_t fault_pending_wqh; wait_queue_head_t fault_wqh; wait_queue_head_t fd_wqh; wait_queue_head_t event_wqh; seqcount_spinlock_t refile_seq; refcount_t refcount; unsigned int flags; unsigned int features; bool released; struct rw_semaphore map_changing_lock; atomic_t mmap_changing; struct mm_struct *mm; }; typedef struct { u64 val; } kernel_cap_t; struct user_struct; struct group_info; struct cred { atomic_long_t usage; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct ucounts *ucounts; struct group_info *group_info; union { int non_rcu; struct callback_head rcu; }; }; struct key_type; struct key_tag; struct keyring_index_key { unsigned long hash; union { struct { u16 desc_len; char desc[6]; }; unsigned long x; }; struct key_type *type; struct key_tag *domain_tag; const char *description; }; struct assoc_array_ptr; struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; }; union key_payload { void __attribute__((btf_type_tag("rcu"))) *rcu_data0; void *data[4]; }; typedef s32 int32_t; typedef int32_t key_serial_t; typedef u32 uint32_t; typedef uint32_t key_perm_t; struct key_user; struct key_restriction; struct key { refcount_t usage; key_serial_t serial; union { struct list_head graveyard_link; struct rb_node serial_node; }; struct rw_semaphore sem; struct key_user *user; void *security; union { time64_t expiry; time64_t revoked_at; }; time64_t 
last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; short state; unsigned long flags; union { struct keyring_index_key index_key; struct { unsigned long hash; unsigned long len_desc; struct key_type *type; struct key_tag *domain_tag; char *description; }; }; union { union key_payload payload; struct { struct list_head name_link; struct assoc_array keys; }; }; struct key_restriction *restrict_link; }; struct key_tag { struct callback_head rcu; refcount_t usage; bool removed; }; typedef int (*key_restrict_link_func_t)(struct key *, const struct key_type *, const union key_payload *, struct key *); struct key_restriction { key_restrict_link_func_t check; struct key *key; struct key_type *keytype; }; typedef int (*request_key_actor_t)(struct key *, void *); struct key_preparsed_payload; struct key_match_data; struct seq_file; struct kernel_pkey_params; struct kernel_pkey_query; struct key_type { const char *name; size_t def_datalen; unsigned int flags; int (*vet_description)(const char *); int (*preparse)(struct key_preparsed_payload *); void (*free_preparse)(struct key_preparsed_payload *); int (*instantiate)(struct key *, struct key_preparsed_payload *); int (*update)(struct key *, struct key_preparsed_payload *); int (*match_preparse)(struct key_match_data *); void (*match_free)(struct key_match_data *); void (*revoke)(struct key *); void (*destroy)(struct key *); void (*describe)(const struct key *, struct seq_file *); long (*read)(const struct key *, char *, size_t); request_key_actor_t request_key; struct key_restriction * (*lookup_restriction)(const char *); int (*asym_query)(const struct kernel_pkey_params *, struct kernel_pkey_query *); int (*asym_eds_op)(struct kernel_pkey_params *, const void *, void *); int (*asym_verify_signature)(struct kernel_pkey_params *, const void *, const void *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; struct list_head link; struct lock_class_key lock_class; }; struct ratelimit_state { raw_spinlock_t lock; int interval; int burst; int printed; int missed; unsigned long begin; unsigned long flags; }; struct user_struct { refcount_t __count; struct percpu_counter epoll_watches; unsigned long unix_inflight; atomic_long_t pipe_bufs; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; struct ratelimit_state ratelimit; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_oem_data1[2]; }; struct uid_gid_extent { u32 first; u32 lower_first; u32 count; }; struct uid_gid_map { u32 nr_extents; union { struct uid_gid_extent extent[5]; struct { struct uid_gid_extent *forward; struct uid_gid_extent *reverse; }; }; }; struct ctl_table; struct ctl_table_root; struct ctl_table_set; struct ctl_dir; struct ctl_node; struct ctl_table_header { union { struct { struct ctl_table *ctl_table; int ctl_table_size; int used; int count; int nreg; }; struct callback_head rcu; }; struct completion *unregistering; struct ctl_table *ctl_table_arg; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; struct hlist_head inodes; }; struct ctl_dir { struct ctl_table_header header; struct rb_root root; }; struct ctl_table_set { int (*is_seen)(struct ctl_table_set *); struct ctl_dir dir; }; struct user_namespace { struct uid_gid_map uid_map; struct uid_gid_map gid_map; struct uid_gid_map projid_map; struct user_namespace *parent; int level; kuid_t owner; kgid_t group; struct ns_common ns; unsigned long flags; bool parent_could_setfcap; struct 
list_head keyring_name_list; struct key *user_keyring_register; struct rw_semaphore keyring_sem; struct work_struct work; struct ctl_table_set set; struct ctl_table_header *sysctls; struct ucounts *ucounts; long ucount_max[10]; long rlimit_max[4]; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct nsset; struct proc_ns_operations { const char *name; const char *real_ns_name; int type; struct ns_common * (*get)(struct task_struct *); void (*put)(struct ns_common *); int (*install)(struct nsset *, struct ns_common *); struct user_namespace * (*owner)(struct ns_common *); struct ns_common * (*get_parent)(struct ns_common *); }; struct nsset { unsigned int flags; struct nsproxy *nsproxy; struct fs_struct *fs; const struct cred *cred; }; struct uts_namespace; struct ipc_namespace; struct mnt_namespace; struct time_namespace; struct cgroup_namespace; struct nsproxy { refcount_t count; struct uts_namespace *uts_ns; struct ipc_namespace *ipc_ns; struct mnt_namespace *mnt_ns; struct pid_namespace *pid_ns_for_children; struct net *net_ns; struct time_namespace *time_ns; struct time_namespace *time_ns_for_children; struct cgroup_namespace *cgroup_ns; }; struct new_utsname { char sysname[65]; char nodename[65]; char release[65]; char version[65]; char machine[65]; char domainname[65]; }; struct uts_namespace { struct new_utsname name; struct user_namespace *user_ns; struct ucounts *ucounts; struct ns_common ns; }; struct ucounts { struct hlist_node node; struct user_namespace *ns; kuid_t uid; atomic_t count; atomic_long_t ucount[10]; atomic_long_t rlimit[4]; }; struct ref_tracker_dir {}; struct notifier_block; struct raw_notifier_head { struct notifier_block __attribute__((btf_type_tag("rcu"))) *head; }; struct prot_inuse; struct netns_core { struct ctl_table_header *sysctl_hdr; int sysctl_somaxconn; u8 sysctl_txrehash; struct prot_inuse __attribute__((btf_type_tag("percpu"))) *prot_inuse; struct cpumask *rps_default_mask; }; struct ipstats_mib; struct tcp_mib; struct linux_mib; struct udp_mib; struct linux_xfrm_mib; struct icmp_mib; struct icmpmsg_mib; struct icmpv6_mib; struct icmpv6msg_mib; struct proc_dir_entry; struct netns_mib { struct ipstats_mib __attribute__((btf_type_tag("percpu"))) *ip_statistics; struct ipstats_mib __attribute__((btf_type_tag("percpu"))) *ipv6_statistics; struct tcp_mib __attribute__((btf_type_tag("percpu"))) *tcp_statistics; struct linux_mib __attribute__((btf_type_tag("percpu"))) *net_statistics; struct udp_mib __attribute__((btf_type_tag("percpu"))) *udp_statistics; struct udp_mib __attribute__((btf_type_tag("percpu"))) *udp_stats_in6; struct linux_xfrm_mib __attribute__((btf_type_tag("percpu"))) *xfrm_statistics; struct udp_mib __attribute__((btf_type_tag("percpu"))) *udplite_statistics; struct udp_mib __attribute__((btf_type_tag("percpu"))) *udplite_stats_in6; struct icmp_mib __attribute__((btf_type_tag("percpu"))) *icmp_statistics; struct icmpmsg_mib *icmpmsg_statistics; struct icmpv6_mib __attribute__((btf_type_tag("percpu"))) *icmpv6_statistics; struct icmpv6msg_mib *icmpv6msg_statistics; struct proc_dir_entry *proc_net_devsnmp6; }; struct netns_packet { struct mutex sklist_lock; struct hlist_head sklist; }; struct unix_table { spinlock_t *locks; struct hlist_head *buckets; }; struct netns_unix { struct unix_table table; int sysctl_max_dgram_qlen; struct ctl_table_header *ctl; }; struct blocking_notifier_head { struct rw_semaphore rwsem; struct notifier_block __attribute__((btf_type_tag("rcu"))) *head; }; struct netns_nexthop { struct rb_root 
rb_root; struct hlist_head *devhash; unsigned int seq; u32 last_id_allocated; struct blocking_notifier_head notifier_chain; }; struct inet_hashinfo; struct inet_timewait_death_row { refcount_t tw_refcount; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct inet_hashinfo *hashinfo; int sysctl_max_tw_buckets; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; typedef struct { seqcount_spinlock_t seqcount; spinlock_t lock; } seqlock_t; struct local_ports { seqlock_t lock; int range[2]; bool warned; }; struct ping_group_range { seqlock_t lock; kgid_t range[2]; }; typedef struct { u64 key[2]; } siphash_key_t; struct udp_table; struct ipv4_devconf; struct ip_ra_chain; struct fib_rules_ops; struct fib_table; struct sock; struct inet_peer_base; struct fqdir; struct tcp_congestion_ops; struct tcp_fastopen_context; struct fib_notifier_ops; struct netns_ipv4 { struct inet_timewait_death_row tcp_death_row; struct udp_table *udp_table; struct ctl_table_header *forw_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *ipv4_hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *xfrm4_hdr; struct ipv4_devconf *devconf_all; struct ipv4_devconf *devconf_dflt; struct ip_ra_chain __attribute__((btf_type_tag("rcu"))) *ra_chain; struct mutex ra_mutex; struct fib_rules_ops *rules_ops; struct fib_table __attribute__((btf_type_tag("rcu"))) *fib_main; struct fib_table __attribute__((btf_type_tag("rcu"))) *fib_default; unsigned int fib_rules_require_fldissect; bool fib_has_custom_rules; bool fib_has_custom_local_routes; bool fib_offload_disabled; u8 sysctl_tcp_shrink_window; struct hlist_head *fib_table_hash; struct sock *fibnl; struct sock *mc_autojoin_sk; struct inet_peer_base *peers; struct fqdir *fqdir; u8 sysctl_icmp_echo_ignore_all; u8 sysctl_icmp_echo_enable_probe; u8 sysctl_icmp_echo_ignore_broadcasts; u8 sysctl_icmp_ignore_bogus_error_responses; u8 sysctl_icmp_errors_use_inbound_ifaddr; int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; u32 ip_rt_min_pmtu; int ip_rt_mtu_expires; int ip_rt_min_advmss; struct local_ports ip_local_ports; u8 sysctl_tcp_ecn; u8 sysctl_tcp_ecn_fallback; u8 sysctl_ip_default_ttl; u8 sysctl_ip_no_pmtu_disc; u8 sysctl_ip_fwd_use_pmtu; u8 sysctl_ip_fwd_update_priority; u8 sysctl_ip_nonlocal_bind; u8 sysctl_ip_autobind_reuse; u8 sysctl_ip_dynaddr; u8 sysctl_ip_early_demux; u8 sysctl_tcp_early_demux; u8 sysctl_udp_early_demux; u8 sysctl_nexthop_compat_mode; u8 sysctl_fwmark_reflect; u8 sysctl_tcp_fwmark_accept; u8 sysctl_tcp_mtu_probing; int sysctl_tcp_mtu_probe_floor; int sysctl_tcp_base_mss; int sysctl_tcp_min_snd_mss; int sysctl_tcp_probe_threshold; u32 sysctl_tcp_probe_interval; int sysctl_tcp_keepalive_time; int sysctl_tcp_keepalive_intvl; u8 sysctl_tcp_keepalive_probes; u8 sysctl_tcp_syn_retries; u8 sysctl_tcp_synack_retries; u8 sysctl_tcp_syncookies; u8 sysctl_tcp_migrate_req; u8 sysctl_tcp_comp_sack_nr; int sysctl_tcp_reordering; u8 sysctl_tcp_retries1; u8 sysctl_tcp_retries2; u8 sysctl_tcp_orphan_retries; u8 sysctl_tcp_tw_reuse; int sysctl_tcp_fin_timeout; unsigned int sysctl_tcp_notsent_lowat; u8 sysctl_tcp_sack; u8 sysctl_tcp_window_scaling; u8 sysctl_tcp_timestamps; u8 sysctl_tcp_early_retrans; u8 sysctl_tcp_recovery; u8 sysctl_tcp_thin_linear_timeouts; u8 sysctl_tcp_slow_start_after_idle; u8 sysctl_tcp_retrans_collapse; u8 sysctl_tcp_stdurg; u8 sysctl_tcp_rfc1337; u8 sysctl_tcp_abort_on_overflow; u8 sysctl_tcp_fack; int sysctl_tcp_max_reordering; int sysctl_tcp_adv_win_scale; u8 sysctl_tcp_dsack; u8 
sysctl_tcp_app_win; u8 sysctl_tcp_frto; u8 sysctl_tcp_nometrics_save; u8 sysctl_tcp_no_ssthresh_metrics_save; u8 sysctl_tcp_moderate_rcvbuf; u8 sysctl_tcp_tso_win_divisor; u8 sysctl_tcp_workaround_signed_windows; int sysctl_tcp_limit_output_bytes; int sysctl_tcp_challenge_ack_limit; int sysctl_tcp_min_rtt_wlen; u8 sysctl_tcp_min_tso_segs; u8 sysctl_tcp_tso_rtt_log; u8 sysctl_tcp_autocorking; u8 sysctl_tcp_reflect_tos; int sysctl_tcp_invalid_ratelimit; int sysctl_tcp_pacing_ss_ratio; int sysctl_tcp_pacing_ca_ratio; int sysctl_tcp_wmem[3]; int sysctl_tcp_rmem[3]; unsigned int sysctl_tcp_child_ehash_entries; unsigned long sysctl_tcp_comp_sack_delay_ns; unsigned long sysctl_tcp_comp_sack_slack_ns; int sysctl_max_syn_backlog; int sysctl_tcp_fastopen; const struct tcp_congestion_ops __attribute__((btf_type_tag("rcu"))) *tcp_congestion_control; struct tcp_fastopen_context __attribute__((btf_type_tag("rcu"))) *tcp_fastopen_ctx; unsigned int sysctl_tcp_fastopen_blackhole_timeout; atomic_t tfo_active_disable_times; unsigned long tfo_active_disable_stamp; u32 tcp_challenge_timestamp; u32 tcp_challenge_count; u8 sysctl_tcp_plb_enabled; u8 sysctl_tcp_plb_idle_rehash_rounds; u8 sysctl_tcp_plb_rehash_rounds; u8 sysctl_tcp_plb_suspend_rto_sec; int sysctl_tcp_plb_cong_thresh; int sysctl_udp_wmem_min; int sysctl_udp_rmem_min; u8 sysctl_fib_notify_on_flag_change; u8 sysctl_tcp_syn_linear_timeouts; u8 sysctl_igmp_llm_reports; int sysctl_igmp_max_memberships; int sysctl_igmp_max_msf; int sysctl_igmp_qrv; struct ping_group_range ping_group_range; atomic_t dev_addr_genid; unsigned int sysctl_udp_child_hash_entries; unsigned long *sysctl_local_reserved_ports; int sysctl_ip_prot_sock; struct fib_notifier_ops *notifier_ops; unsigned int fib_seq; struct fib_notifier_ops *ipmr_notifier_ops; unsigned int ipmr_seq; atomic_t rt_genid; siphash_key_t ip_id_key; u64 android_kabi_reserved1; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct dst_entry; struct net_device; struct sk_buff; struct neighbour; struct dst_ops { unsigned short family; unsigned int gc_thresh; void (*gc)(struct dst_ops *); struct dst_entry * (*check)(struct dst_entry *, __u32); unsigned int (*default_advmss)(const struct dst_entry *); unsigned int (*mtu)(const struct dst_entry *); u32 * (*cow_metrics)(struct dst_entry *, unsigned long); void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, struct net_device *); struct dst_entry * (*negative_advice)(struct dst_entry *); void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32, bool); void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); int (*local_out)(struct net *, struct sock *, struct sk_buff *); struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); void (*confirm_neigh)(const struct dst_entry *, const void *); struct kmem_cache *kmem_cachep; struct percpu_counter pcpuc_entries; long: 64; long: 64; long: 64; }; struct netns_sysctl_ipv6 { struct ctl_table_header *hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *icmp_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *xfrm6_hdr; int flush_delay; int ip6_rt_max_size; int ip6_rt_gc_min_interval; int ip6_rt_gc_timeout; int ip6_rt_gc_interval; int ip6_rt_gc_elasticity; int ip6_rt_mtu_expires; int ip6_rt_min_advmss; u32 multipath_hash_fields; u8 multipath_hash_policy; u8 bindv6only; u8 flowlabel_consistency; u8 auto_flowlabels; int icmpv6_time; u8 
icmpv6_echo_ignore_all; u8 icmpv6_echo_ignore_multicast; u8 icmpv6_echo_ignore_anycast; unsigned long icmpv6_ratemask[4]; unsigned long *icmpv6_ratemask_ptr; u8 anycast_src_echo_reply; u8 ip_nonlocal_bind; u8 fwmark_reflect; u8 flowlabel_state_ranges; int idgen_retries; int idgen_delay; int flowlabel_reflect; int max_dst_opts_cnt; int max_hbh_opts_cnt; int max_dst_opts_len; int max_hbh_opts_len; int seg6_flowlabel; u32 ioam6_id; u64 ioam6_id_wide; u8 skip_notify_on_dev_down; u8 fib_notify_on_flag_change; u8 icmpv6_error_anycast_as_unicast; u64 android_kabi_reserved1; }; struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct ipv6_devconf; struct fib6_info; struct rt6_info; struct rt6_statistics; struct fib6_table; struct seg6_pernet_data; struct ioam6_pernet_data; struct netns_ipv6 { struct dst_ops ip6_dst_ops; struct netns_sysctl_ipv6 sysctl; struct ipv6_devconf *devconf_all; struct ipv6_devconf *devconf_dflt; struct inet_peer_base *peers; struct fqdir *fqdir; struct fib6_info *fib6_null_entry; struct rt6_info *ip6_null_entry; struct rt6_statistics *rt6_stats; struct timer_list ip6_fib_timer; struct hlist_head *fib_table_hash; struct fib6_table *fib6_main_tbl; struct list_head fib6_walkers; rwlock_t fib6_walker_lock; spinlock_t fib6_gc_lock; atomic_t ip6_rt_gc_expire; unsigned long ip6_rt_last_gc; unsigned char flowlabel_has_excl; bool fib6_has_custom_rules; unsigned int fib6_rules_require_fldissect; struct rt6_info *ip6_prohibit_entry; struct rt6_info *ip6_blk_hole_entry; struct fib6_table *fib6_local_tbl; struct fib_rules_ops *fib6_rules_ops; struct sock *ndisc_sk; struct sock *tcp_sk; struct sock *igmp_sk; struct sock *mc_autojoin_sk; struct hlist_head *inet6_addr_lst; spinlock_t addrconf_hash_lock; struct delayed_work addr_chk_work; struct list_head mr6_tables; struct fib_rules_ops *mr6_rules_ops; atomic_t dev_addr_genid; atomic_t fib6_sernum; struct seg6_pernet_data *seg6_data; struct fib_notifier_ops *notifier_ops; struct fib_notifier_ops *ip6mr_notifier_ops; unsigned int ipmr_seq; struct { struct hlist_head head; spinlock_t lock; u32 seq; } ip6addrlbl_table; struct ioam6_pernet_data *ioam6_data; u64 android_kabi_reserved1; long: 64; long: 64; long: 64; }; struct netns_sysctl_lowpan { struct ctl_table_header *frags_hdr; }; struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl; struct fqdir *fqdir; }; struct nf_logger; struct nf_hook_entries; struct netns_nf { struct proc_dir_entry *proc_netfilter; const struct nf_logger __attribute__((btf_type_tag("rcu"))) *nf_loggers[11]; struct ctl_table_header *nf_log_dir_header; struct nf_hook_entries __attribute__((btf_type_tag("rcu"))) *hooks_ipv4[5]; struct nf_hook_entries __attribute__((btf_type_tag("rcu"))) *hooks_ipv6[5]; struct nf_hook_entries __attribute__((btf_type_tag("rcu"))) *hooks_arp[3]; unsigned int defrag_ipv4_users; unsigned int defrag_ipv6_users; u64 android_kabi_reserved1; }; struct nf_generic_net { unsigned int timeout; }; struct nf_tcp_net { unsigned int timeouts[14]; u8 tcp_loose; u8 tcp_be_liberal; u8 tcp_max_retrans; u8 tcp_ignore_invalid_rst; }; struct nf_udp_net { unsigned int timeouts[2]; }; struct nf_icmp_net { unsigned int timeout; }; struct nf_dccp_net { u8 dccp_loose; unsigned int dccp_timeout[10]; }; struct nf_sctp_net { unsigned int timeouts[10]; }; struct nf_gre_net { struct list_head keymap_list; unsigned int timeouts[2]; }; struct nf_ip_net { struct nf_generic_net generic; struct 
nf_tcp_net tcp; struct nf_udp_net udp; struct nf_icmp_net icmp; struct nf_icmp_net icmpv6; struct nf_dccp_net dccp; struct nf_sctp_net sctp; struct nf_gre_net gre; }; struct ip_conntrack_stat; struct nf_ct_event_notifier; struct netns_ct { bool ecache_dwork_pending; u8 sysctl_log_invalid; u8 sysctl_events; u8 sysctl_acct; u8 sysctl_tstamp; u8 sysctl_checksum; struct ip_conntrack_stat __attribute__((btf_type_tag("percpu"))) *stat; struct nf_ct_event_notifier __attribute__((btf_type_tag("rcu"))) *nf_conntrack_event_cb; struct nf_ip_net nf_ct_proto; }; struct sk_buff_list { struct sk_buff *next; struct sk_buff *prev; }; struct sk_buff_head { union { struct { struct sk_buff *next; struct sk_buff *prev; }; struct sk_buff_list list; }; __u32 qlen; spinlock_t lock; }; struct netns_bpf { struct bpf_prog_array __attribute__((btf_type_tag("rcu"))) *run_array[2]; struct bpf_prog *progs[2]; struct list_head links[2]; }; struct xfrm_policy_hash { struct hlist_head __attribute__((btf_type_tag("rcu"))) *table; unsigned int hmask; u8 dbits4; u8 sbits4; u8 dbits6; u8 sbits6; }; struct xfrm_policy_hthresh { struct work_struct work; seqlock_t lock; u8 lbits4; u8 rbits4; u8 lbits6; u8 rbits6; }; struct netns_xfrm { struct list_head state_all; struct hlist_head __attribute__((btf_type_tag("rcu"))) *state_bydst; struct hlist_head __attribute__((btf_type_tag("rcu"))) *state_bysrc; struct hlist_head __attribute__((btf_type_tag("rcu"))) *state_byspi; struct hlist_head __attribute__((btf_type_tag("rcu"))) *state_byseq; unsigned int state_hmask; unsigned int state_num; struct work_struct state_hash_work; struct list_head policy_all; struct hlist_head *policy_byidx; unsigned int policy_idx_hmask; unsigned int idx_generator; struct hlist_head policy_inexact[3]; struct xfrm_policy_hash policy_bydst[3]; unsigned int policy_count[6]; struct work_struct policy_hash_work; struct xfrm_policy_hthresh policy_hthresh; struct list_head inexact_bins; struct sock *nlsk; struct sock *nlsk_stash; u32 sysctl_aevent_etime; u32 sysctl_aevent_rseqth; int sysctl_larval_drop; u32 sysctl_acq_expires; u8 policy_default[3]; struct ctl_table_header *sysctl_hdr; long: 64; long: 64; long: 64; long: 64; long: 64; struct dst_ops xfrm4_dst_ops; struct dst_ops xfrm6_dst_ops; spinlock_t xfrm_state_lock; seqcount_spinlock_t xfrm_state_hash_generation; seqcount_spinlock_t xfrm_policy_hash_generation; spinlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; u64 android_kabi_reserved1; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct can_dev_rcv_lists; struct can_pkg_stats; struct can_rcv_lists_stats; struct netns_can { struct proc_dir_entry *proc_dir; struct proc_dir_entry *pde_stats; struct proc_dir_entry *pde_reset_stats; struct proc_dir_entry *pde_rcvlist_all; struct proc_dir_entry *pde_rcvlist_fil; struct proc_dir_entry *pde_rcvlist_inv; struct proc_dir_entry *pde_rcvlist_sff; struct proc_dir_entry *pde_rcvlist_eff; struct proc_dir_entry *pde_rcvlist_err; struct proc_dir_entry *bcmproc_dir; struct can_dev_rcv_lists *rx_alldev_list; spinlock_t rcvlists_lock; struct timer_list stattimer; struct can_pkg_stats *pkg_stats; struct can_rcv_lists_stats *rcv_lists_stats; struct hlist_head cgw_list; u64 android_kabi_reserved1; }; struct netns_xdp { struct mutex lock; struct hlist_head list; }; struct uevent_sock; struct net_generic; struct net { refcount_t passive; spinlock_t rules_mod_lock; atomic_t dev_unreg_count; unsigned int dev_base_seq; u32 ifindex; spinlock_t nsid_lock; atomic_t fnhe_genid; struct list_head list; 
struct list_head exit_list; struct llist_node cleanup_list; struct key_tag *key_domain; struct user_namespace *user_ns; struct ucounts *ucounts; struct idr netns_ids; struct ns_common ns; struct ref_tracker_dir refcnt_tracker; struct ref_tracker_dir notrefcnt_tracker; struct list_head dev_base_head; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; struct ctl_table_set sysctls; struct sock *rtnl; struct sock *genl_sock; struct uevent_sock *uevent_sock; struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; struct xarray dev_by_index; struct raw_notifier_head netdev_chain; u32 hash_mix; struct net_device *loopback_dev; struct list_head rules_ops; struct netns_core core; struct netns_mib mib; struct netns_packet packet; struct netns_unix unx; struct netns_nexthop nexthop; long: 64; struct netns_ipv4 ipv4; struct netns_ipv6 ipv6; struct netns_ieee802154_lowpan ieee802154_lowpan; struct netns_nf nf; struct netns_ct ct; struct sk_buff_head wext_nlevents; struct net_generic __attribute__((btf_type_tag("rcu"))) *gen; struct netns_bpf bpf; long: 64; long: 64; long: 64; struct netns_xfrm xfrm; u64 net_cookie; struct netns_can can; struct netns_xdp xdp; struct sock *diag_nlsk; }; typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *); struct ctl_table_poll; struct ctl_table { const char *procname; void *data; int maxlen; umode_t mode; enum { SYSCTL_TABLE_TYPE_DEFAULT = 0, SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY = 1, } type; proc_handler *proc_handler; struct ctl_table_poll *poll; void *extra1; void *extra2; }; struct ctl_table_poll { atomic_t event; wait_queue_head_t wait; }; struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set * (*lookup)(struct ctl_table_root *); void (*set_ownership)(struct ctl_table_header *, struct ctl_table *, kuid_t *, kgid_t *); int (*permissions)(struct ctl_table_header *, struct ctl_table *); }; struct ctl_node { struct rb_node node; struct ctl_table_header *header; }; typedef int (*notifier_fn_t)(struct notifier_block *, unsigned long, void *); struct notifier_block { notifier_fn_t notifier_call; struct notifier_block __attribute__((btf_type_tag("rcu"))) *next; int priority; }; struct prot_inuse { int all; int val[64]; }; struct u64_stats_sync {}; struct ipstats_mib { u64 mibs[38]; struct u64_stats_sync syncp; }; struct tcp_mib { unsigned long mibs[16]; }; struct linux_mib { unsigned long mibs[127]; }; struct udp_mib { unsigned long mibs[10]; }; struct linux_xfrm_mib { unsigned long mibs[29]; }; struct icmp_mib { unsigned long mibs[30]; }; struct icmpmsg_mib { atomic_long_t mibs[512]; }; struct icmpv6_mib { unsigned long mibs[7]; }; struct icmpv6msg_mib { atomic_long_t mibs[512]; }; struct ip_ra_chain { struct ip_ra_chain __attribute__((btf_type_tag("rcu"))) *next; struct sock *sk; union { void (*destructor)(struct sock *); struct sock *saved_sk; }; struct callback_head rcu; }; struct fib_table { struct hlist_node tb_hlist; u32 tb_id; int tb_num_default; struct callback_head rcu; unsigned long *tb_data; unsigned long __data[0]; }; typedef u32 (*rht_hashfn_t)(const void *, u32, u32); typedef u32 (*rht_obj_hashfn_t)(const void *, u32, u32); struct rhashtable_compare_arg; typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *, const void *); struct rhashtable_params { u16 nelem_hint; u16 key_len; u16 key_offset; u16 head_offset; unsigned int max_size; u16 min_size; bool automatic_shrinking; rht_hashfn_t hashfn; rht_obj_hashfn_t obj_hashfn; rht_obj_cmpfn_t obj_cmpfn; }; struct 
bucket_table; struct rhashtable { struct bucket_table __attribute__((btf_type_tag("rcu"))) *tbl; unsigned int key_len; unsigned int max_elems; struct rhashtable_params p; bool rhlist; struct work_struct run_work; struct mutex mutex; spinlock_t lock; atomic_t nelems; }; struct inet_frags; struct fqdir { long high_thresh; long low_thresh; int timeout; int max_dist; struct inet_frags *f; struct net *net; bool dead; long: 64; long: 64; struct rhashtable rhashtable; long: 64; long: 64; long: 64; atomic_long_t mem; struct work_struct destroy_work; struct llist_node free_list; }; struct inet_frag_queue; struct inet_frags { unsigned int qsize; void (*constructor)(struct inet_frag_queue *, const void *); void (*destructor)(struct inet_frag_queue *); void (*frag_expire)(struct timer_list *); struct kmem_cache *frags_cachep; const char *frags_cache_name; struct rhashtable_params rhash_params; refcount_t refcnt; struct completion completion; }; typedef __u32 __be32; typedef __u16 __be16; struct frag_v4_compare_key { __be32 saddr; __be32 daddr; u32 user; u32 vif; __be16 id; u16 protocol; }; struct in6_addr { union { __u8 u6_addr8[16]; __be16 u6_addr16[8]; __be32 u6_addr32[4]; } in6_u; }; struct frag_v6_compare_key { struct in6_addr saddr; struct in6_addr daddr; u32 user; __be32 id; u32 iif; }; struct inet_frag_queue { struct rhash_head node; union { struct frag_v4_compare_key v4; struct frag_v6_compare_key v6; } key; struct timer_list timer; spinlock_t lock; refcount_t refcnt; struct rb_root rb_fragments; struct sk_buff *fragments_tail; struct sk_buff *last_run_head; ktime_t stamp; int len; int meat; u8 mono_delivery_time; __u8 flags; u16 max_size; struct fqdir *fqdir; struct callback_head rcu; }; typedef __u32 __wsum; typedef unsigned int sk_buff_data_t; struct skb_ext; struct sk_buff { union { struct { struct sk_buff *next; struct sk_buff *prev; union { struct net_device *dev; unsigned long dev_scratch; }; }; struct rb_node rbnode; struct list_head list; struct llist_node ll_node; }; struct sock *sk; union { ktime_t tstamp; u64 skb_mstamp_ns; }; char cb[48]; union { struct { unsigned long _skb_refdst; void (*destructor)(struct sk_buff *); }; struct list_head tcp_tsorted_anchor; unsigned long _sk_redir; }; unsigned long _nfct; unsigned int len; unsigned int data_len; __u16 mac_len; __u16 hdr_len; __u16 queue_mapping; __u8 __cloned_offset[0]; __u8 cloned: 1; __u8 nohdr: 1; __u8 fclone: 2; __u8 peeked: 1; __u8 head_frag: 1; __u8 pfmemalloc: 1; __u8 pp_recycle: 1; __u8 active_extensions; union { struct { __u8 __pkt_type_offset[0]; __u8 pkt_type: 3; __u8 ignore_df: 1; __u8 dst_pending_confirm: 1; __u8 ip_summed: 2; __u8 ooo_okay: 1; __u8 __mono_tc_offset[0]; __u8 mono_delivery_time: 1; __u8 tc_at_ingress: 1; __u8 tc_skip_classify: 1; __u8 remcsum_offload: 1; __u8 csum_complete_sw: 1; __u8 csum_level: 2; __u8 inner_protocol_type: 1; __u8 l4_hash: 1; __u8 sw_hash: 1; __u8 wifi_acked_valid: 1; __u8 wifi_acked: 1; __u8 no_fcs: 1; __u8 encapsulation: 1; __u8 encap_hdr_csum: 1; __u8 csum_valid: 1; __u8 ndisc_nodetype: 2; __u8 nf_trace: 1; __u8 redirected: 1; __u8 from_ingress: 1; __u8 nf_skip_egress: 1; __u8 slow_gro: 1; __u16 tc_index; u16 alloc_cpu; union { __wsum csum; struct { __u16 csum_start; __u16 csum_offset; }; }; __u32 priority; int skb_iif; __u32 hash; union { u32 vlan_all; struct { __be16 vlan_proto; __u16 vlan_tci; }; }; union { unsigned int napi_id; unsigned int sender_cpu; }; __u32 secmark; union { __u32 mark; __u32 reserved_tailroom; }; union { __be16 inner_protocol; __u8 inner_ipproto; }; __u16 
inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; __be16 protocol; __u16 transport_header; __u16 network_header; __u16 mac_header; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct { __u8 __pkt_type_offset[0]; __u8 pkt_type: 3; __u8 ignore_df: 1; __u8 dst_pending_confirm: 1; __u8 ip_summed: 2; __u8 ooo_okay: 1; __u8 __mono_tc_offset[0]; __u8 mono_delivery_time: 1; __u8 tc_at_ingress: 1; __u8 tc_skip_classify: 1; __u8 remcsum_offload: 1; __u8 csum_complete_sw: 1; __u8 csum_level: 2; __u8 inner_protocol_type: 1; __u8 l4_hash: 1; __u8 sw_hash: 1; __u8 wifi_acked_valid: 1; __u8 wifi_acked: 1; __u8 no_fcs: 1; __u8 encapsulation: 1; __u8 encap_hdr_csum: 1; __u8 csum_valid: 1; __u8 ndisc_nodetype: 2; __u8 nf_trace: 1; __u8 redirected: 1; __u8 from_ingress: 1; __u8 nf_skip_egress: 1; __u8 slow_gro: 1; __u16 tc_index; u16 alloc_cpu; union { __wsum csum; struct { __u16 csum_start; __u16 csum_offset; }; }; __u32 priority; int skb_iif; __u32 hash; union { u32 vlan_all; struct { __be16 vlan_proto; __u16 vlan_tci; }; }; union { unsigned int napi_id; unsigned int sender_cpu; }; __u32 secmark; union { __u32 mark; __u32 reserved_tailroom; }; union { __be16 inner_protocol; __u8 inner_ipproto; }; __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; __be16 protocol; __u16 transport_header; __u16 network_header; __u16 mac_header; u64 android_kabi_reserved1; u64 android_kabi_reserved2; } headers; }; sk_buff_data_t tail; sk_buff_data_t end; unsigned char *head; unsigned char *data; unsigned int truesize; refcount_t users; struct skb_ext *extensions; }; struct skb_ext { refcount_t refcnt; u8 offset[1]; u8 chunks; long: 0; char data[0]; }; struct rhashtable_compare_arg { struct rhashtable *ht; const void *key; }; struct lockdep_map {}; struct rhash_lock_head; struct bucket_table { unsigned int size; unsigned int nest; u32 hash_rnd; struct list_head walkers; struct callback_head rcu; struct bucket_table __attribute__((btf_type_tag("rcu"))) *future_tbl; struct lockdep_map dep_map; long: 64; struct rhash_lock_head __attribute__((btf_type_tag("rcu"))) *buckets[0]; }; enum tcp_ca_event { CA_EVENT_TX_START = 0, CA_EVENT_CWND_RESTART = 1, CA_EVENT_COMPLETE_CWR = 2, CA_EVENT_LOSS = 3, CA_EVENT_ECN_NO_CE = 4, CA_EVENT_ECN_IS_CE = 5, }; struct ack_sample; struct rate_sample; union tcp_cc_info; struct tcp_congestion_ops { u32 (*ssthresh)(struct sock *); void (*cong_avoid)(struct sock *, u32, u32); void (*set_state)(struct sock *, u8); void (*cwnd_event)(struct sock *, enum tcp_ca_event); void (*in_ack_event)(struct sock *, u32); void (*pkts_acked)(struct sock *, const struct ack_sample *); u32 (*min_tso_segs)(struct sock *); void (*cong_control)(struct sock *, const struct rate_sample *); u32 (*undo_cwnd)(struct sock *); u32 (*sndbuf_expand)(struct sock *); size_t (*get_info)(struct sock *, u32, int *, union tcp_cc_info *); char name[16]; struct module *owner; struct list_head list; u32 key; u32 flags; void (*init)(struct sock *); void (*release)(struct sock *); long: 64; long: 64; long: 64; long: 64; long: 64; }; struct tcp_fastopen_context { siphash_key_t key[2]; int num; struct callback_head rcu; }; typedef struct { atomic_t refcnt; } rcuref_t; typedef struct {} netdevice_tracker; struct xfrm_state; struct uncached_list; struct lwtunnel_state; struct dst_entry { struct net_device *dev; struct dst_ops *ops; unsigned long _metrics; unsigned long expires; struct xfrm_state *xfrm; int (*input)(struct sk_buff *); int (*output)(struct net *, struct sock *, 
struct sk_buff *); unsigned short flags; short obsolete; unsigned short header_len; unsigned short trailer_len; rcuref_t __rcuref; int __use; unsigned long lastuse; struct callback_head callback_head; short error; short __pad; __u32 tclassid; netdevice_tracker dev_tracker; struct list_head rt_uncached; struct uncached_list *rt_uncached_list; struct lwtunnel_state *lwtstate; }; enum nf_log_type { NF_LOG_TYPE_LOG = 0, NF_LOG_TYPE_ULOG = 1, NF_LOG_TYPE_MAX = 2, }; typedef u8 u_int8_t; struct nf_loginfo; typedef void nf_logfn(struct net *, u_int8_t, unsigned int, const struct sk_buff *, const struct net_device *, const struct net_device *, const struct nf_loginfo *, const char *); struct nf_logger { char *name; enum nf_log_type type; nf_logfn *logfn; struct module *me; }; struct nf_hook_state; typedef unsigned int nf_hookfn(void *, struct sk_buff *, const struct nf_hook_state *); struct nf_hook_entry { nf_hookfn *hook; void *priv; }; struct nf_hook_entries { u16 num_hook_entries; struct nf_hook_entry hooks[0]; }; struct ip_conntrack_stat { unsigned int found; unsigned int invalid; unsigned int insert; unsigned int insert_failed; unsigned int clash_resolve; unsigned int drop; unsigned int early_drop; unsigned int error; unsigned int expect_new; unsigned int expect_create; unsigned int expect_delete; unsigned int search_restart; unsigned int chaintoolong; }; struct nf_ct_event; struct nf_exp_event; struct nf_ct_event_notifier { int (*ct_event)(unsigned int, const struct nf_ct_event *); int (*exp_event)(unsigned int, const struct nf_exp_event *); }; struct net_generic { union { struct { unsigned int len; struct callback_head rcu; } s; struct { struct {} __empty_ptr; void *ptr[0]; }; }; }; struct cgroup_namespace { struct ns_common ns; struct user_namespace *user_ns; struct ucounts *ucounts; struct css_set *root_cset; }; struct cgroup_subsys_state; struct cgroup; struct css_set { struct cgroup_subsys_state *subsys[7]; refcount_t refcount; struct css_set *dom_cset; struct cgroup *dfl_cgrp; int nr_tasks; struct list_head tasks; struct list_head mg_tasks; struct list_head dying_tasks; struct list_head task_iters; struct list_head e_cset_node[7]; struct list_head threaded_csets; struct list_head threaded_csets_node; struct hlist_node hlist; struct list_head cgrp_links; struct list_head mg_src_preload_node; struct list_head mg_dst_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; bool dead; struct callback_head callback_head; }; struct rcu_work { struct work_struct work; struct callback_head rcu; struct workqueue_struct *wq; }; struct cgroup_subsys; struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct list_head sibling; struct list_head children; struct list_head rstat_css_node; int id; unsigned int flags; u64 serial_nr; atomic_t online_cnt; struct work_struct destroy_work; struct rcu_work destroy_rwork; struct cgroup_subsys_state *parent; }; struct cgroup_file { struct kernfs_node *kn; unsigned long notified_at; struct timer_list notify_timer; }; struct cacheline_padding { char x[0]; }; struct task_cputime { u64 stime; u64 utime; unsigned long long sum_exec_runtime; }; struct cgroup_base_stat { struct task_cputime cputime; }; struct cgroup_bpf { struct bpf_prog_array __attribute__((btf_type_tag("rcu"))) *effective[33]; struct hlist_head progs[33]; u8 flags[33]; struct list_head storages; struct bpf_prog_array *inactive; struct percpu_ref refcnt; struct work_struct 
release_work; }; struct cgroup_freezer_state { bool freeze; int e_freeze; int nr_frozen_descendants; int nr_frozen_tasks; }; struct cgroup_root; struct cgroup_rstat_cpu; struct psi_group; struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int level; int max_depth; int nr_descendants; int nr_dying_descendants; int max_descendants; int nr_populated_csets; int nr_populated_domain_children; int nr_populated_threaded_children; int nr_threaded_children; struct kernfs_node *kn; struct cgroup_file procs_file; struct cgroup_file events_file; struct cgroup_file psi_files[4]; u16 subtree_control; u16 subtree_ss_mask; u16 old_subtree_control; u16 old_subtree_ss_mask; struct cgroup_subsys_state __attribute__((btf_type_tag("rcu"))) *subsys[7]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[7]; struct cgroup *dom_cgrp; struct cgroup *old_dom_cgrp; struct cgroup_rstat_cpu __attribute__((btf_type_tag("percpu"))) *rstat_cpu; struct list_head rstat_css_list; struct cacheline_padding _pad_; struct cgroup *rstat_flush_next; struct cgroup_base_stat last_bstat; struct cgroup_base_stat bstat; struct prev_cputime prev_cputime; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; struct psi_group *psi; struct cgroup_bpf bpf; atomic_t congestion_count; struct cgroup_freezer_state freezer; struct bpf_local_storage __attribute__((btf_type_tag("rcu"))) *bpf_cgrp_storage; u64 android_backport_reserved1; struct cgroup *ancestors[0]; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct kernfs_root; struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; unsigned long rev; }; struct kernfs_elem_symlink { struct kernfs_node *target_kn; }; struct kernfs_ops; struct kernfs_open_node; struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node __attribute__((btf_type_tag("rcu"))) *open; loff_t size; struct kernfs_node *notify_next; }; struct kernfs_iattrs; struct kernfs_node { atomic_t count; atomic_t active; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; }; void *priv; u64 id; unsigned short flags; umode_t mode; struct kernfs_iattrs *iattr; struct callback_head rcu; u64 android_kabi_reserved1; }; typedef unsigned int __poll_t; struct kernfs_open_file; struct poll_table_struct; struct kernfs_ops { int (*open)(struct kernfs_open_file *); void (*release)(struct kernfs_open_file *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t, loff_t); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct kernfs_open_file { struct kernfs_node *kn; struct file *file; struct seq_file *seq_file; void *priv; struct mutex mutex; struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped: 1; bool released: 1; const struct vm_operations_struct *vm_ops; u64 android_kabi_reserved1; }; 
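/*
 * Illustrative usage sketch, not part of the bpftool-generated dump: the
 * css_set, cgroup, and kernfs_node records above chain together into the path
 * the kernel uses to derive a cgroup ID (css_set -> default-hierarchy cgroup
 * -> kernfs node -> id). The helper below walks that chain with CO-RE reads;
 * the guard is hypothetical and off by default, and struct task_struct (with
 * its cgroups member) is assumed to be defined elsewhere in this header, as
 * it is in a complete vmlinux.h.
 */
#ifdef VMLINUX_H_CGROUP_SKETCH
#include <bpf/bpf_helpers.h>    /* __always_inline */
#include <bpf/bpf_core_read.h>  /* BPF_CORE_READ() CO-RE accessors */

static __always_inline u64 sketch_task_cgroup_id(struct task_struct *task)
{
	/* Each hop is a relocated pointer read, so no offsets are hard-coded.
	 * For the current task on cgroup v2 this should match the value the
	 * bpf_get_current_cgroup_id() helper reports. */
	return BPF_CORE_READ(task, cgroups, dfl_cgrp, kn, id);
}
#endif /* VMLINUX_H_CGROUP_SKETCH */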
struct seq_operations; struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; struct mutex lock; const struct seq_operations *op; int poll_event; const struct file *file; void *private; }; struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); }; typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); struct poll_table_struct { poll_queue_proc _qproc; __poll_t _key; }; struct kernfs_open_node { struct callback_head callback_head; atomic_t event; wait_queue_head_t poll; struct list_head files; unsigned int nr_mmapped; unsigned int nr_to_release; }; struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct cgroup cgrp; struct cgroup *cgrp_ancestor_storage; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; char release_agent_path[4096]; char name[64]; long: 0; u8 android_backport_reserved1[28]; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct cgroup_rstat_cpu { struct u64_stats_sync bsync; struct cgroup_base_stat bstat; struct cgroup_base_stat last_bstat; struct cgroup_base_stat subtree_bstat; struct cgroup_base_stat last_subtree_bstat; struct cgroup *updated_children; struct cgroup *updated_next; }; struct psi_group_cpu; struct psi_group { struct psi_group *parent; bool enabled; struct mutex avgs_lock; struct psi_group_cpu __attribute__((btf_type_tag("percpu"))) *pcpu; u64 avg_total[7]; u64 avg_last_update; u64 avg_next_update; struct delayed_work avgs_work; struct list_head avg_triggers; u32 avg_nr_triggers[7]; u64 total[14]; unsigned long avg[21]; struct task_struct __attribute__((btf_type_tag("rcu"))) *rtpoll_task; struct timer_list rtpoll_timer; wait_queue_head_t rtpoll_wait; atomic_t rtpoll_wakeup; atomic_t rtpoll_scheduled; struct mutex rtpoll_trigger_lock; struct list_head rtpoll_triggers; u32 rtpoll_nr_triggers[7]; u32 rtpoll_states; u64 rtpoll_min_period; u64 rtpoll_total[7]; u64 rtpoll_next_update; u64 rtpoll_until; }; struct psi_group_cpu { seqcount_t seq; unsigned int tasks[4]; u32 state_mask; u32 times[8]; u64 state_start; u32 times_prev[16]; }; struct bpf_local_storage_data; struct bpf_local_storage_map; struct bpf_local_storage { struct bpf_local_storage_data __attribute__((btf_type_tag("rcu"))) *cache[16]; struct bpf_local_storage_map __attribute__((btf_type_tag("rcu"))) *smap; struct hlist_head list; void *owner; struct callback_head rcu; raw_spinlock_t lock; }; struct cgroup_taskset; struct cftype; struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); void (*css_rstat_flush)(struct cgroup_subsys_state *, int); int (*css_extra_stat_show)(struct seq_file *, struct cgroup_subsys_state *); int (*css_local_stat_show)(struct seq_file *, struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_taskset *); void (*attach)(struct cgroup_taskset *); void (*post_attach)(); int (*can_fork)(struct task_struct *, struct css_set *); void 
(*cancel_fork)(struct task_struct *, struct css_set *); void (*fork)(struct task_struct *); void (*exit)(struct task_struct *); void (*release)(struct task_struct *); void (*bind)(struct cgroup_subsys_state *); bool early_init: 1; bool implicit_on_dfl: 1; bool threaded: 1; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; }; struct cftype { char name[64]; unsigned long private; size_t max_write_len; unsigned int flags; unsigned int file_offset; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; int (*open)(struct kernfs_open_file *); void (*release)(struct kernfs_open_file *); u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64); ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *); }; struct group_info { atomic_t usage; int ngroups; kgid_t gid[0]; }; struct kioctx; struct kioctx_table { struct callback_head rcu; unsigned int nr; struct kioctx __attribute__((btf_type_tag("rcu"))) *table[0]; }; struct page_counter { atomic_long_t usage; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct cacheline_padding _pad1_; unsigned long emin; atomic_long_t min_usage; atomic_long_t children_min_usage; unsigned long elow; atomic_long_t low_usage; atomic_long_t children_low_usage; unsigned long watermark; unsigned long failcnt; struct cacheline_padding _pad2_; unsigned long min; unsigned long low; unsigned long high; unsigned long max; struct page_counter *parent; long: 64; long: 64; long: 64; }; struct mem_cgroup_id { int id; refcount_t ref; }; struct vmpressure { unsigned long scanned; unsigned long reclaimed; unsigned long tree_scanned; unsigned long tree_reclaimed; spinlock_t sr_lock; struct list_head events; struct mutex events_lock; struct work_struct work; }; struct mem_cgroup_threshold_ary; struct mem_cgroup_thresholds { struct mem_cgroup_threshold_ary *primary; struct mem_cgroup_threshold_ary *spare; }; struct fprop_global { struct percpu_counter events; unsigned int period; seqcount_t sequence; }; struct wb_domain { spinlock_t lock; struct fprop_global completions; struct timer_list period_timer; unsigned long period_time; unsigned long dirty_limit_tstamp; unsigned long dirty_limit; }; struct wb_completion { atomic_t cnt; wait_queue_head_t *waitq; }; struct memcg_cgwb_frn { u64 bdi_id; int memcg_id; u64 at; struct wb_completion done; }; struct deferred_split { spinlock_t split_queue_lock; struct list_head split_queue; unsigned long split_queue_len; }; struct lru_gen_mm_list { struct list_head fifo; spinlock_t lock; }; struct memcg_vmstats; struct obj_cgroup; struct memcg_vmstats_percpu; struct mem_cgroup_per_node; struct mem_cgroup { struct cgroup_subsys_state css; struct mem_cgroup_id id; long: 64; long: 64; struct page_counter memory; union { struct page_counter swap; struct page_counter memsw; }; struct page_counter kmem; struct page_counter tcpmem; struct work_struct high_work; unsigned long 
soft_limit; struct vmpressure vmpressure; bool oom_group; bool oom_lock; int under_oom; int swappiness; int oom_kill_disable; struct cgroup_file events_file; struct cgroup_file events_local_file; struct cgroup_file swap_events_file; struct mutex thresholds_lock; struct mem_cgroup_thresholds thresholds; struct mem_cgroup_thresholds memsw_thresholds; struct list_head oom_notify; unsigned long move_charge_at_immigrate; spinlock_t move_lock; unsigned long move_lock_flags; long: 64; long: 64; struct cacheline_padding _pad1_; struct memcg_vmstats *vmstats; atomic_long_t memory_events[9]; atomic_long_t memory_events_local[9]; unsigned long socket_pressure; bool tcpmem_active; int tcpmem_pressure; int kmemcg_id; struct obj_cgroup __attribute__((btf_type_tag("rcu"))) *objcg; struct list_head objcg_list; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct cacheline_padding _pad2_; atomic_t moving_account; struct task_struct *move_lock_task; struct memcg_vmstats_percpu __attribute__((btf_type_tag("percpu"))) *vmstats_percpu; struct list_head cgwb_list; struct wb_domain cgwb_domain; struct memcg_cgwb_frn cgwb_frn[4]; struct list_head event_list; spinlock_t event_list_lock; struct deferred_split deferred_split_queue; struct lru_gen_mm_list mm_list; u64 android_backport_reserved1; u64 android_backport_reserved2; u64 android_oem_data1[2]; struct mem_cgroup_per_node *nodeinfo[0]; }; struct eventfd_ctx; struct mem_cgroup_threshold { struct eventfd_ctx *eventfd; unsigned long threshold; }; struct mem_cgroup_threshold_ary { int current_threshold; unsigned int size; struct mem_cgroup_threshold entries[0]; }; struct obj_cgroup { struct percpu_ref refcnt; struct mem_cgroup *memcg; atomic_t nr_charged_bytes; union { struct list_head list; struct callback_head rcu; }; }; struct memcg_vmstats_percpu { unsigned int stats_updates; struct memcg_vmstats_percpu *parent; struct memcg_vmstats *vmstats; long state[49]; unsigned long events[17]; long state_prev[49]; unsigned long events_prev[17]; unsigned long nr_page_events; unsigned long targets[2]; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct hlist_nulls_node { struct hlist_nulls_node *next; struct hlist_nulls_node **pprev; }; struct lru_gen_folio { unsigned long max_seq; unsigned long min_seq[2]; unsigned long timestamps[4]; struct list_head folios[40]; long nr_pages[40]; unsigned long avg_refaulted[8]; unsigned long avg_total[8]; unsigned long protected[6]; atomic_long_t evicted[8]; atomic_long_t refaulted[8]; bool enabled; u8 gen; u8 seg; struct hlist_nulls_node list; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct lru_gen_mm_state { unsigned long seq; struct list_head *head; struct list_head *tail; unsigned long *filters[2]; unsigned long stats[6]; u64 android_kabi_reserved1; }; struct pglist_data; struct lruvec { struct list_head lists[5]; spinlock_t lru_lock; unsigned long anon_cost; unsigned long file_cost; atomic_long_t nonresident_age; unsigned long refaults[2]; unsigned long flags; struct lru_gen_folio lrugen; struct lru_gen_mm_state mm_state; struct pglist_data *pgdat; }; struct lruvec_stats { long state[42]; long state_local[42]; long state_pending[42]; }; struct mem_cgroup_reclaim_iter { struct mem_cgroup *position; unsigned int generation; }; struct lruvec_stats_percpu; struct shrinker_info; struct mem_cgroup_per_node { struct lruvec lruvec; struct lruvec_stats_percpu __attribute__((btf_type_tag("percpu"))) *lruvec_stats_percpu; struct lruvec_stats lruvec_stats; unsigned long 
lru_zone_size[25]; struct mem_cgroup_reclaim_iter iter; struct shrinker_info __attribute__((btf_type_tag("rcu"))) *shrinker_info; struct rb_node tree_node; unsigned long usage_in_excess; bool on_tree; struct mem_cgroup *memcg; u64 android_backport_reserved1; }; struct free_area { struct list_head free_list[6]; unsigned long nr_free; }; struct per_cpu_pages; struct per_cpu_zonestat; struct zone { unsigned long _watermark[4]; unsigned long watermark_boost; unsigned long nr_reserved_highatomic; long lowmem_reserve[5]; struct pglist_data *zone_pgdat; struct per_cpu_pages __attribute__((btf_type_tag("percpu"))) *per_cpu_pageset; struct per_cpu_zonestat __attribute__((btf_type_tag("percpu"))) *per_cpu_zonestats; int pageset_high; int pageset_batch; unsigned long zone_start_pfn; atomic_long_t managed_pages; unsigned long spanned_pages; unsigned long present_pages; unsigned long present_early_pages; unsigned long cma_pages; const char *name; unsigned long nr_isolate_pageblock; seqlock_t span_seqlock; int order; int initialized; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct cacheline_padding _pad1_; struct free_area free_area[11]; unsigned long flags; spinlock_t lock; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct cacheline_padding _pad2_; unsigned long percpu_drift_mark; unsigned long compact_cached_free_pfn; unsigned long compact_cached_migrate_pfn[2]; unsigned long compact_init_migrate_pfn; unsigned long compact_init_free_pfn; unsigned int compact_considered; unsigned int compact_defer_shift; int compact_order_failed; bool compact_blockskip_flush; bool contiguous; long: 0; struct cacheline_padding _pad3_; atomic_long_t vm_stat[11]; atomic_long_t vm_numa_event[0]; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct zoneref { struct zone *zone; int zone_idx; }; struct zonelist { struct zoneref _zonerefs[6]; }; enum zone_type { ZONE_DMA32 = 0, ZONE_NORMAL = 1, ZONE_MOVABLE = 2, ZONE_NOSPLIT = 3, ZONE_NOMERGE = 4, __MAX_NR_ZONES = 5, LAST_PHYS_ZONE = 1, LAST_VIRT_ZONE = 4, }; struct lru_gen_mm_walk { struct lruvec *lruvec; unsigned long max_seq; unsigned long next_addr; int nr_pages[40]; int mm_stats[6]; int batched; bool can_swap; bool force_scan; u64 android_kabi_reserved1; }; struct hlist_nulls_head { struct hlist_nulls_node *first; }; struct lru_gen_memcg { unsigned long seq; unsigned long nr_memcgs[3]; struct hlist_nulls_head fifo[24]; spinlock_t lock; }; struct per_cpu_nodestat; struct pglist_data { struct zone node_zones[5]; struct zonelist node_zonelists[1]; int nr_zones; spinlock_t node_size_lock; unsigned long node_start_pfn; unsigned long node_present_pages; unsigned long node_spanned_pages; int node_id; wait_queue_head_t kswapd_wait; wait_queue_head_t pfmemalloc_wait; wait_queue_head_t reclaim_wait[4]; atomic_t nr_writeback_throttled; unsigned long nr_reclaim_start; struct mutex kswapd_lock; struct task_struct *kswapd; int kswapd_order; enum zone_type kswapd_highest_zoneidx; int kswapd_failures; u64 android_oem_data1; int kcompactd_max_order; enum zone_type kcompactd_highest_zoneidx; wait_queue_head_t kcompactd_wait; struct task_struct *kcompactd; bool proactive_compact_trigger; unsigned long totalreserve_pages; long: 64; long: 64; struct cacheline_padding _pad1_; struct deferred_split deferred_split_queue; struct lruvec __lruvec; unsigned long flags; struct lru_gen_mm_walk mm_walk; struct lru_gen_memcg memcg_lru; long: 64; long: 64; struct cacheline_padding _pad2_; struct per_cpu_nodestat 
__attribute__((btf_type_tag("percpu"))) *per_cpu_nodestats; atomic_long_t vm_stat[42]; u64 android_kabi_reserved1; long: 64; long: 64; long: 64; long: 64; }; struct per_cpu_pages { spinlock_t lock; int count; int high; int batch; short free_factor; struct list_head lists[17]; long: 64; long: 64; long: 64; }; typedef signed char __s8; typedef __s8 s8; struct per_cpu_zonestat { s8 vm_stat_diff[11]; s8 stat_threshold; }; struct per_cpu_nodestat { s8 stat_threshold; s8 vm_node_stat_diff[42]; }; struct lruvec_stats_percpu { long state[42]; long state_prev[42]; }; struct shrinker_info { struct callback_head rcu; atomic_long_t *nr_deferred; unsigned long *map; int map_nr_max; }; typedef long long __kernel_time64_t; struct __kernel_timespec { __kernel_time64_t tv_sec; long long tv_nsec; }; typedef s32 old_time32_t; struct old_timespec32 { old_time32_t tv_sec; s32 tv_nsec; }; struct pollfd { int fd; short events; short revents; }; struct cpu_itimer { u64 expires; u64 incr; }; struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; }; struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; }; struct core_state; struct tty_struct; struct taskstats; struct tty_audit_buf; struct signal_struct { refcount_t sigcnt; atomic_t live; int nr_threads; int quick_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; struct hlist_head multiprocess; int group_exit_code; int notify_count; struct task_struct *group_exec_task; int group_stop_count; unsigned int flags; struct core_state *core_state; unsigned int is_child_subreaper: 1; unsigned int has_child_subreaper: 1; unsigned int next_posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; ktime_t it_real_incr; struct cpu_itimer it[2]; struct thread_group_cputimer cputimer; struct posix_cputimers posix_cputimers; struct pid *pids[4]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; seqlock_t stats_lock; u64 utime; u64 stime; u64 cutime; u64 cstime; u64 gtime; u64 cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16]; struct taskstats *stats; unsigned int audit_tty; struct tty_audit_buf *tty_audit_buf; bool oom_flag_origin; short oom_score_adj; short oom_score_adj_min; struct mm_struct *oom_mm; struct mutex cred_guard_mutex; struct rw_semaphore exec_update_lock; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; u64 android_oem_data1; }; struct core_thread { struct task_struct *task; struct core_thread *next; }; struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; }; struct taskstats { __u16 version; __u32 ac_exitcode; __u8 ac_flag; __u8 ac_nice; __u64 cpu_count; __u64 cpu_delay_total; __u64 blkio_count; __u64 blkio_delay_total; __u64 swapin_count; __u64 swapin_delay_total; __u64 cpu_run_real_total; __u64 cpu_run_virtual_total; char ac_comm[32]; __u8 ac_sched; __u8 ac_pad[3]; long: 0; __u32 ac_uid; __u32 ac_gid; __u32 ac_pid; __u32 ac_ppid; __u32 ac_btime; __u64 ac_etime; __u64 ac_utime; __u64 
ac_stime; __u64 ac_minflt; __u64 ac_majflt; __u64 coremem; __u64 virtmem; __u64 hiwater_rss; __u64 hiwater_vm; __u64 read_char; __u64 write_char; __u64 read_syscalls; __u64 write_syscalls; __u64 read_bytes; __u64 write_bytes; __u64 cancelled_write_bytes; __u64 nvcsw; __u64 nivcsw; __u64 ac_utimescaled; __u64 ac_stimescaled; __u64 cpu_scaled_run_real_total; __u64 freepages_count; __u64 freepages_delay_total; __u64 thrashing_count; __u64 thrashing_delay_total; __u64 ac_btime64; __u64 compact_count; __u64 compact_delay_total; __u32 ac_tgid; __u64 ac_tgetime; __u64 ac_exe_dev; __u64 ac_exe_inode; __u64 wpcopy_count; __u64 wpcopy_delay_total; __u64 irq_count; __u64 irq_delay_total; }; typedef void __signalfn_t(int); typedef __signalfn_t __attribute__((btf_type_tag("user"))) *__sighandler_t; typedef void __restorefn_t(); typedef __restorefn_t __attribute__((btf_type_tag("user"))) *__sigrestore_t; struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; }; struct k_sigaction { struct sigaction sa; }; struct sighand_struct { spinlock_t siglock; refcount_t count; wait_queue_head_t signalfd_wqh; struct k_sigaction action[64]; }; struct bio; struct bio_list { struct bio *head; struct bio *tail; }; typedef __u32 blk_opf_t; typedef u8 blk_status_t; typedef u64 sector_t; struct bvec_iter { sector_t bi_sector; unsigned int bi_size; unsigned int bi_idx; unsigned int bi_bvec_done; } __attribute__((packed)); typedef unsigned int blk_qc_t; typedef void bio_end_io_t(struct bio *); struct bio_issue { u64 value; }; struct bio_vec { struct page *bv_page; unsigned int bv_len; unsigned int bv_offset; }; struct blkcg_gq; struct bio_crypt_ctx; struct bio_set; struct bio { struct bio *bi_next; struct block_device *bi_bdev; blk_opf_t bi_opf; unsigned short bi_flags; unsigned short bi_ioprio; enum rw_hint bi_write_hint; blk_status_t bi_status; atomic_t __bi_remaining; struct bvec_iter bi_iter; blk_qc_t bi_cookie; bio_end_io_t *bi_end_io; void *bi_private; struct blkcg_gq *bi_blkg; struct bio_issue bi_issue; u64 bi_iocost_cost; struct bio_crypt_ctx *bi_crypt_context; bool bi_skip_dm_default_key; union {}; unsigned short bi_vcnt; unsigned short bi_max_vecs; atomic_t __bi_cnt; struct bio_vec *bi_io_vec; struct bio_set *bi_pool; u64 android_oem_data1; u64 android_kabi_reserved1; u64 android_kabi_reserved2; struct bio_vec bi_inline_vecs[0]; }; enum dl_dev_state { DL_DEV_NO_DRIVER = 0, DL_DEV_PROBING = 1, DL_DEV_DRIVER_BOUND = 2, DL_DEV_UNBINDING = 3, }; struct dev_links_info { struct list_head suppliers; struct list_head consumers; struct list_head defer_sync; enum dl_dev_state status; }; struct pm_message { int event; }; typedef struct pm_message pm_message_t; enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4, }; enum rpm_status { RPM_INVALID = -1, RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3, }; struct wakeup_source; struct wake_irq; struct pm_subsys_data; struct dev_pm_qos; struct dev_pm_info { pm_message_t power_state; unsigned int can_wakeup: 1; unsigned int async_suspend: 1; bool in_dpm_list: 1; bool is_prepared: 1; bool is_suspended: 1; bool is_noirq_suspended: 1; bool is_late_suspended: 1; bool no_pm: 1; bool early_init: 1; bool direct_complete: 1; u32 driver_flags; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path: 1; bool syscore: 1; bool no_pm_callbacks: 1; unsigned int must_resume: 1; 
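/*
 * Example (illustrative sketch, not part of the generated definitions above):
 * with the struct bio / struct bvec_iter layouts just declared, a libbpf
 * CO-RE program can read the starting sector, size and operation flags of a
 * submitted bio. Assumes libbpf's <bpf/bpf_helpers.h>, <bpf/bpf_tracing.h>
 * and <bpf/bpf_core_read.h> are available and that submit_bio() is an
 * attachable kprobe on the target kernel.
 *
 *   #include "vmlinux.h"
 *   #include <bpf/bpf_helpers.h>
 *   #include <bpf/bpf_tracing.h>
 *   #include <bpf/bpf_core_read.h>
 *
 *   SEC("kprobe/submit_bio")
 *   int BPF_KPROBE(trace_submit_bio, struct bio *bio)
 *   {
 *           sector_t sector = BPF_CORE_READ(bio, bi_iter.bi_sector);
 *           unsigned int bytes = BPF_CORE_READ(bio, bi_iter.bi_size);
 *           blk_opf_t opf = BPF_CORE_READ(bio, bi_opf);
 *
 *           bpf_printk("bio sector=%llu bytes=%u opf=0x%x", sector, bytes, opf);
 *           return 0;
 *   }
 *
 *   char LICENSE[] SEC("license") = "GPL";
 */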
unsigned int may_skip_resume: 1; struct hrtimer suspend_timer; u64 timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned int disable_depth: 3; unsigned int idle_notification: 1; unsigned int request_pending: 1; unsigned int deferred_resume: 1; unsigned int needs_force_resume: 1; unsigned int runtime_auto: 1; bool ignore_children: 1; unsigned int no_callbacks: 1; unsigned int irq_safe: 1; unsigned int use_autosuspend: 1; unsigned int timer_autosuspends: 1; unsigned int memalloc_noio: 1; unsigned int links_count; enum rpm_request request; enum rpm_status runtime_status; enum rpm_status last_status; int runtime_error; int autosuspend_delay; u64 last_busy; u64 active_time; u64 suspended_time; u64 accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32); struct dev_pm_qos *qos; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct irq_domain; struct msi_device_data; struct dev_msi_info { struct irq_domain *domain; struct msi_device_data *data; }; struct dev_archdata {}; enum device_removable { DEVICE_REMOVABLE_NOT_SUPPORTED = 0, DEVICE_REMOVABLE_UNKNOWN = 1, DEVICE_FIXED = 2, DEVICE_REMOVABLE = 3, }; struct device_private; struct device_type; struct bus_type; struct device_driver; struct dev_pm_domain; struct em_perf_domain; struct dev_pin_info; struct dma_map_ops; struct bus_dma_region; struct device_dma_parameters; struct dma_coherent_mem; struct cma; struct io_tlb_mem; struct device_node; struct fwnode_handle; struct class; struct iommu_group; struct dev_iommu; struct device_physical_location; struct device { struct kobject kobj; struct device *parent; struct device_private *p; const char *init_name; const struct device_type *type; const struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct mutex mutex; struct dev_links_info links; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct em_perf_domain *em_pd; struct dev_pin_info *pins; struct dev_msi_info msi; const struct dma_map_ops *dma_ops; u64 *dma_mask; u64 coherent_dma_mask; u64 bus_dma_limit; const struct bus_dma_region *dma_range_map; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct io_tlb_mem *dma_io_tlb_mem; struct list_head dma_io_tlb_pools; spinlock_t dma_io_tlb_lock; bool dma_uses_io_tlb; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; const struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; struct dev_iommu *iommu; struct device_physical_location *physical_location; enum device_removable removable; bool offline_disabled: 1; bool offline: 1; bool of_node_reused: 1; bool state_synced: 1; bool can_match: 1; bool dma_coherent: 1; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; u64 android_kabi_reserved5; u64 android_kabi_reserved6; u64 android_kabi_reserved7; u64 android_kabi_reserved8; }; struct request_queue; struct disk_stats; struct blk_holder_ops; struct partition_meta_info; struct block_device { sector_t bd_start_sect; sector_t bd_nr_sectors; struct gendisk *bd_disk; struct request_queue *bd_queue; struct disk_stats __attribute__((btf_type_tag("percpu"))) *bd_stats; unsigned long 
bd_stamp; bool bd_read_only; u8 bd_partno; bool bd_write_holder; bool bd_has_submit_bio; dev_t bd_dev; atomic_t bd_openers; spinlock_t bd_size_lock; struct inode *bd_inode; void *bd_claiming; void *bd_holder; const struct blk_holder_ops *bd_holder_ops; struct mutex bd_holder_lock; int bd_fsfreeze_count; int bd_holders; struct kobject *bd_holder_dir; struct mutex bd_fsfreeze_mutex; struct super_block *bd_fsfreeze_sb; struct partition_meta_info *bd_meta_info; bool bd_ro_warned; struct device bd_device; }; typedef void *mempool_alloc_t(gfp_t, void *); typedef void mempool_free_t(void *, void *); struct mempool_s { spinlock_t lock; int min_nr; int curr_nr; void **elements; void *pool_data; mempool_alloc_t *alloc; mempool_free_t *free; wait_queue_head_t wait; }; typedef struct mempool_s mempool_t; struct bio_alloc_cache; struct bio_set { struct kmem_cache *bio_slab; unsigned int front_pad; struct bio_alloc_cache __attribute__((btf_type_tag("percpu"))) *cache; mempool_t bio_pool; mempool_t bvec_pool; unsigned int back_pad; spinlock_t rescue_lock; struct bio_list rescue_list; struct work_struct rescue_work; struct workqueue_struct *rescue_workqueue; struct hlist_node cpuhp_dead; }; typedef unsigned int blk_mode_t; struct block_device_operations; struct timer_rand_state; struct disk_events; struct badblocks; struct blk_independent_access_ranges; struct gendisk { int major; int first_minor; int minors; char disk_name[32]; unsigned short events; unsigned short event_flags; struct xarray part_tbl; struct block_device *part0; const struct block_device_operations *fops; struct request_queue *queue; void *private_data; struct bio_set bio_split; int flags; unsigned long state; struct mutex open_mutex; unsigned int open_partitions; struct backing_dev_info *bdi; struct kobject queue_kobj; struct kobject *slave_dir; struct list_head slave_bdevs; struct timer_rand_state *random; atomic_t sync_io; struct disk_events *ev; unsigned int nr_zones; unsigned int max_open_zones; unsigned int max_active_zones; unsigned long *conv_zones_bitmap; unsigned long *seq_zones_wlock; int node_id; struct badblocks *bb; struct lockdep_map lockdep_map; u64 diskseq; blk_mode_t open_mode; struct blk_independent_access_ranges *ia_ranges; u64 android_oem_data1; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct blk_zone; typedef int (*report_zones_cb)(struct blk_zone *, unsigned int, void *); enum blk_unique_id { BLK_UID_T10 = 1, BLK_UID_EUI64 = 2, BLK_UID_NAA = 3, }; struct io_comp_batch; struct hd_geometry; struct pr_ops; struct block_device_operations { void (*submit_bio)(struct bio *); int (*poll_bio)(struct bio *, struct io_comp_batch *, unsigned int); int (*open)(struct gendisk *, blk_mode_t); void (*release)(struct gendisk *); int (*ioctl)(struct block_device *, blk_mode_t, unsigned int, unsigned long); int (*compat_ioctl)(struct block_device *, blk_mode_t, unsigned int, unsigned long); unsigned int (*check_events)(struct gendisk *, unsigned int); void (*unlock_native_capacity)(struct gendisk *); int (*getgeo)(struct block_device *, struct hd_geometry *); int (*set_read_only)(struct block_device *, bool); void (*free_disk)(struct gendisk *); void (*swap_slot_free_notify)(struct block_device *, unsigned long); int (*report_zones)(struct gendisk *, sector_t, unsigned int, report_zones_cb, void *); char * (*devnode)(struct gendisk *, umode_t *); int (*get_unique_id)(struct gendisk *, u8 *, enum blk_unique_id); struct module *owner; const struct pr_ops 
*pr_ops; int (*alternative_gpt_sector)(struct gendisk *, sector_t *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct request; struct io_comp_batch { struct request *req_list; bool need_ts; void (*complete)(struct io_comp_batch *); }; struct blk_zone { __u64 start; __u64 len; __u64 wp; __u8 type; __u8 cond; __u8 non_seq; __u8 reset; __u8 resv[4]; __u64 capacity; __u8 reserved[24]; }; enum pr_type { PR_WRITE_EXCLUSIVE = 1, PR_EXCLUSIVE_ACCESS = 2, PR_WRITE_EXCLUSIVE_REG_ONLY = 3, PR_EXCLUSIVE_ACCESS_REG_ONLY = 4, PR_WRITE_EXCLUSIVE_ALL_REGS = 5, PR_EXCLUSIVE_ACCESS_ALL_REGS = 6, }; struct pr_keys; struct pr_held_reservation; struct pr_ops { int (*pr_register)(struct block_device *, u64, u64, u32); int (*pr_reserve)(struct block_device *, u64, enum pr_type, u32); int (*pr_release)(struct block_device *, u64, enum pr_type); int (*pr_preempt)(struct block_device *, u64, u64, enum pr_type, bool); int (*pr_clear)(struct block_device *, u64); int (*pr_read_keys)(struct block_device *, struct pr_keys *); int (*pr_read_reservation)(struct block_device *, struct pr_held_reservation *); }; enum blk_bounce { BLK_BOUNCE_NONE = 0, BLK_BOUNCE_HIGH = 1, }; enum blk_zoned_model { BLK_ZONED_NONE = 0, BLK_ZONED_HA = 1, BLK_ZONED_HM = 2, }; struct queue_limits { enum blk_bounce bounce; unsigned long seg_boundary_mask; unsigned long virt_boundary_mask; unsigned int max_hw_sectors; unsigned int max_dev_sectors; unsigned int chunk_sectors; unsigned int max_sectors; unsigned int max_user_sectors; unsigned int max_segment_size; unsigned int physical_block_size; unsigned int logical_block_size; unsigned int alignment_offset; unsigned int io_min; unsigned int io_opt; unsigned int max_discard_sectors; unsigned int max_hw_discard_sectors; unsigned int max_secure_erase_sectors; unsigned int max_write_zeroes_sectors; unsigned int max_zone_append_sectors; unsigned int discard_granularity; unsigned int discard_alignment; unsigned int zone_write_granularity; unsigned short max_segments; unsigned short max_integrity_segments; unsigned short max_discard_segments; unsigned char misaligned; unsigned char discard_misaligned; unsigned char raid_partial_stripes_expensive; bool sub_page_limits; enum blk_zoned_model zoned; unsigned int dma_alignment; u64 android_oem_data1; u64 android_kabi_reserved1; }; struct elevator_queue; struct blk_queue_stats; struct rq_qos; struct blk_mq_ops; struct blk_mq_ctx; struct blk_crypto_profile; struct blk_mq_tags; struct blk_flush_queue; struct throtl_data; struct blk_mq_tag_set; struct request_queue { struct request *last_merge; struct elevator_queue *elevator; struct percpu_ref q_usage_counter; struct blk_queue_stats *stats; struct rq_qos *rq_qos; struct mutex rq_qos_mutex; const struct blk_mq_ops *mq_ops; struct blk_mq_ctx __attribute__((btf_type_tag("percpu"))) *queue_ctx; unsigned int queue_depth; struct xarray hctx_table; unsigned int nr_hw_queues; void *queuedata; unsigned long queue_flags; atomic_t pm_only; int id; spinlock_t queue_lock; struct gendisk *disk; refcount_t refs; struct kobject *mq_kobj; struct device *dev; enum rpm_status rpm_status; unsigned long nr_requests; unsigned int dma_pad_mask; struct blk_crypto_profile *crypto_profile; struct kobject *crypto_kobject; unsigned int rq_timeout; struct timer_list timeout; struct work_struct timeout_work; atomic_t nr_active_requests_shared_tags; struct blk_mq_tags *sched_shared_tags; struct list_head icq_list; unsigned long blkcg_pols[1]; struct blkcg_gq *root_blkg; struct list_head blkg_list; struct mutex blkcg_mutex; 
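/*
 * Example (illustrative sketch): struct block_device and struct gendisk above
 * link a bio to the disk it targets, so the human-readable disk name can be
 * pulled out with a chained CO-RE read. Assumes the same libbpf headers as
 * the previous sketch and that submit_bio() is attachable via fentry.
 *
 *   SEC("fentry/submit_bio")
 *   int BPF_PROG(trace_bio_disk, struct bio *bio)
 *   {
 *           char name[32] = {};
 *
 *           // bio->bi_bdev->bd_disk->disk_name, with CO-RE relocations
 *           BPF_CORE_READ_STR_INTO(&name, bio, bi_bdev, bd_disk, disk_name);
 *           bpf_printk("bio on %s", name);
 *           return 0;
 *   }
 */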
struct queue_limits limits; unsigned int required_elevator_features; int node; struct blk_flush_queue *fq; struct list_head flush_list; struct list_head requeue_list; spinlock_t requeue_lock; struct delayed_work requeue_work; struct mutex sysfs_lock; struct mutex sysfs_dir_lock; struct list_head unused_hctx_list; spinlock_t unused_hctx_lock; int mq_freeze_depth; struct throtl_data *td; struct callback_head callback_head; wait_queue_head_t mq_freeze_wq; struct mutex mq_freeze_lock; int quiesce_depth; struct blk_mq_tag_set *tag_set; struct list_head tag_set_list; struct dentry *debugfs_dir; struct dentry *sched_debugfs_dir; struct dentry *rqos_debugfs_dir; struct mutex debugfs_mutex; bool mq_sysfs_init_done; u64 android_oem_data1; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; enum blk_eh_timer_return { BLK_EH_DONE = 0, BLK_EH_RESET_TIMER = 1, }; struct blk_mq_hw_ctx; struct blk_mq_queue_data; struct blk_mq_ops { blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); void (*commit_rqs)(struct blk_mq_hw_ctx *); void (*queue_rqs)(struct request **); int (*get_budget)(struct request_queue *); void (*put_budget)(struct request_queue *, int); void (*set_rq_budget_token)(struct request *, int); int (*get_rq_budget_token)(struct request *); enum blk_eh_timer_return (*timeout)(struct request *); int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *); void (*complete)(struct request *); int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int); void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); int (*init_request)(struct blk_mq_tag_set *, struct request *, unsigned int, unsigned int); void (*exit_request)(struct blk_mq_tag_set *, struct request *, unsigned int); void (*cleanup_rq)(struct request *); bool (*busy)(struct request_queue *); void (*map_queues)(struct blk_mq_tag_set *); void (*show_rq)(struct seq_file *, struct request *); u64 android_kabi_reserved1; }; struct blk_mq_ctxs; struct blk_mq_ctx { struct { spinlock_t lock; struct list_head rq_lists[3]; long: 64; }; unsigned int cpu; unsigned short index_hw[3]; struct blk_mq_hw_ctx *hctxs[3]; struct request_queue *queue; struct blk_mq_ctxs *ctxs; struct kobject kobj; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct dev_pm_ops; struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(const struct device *, struct kobj_uevent_env *); char * (*devnode)(const struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; }; struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); u64 android_kabi_reserved1; }; struct iommu_ops; struct bus_type { const char 
*name; const char *dev_name; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(const struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); void (*sync_state)(struct device *); void (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t); int (*resume)(struct device *); int (*num_vf)(struct device *); int (*dma_configure)(struct device *); void (*dma_cleanup)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; bool need_parent_lock; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2, }; struct of_device_id; struct acpi_device_id; struct driver_private; struct device_driver { const char *name; const struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); void (*sync_state)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t); int (*resume)(struct device *); const struct attribute_group **groups; const struct attribute_group **dev_groups; const struct dev_pm_ops *pm; void (*coredump)(struct device *); struct driver_private *p; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct of_device_id { char name[32]; char type[32]; char compatible[128]; const void *data; }; typedef unsigned long kernel_ulong_t; struct acpi_device_id { __u8 id[16]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; }; enum iommu_cap { IOMMU_CAP_CACHE_COHERENCY = 0, IOMMU_CAP_NOEXEC = 1, IOMMU_CAP_PRE_BOOT_PROTECTION = 2, IOMMU_CAP_ENFORCE_CACHE_COHERENCY = 3, IOMMU_CAP_DEFERRED_FLUSH = 4, }; enum iommu_dev_features { IOMMU_DEV_FEAT_SVA = 0, IOMMU_DEV_FEAT_IOPF = 1, }; typedef unsigned int ioasid_t; struct iommu_domain; struct iommu_device; struct of_phandle_args; struct iommu_fault_event; struct iommu_page_response; struct iommu_domain_ops; struct iommu_ops { bool (*capable)(struct device *, enum iommu_cap); void * (*hw_info)(struct device *, u32 *, u32 *); struct iommu_domain * (*domain_alloc)(unsigned int); struct iommu_device * (*probe_device)(struct device *); void (*release_device)(struct device *); void (*probe_finalize)(struct device *); void (*set_platform_dma_ops)(struct device *); struct iommu_group * (*device_group)(struct device *); void (*get_resv_regions)(struct device *, struct list_head *); int (*of_xlate)(struct device *, struct of_phandle_args *); bool (*is_attach_deferred)(struct device *); int (*dev_enable_feat)(struct device *, enum iommu_dev_features); int (*dev_disable_feat)(struct device *, enum iommu_dev_features); int (*page_response)(struct device *, struct iommu_fault_event *, struct iommu_page_response *); int (*def_domain_type)(struct device *); void (*remove_dev_pasid)(struct device *, ioasid_t); const struct iommu_domain_ops *default_domain_ops; unsigned long pgsize_bitmap; struct module *owner; }; struct wakeup_source { const char *name; int id; struct list_head entry; spinlock_t 
lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; struct device *dev; bool active: 1; bool autosleep_enabled: 1; }; struct pm_domain_data; struct pm_subsys_data { spinlock_t lock; unsigned int refcount; unsigned int clock_op_might_sleep; struct mutex clock_mutex; struct list_head clock_list; struct pm_domain_data *domain_data; }; struct dev_pm_domain { struct dev_pm_ops ops; int (*start)(struct device *); void (*detach)(struct device *, bool); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); int (*set_performance_state)(struct device *, unsigned int); u64 android_kabi_reserved1; }; struct em_perf_table; struct em_perf_domain { struct em_perf_table __attribute__((btf_type_tag("rcu"))) *em_table; int nr_perf_states; int min_ps; int max_ps; unsigned long flags; unsigned long cpus[0]; }; struct em_perf_state { unsigned long performance; unsigned long frequency; unsigned long power; unsigned long cost; unsigned long flags; }; struct em_perf_table { struct callback_head rcu; struct kref kref; struct em_perf_state state[0]; }; typedef u64 dma_addr_t; enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3, }; typedef u64 phys_addr_t; struct sg_table; struct scatterlist; struct dma_map_ops { unsigned int flags; void * (*alloc)(struct device *, size_t, dma_addr_t *, gfp_t, unsigned long); void (*free)(struct device *, size_t, void *, dma_addr_t, unsigned long); struct page * (*alloc_pages)(struct device *, size_t, dma_addr_t *, enum dma_data_direction, gfp_t); void (*free_pages)(struct device *, size_t, struct page *, dma_addr_t, enum dma_data_direction); struct sg_table * (*alloc_noncontiguous)(struct device *, size_t, enum dma_data_direction, gfp_t, unsigned long); void (*free_noncontiguous)(struct device *, size_t, struct sg_table *, enum dma_data_direction); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t, size_t, unsigned long); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t, size_t, unsigned long); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t, enum dma_data_direction, unsigned long); void (*unmap_page)(struct device *, dma_addr_t, size_t, enum dma_data_direction, unsigned long); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, unsigned long); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, unsigned long); dma_addr_t (*map_resource)(struct device *, phys_addr_t, size_t, enum dma_data_direction, unsigned long); void (*unmap_resource)(struct device *, dma_addr_t, size_t, enum dma_data_direction, unsigned long); void (*sync_single_for_cpu)(struct device *, dma_addr_t, size_t, enum dma_data_direction); void (*sync_single_for_device)(struct device *, dma_addr_t, size_t, enum dma_data_direction); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction); void (*cache_sync)(struct device *, void *, size_t, enum dma_data_direction); int (*dma_supported)(struct device *, u64); u64 (*get_required_mask)(struct device *); size_t 
(*max_mapping_size)(struct device *); size_t (*opt_mapping_size)(); unsigned long (*get_merge_boundary)(struct device *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct bus_dma_region { phys_addr_t cpu_start; dma_addr_t dma_start; u64 size; u64 offset; }; struct device_dma_parameters { unsigned int max_segment_size; unsigned int min_align_mask; unsigned long segment_boundary_mask; }; typedef u32 phandle; struct fwnode_operations; struct fwnode_handle { struct fwnode_handle *secondary; const struct fwnode_operations *ops; struct device *dev; struct list_head suppliers; struct list_head consumers; u8 flags; u64 android_kabi_reserved1; }; struct property; struct device_node { const char *name; phandle phandle; const char *full_name; struct fwnode_handle fwnode; struct property *properties; struct property *deadprops; struct device_node *parent; struct device_node *child; struct device_node *sibling; struct kobject kobj; unsigned long _flags; void *data; }; enum dev_dma_attr { DEV_DMA_NOT_SUPPORTED = 0, DEV_DMA_NON_COHERENT = 1, DEV_DMA_COHERENT = 2, }; struct fwnode_reference_args; struct fwnode_endpoint; struct fwnode_operations { struct fwnode_handle * (*get)(struct fwnode_handle *); void (*put)(struct fwnode_handle *); bool (*device_is_available)(const struct fwnode_handle *); const void * (*device_get_match_data)(const struct fwnode_handle *, const struct device *); bool (*device_dma_supported)(const struct fwnode_handle *); enum dev_dma_attr (*device_get_dma_attr)(const struct fwnode_handle *); bool (*property_present)(const struct fwnode_handle *, const char *); int (*property_read_int_array)(const struct fwnode_handle *, const char *, unsigned int, void *, size_t); int (*property_read_string_array)(const struct fwnode_handle *, const char *, const char **, size_t); const char * (*get_name)(const struct fwnode_handle *); const char * (*get_name_prefix)(const struct fwnode_handle *); struct fwnode_handle * (*get_parent)(const struct fwnode_handle *); struct fwnode_handle * (*get_next_child_node)(const struct fwnode_handle *, struct fwnode_handle *); struct fwnode_handle * (*get_named_child_node)(const struct fwnode_handle *, const char *); int (*get_reference_args)(const struct fwnode_handle *, const char *, const char *, unsigned int, unsigned int, struct fwnode_reference_args *); struct fwnode_handle * (*graph_get_next_endpoint)(const struct fwnode_handle *, struct fwnode_handle *); struct fwnode_handle * (*graph_get_remote_endpoint)(const struct fwnode_handle *); struct fwnode_handle * (*graph_get_port_parent)(struct fwnode_handle *); int (*graph_parse_endpoint)(const struct fwnode_handle *, struct fwnode_endpoint *); void * (*iomap)(struct fwnode_handle *, int); int (*irq_get)(const struct fwnode_handle *, unsigned int); int (*add_links)(struct fwnode_handle *); }; struct fwnode_reference_args { struct fwnode_handle *fwnode; unsigned int nargs; u64 args[8]; }; struct fwnode_endpoint { unsigned int port; unsigned int id; const struct fwnode_handle *local_fwnode; }; struct property { char *name; int length; void *value; struct property *next; struct bin_attribute attr; }; struct class { const char *name; const struct attribute_group **class_groups; const struct attribute_group **dev_groups; int (*dev_uevent)(const struct device *, struct kobj_uevent_env *); char * (*devnode)(const struct device *, umode_t *); void (*class_release)(const struct class *); void (*dev_release)(struct device *); int 
(*shutdown_pre)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(const struct device *); void (*get_ownership)(const struct device *, kuid_t *, kgid_t *); const struct dev_pm_ops *pm; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2, }; struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); }; enum device_physical_location_panel { DEVICE_PANEL_TOP = 0, DEVICE_PANEL_BOTTOM = 1, DEVICE_PANEL_LEFT = 2, DEVICE_PANEL_RIGHT = 3, DEVICE_PANEL_FRONT = 4, DEVICE_PANEL_BACK = 5, DEVICE_PANEL_UNKNOWN = 6, }; enum device_physical_location_vertical_position { DEVICE_VERT_POS_UPPER = 0, DEVICE_VERT_POS_CENTER = 1, DEVICE_VERT_POS_LOWER = 2, }; enum device_physical_location_horizontal_position { DEVICE_HORI_POS_LEFT = 0, DEVICE_HORI_POS_CENTER = 1, DEVICE_HORI_POS_RIGHT = 2, }; struct device_physical_location { enum device_physical_location_panel panel; enum device_physical_location_vertical_position vertical_position; enum device_physical_location_horizontal_position horizontal_position; bool dock; bool lid; }; struct bio_alloc_cache { struct bio *free_list; struct bio *free_list_irq; unsigned int nr; unsigned int nr_irq; }; struct fprop_local_percpu { struct percpu_counter events; unsigned int period; raw_spinlock_t lock; }; enum wb_reason { WB_REASON_BACKGROUND = 0, WB_REASON_VMSCAN = 1, WB_REASON_SYNC = 2, WB_REASON_PERIODIC = 3, WB_REASON_LAPTOP_TIMER = 4, WB_REASON_FS_FREE_SPACE = 5, WB_REASON_FORKER_THREAD = 6, WB_REASON_FOREIGN_FLUSH = 7, WB_REASON_MAX = 8, }; struct bdi_writeback { struct backing_dev_info *bdi; unsigned long state; unsigned long last_old_flush; struct list_head b_dirty; struct list_head b_io; struct list_head b_more_io; struct list_head b_dirty_time; spinlock_t list_lock; atomic_t writeback_inodes; struct percpu_counter stat[4]; unsigned long bw_time_stamp; unsigned long dirtied_stamp; unsigned long written_stamp; unsigned long write_bandwidth; unsigned long avg_write_bandwidth; unsigned long dirty_ratelimit; unsigned long balanced_dirty_ratelimit; struct fprop_local_percpu completions; int dirty_exceeded; enum wb_reason start_all_reason; spinlock_t work_lock; struct list_head work_list; struct delayed_work dwork; struct delayed_work bw_dwork; struct list_head bdi_node; struct percpu_ref refcnt; struct fprop_local_percpu memcg_completions; struct cgroup_subsys_state *memcg_css; struct cgroup_subsys_state *blkcg_css; struct list_head memcg_node; struct list_head blkcg_node; struct list_head b_attached; struct list_head offline_node; union { struct work_struct release_work; struct callback_head rcu; }; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_oem_data1[2]; }; struct backing_dev_info { u64 id; struct rb_node rb_node; struct list_head bdi_list; unsigned long ra_pages; unsigned long io_pages; struct kref refcnt; unsigned int capabilities; unsigned int min_ratio; unsigned int max_ratio; unsigned int max_prop_frac; atomic_long_t tot_write_bandwidth; unsigned long last_bdp_sleep; struct bdi_writeback wb; struct list_head wb_list; struct xarray cgwb_tree; struct mutex cgwb_release_mutex; struct rw_semaphore wb_switch_rwsem; wait_queue_head_t wb_waitq; struct device *dev; char dev_name[64]; struct device *owner; struct 
timer_list laptop_mode_wb_timer; struct dentry *debug_dir; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct blk_independent_access_range { struct kobject kobj; sector_t sector; sector_t nr_sectors; }; struct blk_independent_access_ranges { struct kobject kobj; bool sysfs_registered; unsigned int nr_ia_ranges; struct blk_independent_access_range ia_range[0]; }; struct disk_stats { u64 nsecs[4]; unsigned long sectors[4]; unsigned long ios[4]; unsigned long merges[4]; unsigned long io_ticks; local_t in_flight[2]; }; struct blk_holder_ops { void (*mark_dead)(struct block_device *, bool); void (*sync)(struct block_device *); }; struct partition_meta_info { char uuid[37]; u8 volname[64]; }; struct blk_plug { struct request *mq_list; struct request *cached_rq; unsigned short nr_ios; unsigned short rq_count; bool multiple_queues; bool has_elevator; struct list_head cb_list; }; struct reclaim_state { unsigned long reclaimed; struct lru_gen_mm_walk *mm_walk; }; struct io_cq; struct io_context { atomic_long_t refcount; atomic_t active_ref; unsigned short ioprio; spinlock_t lock; struct xarray icq_tree; struct io_cq __attribute__((btf_type_tag("rcu"))) *icq_hint; struct hlist_head icq_list; struct work_struct release_work; }; struct io_cq { struct request_queue *q; struct io_context *ioc; union { struct list_head q_node; struct kmem_cache *__rcu_icq_cache; }; union { struct hlist_node ioc_node; struct callback_head __rcu_head; }; unsigned int flags; }; typedef int __kernel_timer_t; union sigval { int sival_int; void __attribute__((btf_type_tag("user"))) *sival_ptr; }; typedef union sigval sigval_t; typedef __kernel_long_t __kernel_clock_t; union __sifields { struct { __kernel_pid_t _pid; __kernel_uid32_t _uid; } _kill; struct { __kernel_timer_t _tid; int _overrun; sigval_t _sigval; int _sys_private; } _timer; struct { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } _rt; struct { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } _sigchld; struct { void __attribute__((btf_type_tag("user"))) *_addr; union { int _trapno; short _addr_lsb; struct { char _dummy_bnd[8]; void __attribute__((btf_type_tag("user"))) *_lower; void __attribute__((btf_type_tag("user"))) *_upper; } _addr_bnd; struct { char _dummy_pkey[8]; __u32 _pkey; } _addr_pkey; struct { unsigned long _data; __u32 _type; __u32 _flags; } _perf; }; } _sigfault; struct { long _band; int _fd; } _sigpoll; struct { void __attribute__((btf_type_tag("user"))) *_call_addr; int _syscall; unsigned int _arch; } _sigsys; }; struct kernel_siginfo { struct { int si_signo; int si_errno; int si_code; union __sifields _sifields; }; }; struct robust_list { struct robust_list __attribute__((btf_type_tag("user"))) *next; }; struct robust_list_head { struct robust_list list; long futex_offset; struct robust_list __attribute__((btf_type_tag("user"))) *list_op_pending; }; typedef u32 compat_uptr_t; struct compat_robust_list { compat_uptr_t next; }; typedef s32 compat_long_t; struct compat_robust_list_head { struct compat_robust_list list; compat_long_t futex_offset; compat_uptr_t list_op_pending; }; struct perf_event_groups { struct rb_root tree; u64 index; }; struct perf_event_context { raw_spinlock_t lock; struct mutex mutex; struct list_head pmu_ctx_list; struct perf_event_groups pinned_groups; struct perf_event_groups flexible_groups; struct list_head event_list; int nr_events; int nr_user; int is_active; int nr_task_data; int nr_stat; int nr_freq; int rotate_disable; 
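/*
 * Example (illustrative sketch): struct kernel_siginfo (above) and struct
 * k_sigaction (earlier in this header) are the payload types of the signal
 * delivery tracepoint. Assumes the signal:signal_deliver tracepoint keeps its
 * usual prototype (int sig, struct kernel_siginfo *info,
 * struct k_sigaction *ka) on the target kernel.
 *
 *   SEC("tp_btf/signal_deliver")
 *   int BPF_PROG(on_signal_deliver, int sig, struct kernel_siginfo *info,
 *                struct k_sigaction *ka)
 *   {
 *           int code = BPF_CORE_READ(info, si_code);
 *           unsigned long handler = (unsigned long)BPF_CORE_READ(ka, sa.sa_handler);
 *
 *           bpf_printk("sig=%d si_code=%d handler=%lx", sig, code, handler);
 *           return 0;
 *   }
 */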
refcount_t refcount; struct task_struct *task; u64 time; u64 timestamp; u64 timeoffset; struct perf_event_context *parent_ctx; u64 parent_gen; u64 generation; int pin_count; struct callback_head callback_head; local_t nr_pending; }; struct task_delay_info { raw_spinlock_t lock; u64 blkio_start; u64 blkio_delay; u64 swapin_start; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u64 thrashing_start; u64 thrashing_delay; u64 compact_start; u64 compact_delay; u64 wpcopy_start; u64 wpcopy_delay; u64 irq_delay; u32 freepages_count; u32 thrashing_count; u32 compact_count; u32 wpcopy_count; u32 irq_count; }; typedef void (*kunit_try_catch_func_t)(void *); struct kunit_try_catch { struct kunit *test; struct completion *try_completion; int try_result; kunit_try_catch_func_t try; kunit_try_catch_func_t catch; void *context; }; enum kunit_status { KUNIT_SUCCESS = 0, KUNIT_FAILURE = 1, KUNIT_SKIPPED = 2, }; struct kunit { void *priv; const char *name; char *log; struct kunit_try_catch try_catch; const void *param_value; int param_index; spinlock_t lock; enum kunit_status status; struct list_head resources; char status_comment[256]; }; struct arch_uprobe_task {}; enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3, }; struct uprobe; struct return_instance; struct uprobe_task { enum uprobe_task_state state; union { struct { struct arch_uprobe_task autask; unsigned long vaddr; }; struct { struct callback_head dup_xol_work; unsigned long dup_xol_addr; }; }; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; }; struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct return_instance *next; }; struct vm_struct { struct vm_struct *next; void *addr; unsigned long size; unsigned long flags; struct page **pages; unsigned int page_order; unsigned int nr_pages; phys_addr_t phys_addr; const void *caller; u64 android_oem_data1; }; typedef void *fl_owner_t; struct kiocb; struct iov_iter; struct dir_context; struct file_lock; struct io_uring_cmd; struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t, int); ssize_t (*read)(struct file *, char __attribute__((btf_type_tag("user"))) *, size_t, loff_t *); ssize_t (*write)(struct file *, const char __attribute__((btf_type_tag("user"))) *, size_t, loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iopoll)(struct kiocb *, struct io_comp_batch *, unsigned int); int (*iterate_shared)(struct file *, struct dir_context *); __poll_t (*poll)(struct file *, struct poll_table_struct *); long (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); unsigned long mmap_supported_flags; int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t, loff_t, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, 
loff_t *, size_t, unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); void (*splice_eof)(struct file *); int (*setlease)(struct file *, int, struct file_lock **, void **); long (*fallocate)(struct file *, int, loff_t, loff_t); void (*show_fdinfo)(struct seq_file *, struct file *); ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int); loff_t (*remap_file_range)(struct file *, loff_t, struct file *, loff_t, loff_t, unsigned int); int (*fadvise)(struct file *, loff_t, loff_t, int); int (*uring_cmd)(struct io_uring_cmd *, unsigned int); int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *, unsigned int); }; struct wait_page_queue; struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long); void *private; int ki_flags; u16 ki_ioprio; union { struct wait_page_queue *ki_waitq; ssize_t (*dio_complete)(void *); }; }; struct wait_queue_entry; typedef int (*wait_queue_func_t)(struct wait_queue_entry *, unsigned int, int, void *); struct wait_queue_entry { unsigned int flags; void *private; wait_queue_func_t func; struct list_head entry; }; typedef struct wait_queue_entry wait_queue_entry_t; struct folio; struct wait_page_queue { struct folio *folio; int bit_nr; wait_queue_entry_t wait; }; typedef struct { unsigned long val; } swp_entry_t; struct folio { union { struct { unsigned long flags; union { struct list_head lru; struct { void *__filler; unsigned int mlock_count; }; }; struct address_space *mapping; unsigned long index; union { void *private; swp_entry_t swap; }; atomic_t _mapcount; atomic_t _refcount; unsigned long memcg_data; }; struct page page; }; union { struct { unsigned long _flags_1; unsigned long _head_1; atomic_t _entire_mapcount; atomic_t _nr_pages_mapped; atomic_t _pincount; unsigned int __padding; unsigned int _folio_nr_pages; union { unsigned long _private_1; unsigned long *_dst_ul; struct page **_dst_pp; }; }; struct page __page_1; }; union { struct { unsigned long _flags_2; unsigned long _head_2; void *_hugetlb_subpool; void *_hugetlb_cgroup; void *_hugetlb_cgroup_rsvd; void *_hugetlb_hwpoison; }; struct { unsigned long _flags_2a; unsigned long _head_2a; struct list_head _deferred_list; }; struct page __page_2; }; }; struct iovec { void __attribute__((btf_type_tag("user"))) *iov_base; __kernel_size_t iov_len; }; struct kvec; struct iov_iter { u8 iter_type; bool copy_mc; bool nofault; bool data_source; bool user_backed; union { size_t iov_offset; int last_offset; }; union { struct iovec __ubuf_iovec; struct { union { const struct iovec *__iov; const struct kvec *kvec; const struct bio_vec *bvec; struct xarray *xarray; void __attribute__((btf_type_tag("user"))) *ubuf; }; size_t count; }; }; union { unsigned long nr_segs; loff_t xarray_start; }; }; struct kvec { void *iov_base; size_t iov_len; }; typedef bool (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, unsigned int); struct dir_context { filldir_t actor; loff_t pos; }; struct fc_log { refcount_t usage; u8 head; u8 tail; u8 need_free; struct module *owner; char *buffer[8]; }; struct fs_parse_result; typedef int fs_param_type(struct p_log *, const struct fs_parameter_spec *, struct fs_parameter *, struct fs_parse_result *); struct fs_parameter_spec { const char *name; fs_param_type *type; u8 opt; unsigned short flags; const void *data; }; struct fs_parse_result { bool negated; union { bool boolean; int int_32; unsigned int uint_32; u64 uint_64; }; }; enum 
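/*
 * Example (illustrative sketch): struct kiocb and struct iov_iter above carry
 * the position and length of a VFS read/write, so an fentry probe on a
 * *_read_iter implementation can report both. Assumes
 * generic_file_read_iter(struct kiocb *, struct iov_iter *) is attachable on
 * the target kernel.
 *
 *   SEC("fentry/generic_file_read_iter")
 *   int BPF_PROG(on_read_iter, struct kiocb *iocb, struct iov_iter *iter)
 *   {
 *           loff_t pos = BPF_CORE_READ(iocb, ki_pos);
 *           size_t len = BPF_CORE_READ(iter, count);
 *
 *           bpf_printk("read_iter pos=%lld len=%lu", pos, len);
 *           return 0;
 *   }
 */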
freeze_holder { FREEZE_HOLDER_KERNEL = 1, FREEZE_HOLDER_USERSPACE = 2, }; struct writeback_control; struct kstatfs; struct dquot; struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*free_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *, enum freeze_holder); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block *, enum freeze_holder); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); struct dquot __attribute__((btf_type_tag("rcu"))) ** (*get_dquots)(struct inode *); long (*nr_cached_objects)(struct super_block *, struct shrink_control *); long (*free_cached_objects)(struct super_block *, struct shrink_control *); void (*shutdown)(struct super_block *); }; enum writeback_sync_modes { WB_SYNC_NONE = 0, WB_SYNC_ALL = 1, }; struct swap_iocb; struct writeback_control { long nr_to_write; long pages_skipped; loff_t range_start; loff_t range_end; enum writeback_sync_modes sync_mode; unsigned int for_kupdate: 1; unsigned int for_background: 1; unsigned int tagged_writepages: 1; unsigned int for_reclaim: 1; unsigned int range_cyclic: 1; unsigned int for_sync: 1; unsigned int unpinned_fscache_wb: 1; unsigned int no_cgroup_owner: 1; struct swap_iocb **swap_plug; struct bdi_writeback *wb; struct inode *inode; int wb_id; int wb_lcand_id; int wb_tcand_id; size_t wb_bytes; size_t wb_lcand_bytes; size_t wb_tcand_bytes; }; typedef __kernel_uid32_t projid_t; typedef struct { projid_t val; } kprojid_t; enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2, }; struct kqid { union { kuid_t uid; kgid_t gid; kprojid_t projid; }; enum quota_type type; }; struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time64_t dqb_btime; time64_t dqb_itime; }; struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; spinlock_t dq_dqb_lock; atomic_t dq_count; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; }; struct shrink_control { gfp_t gfp_mask; int nid; unsigned long nr_to_scan; unsigned long nr_scanned; struct mem_cgroup *memcg; }; struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); int (*get_inode_usage)(struct 
inode *, qsize_t *); int (*get_next_id)(struct super_block *, struct kqid *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct qc_info; struct qc_dqblk; struct qc_state; struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, const struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; }; struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; }; struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; unsigned long long ino; blkcnt_t blocks; blkcnt_t nextents; }; struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3]; }; struct fid; struct iomap; struct export_operations { int (*encode_fh)(struct inode *, __u32 *, int *, struct inode *); struct dentry * (*fh_to_dentry)(struct super_block *, struct fid *, int, int); struct dentry * (*fh_to_parent)(struct super_block *, struct fid *, int, int); int (*get_name)(struct dentry *, char *, struct dentry *); struct dentry * (*get_parent)(struct dentry *); int (*commit_metadata)(struct inode *); int (*get_uuid)(struct super_block *, u8 *, u32 *, u64 *); int (*map_blocks)(struct inode *, loff_t, u64, struct iomap *, bool, u32 *); int (*commit_blocks)(struct inode *, struct iomap *, int, struct iattr *); unsigned long flags; }; struct xattr_handler { const char *name; const char *prefix; int flags; bool (*list)(struct dentry *); int (*get)(const struct xattr_handler *, struct dentry *, struct inode *, const char *, void *, size_t); int (*set)(const struct xattr_handler *, struct mnt_idmap *, struct dentry *, struct inode *, const char *, const void *, size_t, int); }; union fscrypt_policy; struct fscrypt_operations { unsigned int needs_bounce_pages: 1; unsigned int has_32bit_inodes: 1; unsigned int supports_subblock_data_units: 1; const char *legacy_key_prefix; int (*get_context)(struct inode *, void *, size_t); int (*set_context)(struct inode *, const void *, size_t, void *); const union fscrypt_policy * (*get_dummy_policy)(struct super_block *); bool (*empty_dir)(struct inode *); bool (*has_stable_inodes)(struct super_block *); struct block_device ** (*get_devices)(struct super_block *, unsigned int *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; u64 
android_oem_data1[4]; }; struct fsverity_operations { int (*begin_enable_verity)(struct file *); int (*end_enable_verity)(struct file *, const void *, size_t, u64); int (*get_verity_descriptor)(struct inode *, void *, size_t); struct page * (*read_merkle_tree_page)(struct inode *, unsigned long, unsigned long); int (*write_merkle_tree_block)(struct inode *, const void *, u64, unsigned int); }; struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; }; struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); int (*get_next_id)(struct super_block *, struct kqid *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; typedef struct { int val[2]; } __kernel_fsid_t; typedef struct fsnotify_mark_connector __attribute__((btf_type_tag("rcu"))) *fsnotify_connp_t; struct fsnotify_mark_connector { spinlock_t lock; unsigned short type; unsigned short flags; __kernel_fsid_t fsid; union { fsnotify_connp_t *obj; struct fsnotify_mark_connector *destroy_next; }; struct hlist_head list; }; struct list_lru_one { struct list_head list; long nr_items; }; struct list_lru_node { spinlock_t lock; struct list_lru_one lru; long nr_items; long: 64; long: 64; long: 64; }; struct delayed_call { void (*fn)(void *); void *arg; }; typedef struct { uid_t val; } vfsuid_t; typedef struct { gid_t val; } vfsgid_t; struct iattr { unsigned int ia_valid; umode_t ia_mode; union { kuid_t ia_uid; vfsuid_t ia_vfsuid; }; union { kgid_t ia_gid; vfsgid_t ia_vfsgid; }; loff_t ia_size; struct timespec64 ia_atime; struct timespec64 ia_mtime; struct timespec64 ia_ctime; struct file *ia_file; }; struct kstat { u32 result_mask; umode_t mode; unsigned int nlink; uint32_t blksize; u64 attributes; u64 attributes_mask; u64 ino; dev_t dev; dev_t rdev; kuid_t uid; kgid_t gid; loff_t size; struct timespec64 atime; struct timespec64 mtime; struct timespec64 ctime; struct timespec64 btime; u64 blocks; u64 mnt_id; u32 dio_mem_align; u32 dio_offset_align; u64 change_cookie; }; struct offset_ctx { struct xarray xa; u32 next_offset; }; struct cdev { struct kobject kobj; struct module *owner; const struct file_operations *ops; struct list_head list; dev_t dev; unsigned int count; }; enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2, MIGRATE_SYNC_NO_COPY = 3, }; struct readahead_control; struct swap_info_struct; struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*read_folio)(struct file *, struct folio *); int (*writepages)(struct address_space *, struct writeback_control *); bool (*dirty_folio)(struct address_space *, struct folio *); void (*readahead)(struct readahead_control *); int (*write_begin)(struct file *, struct address_space *, loff_t, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t, unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t); void (*invalidate_folio)(struct folio *, size_t, size_t); bool (*release_folio)(struct folio *, gfp_t); void (*free_folio)(struct folio *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); int (*migrate_folio)(struct address_space *, struct folio *, struct 
folio *, enum migrate_mode); int (*launder_folio)(struct folio *); bool (*is_partially_uptodate)(struct folio *, size_t, size_t); void (*is_dirty_writeback)(struct folio *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); int (*swap_rw)(struct kiocb *, struct iov_iter *); }; struct readahead_control { struct file *file; struct address_space *mapping; struct file_ra_state *ra; unsigned long _index; unsigned int _nr_pages; unsigned int _batch_count; bool _workingset; unsigned long _pflags; u64 android_oem_data1; }; struct swap_cluster_info; struct percpu_cluster; struct swap_info_struct { struct percpu_ref users; unsigned long flags; short prio; struct plist_node list; signed char type; unsigned int max; unsigned char *swap_map; struct swap_cluster_info *cluster_info; struct list_head free_clusters; struct list_head nonfull_clusters[10]; unsigned int lowest_bit; unsigned int highest_bit; unsigned int pages; unsigned int inuse_pages; unsigned int cluster_next; unsigned int cluster_nr; unsigned int __attribute__((btf_type_tag("percpu"))) *cluster_next_cpu; struct percpu_cluster __attribute__((btf_type_tag("percpu"))) *percpu_cluster; struct rb_root swap_extent_root; struct block_device *bdev; struct file *swap_file; unsigned int old_block_size; struct completion comp; spinlock_t lock; spinlock_t cont_lock; struct work_struct discard_work; struct list_head discard_clusters; struct plist_node avail_lists[0]; }; struct swap_cluster_info { spinlock_t lock; unsigned int count: 12; unsigned int state: 3; unsigned int order: 4; unsigned int reserved: 1; unsigned int flags: 4; struct list_head list; }; struct percpu_cluster { unsigned int next[10]; }; struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); }; struct kernel_symbol { int value_offset; int name_offset; int namespace_offset; }; struct kernel_param_ops; struct kparam_string; struct kparam_array; struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union { void *arg; const struct kparam_string *str; const struct kparam_array *arr; }; }; struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); }; struct kparam_string { unsigned int maxlen; char *string; }; struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; }; struct exception_table_entry { int insn; int fixup; short type; short data; }; typedef __u32 __le32; struct plt_entry { __le32 adrp; __le32 add; __le32 br; }; enum kvm_pgtable_prot { KVM_PGTABLE_PROT_X = 1ULL, KVM_PGTABLE_PROT_W = 2ULL, KVM_PGTABLE_PROT_R = 4ULL, KVM_PGTABLE_PROT_DEVICE = 8ULL, KVM_PGTABLE_PROT_NC = 16ULL, KVM_PGTABLE_PROT_PXN = 32ULL, KVM_PGTABLE_PROT_UXN = 64ULL, KVM_PGTABLE_PROT_SW0 = 36028797018963968ULL, KVM_PGTABLE_PROT_SW1 = 72057594037927936ULL, KVM_PGTABLE_PROT_SW2 = 144115188075855872ULL, KVM_PGTABLE_PROT_SW3 = 288230376151711744ULL, }; typedef u64 kvm_pte_t; enum pkvm_psci_notification { PKVM_PSCI_CPU_SUSPEND = 0, 
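/*
 * Note (illustrative, derived from the enumerator values above): the
 * kvm_pgtable_prot enumerators are single-bit flags (X = bit 0, W = bit 1,
 * R = bit 2, DEVICE = bit 3, NC = bit 4, PXN = bit 5, UXN = bit 6,
 * SW0..SW3 = bits 55..58) that are OR-ed together into a page-table
 * permission mask, e.g. KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W for a
 * read-write, non-executable mapping.
 */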
PKVM_PSCI_SYSTEM_SUSPEND = 1, PKVM_PSCI_CPU_ENTRY = 2, }; struct user_pt_regs; struct kvm_hyp_req; struct kvm_hyp_iommu; struct kvm_hyp_iommu_domain; struct iommu_iotlb_gather; struct kvm_iommu_paddr_cache; struct pkvm_module_ops { int (*create_private_mapping)(phys_addr_t, size_t, enum kvm_pgtable_prot, unsigned long *); void * (*alloc_module_va)(u64); int (*map_module_page)(u64, void *, enum kvm_pgtable_prot, bool); int (*register_serial_driver)(void (*)(char)); void (*putc)(char); void (*puts)(const char *); void (*putx64)(u64); void * (*fixmap_map)(phys_addr_t); void (*fixmap_unmap)(); void * (*linear_map_early)(phys_addr_t, size_t, enum kvm_pgtable_prot); void (*linear_unmap_early)(void *, size_t); void (*flush_dcache_to_poc)(void *, size_t); void (*update_hcr_el2)(unsigned long, unsigned long); void (*update_hfgwtr_el2)(unsigned long, unsigned long); int (*register_host_perm_fault_handler)(int (*)(struct user_pt_regs *, u64, u64)); int (*host_stage2_mod_prot)(u64, enum kvm_pgtable_prot, u64, bool); int (*host_stage2_get_leaf)(phys_addr_t, kvm_pte_t *, u32 *); int (*register_host_smc_handler)(bool (*)(struct user_pt_regs *)); int (*register_default_trap_handler)(bool (*)(struct user_pt_regs *)); int (*register_illegal_abt_notifier)(void (*)(struct user_pt_regs *)); int (*register_psci_notifier)(void (*)(enum pkvm_psci_notification, struct user_pt_regs *)); int (*register_hyp_panic_notifier)(void (*)(struct user_pt_regs *)); int (*register_unmask_serror)(bool (*)(), void (*)()); int (*host_donate_hyp)(u64, u64, bool); int (*host_donate_hyp_prot)(u64, u64, bool, enum kvm_pgtable_prot); int (*hyp_donate_host)(u64, u64); int (*host_share_hyp)(u64); int (*host_unshare_hyp)(u64); int (*pin_shared_mem)(void *, void *); void (*unpin_shared_mem)(void *, void *); void * (*memcpy)(void *, const void *, size_t); void * (*memset)(void *, int, size_t); phys_addr_t (*hyp_pa)(void *); void * (*hyp_va)(phys_addr_t); unsigned long (*kern_hyp_va)(unsigned long); void * (*hyp_alloc)(size_t); int (*hyp_alloc_errno)(); void (*hyp_free)(void *); void * (*iommu_donate_pages)(u8, bool); void (*iommu_reclaim_pages)(void *, u8); int (*iommu_request)(struct kvm_hyp_req *); int (*iommu_init_device)(struct kvm_hyp_iommu *); void (*udelay)(unsigned long); u8 (*hyp_alloc_missing_donations)(); bool (*list_add_valid_or_report)(struct list_head *, struct list_head *, struct list_head *); bool (*list_del_entry_valid_or_report)(struct list_head *); void (*iommu_iotlb_gather_add_page)(struct kvm_hyp_iommu_domain *, struct iommu_iotlb_gather *, unsigned long, size_t); int (*register_hyp_event_ids)(unsigned long, unsigned long); void * (*tracing_reserve_entry)(unsigned long); void (*tracing_commit_entry)(); void * (*iommu_donate_pages_atomic)(u8); void (*iommu_reclaim_pages_atomic)(void *, u8); int (*iommu_snapshot_host_stage2)(struct kvm_hyp_iommu_domain *); int (*hyp_smp_processor_id)(); union { void (*iommu_flush_unmap_cache)(struct kvm_iommu_paddr_cache *); struct { u64 android_kabi_reserved1; }; union {}; }; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; u64 android_kabi_reserved5; u64 android_kabi_reserved6; u64 android_kabi_reserved7; u64 android_kabi_reserved8; u64 android_kabi_reserved9; u64 android_kabi_reserved10; u64 android_kabi_reserved11; u64 android_kabi_reserved12; u64 android_kabi_reserved13; u64 android_kabi_reserved14; u64 android_kabi_reserved15; u64 android_kabi_reserved16; u64 android_kabi_reserved17; u64 android_kabi_reserved18; u64 android_kabi_reserved19; u64 
android_kabi_reserved20; u64 android_kabi_reserved21; u64 android_kabi_reserved22; u64 android_kabi_reserved23; u64 android_kabi_reserved24; u64 android_kabi_reserved25; u64 android_kabi_reserved26; u64 android_kabi_reserved27; u64 android_kabi_reserved28; u64 android_kabi_reserved29; u64 android_kabi_reserved30; u64 android_kabi_reserved31; u64 android_kabi_reserved32; }; struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; }; typedef __u32 Elf64_Word; typedef __u16 Elf64_Half; typedef __u64 Elf64_Addr; typedef __u64 Elf64_Xword; struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; }; struct srcu_data; struct srcu_usage; struct srcu_struct { unsigned int srcu_idx; struct srcu_data __attribute__((btf_type_tag("percpu"))) *sda; struct lockdep_map dep_map; struct srcu_usage *srcu_sup; }; struct rcu_segcblist { struct callback_head *head; struct callback_head **tails[4]; unsigned long gp_seq[4]; atomic_long_t len; long seglen[4]; u8 flags; }; struct srcu_node; struct srcu_data { atomic_long_t srcu_lock_count[2]; atomic_long_t srcu_unlock_count[2]; int srcu_nmi_safety; long: 64; long: 64; long: 64; spinlock_t lock; struct rcu_segcblist srcu_cblist; unsigned long srcu_gp_seq_needed; unsigned long srcu_gp_seq_needed_exp; bool srcu_cblist_invoking; struct timer_list delay_work; struct work_struct work; struct callback_head srcu_barrier_head; struct srcu_node *mynode; unsigned long grpmask; int cpu; struct srcu_struct *ssp; long: 64; long: 64; }; struct srcu_node { spinlock_t lock; unsigned long srcu_have_cbs[4]; unsigned long srcu_data_have_cbs[4]; unsigned long srcu_gp_seq_needed_exp; struct srcu_node *srcu_parent; int grplo; int grphi; }; struct srcu_usage { struct srcu_node *node; struct srcu_node *level[3]; int srcu_size_state; struct mutex srcu_cb_mutex; spinlock_t lock; struct mutex srcu_gp_mutex; unsigned long srcu_gp_seq; unsigned long srcu_gp_seq_needed; unsigned long srcu_gp_seq_needed_exp; unsigned long srcu_gp_start; unsigned long srcu_last_gp_end; unsigned long srcu_size_jiffies; unsigned long srcu_n_lock_retries; unsigned long srcu_n_exp_nodelay; bool sda_is_static; unsigned long srcu_barrier_seq; struct mutex srcu_barrier_mutex; struct completion srcu_barrier_completion; atomic_t srcu_barrier_cpu_cnt; unsigned long reschedule_jiffies; unsigned long reschedule_count; struct delayed_work work; struct srcu_struct *srcu_ssp; }; struct bpf_raw_event_map { struct tracepoint *tp; void *bpf_func; u32 num_args; u32 writable_size; long: 64; }; struct trace_eval_map { const char *system; const char *eval_string; unsigned long eval_value; }; enum kunit_speed { KUNIT_SPEED_UNSET = 0, KUNIT_SPEED_VERY_SLOW = 1, KUNIT_SPEED_SLOW = 2, KUNIT_SPEED_NORMAL = 3, KUNIT_SPEED_MAX = 3, }; struct kunit_attributes { enum kunit_speed speed; }; struct kunit_case; struct kunit_suite { const char name[256]; int (*suite_init)(struct kunit_suite *); void (*suite_exit)(struct kunit_suite *); int (*init)(struct kunit *); void (*exit)(struct kunit *); struct kunit_case *test_cases; struct kunit_attributes attr; char status_comment[256]; struct dentry *debugfs; char *log; int suite_init_err; }; struct kunit_case { void (*run_case)(struct kunit *); const char *name; const void * (*generate_params)(const void *, char *); struct kunit_attributes attr; enum kunit_status status; char *module_name; char *log; }; struct static_key_true { struct static_key key; }; struct static_key_false 
{ struct static_key key; }; struct _ddebug { const char *modname; const char *function; const char *filename; const char *format; unsigned int lineno: 18; unsigned int class_id: 6; unsigned int flags: 8; union { struct static_key_true dd_key_true; struct static_key_false dd_key_false; } key; }; enum class_map_type { DD_CLASS_TYPE_DISJOINT_BITS = 0, DD_CLASS_TYPE_LEVEL_NUM = 1, DD_CLASS_TYPE_DISJOINT_NAMES = 2, DD_CLASS_TYPE_LEVEL_NAMES = 3, }; struct ddebug_class_map { struct list_head link; struct module *mod; const char *mod_name; const char **class_names; const int length; const int base; enum class_map_type map_type; }; struct perf_event_pmu_context { struct pmu *pmu; struct perf_event_context *ctx; struct list_head pmu_ctx_entry; struct list_head pinned_active; struct list_head flexible_active; unsigned int embedded: 1; unsigned int nr_events; unsigned int nr_cgroups; atomic_t refcount; struct callback_head callback_head; void *task_ctx_data; int rotate_necessary; }; struct perf_cpu_pmu_context { struct perf_event_pmu_context epc; struct perf_event_pmu_context *task_epc; struct list_head sched_cb_entry; int sched_cb_usage; int active_oncpu; int exclusive; raw_spinlock_t hrtimer_lock; struct hrtimer hrtimer; ktime_t hrtimer_interval; unsigned int hrtimer_active; }; struct perf_output_handle { struct perf_event *event; struct perf_buffer *rb; unsigned long wakeup; unsigned long size; u64 aux_flags; union { void *addr; unsigned long head; }; int page; }; struct fasync_struct { rwlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; }; struct perf_addr_filter_range { unsigned long start; unsigned long size; }; union perf_sample_weight { __u64 full; struct { __u32 var1_dw; __u16 var2_w; __u16 var3_w; }; }; union perf_mem_data_src { __u64 val; struct { __u64 mem_op: 5; __u64 mem_lvl: 14; __u64 mem_snoop: 5; __u64 mem_lock: 2; __u64 mem_dtlb: 7; __u64 mem_lvl_num: 4; __u64 mem_remote: 1; __u64 mem_snoopx: 2; __u64 mem_blk: 3; __u64 mem_hops: 3; __u64 mem_rsvd: 18; }; }; struct perf_regs { __u64 abi; struct pt_regs *regs; }; struct perf_callchain_entry; struct perf_raw_record; struct perf_branch_stack; struct perf_sample_data { u64 sample_flags; u64 period; u64 dyn_size; u64 type; struct { u32 pid; u32 tid; } tid_entry; u64 time; u64 id; struct { u32 cpu; u32 reserved; } cpu_entry; u64 ip; struct perf_callchain_entry *callchain; struct perf_raw_record *raw; struct perf_branch_stack *br_stack; union perf_sample_weight weight; union perf_mem_data_src data_src; u64 txn; struct perf_regs regs_user; struct perf_regs regs_intr; u64 stack_user_size; u64 stream_id; u64 cgroup; u64 addr; u64 phys_addr; u64 data_page_size; u64 code_page_size; u64 aux_size; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct perf_callchain_entry { __u64 nr; __u64 ip[0]; }; typedef unsigned long (*perf_copy_f)(void *, const void *, unsigned long, unsigned long); struct perf_raw_frag { union { struct perf_raw_frag *next; unsigned long pad; }; perf_copy_f copy; void *data; u32 size; } __attribute__((packed)); struct perf_raw_record { struct perf_raw_frag frag; u32 size; }; struct perf_branch_entry { __u64 from; __u64 to; __u64 mispred: 1; __u64 predicted: 1; __u64 in_tx: 1; __u64 abort: 1; __u64 cycles: 16; __u64 type: 4; __u64 spec: 2; __u64 new_type: 4; __u64 priv: 3; __u64 reserved: 31; }; struct perf_branch_stack { __u64 nr; __u64 hw_idx; struct perf_branch_entry entries[0]; }; struct user_pt_regs { __u64 regs[31]; __u64 sp; __u64 pc; __u64 
pstate; }; struct pt_regs { union { struct user_pt_regs user_regs; struct { u64 regs[31]; u64 sp; u64 pc; u64 pstate; }; }; u64 orig_x0; s32 syscallno; u32 unused2; u64 sdei_ttbr1; u64 pmr_save; u64 stackframe[2]; u64 lockdep_hardirqs; u64 exit_rcu; }; struct trace_event_fields { const char *type; union { struct { const char *name; const int size; const int align; const int is_signed; const int filter_type; const int len; }; int (*define_fields)(struct trace_event_call *); }; }; typedef void (*btf_trace_initcall_level)(void *, const char *); typedef int (*initcall_t)(); typedef void (*btf_trace_initcall_start)(void *, initcall_t); typedef void (*btf_trace_initcall_finish)(void *, initcall_t, int); struct obs_kernel_param { const char *str; int (*setup_func)(char *); int early; }; enum system_states { SYSTEM_BOOTING = 0, SYSTEM_SCHEDULING = 1, SYSTEM_FREEING_INITMEM = 2, SYSTEM_RUNNING = 3, SYSTEM_HALT = 4, SYSTEM_POWER_OFF = 5, SYSTEM_RESTART = 6, SYSTEM_SUSPEND = 7, }; typedef int initcall_entry_t; enum cpuhp_state { CPUHP_INVALID = -1, CPUHP_OFFLINE = 0, CPUHP_CREATE_THREADS = 1, CPUHP_PERF_PREPARE = 2, CPUHP_PERF_X86_PREPARE = 3, CPUHP_PERF_X86_AMD_UNCORE_PREP = 4, CPUHP_PERF_POWER = 5, CPUHP_PERF_SUPERH = 6, CPUHP_X86_HPET_DEAD = 7, CPUHP_X86_APB_DEAD = 8, CPUHP_X86_MCE_DEAD = 9, CPUHP_VIRT_NET_DEAD = 10, CPUHP_IBMVNIC_DEAD = 11, CPUHP_SLUB_DEAD = 12, CPUHP_DEBUG_OBJ_DEAD = 13, CPUHP_MM_WRITEBACK_DEAD = 14, CPUHP_MM_DEMOTION_DEAD = 15, CPUHP_MM_VMSTAT_DEAD = 16, CPUHP_SOFTIRQ_DEAD = 17, CPUHP_NET_MVNETA_DEAD = 18, CPUHP_CPUIDLE_DEAD = 19, CPUHP_ARM64_FPSIMD_DEAD = 20, CPUHP_ARM_OMAP_WAKE_DEAD = 21, CPUHP_IRQ_POLL_DEAD = 22, CPUHP_BLOCK_SOFTIRQ_DEAD = 23, CPUHP_BIO_DEAD = 24, CPUHP_ACPI_CPUDRV_DEAD = 25, CPUHP_S390_PFAULT_DEAD = 26, CPUHP_BLK_MQ_DEAD = 27, CPUHP_FS_BUFF_DEAD = 28, CPUHP_PRINTK_DEAD = 29, CPUHP_MM_MEMCQ_DEAD = 30, CPUHP_PERCPU_CNT_DEAD = 31, CPUHP_RADIX_DEAD = 32, CPUHP_PAGE_ALLOC = 33, CPUHP_NET_DEV_DEAD = 34, CPUHP_PCI_XGENE_DEAD = 35, CPUHP_IOMMU_IOVA_DEAD = 36, CPUHP_LUSTRE_CFS_DEAD = 37, CPUHP_AP_ARM_CACHE_B15_RAC_DEAD = 38, CPUHP_PADATA_DEAD = 39, CPUHP_AP_DTPM_CPU_DEAD = 40, CPUHP_RANDOM_PREPARE = 41, CPUHP_WORKQUEUE_PREP = 42, CPUHP_POWER_NUMA_PREPARE = 43, CPUHP_HRTIMERS_PREPARE = 44, CPUHP_PROFILE_PREPARE = 45, CPUHP_X2APIC_PREPARE = 46, CPUHP_SMPCFD_PREPARE = 47, CPUHP_RELAY_PREPARE = 48, CPUHP_SLAB_PREPARE = 49, CPUHP_MD_RAID5_PREPARE = 50, CPUHP_RCUTREE_PREP = 51, CPUHP_CPUIDLE_COUPLED_PREPARE = 52, CPUHP_POWERPC_PMAC_PREPARE = 53, CPUHP_POWERPC_MMU_CTX_PREPARE = 54, CPUHP_XEN_PREPARE = 55, CPUHP_XEN_EVTCHN_PREPARE = 56, CPUHP_ARM_SHMOBILE_SCU_PREPARE = 57, CPUHP_SH_SH3X_PREPARE = 58, CPUHP_NET_FLOW_PREPARE = 59, CPUHP_TOPOLOGY_PREPARE = 60, CPUHP_NET_IUCV_PREPARE = 61, CPUHP_ARM_BL_PREPARE = 62, CPUHP_TRACE_RB_PREPARE = 63, CPUHP_MM_ZS_PREPARE = 64, CPUHP_MM_ZSWP_MEM_PREPARE = 65, CPUHP_MM_ZSWP_POOL_PREPARE = 66, CPUHP_KVM_PPC_BOOK3S_PREPARE = 67, CPUHP_ZCOMP_PREPARE = 68, CPUHP_TIMERS_PREPARE = 69, CPUHP_MIPS_SOC_PREPARE = 70, CPUHP_BP_PREPARE_DYN = 71, CPUHP_BP_PREPARE_DYN_END = 91, CPUHP_BP_KICK_AP = 92, CPUHP_BRINGUP_CPU = 93, CPUHP_AP_IDLE_DEAD = 94, CPUHP_AP_OFFLINE = 95, CPUHP_AP_CACHECTRL_STARTING = 96, CPUHP_AP_SCHED_STARTING = 97, CPUHP_AP_RCUTREE_DYING = 98, CPUHP_AP_CPU_PM_STARTING = 99, CPUHP_AP_IRQ_GIC_STARTING = 100, CPUHP_AP_IRQ_HIP04_STARTING = 101, CPUHP_AP_IRQ_APPLE_AIC_STARTING = 102, CPUHP_AP_IRQ_ARMADA_XP_STARTING = 103, CPUHP_AP_IRQ_BCM2836_STARTING = 104, CPUHP_AP_IRQ_MIPS_GIC_STARTING = 105, CPUHP_AP_IRQ_RISCV_STARTING = 106, 
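/*
 * Note (not part of the generated dump): CPUHP_BP_PREPARE_DYN..CPUHP_BP_PREPARE_DYN_END
 * and CPUHP_AP_ONLINE_DYN..CPUHP_AP_ONLINE_DYN_END are ranges reserved for dynamically
 * allocated hotplug states. Kernel-side code usually registers its callbacks through
 * cpuhp_setup_state() (declared in linux/cpuhotplug.h, not in this dump). A minimal
 * sketch, with hypothetical callback names:
 *
 *     static int example_cpu_online(unsigned int cpu)  { return 0; }
 *     static int example_cpu_offline(unsigned int cpu) { return 0; }
 *
 *     ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
 *                             example_cpu_online, example_cpu_offline);
 */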
CPUHP_AP_IRQ_LOONGARCH_STARTING = 107, CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING = 108, CPUHP_AP_ARM_MVEBU_COHERENCY = 109, CPUHP_AP_MICROCODE_LOADER = 110, CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING = 111, CPUHP_AP_PERF_X86_STARTING = 112, CPUHP_AP_PERF_X86_AMD_IBS_STARTING = 113, CPUHP_AP_PERF_X86_CQM_STARTING = 114, CPUHP_AP_PERF_X86_CSTATE_STARTING = 115, CPUHP_AP_PERF_XTENSA_STARTING = 116, CPUHP_AP_MIPS_OP_LOONGSON3_STARTING = 117, CPUHP_AP_ARM_VFP_STARTING = 118, CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING = 119, CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING = 120, CPUHP_AP_PERF_ARM_ACPI_STARTING = 121, CPUHP_AP_PERF_ARM_STARTING = 122, CPUHP_AP_PERF_RISCV_STARTING = 123, CPUHP_AP_ARM_L2X0_STARTING = 124, CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING = 125, CPUHP_AP_ARM_ARCH_TIMER_STARTING = 126, CPUHP_AP_ARM_GLOBAL_TIMER_STARTING = 127, CPUHP_AP_JCORE_TIMER_STARTING = 128, CPUHP_AP_ARM_TWD_STARTING = 129, CPUHP_AP_QCOM_TIMER_STARTING = 130, CPUHP_AP_TEGRA_TIMER_STARTING = 131, CPUHP_AP_ARMADA_TIMER_STARTING = 132, CPUHP_AP_MARCO_TIMER_STARTING = 133, CPUHP_AP_MIPS_GIC_TIMER_STARTING = 134, CPUHP_AP_ARC_TIMER_STARTING = 135, CPUHP_AP_RISCV_TIMER_STARTING = 136, CPUHP_AP_CLINT_TIMER_STARTING = 137, CPUHP_AP_CSKY_TIMER_STARTING = 138, CPUHP_AP_TI_GP_TIMER_STARTING = 139, CPUHP_AP_HYPERV_TIMER_STARTING = 140, CPUHP_AP_DUMMY_TIMER_STARTING = 141, CPUHP_AP_ARM_XEN_STARTING = 142, CPUHP_AP_ARM_XEN_RUNSTATE_STARTING = 143, CPUHP_AP_ARM_CORESIGHT_STARTING = 144, CPUHP_AP_ARM_CORESIGHT_CTI_STARTING = 145, CPUHP_AP_ARM64_ISNDEP_STARTING = 146, CPUHP_AP_SMPCFD_DYING = 147, CPUHP_AP_HRTIMERS_DYING = 148, CPUHP_AP_X86_TBOOT_DYING = 149, CPUHP_AP_ARM_CACHE_B15_RAC_DYING = 150, CPUHP_AP_ONLINE = 151, CPUHP_TEARDOWN_CPU = 152, CPUHP_AP_ONLINE_IDLE = 153, CPUHP_AP_HYPERV_ONLINE = 154, CPUHP_AP_KVM_ONLINE = 155, CPUHP_AP_SCHED_WAIT_EMPTY = 156, CPUHP_AP_SMPBOOT_THREADS = 157, CPUHP_AP_X86_VDSO_VMA_ONLINE = 158, CPUHP_AP_IRQ_AFFINITY_ONLINE = 159, CPUHP_AP_BLK_MQ_ONLINE = 160, CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS = 161, CPUHP_AP_X86_INTEL_EPB_ONLINE = 162, CPUHP_AP_PERF_ONLINE = 163, CPUHP_AP_PERF_X86_ONLINE = 164, CPUHP_AP_PERF_X86_UNCORE_ONLINE = 165, CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE = 166, CPUHP_AP_PERF_X86_AMD_POWER_ONLINE = 167, CPUHP_AP_PERF_X86_RAPL_ONLINE = 168, CPUHP_AP_PERF_X86_CQM_ONLINE = 169, CPUHP_AP_PERF_X86_CSTATE_ONLINE = 170, CPUHP_AP_PERF_X86_IDXD_ONLINE = 171, CPUHP_AP_PERF_S390_CF_ONLINE = 172, CPUHP_AP_PERF_S390_SF_ONLINE = 173, CPUHP_AP_PERF_ARM_CCI_ONLINE = 174, CPUHP_AP_PERF_ARM_CCN_ONLINE = 175, CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE = 176, CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE = 177, CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE = 178, CPUHP_AP_PERF_ARM_HISI_L3_ONLINE = 179, CPUHP_AP_PERF_ARM_HISI_PA_ONLINE = 180, CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE = 181, CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE = 182, CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE = 183, CPUHP_AP_PERF_ARM_L2X0_ONLINE = 184, CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE = 185, CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE = 186, CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE = 187, CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE = 188, CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE = 189, CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE = 190, CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE = 191, CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE = 192, CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE = 193, CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE = 194, CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE = 195, CPUHP_AP_PERF_CSKY_ONLINE = 196, CPUHP_AP_WATCHDOG_ONLINE = 197, CPUHP_AP_WORKQUEUE_ONLINE = 198, CPUHP_AP_RANDOM_ONLINE = 199, CPUHP_AP_RCUTREE_ONLINE = 200, 
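/*
 * Illustrative sketch (not part of the generated dump): the trace_event_raw_*
 * structs defined a little further below are the context layouts seen by
 * tracepoint BPF programs built against this header. Assuming libbpf's
 * bpf_helpers.h is also included, a program on initcall_finish could look like:
 *
 *     SEC("tracepoint/initcall/initcall_finish")
 *     int handle_initcall_finish(struct trace_event_raw_initcall_finish *ctx)
 *     {
 *             bpf_printk("initcall returned %d", ctx->ret);
 *             return 0;
 *     }
 */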
CPUHP_AP_BASE_CACHEINFO_ONLINE = 201, CPUHP_AP_ONLINE_DYN = 202, CPUHP_AP_ONLINE_DYN_END = 232, CPUHP_AP_MM_DEMOTION_ONLINE = 233, CPUHP_AP_X86_HPET_ONLINE = 234, CPUHP_AP_X86_KVM_CLK_ONLINE = 235, CPUHP_AP_ACTIVE = 236, CPUHP_ONLINE = 237, }; enum { false = 0, true = 1, }; enum { EVENT_FILE_FL_ENABLED = 1, EVENT_FILE_FL_RECORDED_CMD = 2, EVENT_FILE_FL_RECORDED_TGID = 4, EVENT_FILE_FL_FILTERED = 8, EVENT_FILE_FL_NO_SET_FILTER = 16, EVENT_FILE_FL_SOFT_MODE = 32, EVENT_FILE_FL_SOFT_DISABLED = 64, EVENT_FILE_FL_TRIGGER_MODE = 128, EVENT_FILE_FL_TRIGGER_COND = 256, EVENT_FILE_FL_PID_FILTER = 512, EVENT_FILE_FL_WAS_ENABLED = 1024, EVENT_FILE_FL_FREED = 2048, }; enum node_states { N_POSSIBLE = 0, N_ONLINE = 1, N_NORMAL_MEMORY = 2, N_HIGH_MEMORY = 2, N_MEMORY = 3, N_CPU = 4, N_GENERIC_INITIATOR = 5, NR_NODE_STATES = 6, }; enum refcount_saturation_type { REFCOUNT_ADD_NOT_ZERO_OVF = 0, REFCOUNT_ADD_OVF = 1, REFCOUNT_ADD_UAF = 2, REFCOUNT_SUB_UAF = 3, REFCOUNT_DEC_LEAK = 4, }; enum kmalloc_cache_type { KMALLOC_NORMAL = 0, KMALLOC_DMA = 0, KMALLOC_RANDOM_START = 0, KMALLOC_RANDOM_END = 0, KMALLOC_RECLAIM = 1, KMALLOC_CGROUP = 2, NR_KMALLOC_TYPES = 3, }; struct trace_event_raw_initcall_level { struct trace_entry ent; u32 __data_loc_level; char __data[0]; }; struct trace_event_raw_initcall_start { struct trace_entry ent; initcall_t func; char __data[0]; }; struct trace_event_raw_initcall_finish { struct trace_entry ent; initcall_t func; int ret; char __data[0]; }; struct blacklist_entry { struct list_head next; char *buf; }; struct eventfs_inode; struct trace_subsystem_dir; struct trace_event_file { struct list_head list; struct trace_event_call *event_call; struct event_filter __attribute__((btf_type_tag("rcu"))) *filter; struct eventfs_inode *ei; struct trace_array *tr; struct trace_subsystem_dir *system; struct list_head triggers; unsigned long flags; atomic_t ref; atomic_t sm_ref; atomic_t tm_ref; }; struct prog_entry; struct event_filter { struct prog_entry __attribute__((btf_type_tag("rcu"))) *prog; char *filter_string; }; struct trace_event_data_offsets_initcall_level { u32 level; }; struct trace_buffer; struct ring_buffer_event; struct trace_event_buffer { struct trace_buffer *buffer; struct ring_buffer_event *event; struct trace_event_file *trace_file; void *entry; unsigned int trace_ctx; struct pt_regs *regs; }; struct ring_buffer_event { u32 type_len: 5; u32 time_delta: 27; u32 array[0]; }; typedef short __s16; typedef __s16 s16; typedef u16 uint16_t; struct xbc_node { uint16_t next; uint16_t child; uint16_t parent; uint16_t data; }; struct trace_event_data_offsets_initcall_start {}; struct trace_event_data_offsets_initcall_finish {}; typedef __u32 Elf32_Word; struct elf32_note { Elf32_Word n_namesz; Elf32_Word n_descsz; Elf32_Word n_type; }; struct posix_acl_entry { short e_tag; unsigned short e_perm; union { kuid_t e_uid; kgid_t e_gid; }; }; struct posix_acl { refcount_t a_refcount; struct callback_head a_rcu; unsigned int a_count; struct posix_acl_entry a_entries[0]; }; typedef __u64 __addrpair; typedef __u32 __portpair; typedef struct { struct net *net; } possible_net_t; struct proto; struct sock_common { union { __addrpair skc_addrpair; struct { __be32 skc_daddr; __be32 skc_rcv_saddr; }; }; union { unsigned int skc_hash; __u16 skc_u16hashes[2]; }; union { __portpair skc_portpair; struct { __be16 skc_dport; __u16 skc_num; }; }; unsigned short skc_family; volatile unsigned char skc_state; unsigned char skc_reuse: 4; unsigned char skc_reuseport: 1; unsigned char skc_ipv6only: 1; unsigned 
char skc_net_refcnt: 1; int skc_bound_dev_if; union { struct hlist_node skc_bind_node; struct hlist_node skc_portaddr_node; }; struct proto *skc_prot; possible_net_t skc_net; struct in6_addr skc_v6_daddr; struct in6_addr skc_v6_rcv_saddr; atomic64_t skc_cookie; union { unsigned long skc_flags; struct sock *skc_listener; struct inet_timewait_death_row *skc_tw_dr; }; int skc_dontcopy_begin[0]; union { struct hlist_node skc_node; struct hlist_nulls_node skc_nulls_node; }; unsigned short skc_tx_queue_mapping; unsigned short skc_rx_queue_mapping; union { int skc_incoming_cpu; u32 skc_rcv_wnd; u32 skc_tw_rcv_nxt; }; refcount_t skc_refcnt; int skc_dontcopy_end[0]; union { u32 skc_rxhash; u32 skc_window_clamp; u32 skc_tw_snd_nxt; }; }; typedef struct { spinlock_t slock; int owned; wait_queue_head_t wq; } socket_lock_t; typedef u64 netdev_features_t; struct sock_cgroup_data { struct cgroup *cgroup; u16 prioidx; }; typedef struct {} netns_tracker; struct sk_filter; struct socket_wq; struct xfrm_policy; struct socket; struct sock_reuseport; struct sock { struct sock_common __sk_common; struct dst_entry __attribute__((btf_type_tag("rcu"))) *sk_rx_dst; int sk_rx_dst_ifindex; u32 sk_rx_dst_cookie; socket_lock_t sk_lock; atomic_t sk_drops; int sk_rcvlowat; struct sk_buff_head sk_error_queue; struct sk_buff_head sk_receive_queue; struct { atomic_t rmem_alloc; int len; struct sk_buff *head; struct sk_buff *tail; } sk_backlog; int sk_forward_alloc; u32 sk_reserved_mem; unsigned int sk_ll_usec; unsigned int sk_napi_id; int sk_rcvbuf; int sk_disconnects; struct sk_filter __attribute__((btf_type_tag("rcu"))) *sk_filter; union { struct socket_wq __attribute__((btf_type_tag("rcu"))) *sk_wq; struct socket_wq *sk_wq_raw; }; struct xfrm_policy __attribute__((btf_type_tag("rcu"))) *sk_policy[2]; struct dst_entry __attribute__((btf_type_tag("rcu"))) *sk_dst_cache; atomic_t sk_omem_alloc; int sk_sndbuf; int sk_wmem_queued; refcount_t sk_wmem_alloc; unsigned long sk_tsq_flags; union { struct sk_buff *sk_send_head; struct rb_root tcp_rtx_queue; }; struct sk_buff_head sk_write_queue; __s32 sk_peek_off; int sk_write_pending; __u32 sk_dst_pending_confirm; u32 sk_pacing_status; long sk_sndtimeo; struct timer_list sk_timer; __u32 sk_priority; __u32 sk_mark; unsigned long sk_pacing_rate; unsigned long sk_max_pacing_rate; struct page_frag sk_frag; netdev_features_t sk_route_caps; int sk_gso_type; unsigned int sk_gso_max_size; gfp_t sk_allocation; __u32 sk_txhash; u8 sk_gso_disabled: 1; u8 sk_kern_sock: 1; u8 sk_no_check_tx: 1; u8 sk_no_check_rx: 1; u8 sk_userlocks: 4; u8 sk_pacing_shift; u16 sk_type; u16 sk_protocol; u16 sk_gso_max_segs; unsigned long sk_lingertime; struct proto *sk_prot_creator; rwlock_t sk_callback_lock; int sk_err; int sk_err_soft; u32 sk_ack_backlog; u32 sk_max_ack_backlog; kuid_t sk_uid; u8 sk_txrehash; u8 sk_prefer_busy_poll; u16 sk_busy_poll_budget; spinlock_t sk_peer_lock; int sk_bind_phc; struct pid *sk_peer_pid; const struct cred *sk_peer_cred; long sk_rcvtimeo; ktime_t sk_stamp; atomic_t sk_tskey; atomic_t sk_zckey; u32 sk_tsflags; u8 sk_shutdown; u8 sk_clockid; u8 sk_txtime_deadline_mode: 1; u8 sk_txtime_report_errors: 1; u8 sk_txtime_unused: 6; bool sk_use_task_frag; struct socket *sk_socket; void *sk_user_data; void *sk_security; struct sock_cgroup_data sk_cgrp_data; struct mem_cgroup *sk_memcg; void (*sk_state_change)(struct sock *); void (*sk_data_ready)(struct sock *); void (*sk_write_space)(struct sock *); void (*sk_error_report)(struct sock *); int (*sk_backlog_rcv)(struct sock *, struct 
sk_buff *); void (*sk_destruct)(struct sock *); struct sock_reuseport __attribute__((btf_type_tag("rcu"))) *sk_reuseport_cb; struct bpf_local_storage __attribute__((btf_type_tag("rcu"))) *sk_bpf_storage; struct callback_head sk_rcu; netns_tracker ns_tracker; struct hlist_node sk_bind2_node; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; u64 android_kabi_reserved5; u64 android_kabi_reserved6; u64 android_kabi_reserved7; u64 android_kabi_reserved8; u64 android_oem_data1; }; struct smc_hashinfo; typedef struct { union { void *kernel; void __attribute__((btf_type_tag("user"))) *user; }; bool is_kernel: 1; } sockptr_t; typedef unsigned int slab_flags_t; struct sockaddr; struct msghdr; struct sk_psock; struct request_sock_ops; struct timewait_sock_ops; struct raw_hashinfo; struct proto { void (*close)(struct sock *, long); int (*pre_connect)(struct sock *, struct sockaddr *, int); int (*connect)(struct sock *, struct sockaddr *, int); int (*disconnect)(struct sock *, int); struct sock * (*accept)(struct sock *, int, int *, bool); int (*ioctl)(struct sock *, int, int *); int (*init)(struct sock *); void (*destroy)(struct sock *); void (*shutdown)(struct sock *, int); int (*setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); int (*getsockopt)(struct sock *, int, int, char __attribute__((btf_type_tag("user"))) *, int __attribute__((btf_type_tag("user"))) *); void (*keepalive)(struct sock *, int); int (*compat_ioctl)(struct sock *, unsigned int, unsigned long); int (*sendmsg)(struct sock *, struct msghdr *, size_t); int (*recvmsg)(struct sock *, struct msghdr *, size_t, int, int *); void (*splice_eof)(struct socket *); int (*bind)(struct sock *, struct sockaddr *, int); int (*bind_add)(struct sock *, struct sockaddr *, int); int (*backlog_rcv)(struct sock *, struct sk_buff *); bool (*bpf_bypass_getsockopt)(int, int); void (*release_cb)(struct sock *); int (*hash)(struct sock *); void (*unhash)(struct sock *); void (*rehash)(struct sock *); int (*get_port)(struct sock *, unsigned short); void (*put_port)(struct sock *); int (*psock_update_sk_prot)(struct sock *, struct sk_psock *, bool); unsigned int inuse_idx; bool (*stream_memory_free)(const struct sock *, int); bool (*sock_is_readable)(struct sock *); void (*enter_memory_pressure)(struct sock *); void (*leave_memory_pressure)(struct sock *); atomic_long_t *memory_allocated; int __attribute__((btf_type_tag("percpu"))) *per_cpu_fw_alloc; struct percpu_counter *sockets_allocated; unsigned long *memory_pressure; long *sysctl_mem; int *sysctl_wmem; int *sysctl_rmem; u32 sysctl_wmem_offset; u32 sysctl_rmem_offset; int max_header; bool no_autobind; struct kmem_cache *slab; unsigned int obj_size; unsigned int ipv6_pinfo_offset; slab_flags_t slab_flags; unsigned int useroffset; unsigned int usersize; unsigned int __attribute__((btf_type_tag("percpu"))) *orphan_count; struct request_sock_ops *rsk_prot; struct timewait_sock_ops *twsk_prot; union { struct inet_hashinfo *hashinfo; struct udp_table *udp_table; struct raw_hashinfo *raw_hash; struct smc_hashinfo *smc_hash; } h; struct module *owner; char name[32]; struct list_head node; int (*diag_destroy)(struct sock *, int); }; typedef unsigned short __kernel_sa_family_t; typedef __kernel_sa_family_t sa_family_t; struct sockaddr { sa_family_t sa_family; union { char sa_data_min[14]; struct { struct {} __empty_sa_data; char sa_data[0]; }; }; }; struct ubuf_info; struct msghdr { void *msg_name; int msg_namelen; int msg_inq; struct 
iov_iter msg_iter; union { void *msg_control; void __attribute__((btf_type_tag("user"))) *msg_control_user; }; bool msg_control_is_user: 1; bool msg_get_inq: 1; unsigned int msg_flags; __kernel_size_t msg_controllen; struct kiocb *msg_iocb; struct ubuf_info *msg_ubuf; int (*sg_from_iter)(struct sock *, struct sk_buff *, struct iov_iter *, size_t); }; struct ubuf_info { void (*callback)(struct sk_buff *, struct ubuf_info *, bool); refcount_t refcnt; u8 flags; }; typedef u32 xdp_features_t; struct net_device_stats { union { unsigned long rx_packets; atomic_long_t __rx_packets; }; union { unsigned long tx_packets; atomic_long_t __tx_packets; }; union { unsigned long rx_bytes; atomic_long_t __rx_bytes; }; union { unsigned long tx_bytes; atomic_long_t __tx_bytes; }; union { unsigned long rx_errors; atomic_long_t __rx_errors; }; union { unsigned long tx_errors; atomic_long_t __tx_errors; }; union { unsigned long rx_dropped; atomic_long_t __rx_dropped; }; union { unsigned long tx_dropped; atomic_long_t __tx_dropped; }; union { unsigned long multicast; atomic_long_t __multicast; }; union { unsigned long collisions; atomic_long_t __collisions; }; union { unsigned long rx_length_errors; atomic_long_t __rx_length_errors; }; union { unsigned long rx_over_errors; atomic_long_t __rx_over_errors; }; union { unsigned long rx_crc_errors; atomic_long_t __rx_crc_errors; }; union { unsigned long rx_frame_errors; atomic_long_t __rx_frame_errors; }; union { unsigned long rx_fifo_errors; atomic_long_t __rx_fifo_errors; }; union { unsigned long rx_missed_errors; atomic_long_t __rx_missed_errors; }; union { unsigned long tx_aborted_errors; atomic_long_t __tx_aborted_errors; }; union { unsigned long tx_carrier_errors; atomic_long_t __tx_carrier_errors; }; union { unsigned long tx_fifo_errors; atomic_long_t __tx_fifo_errors; }; union { unsigned long tx_heartbeat_errors; atomic_long_t __tx_heartbeat_errors; }; union { unsigned long tx_window_errors; atomic_long_t __tx_window_errors; }; union { unsigned long rx_compressed; atomic_long_t __rx_compressed; }; union { unsigned long tx_compressed; atomic_long_t __tx_compressed; }; }; struct netdev_hw_addr_list { struct list_head list; int count; struct rb_root tree; }; struct tipc_bearer; enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3, }; typedef enum rx_handler_result rx_handler_result_t; typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **); enum netdev_ml_priv_type { ML_PRIV_NONE = 0, ML_PRIV_CAN = 1, }; enum netdev_stat_type { NETDEV_PCPU_STAT_NONE = 0, NETDEV_PCPU_STAT_LSTATS = 1, NETDEV_PCPU_STAT_TSTATS = 2, NETDEV_PCPU_STAT_DSTATS = 3, }; struct netdev_tc_txq { u16 count; u16 offset; }; struct sfp_bus; struct bpf_xdp_link; struct bpf_xdp_entity { struct bpf_prog *prog; struct bpf_xdp_link *link; }; struct netdev_name_node; struct dev_ifalias; struct net_device_ops; struct xdp_metadata_ops; struct net_device_core_stats; struct iw_handler_def; struct iw_public_data; struct ethtool_ops; struct ndisc_ops; struct header_ops; struct in_device; struct inet6_dev; struct vlan_info; struct wireless_dev; struct wpan_dev; struct netdev_rx_queue; struct bpf_mprog_entry; struct netdev_queue; struct cpu_rmap; struct Qdisc; struct xdp_dev_bulk_queue; struct xps_dev_maps; struct pcpu_lstats; struct pcpu_sw_netstats; struct pcpu_dstats; struct rtnl_link_ops; struct netprio_map; struct phy_device; struct macsec_ops; struct udp_tunnel_nic_info; struct udp_tunnel_nic; struct rtnl_hw_stats64; struct 
devlink_port; struct net_device { char name[16]; struct netdev_name_node *name_node; struct dev_ifalias __attribute__((btf_type_tag("rcu"))) *ifalias; unsigned long mem_end; unsigned long mem_start; unsigned long base_addr; unsigned long state; struct list_head dev_list; struct list_head napi_list; struct list_head unreg_list; struct list_head close_list; struct list_head ptype_all; struct list_head ptype_specific; struct { struct list_head upper; struct list_head lower; } adj_list; unsigned int flags; xdp_features_t xdp_features; unsigned long long priv_flags; const struct net_device_ops *netdev_ops; const struct xdp_metadata_ops *xdp_metadata_ops; int ifindex; unsigned short gflags; unsigned short hard_header_len; unsigned int mtu; unsigned short needed_headroom; unsigned short needed_tailroom; netdev_features_t features; netdev_features_t hw_features; netdev_features_t wanted_features; netdev_features_t vlan_features; netdev_features_t hw_enc_features; netdev_features_t mpls_features; netdev_features_t gso_partial_features; unsigned int min_mtu; unsigned int max_mtu; unsigned short type; unsigned char min_header_len; unsigned char name_assign_type; int group; struct net_device_stats stats; struct net_device_core_stats __attribute__((btf_type_tag("percpu"))) *core_stats; atomic_t carrier_up_count; atomic_t carrier_down_count; const struct iw_handler_def *wireless_handlers; struct iw_public_data *wireless_data; const struct ethtool_ops *ethtool_ops; const struct ndisc_ops *ndisc_ops; const struct header_ops *header_ops; unsigned char operstate; unsigned char link_mode; unsigned char if_port; unsigned char dma; unsigned char perm_addr[32]; unsigned char addr_assign_type; unsigned char addr_len; unsigned char upper_level; unsigned char lower_level; unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; unsigned short padded; spinlock_t addr_list_lock; int irq; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; struct netdev_hw_addr_list dev_addrs; struct kset *queues_kset; unsigned int promiscuity; unsigned int allmulti; bool uc_promisc; struct in_device __attribute__((btf_type_tag("rcu"))) *ip_ptr; struct inet6_dev __attribute__((btf_type_tag("rcu"))) *ip6_ptr; struct vlan_info __attribute__((btf_type_tag("rcu"))) *vlan_info; struct tipc_bearer __attribute__((btf_type_tag("rcu"))) *tipc_ptr; struct wireless_dev *ieee80211_ptr; struct wpan_dev *ieee802154_ptr; const unsigned char *dev_addr; struct netdev_rx_queue *_rx; unsigned int num_rx_queues; unsigned int real_num_rx_queues; struct bpf_prog __attribute__((btf_type_tag("rcu"))) *xdp_prog; unsigned long gro_flush_timeout; int napi_defer_hard_irqs; unsigned int gro_max_size; unsigned int gro_ipv4_max_size; unsigned int xdp_zc_max_segs; rx_handler_func_t __attribute__((btf_type_tag("rcu"))) *rx_handler; void __attribute__((btf_type_tag("rcu"))) *rx_handler_data; struct bpf_mprog_entry __attribute__((btf_type_tag("rcu"))) *tcx_ingress; struct netdev_queue __attribute__((btf_type_tag("rcu"))) *ingress_queue; struct nf_hook_entries __attribute__((btf_type_tag("rcu"))) *nf_hooks_ingress; unsigned char broadcast[32]; struct cpu_rmap *rx_cpu_rmap; struct hlist_node index_hlist; long: 64; long: 64; long: 64; long: 64; struct netdev_queue *_tx; unsigned int num_tx_queues; unsigned int real_num_tx_queues; struct Qdisc __attribute__((btf_type_tag("rcu"))) *qdisc; unsigned int tx_queue_len; spinlock_t tx_global_lock; struct xdp_dev_bulk_queue __attribute__((btf_type_tag("percpu"))) *xdp_bulkq; struct xps_dev_maps 
__attribute__((btf_type_tag("rcu"))) *xps_maps[2]; struct bpf_mprog_entry __attribute__((btf_type_tag("rcu"))) *tcx_egress; struct nf_hook_entries __attribute__((btf_type_tag("rcu"))) *nf_hooks_egress; struct hlist_head qdisc_hash[16]; struct timer_list watchdog_timer; int watchdog_timeo; u32 proto_down_reason; struct list_head todo_list; int __attribute__((btf_type_tag("percpu"))) *pcpu_refcnt; struct ref_tracker_dir refcnt_tracker; struct list_head link_watch_list; enum { NETREG_UNINITIALIZED = 0, NETREG_REGISTERED = 1, NETREG_UNREGISTERING = 2, NETREG_UNREGISTERED = 3, NETREG_RELEASED = 4, NETREG_DUMMY = 5, } reg_state: 8; bool dismantle; enum { RTNL_LINK_INITIALIZED = 0, RTNL_LINK_INITIALIZING = 1, } rtnl_link_state: 16; bool needs_free_netdev; void (*priv_destructor)(struct net_device *); possible_net_t nd_net; void *ml_priv; enum netdev_ml_priv_type ml_priv_type; enum netdev_stat_type pcpu_stat_type: 8; union { struct pcpu_lstats __attribute__((btf_type_tag("percpu"))) *lstats; struct pcpu_sw_netstats __attribute__((btf_type_tag("percpu"))) *tstats; struct pcpu_dstats __attribute__((btf_type_tag("percpu"))) *dstats; }; struct device dev; const struct attribute_group *sysfs_groups[4]; const struct attribute_group *sysfs_rx_queue_group; const struct rtnl_link_ops *rtnl_link_ops; unsigned int gso_max_size; unsigned int tso_max_size; u16 gso_max_segs; u16 tso_max_segs; unsigned int gso_ipv4_max_size; s16 num_tc; struct netdev_tc_txq tc_to_txq[16]; u8 prio_tc_map[16]; struct netprio_map __attribute__((btf_type_tag("rcu"))) *priomap; struct phy_device *phydev; struct sfp_bus *sfp_bus; struct lock_class_key *qdisc_tx_busylock; bool proto_down; unsigned int wol_enabled: 1; unsigned int threaded: 1; struct list_head net_notifier_list; const struct macsec_ops *macsec_ops; const struct udp_tunnel_nic_info *udp_tunnel_nic_info; struct udp_tunnel_nic *udp_tunnel_nic; struct bpf_xdp_entity xdp_state[3]; u8 dev_addr_shadow[32]; netdevice_tracker linkwatch_dev_tracker; netdevice_tracker watchdog_dev_tracker; netdevice_tracker dev_registered_tracker; struct rtnl_hw_stats64 *offload_xstats_l3; struct devlink_port *devlink_port; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; u64 android_kabi_reserved5; u64 android_kabi_reserved6; u64 android_kabi_reserved7; u64 android_kabi_reserved8; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct dev_ifalias { struct callback_head rcuhead; char ifalias[0]; }; enum netdev_tx { __NETDEV_TX_MIN = -2147483648, NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16, }; typedef enum netdev_tx netdev_tx_t; enum tc_setup_type { TC_QUERY_CAPS = 0, TC_SETUP_QDISC_MQPRIO = 1, TC_SETUP_CLSU32 = 2, TC_SETUP_CLSFLOWER = 3, TC_SETUP_CLSMATCHALL = 4, TC_SETUP_CLSBPF = 5, TC_SETUP_BLOCK = 6, TC_SETUP_QDISC_CBS = 7, TC_SETUP_QDISC_RED = 8, TC_SETUP_QDISC_PRIO = 9, TC_SETUP_QDISC_MQ = 10, TC_SETUP_QDISC_ETF = 11, TC_SETUP_ROOT_QDISC = 12, TC_SETUP_QDISC_GRED = 13, TC_SETUP_QDISC_TAPRIO = 14, TC_SETUP_FT = 15, TC_SETUP_QDISC_ETS = 16, TC_SETUP_QDISC_TBF = 17, TC_SETUP_QDISC_FIFO = 18, TC_SETUP_QDISC_HTB = 19, TC_SETUP_ACT = 20, }; struct ifreq; struct if_settings; struct ifmap; struct neigh_parms; struct rtnl_link_stats64; struct ifla_vf_info; struct ifla_vf_stats; struct nlattr; struct ifla_vf_guid; struct netlink_ext_ack; struct ndmsg; struct netlink_callback; struct nlmsghdr; struct netdev_phys_item_id; struct netdev_bpf; struct xdp_frame; struct xdp_buff; struct ip_tunnel_parm; struct net_device_path_ctx; struct 
net_device_path; struct skb_shared_hwtstamps; struct kernel_hwtstamp_config; struct net_device_ops { int (*ndo_init)(struct net_device *); void (*ndo_uninit)(struct net_device *); int (*ndo_open)(struct net_device *); int (*ndo_stop)(struct net_device *); netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t); u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, struct net_device *); void (*ndo_change_rx_flags)(struct net_device *, int); void (*ndo_set_rx_mode)(struct net_device *); int (*ndo_set_mac_address)(struct net_device *, void *); int (*ndo_validate_addr)(struct net_device *); int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); int (*ndo_eth_ioctl)(struct net_device *, struct ifreq *, int); int (*ndo_siocbond)(struct net_device *, struct ifreq *, int); int (*ndo_siocwandev)(struct net_device *, struct if_settings *); int (*ndo_siocdevprivate)(struct net_device *, struct ifreq *, void __attribute__((btf_type_tag("user"))) *, int); int (*ndo_set_config)(struct net_device *, struct ifmap *); int (*ndo_change_mtu)(struct net_device *, int); int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); void (*ndo_tx_timeout)(struct net_device *, unsigned int); void (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); bool (*ndo_has_offload_stats)(const struct net_device *, int); int (*ndo_get_offload_stats)(int, const struct net_device *, void *); struct net_device_stats * (*ndo_get_stats)(struct net_device *); int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16, u16); int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16, u16); int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); int (*ndo_set_vf_vlan)(struct net_device *, int, u16, u8, __be16); int (*ndo_set_vf_rate)(struct net_device *, int, int, int); int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool); int (*ndo_set_vf_trust)(struct net_device *, int, bool); int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); int (*ndo_set_vf_link_state)(struct net_device *, int, int); int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *); int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); int (*ndo_get_vf_guid)(struct net_device *, int, struct ifla_vf_guid *, struct ifla_vf_guid *); int (*ndo_set_vf_guid)(struct net_device *, int, u64, int); int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool); int (*ndo_setup_tc)(struct net_device *, enum tc_setup_type, void *); int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16, u32); int (*ndo_add_slave)(struct net_device *, struct net_device *, struct netlink_ext_ack *); int (*ndo_del_slave)(struct net_device *, struct net_device *); struct net_device * (*ndo_get_xmit_slave)(struct net_device *, struct sk_buff *, bool); struct net_device * (*ndo_sk_get_lower_dev)(struct net_device *, struct sock *); netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t); int (*ndo_set_features)(struct net_device *, netdev_features_t); int (*ndo_neigh_construct)(struct net_device *, struct neighbour *); void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *); int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16, u16, struct netlink_ext_ack *); int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const 
unsigned char *, u16, struct netlink_ext_ack *); int (*ndo_fdb_del_bulk)(struct ndmsg *, struct nlattr **, struct net_device *, u16, struct netlink_ext_ack *); int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int *); int (*ndo_fdb_get)(struct sk_buff *, struct nlattr **, struct net_device *, const unsigned char *, u16, u32, u32, struct netlink_ext_ack *); int (*ndo_mdb_add)(struct net_device *, struct nlattr **, u16, struct netlink_ext_ack *); int (*ndo_mdb_del)(struct net_device *, struct nlattr **, struct netlink_ext_ack *); int (*ndo_mdb_dump)(struct net_device *, struct sk_buff *, struct netlink_callback *); int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16, struct netlink_ext_ack *); int (*ndo_bridge_getlink)(struct sk_buff *, u32, u32, struct net_device *, u32, int); int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16); int (*ndo_change_carrier)(struct net_device *, bool); int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *); int (*ndo_get_port_parent_id)(struct net_device *, struct netdev_phys_item_id *); int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t); void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); void (*ndo_dfwd_del_station)(struct net_device *, void *); int (*ndo_set_tx_maxrate)(struct net_device *, int, u32); int (*ndo_get_iflink)(const struct net_device *); int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *); void (*ndo_set_rx_headroom)(struct net_device *, int); int (*ndo_bpf)(struct net_device *, struct netdev_bpf *); int (*ndo_xdp_xmit)(struct net_device *, int, struct xdp_frame **, u32); struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *, struct xdp_buff *); int (*ndo_xsk_wakeup)(struct net_device *, u32, u32); int (*ndo_tunnel_ctl)(struct net_device *, struct ip_tunnel_parm *, int); struct net_device * (*ndo_get_peer_dev)(struct net_device *); int (*ndo_fill_forward_path)(struct net_device_path_ctx *, struct net_device_path *); ktime_t (*ndo_get_tstamp)(struct net_device *, const struct skb_shared_hwtstamps *, bool); int (*ndo_hwtstamp_get)(struct net_device *, struct kernel_hwtstamp_config *); int (*ndo_hwtstamp_set)(struct net_device *, struct kernel_hwtstamp_config *, struct netlink_ext_ack *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; u64 android_kabi_reserved5; u64 android_kabi_reserved6; u64 android_kabi_reserved7; u64 android_kabi_reserved8; }; struct ifmap { unsigned long mem_start; unsigned long mem_end; unsigned short base_addr; unsigned char irq; unsigned char dma; unsigned char port; }; typedef struct { unsigned short encoding; unsigned short parity; } raw_hdlc_proto; typedef struct { unsigned int interval; unsigned int timeout; } cisco_proto; typedef struct { unsigned int t391; unsigned int t392; unsigned int n391; unsigned int n392; unsigned int n393; unsigned short lmi; unsigned short dce; } fr_proto; typedef struct { unsigned int dlci; } fr_proto_pvc; typedef struct { unsigned int dlci; char master[16]; } fr_proto_pvc_info; typedef struct { unsigned short dce; unsigned int modulo; unsigned int window; unsigned int t1; unsigned int t2; unsigned int n2; } x25_hdlc_proto; typedef struct { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; } sync_serial_settings; typedef struct { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; unsigned int 
slot_map; } te1_settings; struct if_settings { unsigned int type; unsigned int size; union { raw_hdlc_proto __attribute__((btf_type_tag("user"))) *raw_hdlc; cisco_proto __attribute__((btf_type_tag("user"))) *cisco; fr_proto __attribute__((btf_type_tag("user"))) *fr; fr_proto_pvc __attribute__((btf_type_tag("user"))) *fr_pvc; fr_proto_pvc_info __attribute__((btf_type_tag("user"))) *fr_pvc_info; x25_hdlc_proto __attribute__((btf_type_tag("user"))) *x25; sync_serial_settings __attribute__((btf_type_tag("user"))) *sync; te1_settings __attribute__((btf_type_tag("user"))) *te1; } ifs_ifsu; }; struct ifreq { union { char ifrn_name[16]; } ifr_ifrn; union { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct sockaddr ifru_netmask; struct sockaddr ifru_hwaddr; short ifru_flags; int ifru_ivalue; int ifru_mtu; struct ifmap ifru_map; char ifru_slave[16]; char ifru_newname[16]; void __attribute__((btf_type_tag("user"))) *ifru_data; struct if_settings ifru_settings; } ifr_ifru; }; struct neigh_table; struct neigh_parms { possible_net_t net; struct net_device *dev; netdevice_tracker dev_tracker; struct list_head list; int (*neigh_setup)(struct neighbour *); struct neigh_table *tbl; void *sysctl_table; int dead; refcount_t refcnt; struct callback_head callback_head; int reachable_time; u32 qlen; int data[14]; unsigned long data_state[1]; u64 android_kabi_reserved1; }; struct fib_rule; struct flowi; struct fib_lookup_arg; struct fib_rule_hdr; struct fib_rules_ops { int family; struct list_head list; int rule_size; int addr_size; int unresolved_rules; int nr_goto_rules; unsigned int fib_rules_seq; int (*action)(struct fib_rule *, struct flowi *, int, struct fib_lookup_arg *); bool (*suppress)(struct fib_rule *, int, struct fib_lookup_arg *); int (*match)(struct fib_rule *, struct flowi *, int); int (*configure)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *, struct nlattr **, struct netlink_ext_ack *); int (*delete)(struct fib_rule *); int (*compare)(struct fib_rule *, struct fib_rule_hdr *, struct nlattr **); int (*fill)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *); size_t (*nlmsg_payload)(struct fib_rule *); void (*flush_cache)(struct fib_rules_ops *); int nlgroup; struct list_head rules_list; struct module *owner; struct net *fro_net; struct callback_head rcu; }; typedef __u64 __be64; struct fib_kuid_range { kuid_t start; kuid_t end; }; struct fib_rule_port_range { __u16 start; __u16 end; }; struct fib_rule { struct list_head list; int iifindex; int oifindex; u32 mark; u32 mark_mask; u32 flags; u32 table; u8 action; u8 l3mdev; u8 proto; u8 ip_proto; u32 target; __be64 tun_id; struct fib_rule __attribute__((btf_type_tag("rcu"))) *ctarget; struct net *fr_net; refcount_t refcnt; u32 pref; int suppress_ifgroup; int suppress_prefixlen; char iifname[16]; char oifname[16]; struct fib_kuid_range uid_range; struct fib_rule_port_range sport_range; struct fib_rule_port_range dport_range; struct callback_head rcu; }; struct flowi_tunnel { __be64 tun_id; }; struct flowi_common { int flowic_oif; int flowic_iif; int flowic_l3mdev; __u32 flowic_mark; __u8 flowic_tos; __u8 flowic_scope; __u8 flowic_proto; __u8 flowic_flags; __u32 flowic_secid; kuid_t flowic_uid; __u32 flowic_multipath_hash; struct flowi_tunnel flowic_tun_key; }; union flowi_uli { struct { __be16 dport; __be16 sport; } ports; struct { __u8 type; __u8 code; } icmpt; __be32 gre_key; struct { __u8 type; } mht; }; struct flowi4 { struct flowi_common __fl_common; __be32 saddr; __be32 daddr; 
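/*
 * Illustrative sketch (not part of the generated dump): with the
 * preserve_access_index attribute applied to these records, the flow/FIB
 * structs here can be read from tracing BPF programs via CO-RE relocations.
 * Assuming libbpf's bpf_helpers.h, bpf_tracing.h and bpf_core_read.h are
 * included, and that fib_table_lookup() keeps its usual (tb, flp, res, flags)
 * argument order, a kprobe could do:
 *
 *     SEC("kprobe/fib_table_lookup")
 *     int BPF_KPROBE(trace_fib_lookup, struct fib_table *tb, const struct flowi4 *flp)
 *     {
 *             __be32 dst = BPF_CORE_READ(flp, daddr);
 *             bpf_printk("fib lookup daddr=%x", dst);
 *             return 0;
 *     }
 */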
union flowi_uli uli; }; struct flowi6 { struct flowi_common __fl_common; struct in6_addr daddr; struct in6_addr saddr; __be32 flowlabel; union flowi_uli uli; __u32 mp_hash; }; struct flowi { union { struct flowi_common __fl_common; struct flowi4 ip4; struct flowi6 ip6; } u; }; struct fib_lookup_arg { void *lookup_ptr; const void *lookup_data; void *result; struct fib_rule *rule; u32 table; int flags; }; struct fib_rule_hdr { __u8 family; __u8 dst_len; __u8 src_len; __u8 tos; __u8 table; __u8 res1; __u8 res2; __u8 action; __u32 flags; }; struct nlattr { __u16 nla_len; __u16 nla_type; }; struct nla_policy; struct netlink_ext_ack { const char *_msg; const struct nlattr *bad_attr; const struct nla_policy *policy; const struct nlattr *miss_nest; u16 miss_type; u8 cookie[20]; u8 cookie_len; char _msg_buf[80]; }; struct netlink_range_validation; struct netlink_range_validation_signed; struct nla_policy { u8 type; u8 validation_type; u16 len; union { u16 strict_start_type; const u32 bitfield32_valid; const u32 mask; const char *reject_message; const struct nla_policy *nested_policy; struct netlink_range_validation *range; struct netlink_range_validation_signed *range_signed; struct { s16 min; s16 max; }; int (*validate)(const struct nlattr *, struct netlink_ext_ack *); }; }; struct netlink_range_validation { u64 min; u64 max; }; struct netlink_range_validation_signed { s64 min; s64 max; }; struct fib_notifier_ops { int family; struct list_head list; unsigned int (*fib_seq_read)(struct net *); int (*fib_dump)(struct net *, struct notifier_block *, struct netlink_ext_ack *); struct module *owner; struct callback_head rcu; }; struct hh_cache { unsigned int hh_len; seqlock_t hh_lock; unsigned long hh_data[12]; }; struct neigh_ops; struct neighbour { struct neighbour __attribute__((btf_type_tag("rcu"))) *next; struct neigh_table *tbl; struct neigh_parms *parms; unsigned long confirmed; unsigned long updated; rwlock_t lock; refcount_t refcnt; unsigned int arp_queue_len_bytes; struct sk_buff_head arp_queue; struct timer_list timer; unsigned long used; atomic_t probes; u8 nud_state; u8 type; u8 dead; u8 protocol; u32 flags; seqlock_t ha_lock; long: 0; unsigned char ha[32]; struct hh_cache hh; int (*output)(struct neighbour *, struct sk_buff *); const struct neigh_ops *ops; struct list_head gc_list; struct list_head managed_list; struct callback_head rcu; struct net_device *dev; netdevice_tracker dev_tracker; u64 android_kabi_reserved1; u8 primary_key[0]; }; struct pneigh_entry; struct neigh_statistics; struct neigh_hash_table; struct neigh_table { int family; unsigned int entry_size; unsigned int key_len; __be16 protocol; __u32 (*hash)(const void *, const struct net_device *, __u32 *); bool (*key_eq)(const struct neighbour *, const void *); int (*constructor)(struct neighbour *); int (*pconstructor)(struct pneigh_entry *); void (*pdestructor)(struct pneigh_entry *); void (*proxy_redo)(struct sk_buff *); int (*is_multicast)(const void *); bool (*allow_add)(const struct net_device *, struct netlink_ext_ack *); char *id; struct neigh_parms parms; struct list_head parms_list; int gc_interval; int gc_thresh1; int gc_thresh2; int gc_thresh3; unsigned long last_flush; struct delayed_work gc_work; struct delayed_work managed_work; struct timer_list proxy_timer; struct sk_buff_head proxy_queue; atomic_t entries; atomic_t gc_entries; struct list_head gc_list; struct list_head managed_list; rwlock_t lock; unsigned long last_rand; struct neigh_statistics __attribute__((btf_type_tag("percpu"))) *stats; struct 
neigh_hash_table __attribute__((btf_type_tag("rcu"))) *nht; struct pneigh_entry **phash_buckets; u64 android_kabi_reserved1; }; struct pneigh_entry { struct pneigh_entry *next; possible_net_t net; struct net_device *dev; netdevice_tracker dev_tracker; u32 flags; u8 protocol; u32 key[0]; }; struct neigh_statistics { unsigned long allocs; unsigned long destroys; unsigned long hash_grows; unsigned long res_failed; unsigned long lookups; unsigned long hits; unsigned long rcv_probes_mcast; unsigned long rcv_probes_ucast; unsigned long periodic_gc_runs; unsigned long forced_gc_runs; unsigned long unres_discards; unsigned long table_fulls; }; struct neigh_hash_table { struct neighbour __attribute__((btf_type_tag("rcu"))) **hash_buckets; unsigned int hash_shift; __u32 hash_rnd[4]; struct callback_head rcu; }; struct neigh_ops { int family; void (*solicit)(struct neighbour *, struct sk_buff *); void (*error_report)(struct neighbour *, struct sk_buff *); int (*output)(struct neighbour *, struct sk_buff *); int (*connected_output)(struct neighbour *, struct sk_buff *); }; struct ipv6_stable_secret { bool initialized; struct in6_addr secret; }; struct ipv6_devconf { __s32 forwarding; __s32 hop_limit; __s32 mtu6; __s32 accept_ra; __s32 accept_redirects; __s32 autoconf; __s32 dad_transmits; __s32 rtr_solicits; __s32 rtr_solicit_interval; __s32 rtr_solicit_max_interval; __s32 rtr_solicit_delay; __s32 force_mld_version; __s32 mldv1_unsolicited_report_interval; __s32 mldv2_unsolicited_report_interval; __s32 use_tempaddr; __s32 temp_valid_lft; __s32 temp_prefered_lft; __s32 regen_max_retry; __s32 max_desync_factor; __s32 max_addresses; __s32 accept_ra_defrtr; __u32 ra_defrtr_metric; __s32 accept_ra_min_hop_limit; __s32 accept_ra_min_lft; __s32 accept_ra_pinfo; __s32 ignore_routes_with_linkdown; __s32 accept_ra_rtr_pref; __s32 rtr_probe_interval; __s32 accept_ra_rt_info_min_plen; __s32 accept_ra_rt_info_max_plen; __s32 accept_ra_rt_table; __s32 proxy_ndp; __s32 accept_source_route; __s32 accept_ra_from_local; __s32 optimistic_dad; __s32 use_optimistic; atomic_t mc_forwarding; __s32 disable_ipv6; __s32 drop_unicast_in_l2_multicast; __s32 accept_dad; __s32 force_tllao; __s32 ndisc_notify; __s32 suppress_frag_ndisc; __s32 accept_ra_mtu; __s32 drop_unsolicited_na; __s32 accept_untracked_na; struct ipv6_stable_secret stable_secret; __s32 use_oif_addrs_only; __s32 keep_addr_on_down; __s32 seg6_enabled; __u32 enhanced_dad; __u32 addr_gen_mode; __s32 disable_policy; __s32 ndisc_tclass; __s32 rpl_seg_enabled; __u32 ioam6_id; __u32 ioam6_id_wide; __u8 ioam6_enabled; __u8 ndisc_evict_nocarrier; struct ctl_table_header *sysctl_header; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct rtnl_link_stats64 { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 tx_dropped; __u64 multicast; __u64 collisions; __u64 rx_length_errors; __u64 rx_over_errors; __u64 rx_crc_errors; __u64 rx_frame_errors; __u64 rx_fifo_errors; __u64 rx_missed_errors; __u64 tx_aborted_errors; __u64 tx_carrier_errors; __u64 tx_fifo_errors; __u64 tx_heartbeat_errors; __u64 tx_window_errors; __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; __u64 rx_otherhost_dropped; }; struct ifla_vf_info { __u32 vf; __u8 mac[32]; __u32 vlan; __u32 qos; __u32 spoofchk; __u32 linkstate; __u32 min_tx_rate; __u32 max_tx_rate; __u32 rss_query_en; __u32 trusted; __be16 vlan_proto; }; struct ifla_vf_stats { __u64 
rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 broadcast; __u64 multicast; __u64 rx_dropped; __u64 tx_dropped; }; struct ifla_vf_guid { __u32 vf; __u64 guid; }; struct ndmsg { __u8 ndm_family; __u8 ndm_pad1; __u16 ndm_pad2; __s32 ndm_ifindex; __u16 ndm_state; __u8 ndm_flags; __u8 ndm_type; }; struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); void *data; struct module *module; struct netlink_ext_ack *extack; u16 family; u16 answer_flags; u32 min_dump_alloc; unsigned int prev_seq; unsigned int seq; bool strict_check; union { u8 ctx[48]; long args[6]; }; }; struct nlmsghdr { __u32 nlmsg_len; __u16 nlmsg_type; __u16 nlmsg_flags; __u32 nlmsg_seq; __u32 nlmsg_pid; }; struct netdev_phys_item_id { unsigned char id[32]; unsigned char id_len; }; enum bpf_netdev_command { XDP_SETUP_PROG = 0, XDP_SETUP_PROG_HW = 1, BPF_OFFLOAD_MAP_ALLOC = 2, BPF_OFFLOAD_MAP_FREE = 3, XDP_SETUP_XSK_POOL = 4, }; struct bpf_offloaded_map; struct xsk_buff_pool; struct netdev_bpf { enum bpf_netdev_command command; union { struct { u32 flags; struct bpf_prog *prog; struct netlink_ext_ack *extack; }; struct { struct bpf_offloaded_map *offmap; }; struct { struct xsk_buff_pool *pool; u16 queue_id; } xsk; }; }; struct net_device_path_ctx { const struct net_device *dev; u8 daddr[6]; int num_vlans; struct { u16 id; __be16 proto; } vlan[2]; }; enum net_device_path_type { DEV_PATH_ETHERNET = 0, DEV_PATH_VLAN = 1, DEV_PATH_BRIDGE = 2, DEV_PATH_PPPOE = 3, DEV_PATH_DSA = 4, DEV_PATH_MTK_WDMA = 5, }; struct net_device_path { enum net_device_path_type type; const struct net_device *dev; union { struct { u16 id; __be16 proto; u8 h_dest[6]; } encap; struct { enum { DEV_PATH_BR_VLAN_KEEP = 0, DEV_PATH_BR_VLAN_TAG = 1, DEV_PATH_BR_VLAN_UNTAG = 2, DEV_PATH_BR_VLAN_UNTAG_HW = 3, } vlan_mode; u16 vlan_id; __be16 vlan_proto; } bridge; struct { int port; u16 proto; } dsa; struct { u8 wdma_idx; u8 queue; u16 wcid; u8 bss; } mtk_wdma; }; }; struct skb_shared_hwtstamps { union { ktime_t hwtstamp; void *netdev_data; }; }; enum hwtstamp_source { HWTSTAMP_SOURCE_NETDEV = 0, HWTSTAMP_SOURCE_PHYLIB = 1, }; struct kernel_hwtstamp_config { int flags; int tx_type; int rx_filter; struct ifreq *ifr; bool copied_to_user; enum hwtstamp_source source; }; enum xdp_rss_hash_type { XDP_RSS_L3_IPV4 = 1, XDP_RSS_L3_IPV6 = 2, XDP_RSS_L3_DYNHDR = 4, XDP_RSS_L4 = 8, XDP_RSS_L4_TCP = 16, XDP_RSS_L4_UDP = 32, XDP_RSS_L4_SCTP = 64, XDP_RSS_L4_IPSEC = 128, XDP_RSS_TYPE_NONE = 0, XDP_RSS_TYPE_L2 = 0, XDP_RSS_TYPE_L3_IPV4 = 1, XDP_RSS_TYPE_L3_IPV6 = 2, XDP_RSS_TYPE_L3_IPV4_OPT = 5, XDP_RSS_TYPE_L3_IPV6_EX = 6, XDP_RSS_TYPE_L4_ANY = 8, XDP_RSS_TYPE_L4_IPV4_TCP = 25, XDP_RSS_TYPE_L4_IPV4_UDP = 41, XDP_RSS_TYPE_L4_IPV4_SCTP = 73, XDP_RSS_TYPE_L4_IPV4_IPSEC = 137, XDP_RSS_TYPE_L4_IPV6_TCP = 26, XDP_RSS_TYPE_L4_IPV6_UDP = 42, XDP_RSS_TYPE_L4_IPV6_SCTP = 74, XDP_RSS_TYPE_L4_IPV6_IPSEC = 138, XDP_RSS_TYPE_L4_IPV6_TCP_EX = 30, XDP_RSS_TYPE_L4_IPV6_UDP_EX = 46, XDP_RSS_TYPE_L4_IPV6_SCTP_EX = 78, }; struct xdp_md; struct xdp_metadata_ops { int (*xmo_rx_timestamp)(const struct xdp_md *, u64 *); int (*xmo_rx_hash)(const struct xdp_md *, u32 *, enum xdp_rss_hash_type *); }; struct net_device_core_stats { unsigned long rx_dropped; unsigned long tx_dropped; unsigned long rx_nohandler; unsigned long rx_otherhost_dropped; }; struct iw_request_info; union iwreq_data; typedef int (*iw_handler)(struct net_device *, struct iw_request_info *, union 
iwreq_data *, char *); struct iw_priv_args; struct iw_statistics; struct iw_handler_def { const iw_handler *standard; __u16 num_standard; __u16 num_private; __u16 num_private_args; const iw_handler *private; const struct iw_priv_args *private_args; struct iw_statistics * (*get_wireless_stats)(struct net_device *); }; enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3, }; struct ethtool_drvinfo; struct ethtool_regs; struct ethtool_wolinfo; struct ethtool_link_ext_state_info; struct ethtool_link_ext_stats; struct ethtool_eeprom; struct ethtool_coalesce; struct kernel_ethtool_coalesce; struct ethtool_ringparam; struct kernel_ethtool_ringparam; struct ethtool_pause_stats; struct ethtool_pauseparam; struct ethtool_test; struct ethtool_stats; struct ethtool_rxnfc; struct ethtool_flash; struct ethtool_channels; struct ethtool_dump; struct ethtool_ts_info; struct ethtool_modinfo; struct ethtool_eee; struct ethtool_tunable; struct ethtool_link_ksettings; struct ethtool_fec_stats; struct ethtool_fecparam; struct ethtool_module_eeprom; struct ethtool_eth_phy_stats; struct ethtool_eth_mac_stats; struct ethtool_eth_ctrl_stats; struct ethtool_rmon_stats; struct ethtool_rmon_hist_range; struct ethtool_module_power_mode_params; struct ethtool_mm_state; struct ethtool_mm_cfg; struct ethtool_mm_stats; struct ethtool_ops { u32 cap_link_lanes_supported: 1; u32 supported_coalesce_params; u32 supported_ring_params; void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); u32 (*get_msglevel)(struct net_device *); void (*set_msglevel)(struct net_device *, u32); int (*nway_reset)(struct net_device *); u32 (*get_link)(struct net_device *); int (*get_link_ext_state)(struct net_device *, struct ethtool_link_ext_state_info *); void (*get_link_ext_stats)(struct net_device *, struct ethtool_link_ext_stats *); int (*get_eeprom_len)(struct net_device *); int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *, struct kernel_ethtool_coalesce *, struct netlink_ext_ack *); int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *, struct kernel_ethtool_coalesce *, struct netlink_ext_ack *); void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *, struct kernel_ethtool_ringparam *, struct netlink_ext_ack *); int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *, struct kernel_ethtool_ringparam *, struct netlink_ext_ack *); void (*get_pause_stats)(struct net_device *, struct ethtool_pause_stats *); void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); void (*get_strings)(struct net_device *, u32, u8 *); int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state); void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); int (*begin)(struct net_device *); void (*complete)(struct net_device *); u32 (*get_priv_flags)(struct net_device *); int (*set_priv_flags)(struct net_device *, u32); int (*get_sset_count)(struct net_device *, int); int 
(*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); int (*flash_device)(struct net_device *, struct ethtool_flash *); int (*reset)(struct net_device *, u32 *); u32 (*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); int (*get_rxfh)(struct net_device *, u32 *, u8 *, u8 *); int (*set_rxfh)(struct net_device *, const u32 *, const u8 *, const u8); int (*get_rxfh_context)(struct net_device *, u32 *, u8 *, u8 *, u32); int (*set_rxfh_context)(struct net_device *, const u32 *, const u8 *, const u8, u32 *, bool); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); int (*set_dump)(struct net_device *, struct ethtool_dump *); int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_eee)(struct net_device *, struct ethtool_eee *); int (*set_eee)(struct net_device *, struct ethtool_eee *); int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *); int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); int (*get_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); int (*set_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); void (*get_fec_stats)(struct net_device *, struct ethtool_fec_stats *); int (*get_fecparam)(struct net_device *, struct ethtool_fecparam *); int (*set_fecparam)(struct net_device *, struct ethtool_fecparam *); void (*get_ethtool_phy_stats)(struct net_device *, struct ethtool_stats *, u64 *); int (*get_phy_tunable)(struct net_device *, const struct ethtool_tunable *, void *); int (*set_phy_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); int (*get_module_eeprom_by_page)(struct net_device *, const struct ethtool_module_eeprom *, struct netlink_ext_ack *); void (*get_eth_phy_stats)(struct net_device *, struct ethtool_eth_phy_stats *); void (*get_eth_mac_stats)(struct net_device *, struct ethtool_eth_mac_stats *); void (*get_eth_ctrl_stats)(struct net_device *, struct ethtool_eth_ctrl_stats *); void (*get_rmon_stats)(struct net_device *, struct ethtool_rmon_stats *, const struct ethtool_rmon_hist_range **); int (*get_module_power_mode)(struct net_device *, struct ethtool_module_power_mode_params *, struct netlink_ext_ack *); int (*set_module_power_mode)(struct net_device *, const struct ethtool_module_power_mode_params *, struct netlink_ext_ack *); int (*get_mm)(struct net_device *, struct ethtool_mm_state *); int (*set_mm)(struct net_device *, struct ethtool_mm_cfg *, struct netlink_ext_ack *); void (*get_mm_stats)(struct net_device *, struct ethtool_mm_stats *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct nd_opt_hdr; struct ndisc_options; struct prefix_info; struct ndisc_ops { int (*is_useropt)(u8); int (*parse_options)(const struct net_device *, struct nd_opt_hdr *, struct ndisc_options *); void (*update)(const 
struct net_device *, struct neighbour *, u32, u8, const struct ndisc_options *); int (*opt_addr_space)(const struct net_device *, u8, struct neighbour *, u8 *, u8 **); void (*fill_addr_option)(const struct net_device *, struct sk_buff *, u8, const u8 *); void (*prefix_rcv_add_addr)(struct net *, struct net_device *, const struct prefix_info *, struct inet6_dev *, struct in6_addr *, int, u32, bool, bool, __u32, u32, bool); }; struct header_ops { int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, const void *, unsigned int); int (*parse)(const struct sk_buff *, unsigned char *); int (*cache)(const struct neighbour *, struct hh_cache *, __be16); void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); bool (*validate)(const char *, unsigned int); __be16 (*parse_protocol)(const struct sk_buff *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct ipv4_devconf { void *sysctl; int data[33]; unsigned long state[1]; }; struct in_ifaddr; struct ip_mc_list; struct in_device { struct net_device *dev; netdevice_tracker dev_tracker; refcount_t refcnt; int dead; struct in_ifaddr __attribute__((btf_type_tag("rcu"))) *ifa_list; struct ip_mc_list __attribute__((btf_type_tag("rcu"))) *mc_list; struct ip_mc_list __attribute__((btf_type_tag("rcu"))) * __attribute__((btf_type_tag("rcu"))) *mc_hash; int mc_count; spinlock_t mc_tomb_lock; struct ip_mc_list *mc_tomb; unsigned long mr_v1_seen; unsigned long mr_v2_seen; unsigned long mr_maxdelay; unsigned long mr_qi; unsigned long mr_qri; unsigned char mr_qrv; unsigned char mr_gq_running; u32 mr_ifc_count; struct timer_list mr_gq_timer; struct timer_list mr_ifc_timer; struct neigh_parms *arp_parms; struct ipv4_devconf cnf; struct callback_head callback_head; }; struct icmpv6_mib_device; struct icmpv6msg_mib_device; struct ipv6_devstat { struct proc_dir_entry *proc_dir_entry; struct ipstats_mib __attribute__((btf_type_tag("percpu"))) *ipv6; struct icmpv6_mib_device *icmpv6dev; struct icmpv6msg_mib_device *icmpv6msgdev; }; struct ifmcaddr6; struct ifacaddr6; struct inet6_dev { struct net_device *dev; netdevice_tracker dev_tracker; struct list_head addr_list; struct ifmcaddr6 __attribute__((btf_type_tag("rcu"))) *mc_list; struct ifmcaddr6 __attribute__((btf_type_tag("rcu"))) *mc_tomb; unsigned char mc_qrv; unsigned char mc_gq_running; unsigned char mc_ifc_count; unsigned char mc_dad_count; unsigned long mc_v1_seen; unsigned long mc_qi; unsigned long mc_qri; unsigned long mc_maxdelay; struct delayed_work mc_gq_work; struct delayed_work mc_ifc_work; struct delayed_work mc_dad_work; struct delayed_work mc_query_work; struct delayed_work mc_report_work; struct sk_buff_head mc_query_queue; struct sk_buff_head mc_report_queue; spinlock_t mc_query_lock; spinlock_t mc_report_lock; struct mutex mc_lock; struct ifacaddr6 *ac_list; rwlock_t lock; refcount_t refcnt; __u32 if_flags; int dead; u32 desync_factor; struct list_head tempaddr_list; struct in6_addr token; struct neigh_parms *nd_parms; struct ipv6_devconf cnf; struct ipv6_devstat stats; struct timer_list rs_timer; __s32 rs_interval; __u8 rs_probes; unsigned long tstamp; struct callback_head rcu; unsigned int ra_mtu; }; struct ip6_sf_list; struct ifmcaddr6 { struct in6_addr mca_addr; struct inet6_dev *idev; struct ifmcaddr6 __attribute__((btf_type_tag("rcu"))) *next; struct ip6_sf_list __attribute__((btf_type_tag("rcu"))) *mca_sources; struct ip6_sf_list __attribute__((btf_type_tag("rcu"))) *mca_tomb; unsigned int mca_sfmode; unsigned char 
mca_crcount; unsigned long mca_sfcount[2]; struct delayed_work mca_work; unsigned int mca_flags; int mca_users; refcount_t mca_refcnt; unsigned long mca_cstamp; unsigned long mca_tstamp; struct callback_head rcu; }; struct ip6_sf_list { struct ip6_sf_list __attribute__((btf_type_tag("rcu"))) *sf_next; struct in6_addr sf_addr; unsigned long sf_count[2]; unsigned char sf_gsresp; unsigned char sf_oldin; unsigned char sf_crcount; struct callback_head rcu; }; struct ifacaddr6 { struct in6_addr aca_addr; struct fib6_info *aca_rt; struct ifacaddr6 *aca_next; struct hlist_node aca_addr_lst; int aca_users; refcount_t aca_refcnt; unsigned long aca_cstamp; unsigned long aca_tstamp; struct callback_head rcu; }; struct icmpv6_mib_device { atomic_long_t mibs[7]; }; struct icmpv6msg_mib_device { atomic_long_t mibs[512]; }; struct vlan_group { unsigned int nr_vlan_devs; struct hlist_node hlist; struct net_device **vlan_devices_arrays[16]; }; struct vlan_info { struct net_device *real_dev; struct vlan_group grp; struct list_head vid_list; unsigned int nr_vids; struct callback_head rcu; }; enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC = 0, BPF_PROG_TYPE_SOCKET_FILTER = 1, BPF_PROG_TYPE_KPROBE = 2, BPF_PROG_TYPE_SCHED_CLS = 3, BPF_PROG_TYPE_SCHED_ACT = 4, BPF_PROG_TYPE_TRACEPOINT = 5, BPF_PROG_TYPE_XDP = 6, BPF_PROG_TYPE_PERF_EVENT = 7, BPF_PROG_TYPE_CGROUP_SKB = 8, BPF_PROG_TYPE_CGROUP_SOCK = 9, BPF_PROG_TYPE_LWT_IN = 10, BPF_PROG_TYPE_LWT_OUT = 11, BPF_PROG_TYPE_LWT_XMIT = 12, BPF_PROG_TYPE_SOCK_OPS = 13, BPF_PROG_TYPE_SK_SKB = 14, BPF_PROG_TYPE_CGROUP_DEVICE = 15, BPF_PROG_TYPE_SK_MSG = 16, BPF_PROG_TYPE_RAW_TRACEPOINT = 17, BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 18, BPF_PROG_TYPE_LWT_SEG6LOCAL = 19, BPF_PROG_TYPE_LIRC_MODE2 = 20, BPF_PROG_TYPE_SK_REUSEPORT = 21, BPF_PROG_TYPE_FLOW_DISSECTOR = 22, BPF_PROG_TYPE_CGROUP_SYSCTL = 23, BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 24, BPF_PROG_TYPE_CGROUP_SOCKOPT = 25, BPF_PROG_TYPE_TRACING = 26, BPF_PROG_TYPE_STRUCT_OPS = 27, BPF_PROG_TYPE_EXT = 28, BPF_PROG_TYPE_LSM = 29, BPF_PROG_TYPE_SK_LOOKUP = 30, BPF_PROG_TYPE_SYSCALL = 31, BPF_PROG_TYPE_NETFILTER = 32, BPF_PROG_TYPE_FUSE = 33, }; enum bpf_attach_type { BPF_CGROUP_INET_INGRESS = 0, BPF_CGROUP_INET_EGRESS = 1, BPF_CGROUP_INET_SOCK_CREATE = 2, BPF_CGROUP_SOCK_OPS = 3, BPF_SK_SKB_STREAM_PARSER = 4, BPF_SK_SKB_STREAM_VERDICT = 5, BPF_CGROUP_DEVICE = 6, BPF_SK_MSG_VERDICT = 7, BPF_CGROUP_INET4_BIND = 8, BPF_CGROUP_INET6_BIND = 9, BPF_CGROUP_INET4_CONNECT = 10, BPF_CGROUP_INET6_CONNECT = 11, BPF_CGROUP_INET4_POST_BIND = 12, BPF_CGROUP_INET6_POST_BIND = 13, BPF_CGROUP_UDP4_SENDMSG = 14, BPF_CGROUP_UDP6_SENDMSG = 15, BPF_LIRC_MODE2 = 16, BPF_FLOW_DISSECTOR = 17, BPF_CGROUP_SYSCTL = 18, BPF_CGROUP_UDP4_RECVMSG = 19, BPF_CGROUP_UDP6_RECVMSG = 20, BPF_CGROUP_GETSOCKOPT = 21, BPF_CGROUP_SETSOCKOPT = 22, BPF_TRACE_RAW_TP = 23, BPF_TRACE_FENTRY = 24, BPF_TRACE_FEXIT = 25, BPF_MODIFY_RETURN = 26, BPF_LSM_MAC = 27, BPF_TRACE_ITER = 28, BPF_CGROUP_INET4_GETPEERNAME = 29, BPF_CGROUP_INET6_GETPEERNAME = 30, BPF_CGROUP_INET4_GETSOCKNAME = 31, BPF_CGROUP_INET6_GETSOCKNAME = 32, BPF_XDP_DEVMAP = 33, BPF_CGROUP_INET_SOCK_RELEASE = 34, BPF_XDP_CPUMAP = 35, BPF_SK_LOOKUP = 36, BPF_XDP = 37, BPF_SK_SKB_VERDICT = 38, BPF_SK_REUSEPORT_SELECT = 39, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE = 40, BPF_PERF_EVENT = 41, BPF_TRACE_KPROBE_MULTI = 42, BPF_LSM_CGROUP = 43, BPF_STRUCT_OPS = 44, BPF_NETFILTER = 45, BPF_TCX_INGRESS = 46, BPF_TCX_EGRESS = 47, BPF_TRACE_UPROBE_MULTI = 48, __MAX_BPF_ATTACH_TYPE = 49, }; struct sock_filter { __u16 code; __u8 jt; __u8 
jf; __u32 k; }; struct bpf_insn { __u8 code; __u8 dst_reg: 4; __u8 src_reg: 4; __s16 off; __s32 imm; }; struct bpf_prog_stats; struct bpf_prog_aux; struct sock_fprog_kern; struct bpf_prog { u16 pages; u16 jited: 1; u16 jit_requested: 1; u16 gpl_compatible: 1; u16 cb_access: 1; u16 dst_needed: 1; u16 blinding_requested: 1; u16 blinded: 1; u16 is_func: 1; u16 kprobe_override: 1; u16 has_callchain_buf: 1; u16 enforce_expected_attach_type: 1; u16 call_get_stack: 1; u16 call_get_func_ip: 1; u16 tstamp_type_access: 1; enum bpf_prog_type type; enum bpf_attach_type expected_attach_type; u32 len; u32 jited_len; u8 tag[8]; struct bpf_prog_stats __attribute__((btf_type_tag("percpu"))) *stats; int __attribute__((btf_type_tag("percpu"))) *active; unsigned int (*bpf_func)(const void *, const struct bpf_insn *); struct bpf_prog_aux *aux; struct sock_fprog_kern *orig_prog; u64 android_kabi_reserved1; union { struct { struct {} __empty_insns; struct sock_filter insns[0]; }; struct { struct {} __empty_insnsi; struct bpf_insn insnsi[0]; }; }; }; struct bpf_mprog_fp { struct bpf_prog *prog; }; struct bpf_mprog_bundle; struct bpf_mprog_entry { struct bpf_mprog_fp fp_items[64]; struct bpf_mprog_bundle *parent; }; struct dql { unsigned int num_queued; unsigned int adj_limit; unsigned int last_obj_cnt; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; unsigned int limit; unsigned int num_completed; unsigned int prev_ovlimit; unsigned int prev_num_queued; unsigned int prev_last_obj_cnt; unsigned int lowest_slack; unsigned long slack_start_time; unsigned int max_limit; unsigned int min_limit; unsigned int slack_hold_time; long: 64; long: 64; }; struct netdev_queue { struct net_device *dev; netdevice_tracker dev_tracker; struct Qdisc __attribute__((btf_type_tag("rcu"))) *qdisc; struct Qdisc __attribute__((btf_type_tag("rcu"))) *qdisc_sleeping; struct kobject kobj; unsigned long tx_maxrate; atomic_long_t trans_timeout; struct net_device *sb_dev; struct xsk_buff_pool *pool; long: 64; long: 64; long: 64; long: 64; long: 64; spinlock_t _xmit_lock; int xmit_lock_owner; unsigned long trans_start; unsigned long state; long: 64; long: 64; long: 64; long: 64; long: 64; struct dql dql; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; long: 64; long: 64; long: 64; long: 64; }; struct qdisc_skb_head { struct sk_buff *head; struct sk_buff *tail; __u32 qlen; spinlock_t lock; }; typedef struct { local64_t v; } u64_stats_t; struct gnet_stats_basic_sync { u64_stats_t bytes; u64_stats_t packets; struct u64_stats_sync syncp; }; struct gnet_stats_queue { __u32 qlen; __u32 backlog; __u32 drops; __u32 requeues; __u32 overlimits; }; struct Qdisc_ops; struct qdisc_size_table; struct net_rate_estimator; struct Qdisc { int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); struct sk_buff * (*dequeue)(struct Qdisc *); unsigned int flags; u32 limit; const struct Qdisc_ops *ops; struct qdisc_size_table __attribute__((btf_type_tag("rcu"))) *stab; struct hlist_node hash; u32 handle; u32 parent; struct netdev_queue *dev_queue; struct net_rate_estimator __attribute__((btf_type_tag("rcu"))) *rate_est; struct gnet_stats_basic_sync __attribute__((btf_type_tag("percpu"))) *cpu_bstats; struct gnet_stats_queue __attribute__((btf_type_tag("percpu"))) *cpu_qstats; int pad; refcount_t refcnt; long: 64; long: 64; long: 64; struct sk_buff_head gso_skb; struct qdisc_skb_head q; struct gnet_stats_basic_sync bstats; struct gnet_stats_queue qstats; unsigned long state; unsigned 
long state2; struct Qdisc *next_sched; struct sk_buff_head skb_bad_txq; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; spinlock_t busylock; spinlock_t seqlock; struct callback_head rcu; netdevice_tracker dev_tracker; u64 android_kabi_reserved1; long: 64; long: 64; long: 64; long: 64; long privdata[0]; }; struct xdp_dev_bulk_queue { struct xdp_frame *q[16]; struct list_head flush_node; struct net_device *dev; struct net_device *dev_rx; struct bpf_prog *xdp_prog; unsigned int count; }; struct xps_map; struct xps_dev_maps { struct callback_head rcu; unsigned int nr_ids; s16 num_tc; struct xps_map __attribute__((btf_type_tag("rcu"))) *attr_map[0]; }; struct xps_map { unsigned int len; unsigned int alloc_len; struct callback_head rcu; u16 queues[0]; }; struct pcpu_lstats { u64_stats_t packets; u64_stats_t bytes; struct u64_stats_sync syncp; }; struct pcpu_sw_netstats { u64_stats_t rx_packets; u64_stats_t rx_bytes; u64_stats_t tx_packets; u64_stats_t tx_bytes; struct u64_stats_sync syncp; }; struct pcpu_dstats { u64 rx_packets; u64 rx_bytes; u64 rx_drops; u64 tx_packets; u64 tx_bytes; u64 tx_drops; struct u64_stats_sync syncp; long: 64; long: 64; }; struct rtnl_link_ops { struct list_head list; const char *kind; size_t priv_size; struct net_device * (*alloc)(struct nlattr **, const char *, unsigned char, unsigned int, unsigned int); void (*setup)(struct net_device *); bool netns_refund; unsigned int maxtype; const struct nla_policy *policy; int (*validate)(struct nlattr **, struct nlattr **, struct netlink_ext_ack *); int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); void (*dellink)(struct net_device *, struct list_head *); size_t (*get_size)(const struct net_device *); int (*fill_info)(struct sk_buff *, const struct net_device *); size_t (*get_xstats_size)(const struct net_device *); int (*fill_xstats)(struct sk_buff *, const struct net_device *); unsigned int (*get_num_tx_queues)(); unsigned int (*get_num_rx_queues)(); unsigned int slave_maxtype; const struct nla_policy *slave_policy; int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); size_t (*get_slave_size)(const struct net_device *, const struct net_device *); int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *); struct net * (*get_link_net)(const struct net_device *); size_t (*get_linkxstats_size)(const struct net_device *, int); int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *, int); }; struct netprio_map { struct callback_head rcu; u32 priomap_len; u32 priomap[0]; }; struct macsec_context; struct macsec_ops { int (*mdo_dev_open)(struct macsec_context *); int (*mdo_dev_stop)(struct macsec_context *); int (*mdo_add_secy)(struct macsec_context *); int (*mdo_upd_secy)(struct macsec_context *); int (*mdo_del_secy)(struct macsec_context *); int (*mdo_add_rxsc)(struct macsec_context *); int (*mdo_upd_rxsc)(struct macsec_context *); int (*mdo_del_rxsc)(struct macsec_context *); int (*mdo_add_rxsa)(struct macsec_context *); int (*mdo_upd_rxsa)(struct macsec_context *); int (*mdo_del_rxsa)(struct macsec_context *); int (*mdo_add_txsa)(struct macsec_context *); int (*mdo_upd_txsa)(struct macsec_context *); int (*mdo_del_txsa)(struct macsec_context *); int (*mdo_get_dev_stats)(struct macsec_context *); int 
(*mdo_get_tx_sc_stats)(struct macsec_context *); int (*mdo_get_tx_sa_stats)(struct macsec_context *); int (*mdo_get_rx_sc_stats)(struct macsec_context *); int (*mdo_get_rx_sa_stats)(struct macsec_context *); bool rx_uses_md_dst; }; struct udp_tunnel_nic_table_info { unsigned int n_entries; unsigned int tunnel_types; }; struct udp_tunnel_info; struct udp_tunnel_nic_shared; struct udp_tunnel_nic_info { int (*set_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); int (*unset_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); int (*sync_table)(struct net_device *, unsigned int); struct udp_tunnel_nic_shared *shared; unsigned int flags; struct udp_tunnel_nic_table_info tables[4]; }; struct rtnl_hw_stats64 { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 tx_dropped; __u64 multicast; }; typedef enum { SS_FREE = 0, SS_UNCONNECTED = 1, SS_CONNECTING = 2, SS_CONNECTED = 3, SS_DISCONNECTING = 4, } socket_state; struct socket_wq { wait_queue_head_t wait; struct fasync_struct *fasync_list; unsigned long flags; struct callback_head rcu; long: 64; }; struct proto_ops; struct socket { socket_state state; short type; unsigned long flags; struct file *file; struct sock *sk; const struct proto_ops *ops; long: 64; long: 64; long: 64; struct socket_wq wq; }; typedef struct { size_t written; size_t count; union { char __attribute__((btf_type_tag("user"))) *buf; void *data; } arg; int error; } read_descriptor_t; typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, unsigned int, size_t); typedef int (*skb_read_actor_t)(struct sock *, struct sk_buff *); struct proto_ops { int family; struct module *owner; int (*release)(struct socket *); int (*bind)(struct socket *, struct sockaddr *, int); int (*connect)(struct socket *, struct sockaddr *, int, int); int (*socketpair)(struct socket *, struct socket *); int (*accept)(struct socket *, struct socket *, int, bool); int (*getname)(struct socket *, struct sockaddr *, int); __poll_t (*poll)(struct file *, struct socket *, struct poll_table_struct *); int (*ioctl)(struct socket *, unsigned int, unsigned long); int (*compat_ioctl)(struct socket *, unsigned int, unsigned long); int (*gettstamp)(struct socket *, void __attribute__((btf_type_tag("user"))) *, bool, bool); int (*listen)(struct socket *, int); int (*shutdown)(struct socket *, int); int (*setsockopt)(struct socket *, int, int, sockptr_t, unsigned int); int (*getsockopt)(struct socket *, int, int, char __attribute__((btf_type_tag("user"))) *, int __attribute__((btf_type_tag("user"))) *); void (*show_fdinfo)(struct seq_file *, struct socket *); int (*sendmsg)(struct socket *, struct msghdr *, size_t); int (*recvmsg)(struct socket *, struct msghdr *, size_t, int); int (*mmap)(struct file *, struct socket *, struct vm_area_struct *); ssize_t (*splice_read)(struct socket *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); void (*splice_eof)(struct socket *); int (*set_peek_off)(struct sock *, int); int (*peek_len)(struct socket *); int (*read_sock)(struct sock *, read_descriptor_t *, sk_read_actor_t); int (*read_skb)(struct sock *, skb_read_actor_t); int (*sendmsg_locked)(struct sock *, struct msghdr *, size_t); int (*set_rcvlowat)(struct sock *, int); u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct request_sock; struct request_sock_ops { int family; unsigned int obj_size; struct 
kmem_cache *slab; char *slab_name; int (*rtx_syn_ack)(const struct sock *, struct request_sock *); void (*send_ack)(const struct sock *, struct sk_buff *, struct request_sock *); void (*send_reset)(const struct sock *, struct sk_buff *); void (*destructor)(struct request_sock *); void (*syn_ack_timeout)(const struct request_sock *); }; struct saved_syn; struct request_sock { struct sock_common __req_common; struct request_sock *dl_next; u16 mss; u8 num_retrans; u8 syncookie: 1; u8 num_timeout: 7; u32 ts_recent; struct timer_list rsk_timer; const struct request_sock_ops *rsk_ops; struct sock *sk; struct saved_syn *saved_syn; u32 secid; u32 peer_secid; u32 timeout; }; struct saved_syn { u32 mac_hdrlen; u32 network_hdrlen; u32 tcp_hdrlen; u8 data[0]; }; struct timewait_sock_ops { struct kmem_cache *twsk_slab; char *twsk_slab_name; unsigned int twsk_obj_size; int (*twsk_unique)(struct sock *, struct sock *, void *); void (*twsk_destructor)(struct sock *); }; struct sk_filter { refcount_t refcnt; struct callback_head rcu; struct bpf_prog *prog; }; struct xfrm_mark { __u32 v; __u32 m; }; typedef union { __be32 a4; __be32 a6[4]; struct in6_addr in6; } xfrm_address_t; struct xfrm_selector { xfrm_address_t daddr; xfrm_address_t saddr; __be16 dport; __be16 dport_mask; __be16 sport; __be16 sport_mask; __u16 family; __u8 prefixlen_d; __u8 prefixlen_s; __u8 proto; int ifindex; __kernel_uid32_t user; }; struct xfrm_lifetime_cfg { __u64 soft_byte_limit; __u64 hard_byte_limit; __u64 soft_packet_limit; __u64 hard_packet_limit; __u64 soft_add_expires_seconds; __u64 hard_add_expires_seconds; __u64 soft_use_expires_seconds; __u64 hard_use_expires_seconds; }; struct xfrm_lifetime_cur { __u64 bytes; __u64 packets; __u64 add_time; __u64 use_time; }; struct xfrm_policy_walk_entry { struct list_head all; u8 dead; }; struct xfrm_policy_queue { struct sk_buff_head hold_queue; struct timer_list hold_timer; unsigned long timeout; }; struct xfrm_id { xfrm_address_t daddr; __be32 spi; __u8 proto; }; struct xfrm_tmpl { struct xfrm_id id; xfrm_address_t saddr; unsigned short encap_family; u32 reqid; u8 mode; u8 share; u8 optional; u8 allalgs; u32 aalgos; u32 ealgos; u32 calgos; }; struct xfrm_dev_offload { struct net_device *dev; netdevice_tracker dev_tracker; struct net_device *real_dev; unsigned long offload_handle; u8 dir: 2; u8 type: 2; u8 flags: 2; }; struct xfrm_sec_ctx; struct xfrm_policy { possible_net_t xp_net; struct hlist_node bydst; struct hlist_node byidx; rwlock_t lock; refcount_t refcnt; u32 pos; struct timer_list timer; atomic_t genid; u32 priority; u32 index; u32 if_id; struct xfrm_mark mark; struct xfrm_selector selector; struct xfrm_lifetime_cfg lft; struct xfrm_lifetime_cur curlft; struct xfrm_policy_walk_entry walk; struct xfrm_policy_queue polq; bool bydst_reinsert; u8 type; u8 action; u8 flags; u8 xfrm_nr; u16 family; struct xfrm_sec_ctx *security; struct xfrm_tmpl xfrm_vec[6]; struct hlist_node bydst_inexact_list; struct callback_head rcu; struct xfrm_dev_offload xdo; }; struct sock_reuseport { struct callback_head rcu; u16 max_socks; u16 num_socks; u16 num_closed_socks; u16 incoming_cpu; unsigned int synq_overflow_ts; unsigned int reuseport_id; unsigned int bind_inany: 1; unsigned int has_conns: 1; struct bpf_prog __attribute__((btf_type_tag("rcu"))) *prog; struct sock *socks[0]; }; struct fs_struct { int users; spinlock_t lock; seqcount_spinlock_t seq; int umask; int in_exec; struct path root; struct path pwd; }; struct ld_semaphore { atomic_long_t count; raw_spinlock_t wait_lock; unsigned int 
wait_readers; struct list_head read_wait; struct list_head write_wait; }; typedef unsigned int tcflag_t; typedef unsigned char cc_t; typedef unsigned int speed_t; struct ktermios { tcflag_t c_iflag; tcflag_t c_oflag; tcflag_t c_cflag; tcflag_t c_lflag; cc_t c_line; cc_t c_cc[19]; speed_t c_ispeed; speed_t c_ospeed; }; struct winsize { unsigned short ws_row; unsigned short ws_col; unsigned short ws_xpixel; unsigned short ws_ypixel; }; struct tty_driver; struct tty_port; struct tty_operations; struct tty_ldisc; struct tty_struct { struct kref kref; int index; struct device *dev; struct tty_driver *driver; struct tty_port *port; const struct tty_operations *ops; struct tty_ldisc *ldisc; struct ld_semaphore ldisc_sem; struct mutex atomic_write_lock; struct mutex legacy_mutex; struct mutex throttle_mutex; struct rw_semaphore termios_rwsem; struct mutex winsize_mutex; struct ktermios termios; struct ktermios termios_locked; char name[64]; unsigned long flags; int count; unsigned int receive_room; struct winsize winsize; struct { spinlock_t lock; bool stopped; bool tco_stopped; unsigned long unused[0]; } flow; struct { struct pid *pgrp; struct pid *session; spinlock_t lock; unsigned char pktstatus; bool packet; unsigned long unused[0]; } ctrl; bool hw_stopped; bool closing; int flow_change; struct tty_struct *link; struct fasync_struct *fasync; wait_queue_head_t write_wait; wait_queue_head_t read_wait; struct work_struct hangup_work; void *disc_data; void *driver_data; spinlock_t files_lock; int write_cnt; unsigned char *write_buf; struct list_head tty_files; struct work_struct SAK_work; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct tty_driver { struct kref kref; struct cdev **cdevs; struct module *owner; const char *driver_name; const char *name; int name_base; int major; int minor_start; unsigned int num; short type; short subtype; struct ktermios init_termios; unsigned long flags; struct proc_dir_entry *proc_entry; struct tty_driver *other; struct tty_struct **ttys; struct tty_port **ports; struct ktermios **termios; void *driver_state; const struct tty_operations *ops; struct list_head tty_drivers; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct __kfifo { unsigned int in; unsigned int out; unsigned int mask; unsigned int esize; void *data; }; struct tty_buffer { union { struct tty_buffer *next; struct llist_node free; }; unsigned int used; unsigned int size; unsigned int commit; unsigned int lookahead; unsigned int read; bool flags; long: 0; u8 data[0]; }; struct tty_bufhead { struct tty_buffer *head; struct work_struct work; struct mutex lock; atomic_t priority; struct tty_buffer sentinel; struct llist_head free; atomic_t mem_used; int mem_limit; struct tty_buffer *tail; }; struct tty_port_operations; struct tty_port_client_operations; struct tty_port { struct tty_bufhead buf; struct tty_struct *tty; struct tty_struct *itty; const struct tty_port_operations *ops; const struct tty_port_client_operations *client_ops; spinlock_t lock; int blocked_open; int count; wait_queue_head_t open_wait; wait_queue_head_t delta_msr_wait; unsigned long flags; unsigned long iflags; unsigned char console: 1; struct mutex mutex; struct mutex buf_mutex; unsigned char *xmit_buf; struct { union { struct __kfifo kfifo; unsigned char *type; const unsigned char *const_type; char (*rectype)[0]; unsigned char *ptr; const unsigned char *ptr_const; }; unsigned char buf[0]; } xmit_fifo; unsigned int close_delay; unsigned int closing_wait; int drain_delay; struct kref kref; void 
*client_data; u64 android_kabi_reserved1; }; struct tty_port_operations { bool (*carrier_raised)(struct tty_port *); void (*dtr_rts)(struct tty_port *, bool); void (*shutdown)(struct tty_port *); int (*activate)(struct tty_port *, struct tty_struct *); void (*destruct)(struct tty_port *); u64 android_kabi_reserved1; }; struct tty_port_client_operations { size_t (*receive_buf)(struct tty_port *, const u8 *, const u8 *, size_t); void (*lookahead_buf)(struct tty_port *, const u8 *, const u8 *, size_t); void (*write_wakeup)(struct tty_port *); }; struct serial_icounter_struct; struct serial_struct; struct tty_operations { struct tty_struct * (*lookup)(struct tty_driver *, struct file *, int); int (*install)(struct tty_driver *, struct tty_struct *); void (*remove)(struct tty_driver *, struct tty_struct *); int (*open)(struct tty_struct *, struct file *); void (*close)(struct tty_struct *, struct file *); void (*shutdown)(struct tty_struct *); void (*cleanup)(struct tty_struct *); ssize_t (*write)(struct tty_struct *, const u8 *, size_t); int (*put_char)(struct tty_struct *, u8); void (*flush_chars)(struct tty_struct *); unsigned int (*write_room)(struct tty_struct *); unsigned int (*chars_in_buffer)(struct tty_struct *); int (*ioctl)(struct tty_struct *, unsigned int, unsigned long); long (*compat_ioctl)(struct tty_struct *, unsigned int, unsigned long); void (*set_termios)(struct tty_struct *, const struct ktermios *); void (*throttle)(struct tty_struct *); void (*unthrottle)(struct tty_struct *); void (*stop)(struct tty_struct *); void (*start)(struct tty_struct *); void (*hangup)(struct tty_struct *); int (*break_ctl)(struct tty_struct *, int); void (*flush_buffer)(struct tty_struct *); void (*set_ldisc)(struct tty_struct *); void (*wait_until_sent)(struct tty_struct *, int); void (*send_xchar)(struct tty_struct *, char); int (*tiocmget)(struct tty_struct *); int (*tiocmset)(struct tty_struct *, unsigned int, unsigned int); int (*resize)(struct tty_struct *, struct winsize *); int (*get_icount)(struct tty_struct *, struct serial_icounter_struct *); int (*get_serial)(struct tty_struct *, struct serial_struct *); int (*set_serial)(struct tty_struct *, struct serial_struct *); void (*show_fdinfo)(struct tty_struct *, struct seq_file *); int (*proc_show)(struct seq_file *, void *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct tty_ldisc_ops; struct tty_ldisc { struct tty_ldisc_ops *ops; struct tty_struct *tty; }; struct tty_ldisc_ops { char *name; int num; int (*open)(struct tty_struct *); void (*close)(struct tty_struct *); void (*flush_buffer)(struct tty_struct *); ssize_t (*read)(struct tty_struct *, struct file *, u8 *, size_t, void **, unsigned long); ssize_t (*write)(struct tty_struct *, struct file *, const u8 *, size_t); int (*ioctl)(struct tty_struct *, unsigned int, unsigned long); int (*compat_ioctl)(struct tty_struct *, unsigned int, unsigned long); void (*set_termios)(struct tty_struct *, const struct ktermios *); __poll_t (*poll)(struct tty_struct *, struct file *, struct poll_table_struct *); void (*hangup)(struct tty_struct *); void (*receive_buf)(struct tty_struct *, const u8 *, const u8 *, size_t); void (*write_wakeup)(struct tty_struct *); void (*dcd_change)(struct tty_struct *, bool); size_t (*receive_buf2)(struct tty_struct *, const u8 *, const u8 *, size_t); void (*lookahead_buf)(struct tty_struct *, const u8 *, const u8 *, size_t); struct module *owner; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; enum { Root_NFS = 255, Root_CIFS = 254, 
Root_Generic = 253, Root_RAM0 = 1048576, }; struct minix_super_block { __u16 s_ninodes; __u16 s_nzones; __u16 s_imap_blocks; __u16 s_zmap_blocks; __u16 s_firstdatazone; __u16 s_log_zone_size; __u32 s_max_size; __u16 s_magic; __u16 s_state; __u32 s_zones; }; struct romfs_super_block { __be32 word0; __be32 word1; __be32 size; __be32 checksum; char name[0]; }; struct cramfs_info { __u32 crc; __u32 edition; __u32 blocks; __u32 files; }; struct cramfs_inode { __u32 mode: 16; __u32 uid: 16; __u32 size: 24; __u32 gid: 8; __u32 namelen: 6; __u32 offset: 26; }; struct cramfs_super { __u32 magic; __u32 size; __u32 flags; __u32 future; __u8 signature[16]; struct cramfs_info fsid; __u8 name[16]; struct cramfs_inode root; }; typedef __u16 __le16; typedef __u64 __le64; struct squashfs_super_block { __le32 s_magic; __le32 inodes; __le32 mkfs_time; __le32 block_size; __le32 fragments; __le16 compression; __le16 block_log; __le16 flags; __le16 no_ids; __le16 s_major; __le16 s_minor; __le64 root_inode; __le64 bytes_used; __le64 id_table_start; __le64 xattr_id_table_start; __le64 inode_table_start; __le64 directory_table_start; __le64 fragment_table_start; __le64 lookup_table_start; }; typedef int (*decompress_fn)(unsigned char *, long, long (*)(void *, unsigned long), long (*)(void *, unsigned long), unsigned char *, long *, void (*)(char *)); struct subprocess_info { struct work_struct work; struct completion *complete; const char *path; char **argv; char **envp; int wait; int retval; int (*init)(struct subprocess_info *, struct cred *); void (*cleanup)(struct subprocess_info *); void *data; }; typedef u64 async_cookie_t; struct async_domain { struct list_head pending; unsigned int registered: 1; }; enum state { Start = 0, Collect = 1, GotHeader = 2, SkipIt = 3, GotName = 4, CopyFile = 5, GotSymlink = 6, Reset = 7, }; struct hash { int ino; int minor; int major; umode_t mode; struct hash *next; char name[4098]; }; enum umh_disable_depth { UMH_ENABLED = 0, UMH_FREEZING = 1, UMH_DISABLED = 2, }; struct dir_entry { struct list_head list; time64_t mtime; char name[0]; }; typedef void (*async_func_t)(void *, async_cookie_t); enum dbg_active_el { DBG_ACTIVE_EL0 = 0, DBG_ACTIVE_EL1 = 1, }; typedef unsigned long uintptr_t; struct step_hook { struct list_head node; int (*fn)(struct pt_regs *, unsigned long); }; struct break_hook { struct list_head node; int (*fn)(struct pt_regs *, unsigned long); u16 imm; u16 mask; }; struct nmi_ctx { u64 hcr; unsigned int cnt; }; enum vec_type { ARM64_VEC_SVE = 0, ARM64_VEC_SME = 1, ARM64_VEC_MAX = 2, }; struct vl_info { enum vec_type type; const char *name; int min_vl; int max_vl; int max_virtualisable_vl; unsigned long vq_map[8]; unsigned long vq_partial_map[8]; }; struct cpu_fp_state { struct user_fpsimd_state *st; void *sve_state; void *sme_state; u64 *svcr; unsigned int sve_vl; unsigned int sme_vl; enum fp_type *fp_type; enum fp_type to_save; }; struct vl_config { int __default_vl; }; enum cpu_pm_event { CPU_PM_ENTER = 0, CPU_PM_ENTER_FAILED = 1, CPU_PM_EXIT = 2, CPU_CLUSTER_PM_ENTER = 3, CPU_CLUSTER_PM_ENTER_FAILED = 4, CPU_CLUSTER_PM_EXIT = 5, }; struct midr_range { u32 model; u32 rv_min; u32 rv_max; }; struct arm64_midr_revidr; struct arm64_cpu_capabilities { const char *desc; u16 capability; u16 type; bool (*matches)(const struct arm64_cpu_capabilities *, int); void (*cpu_enable)(const struct arm64_cpu_capabilities *); union { struct { struct midr_range midr_range; const struct arm64_midr_revidr * const fixed_revs; }; const struct midr_range *midr_range_list; struct { u32 
sys_reg; u8 field_pos; u8 field_width; u8 min_field_value; u8 hwcap_type; bool sign; unsigned long hwcap; }; }; const struct arm64_cpu_capabilities *match_list; }; struct arm64_midr_revidr { u32 midr_rv; u32 revidr_mask; }; enum arm64_hyp_spectre_vector { HYP_VECTOR_DIRECT = 0, HYP_VECTOR_SPECTRE_DIRECT = 1, HYP_VECTOR_INDIRECT = 2, HYP_VECTOR_SPECTRE_INDIRECT = 3, }; typedef void (*bp_hardening_cb_t)(); struct bp_hardening_data { enum arm64_hyp_spectre_vector slot; bp_hardening_cb_t fn; }; struct stack_info { unsigned long low; unsigned long high; }; struct plist_head { struct list_head node_list; }; enum pm_qos_type { PM_QOS_UNITIALIZED = 0, PM_QOS_MAX = 1, PM_QOS_MIN = 2, }; struct pm_qos_constraints { struct plist_head list; s32 target_value; s32 default_value; s32 no_constraint_value; enum pm_qos_type type; struct blocking_notifier_head *notifiers; }; struct freq_constraints { struct pm_qos_constraints min_freq; struct blocking_notifier_head min_freq_notifiers; struct pm_qos_constraints max_freq; struct blocking_notifier_head max_freq_notifiers; }; struct pm_qos_flags { struct list_head list; s32 effective_flags; }; struct dev_pm_qos_request; struct dev_pm_qos { struct pm_qos_constraints resume_latency; struct pm_qos_constraints latency_tolerance; struct freq_constraints freq; struct pm_qos_flags flags; struct dev_pm_qos_request *resume_latency_req; struct dev_pm_qos_request *latency_tolerance_req; struct dev_pm_qos_request *flags_req; }; struct pm_qos_flags_request { struct list_head node; s32 flags; }; enum freq_qos_req_type { FREQ_QOS_MIN = 1, FREQ_QOS_MAX = 2, }; struct freq_qos_request { enum freq_qos_req_type type; struct plist_node pnode; struct freq_constraints *qos; u64 android_oem_data1; }; enum dev_pm_qos_req_type { DEV_PM_QOS_RESUME_LATENCY = 1, DEV_PM_QOS_LATENCY_TOLERANCE = 2, DEV_PM_QOS_MIN_FREQUENCY = 3, DEV_PM_QOS_MAX_FREQUENCY = 4, DEV_PM_QOS_FLAGS = 5, }; struct dev_pm_qos_request { enum dev_pm_qos_req_type type; union { struct plist_node pnode; struct pm_qos_flags_request flr; struct freq_qos_request freq; } data; struct device *dev; }; enum reboot_mode { REBOOT_UNDEFINED = -1, REBOOT_COLD = 0, REBOOT_WARM = 1, REBOOT_HARD = 2, REBOOT_SOFT = 3, REBOOT_GPIO = 4, }; enum { UNAME26 = 131072, ADDR_NO_RANDOMIZE = 262144, FDPIC_FUNCPTRS = 524288, MMAP_PAGE_ZERO = 1048576, ADDR_COMPAT_LAYOUT = 2097152, READ_IMPLIES_EXEC = 4194304, ADDR_LIMIT_32BIT = 8388608, SHORT_INODE = 16777216, WHOLE_SECONDS = 33554432, STICKY_TIMEOUTS = 67108864, ADDR_LIMIT_3GB = 134217728, }; typedef bool (*stack_trace_consume_fn)(void *, unsigned long); struct kernel_clone_args { u64 flags; int __attribute__((btf_type_tag("user"))) *pidfd; int __attribute__((btf_type_tag("user"))) *child_tid; int __attribute__((btf_type_tag("user"))) *parent_tid; const char *name; int exit_signal; u32 kthread: 1; u32 io_thread: 1; u32 user_worker: 1; u32 no_files: 1; unsigned long stack; unsigned long stack_size; unsigned long tls; pid_t *set_tid; size_t set_tid_size; int cgroup; int idle; int (*fn)(void *); void *fn_arg; struct cgroup *cgrp; struct css_set *cset; }; struct wchan_info { unsigned long pc; int count; }; typedef __u16 Elf32_Half; typedef __u32 Elf32_Addr; typedef __u32 Elf32_Off; struct elf32_hdr { unsigned char e_ident[16]; Elf32_Half e_type; Elf32_Half e_machine; Elf32_Word e_version; Elf32_Addr e_entry; Elf32_Off e_phoff; Elf32_Off e_shoff; Elf32_Word e_flags; Elf32_Half e_ehsize; Elf32_Half e_phentsize; Elf32_Half e_phnum; Elf32_Half e_shentsize; Elf32_Half e_shnum; Elf32_Half e_shstrndx; }; 
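/*
 * Illustrative sketch, not part of the generated dump: one way a BPF
 * program might consume the types in this header via libbpf and CO-RE.
 * Assumes clang's BPF target plus libbpf's <bpf/bpf_helpers.h>; the
 * program name and tracepoint are examples only. The context type,
 * struct trace_event_raw_sys_enter, is defined further below in this
 * header.
 *
 *   #include "vmlinux.h"
 *   #include <bpf/bpf_helpers.h>
 *
 *   SEC("tracepoint/raw_syscalls/sys_enter")
 *   int handle_sys_enter(struct trace_event_raw_sys_enter *ctx)
 *   {
 *       long id = ctx->id;                  // syscall number from the tracepoint record
 *       bpf_printk("syscall id=%ld", id);   // emit a debug line to trace_pipe
 *       return 0;
 *   }
 *
 *   char LICENSE[] SEC("license") = "GPL";
 */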
struct arch_elf_state { int flags; }; typedef void (*btf_trace_sys_enter)(void *, struct pt_regs *, long); typedef void (*btf_trace_sys_exit)(void *, struct pt_regs *, long); struct pt_regs_offset { const char *name; int offset; }; struct user_regset; struct user_regset_view { const char *name; const struct user_regset *regsets; unsigned int n; u32 e_flags; u16 e_machine; u8 ei_osabi; }; struct membuf; typedef int user_regset_get2_fn(struct task_struct *, const struct user_regset *, struct membuf); typedef int user_regset_set_fn(struct task_struct *, const struct user_regset *, unsigned int, unsigned int, const void *, const void __attribute__((btf_type_tag("user"))) *); typedef int user_regset_active_fn(struct task_struct *, const struct user_regset *); typedef int user_regset_writeback_fn(struct task_struct *, const struct user_regset *, int); struct user_regset { user_regset_get2_fn *regset_get; user_regset_set_fn *set; user_regset_active_fn *active; user_regset_writeback_fn *writeback; unsigned int n; unsigned int size; unsigned int align; unsigned int bias; unsigned int core_note_type; }; struct membuf { void *p; size_t left; }; enum compat_regset { REGSET_COMPAT_GPR = 0, REGSET_COMPAT_VFP = 1, }; enum ptrace_syscall_dir { PTRACE_SYSCALL_ENTER = 0, PTRACE_SYSCALL_EXIT = 1, }; enum { TRACE_EVENT_FL_FILTERED = 1, TRACE_EVENT_FL_CAP_ANY = 2, TRACE_EVENT_FL_NO_SET_FILTER = 4, TRACE_EVENT_FL_IGNORE_ENABLE = 8, TRACE_EVENT_FL_TRACEPOINT = 16, TRACE_EVENT_FL_DYNAMIC = 32, TRACE_EVENT_FL_KPROBE = 64, TRACE_EVENT_FL_UPROBE = 128, TRACE_EVENT_FL_EPROBE = 256, TRACE_EVENT_FL_FPROBE = 512, TRACE_EVENT_FL_CUSTOM = 1024, }; enum bp_type_idx { TYPE_INST = 0, TYPE_DATA = 1, TYPE_MAX = 2, }; enum { HW_BREAKPOINT_EMPTY = 0, HW_BREAKPOINT_R = 1, HW_BREAKPOINT_W = 2, HW_BREAKPOINT_RW = 3, HW_BREAKPOINT_X = 4, HW_BREAKPOINT_INVALID = 7, }; enum { HW_BREAKPOINT_LEN_1 = 1, HW_BREAKPOINT_LEN_2 = 2, HW_BREAKPOINT_LEN_3 = 3, HW_BREAKPOINT_LEN_4 = 4, HW_BREAKPOINT_LEN_5 = 5, HW_BREAKPOINT_LEN_6 = 6, HW_BREAKPOINT_LEN_7 = 7, HW_BREAKPOINT_LEN_8 = 8, }; enum perf_type_id { PERF_TYPE_HARDWARE = 0, PERF_TYPE_SOFTWARE = 1, PERF_TYPE_TRACEPOINT = 2, PERF_TYPE_HW_CACHE = 3, PERF_TYPE_RAW = 4, PERF_TYPE_BREAKPOINT = 5, PERF_TYPE_MAX = 6, }; typedef u32 compat_ulong_t; struct trace_event_raw_sys_enter { struct trace_entry ent; long id; unsigned long args[6]; char __data[0]; }; struct trace_event_raw_sys_exit { struct trace_entry ent; long id; long ret; char __data[0]; }; struct seccomp_data { int nr; __u32 arch; __u64 instruction_pointer; __u64 args[6]; }; struct user_sve_header { __u32 size; __u32 max_size; __u16 vl; __u16 max_vl; __u16 flags; __u16 __reserved; }; struct trace_event_data_offsets_sys_enter {}; struct trace_event_data_offsets_sys_exit {}; struct user_za_header { __u32 size; __u32 max_size; __u16 vl; __u16 max_vl; __u16 flags; __u16 __reserved; }; struct user_pac_mask { __u64 data_mask; __u64 insn_mask; }; struct hypervisor_ops { void (*page_relinquish)(struct page *); void (*post_page_relinquish_tlb_inv)(); }; struct mpidr_hash { u64 mask; u32 shift_aff[4]; u32 bits; }; typedef phys_addr_t resource_size_t; struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; unsigned long desc; struct resource *parent; struct resource *sibling; struct resource *child; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; enum memblock_flags { MEMBLOCK_NONE = 0, MEMBLOCK_HOTPLUG = 1, MEMBLOCK_MIRROR = 2, 
MEMBLOCK_NOMAP = 4, MEMBLOCK_DRIVER_MANAGED = 8, }; struct cpu { int node_id; int hotpluggable; struct device dev; }; struct memblock_region; struct memblock_type { unsigned long cnt; unsigned long max; phys_addr_t total_size; struct memblock_region *regions; char *name; }; struct memblock_region { phys_addr_t base; phys_addr_t size; enum memblock_flags flags; }; struct cpu_operations { const char *name; int (*cpu_init)(unsigned int); int (*cpu_prepare)(unsigned int); int (*cpu_boot)(unsigned int); void (*cpu_postboot)(); bool (*cpu_can_disable)(unsigned int); int (*cpu_disable)(unsigned int); void (*cpu_die)(unsigned int); int (*cpu_kill)(unsigned int); }; struct atomic_notifier_head { spinlock_t lock; struct notifier_block __attribute__((btf_type_tag("rcu"))) *head; }; struct siginfo { union { struct { int si_signo; int si_errno; int si_code; union __sifields _sifields; }; int _si_pad[32]; }; }; struct sigaltstack { void __attribute__((btf_type_tag("user"))) *ss_sp; int ss_flags; __kernel_size_t ss_size; }; typedef struct sigaltstack stack_t; struct sigcontext { __u64 fault_address; __u64 regs[31]; __u64 sp; __u64 pc; __u64 pstate; long: 64; __u8 __reserved[4096]; }; struct ucontext { unsigned long uc_flags; struct ucontext *uc_link; stack_t uc_stack; sigset_t uc_sigmask; __u8 __unused[120]; long: 64; struct sigcontext uc_mcontext; }; struct rt_sigframe { struct siginfo info; struct ucontext uc; }; struct _aarch64_ctx { __u32 magic; __u32 size; }; struct fpsimd_context { struct _aarch64_ctx head; __u32 fpsr; __u32 fpcr; __uint128_t vregs[32]; }; struct sve_context { struct _aarch64_ctx head; __u16 vl; __u16 flags; __u16 __reserved[2]; }; struct tpidr2_context { struct _aarch64_ctx head; __u64 tpidr2; }; struct za_context { struct _aarch64_ctx head; __u16 vl; __u16 __reserved[3]; }; struct zt_context { struct _aarch64_ctx head; __u16 nregs; __u16 __reserved[3]; }; struct extra_context { struct _aarch64_ctx head; __u64 datap; __u32 size; __u32 __reserved[3]; }; struct frame_record { u64 fp; u64 lr; }; struct user_ctxs { struct fpsimd_context __attribute__((btf_type_tag("user"))) *fpsimd; u32 fpsimd_size; struct sve_context __attribute__((btf_type_tag("user"))) *sve; u32 sve_size; struct tpidr2_context __attribute__((btf_type_tag("user"))) *tpidr2; u32 tpidr2_size; struct za_context __attribute__((btf_type_tag("user"))) *za; u32 za_size; struct zt_context __attribute__((btf_type_tag("user"))) *zt; u32 zt_size; }; struct ksignal { struct k_sigaction ka; kernel_siginfo_t info; int sig; }; struct rt_sigframe_user_layout { struct rt_sigframe __attribute__((btf_type_tag("user"))) *sigframe; struct frame_record __attribute__((btf_type_tag("user"))) *next_frame; unsigned long size; unsigned long limit; unsigned long fpsimd_offset; unsigned long esr_offset; unsigned long sve_offset; unsigned long tpidr2_offset; unsigned long za_offset; unsigned long zt_offset; unsigned long extra_offset; unsigned long end_offset; }; typedef struct siginfo siginfo_t; struct esr_context { struct _aarch64_ctx head; __u64 esr; }; typedef long (*syscall_fn_t)(const struct pt_regs *); enum { PER_LINUX = 0, PER_LINUX_32BIT = 8388608, PER_LINUX_FDPIC = 524288, PER_SVR4 = 68157441, PER_SVR3 = 83886082, PER_SCOSVR3 = 117440515, PER_OSR5 = 100663299, PER_WYSEV386 = 83886084, PER_ISCR4 = 67108869, PER_BSD = 6, PER_SUNOS = 67108870, PER_XENIX = 83886087, PER_LINUX32 = 8, PER_LINUX32_3GB = 134217736, PER_IRIX32 = 67108873, PER_IRIXN32 = 67108874, PER_IRIX64 = 67108875, PER_RISCOS = 12, PER_SOLARIS = 67108877, PER_UW7 = 68157454, 
PER_OSF4 = 15, PER_HPUX = 16, PER_MASK = 255, }; struct frame_tail { struct frame_tail __attribute__((btf_type_tag("user"))) *fp; unsigned long lr; }; struct compat_frame_tail { compat_uptr_t fp; u32 sp; u32 lr; }; struct unwind_state { unsigned long fp; unsigned long pc; struct llist_node *kr_cur; struct task_struct *task; struct stack_info stack; struct stack_info *stacks; int nr_stacks; }; typedef bool pstate_check_t(unsigned long); struct sys64_hook { unsigned long esr_mask; unsigned long esr_val; void (*handler)(unsigned long, struct pt_regs *); }; enum lockdep_ok { LOCKDEP_STILL_OK = 0, LOCKDEP_NOW_UNRELIABLE = 1, }; enum siginfo_layout { SIL_KILL = 0, SIL_TIMER = 1, SIL_POLL = 2, SIL_FAULT = 3, SIL_FAULT_TRAPNO = 4, SIL_FAULT_MCEERR = 5, SIL_FAULT_BNDERR = 6, SIL_FAULT_PKUERR = 7, SIL_FAULT_PERF_EVENT = 8, SIL_CHLD = 9, SIL_RT = 10, SIL_SYS = 11, }; enum die_val { DIE_UNUSED = 0, DIE_OOPS = 1, }; enum ftr_type { FTR_EXACT = 0, FTR_LOWER_SAFE = 1, FTR_HIGHER_SAFE = 2, FTR_HIGHER_OR_ZERO_SAFE = 3, }; enum bug_trap_type { BUG_TRAP_TYPE_NONE = 0, BUG_TRAP_TYPE_WARN = 1, BUG_TRAP_TYPE_BUG = 2, }; struct arm64_ftr_override; struct arm64_ftr_bits; struct arm64_ftr_reg { const char *name; u64 strict_mask; u64 user_mask; u64 sys_val; u64 user_val; struct arm64_ftr_override *override; const struct arm64_ftr_bits *ftr_bits; }; struct arm64_ftr_override { u64 val; u64 mask; }; struct arm64_ftr_bits { bool sign; bool visible; bool strict; enum ftr_type type; u8 shift; u8 width; s64 safe_val; }; struct vdso_timestamp { u64 sec; u64 nsec; }; struct timens_offset { s64 sec; u64 nsec; }; struct arch_vdso_data {}; struct vdso_data { u32 seq; s32 clock_mode; u64 cycle_last; u64 mask; u32 mult; u32 shift; union { struct vdso_timestamp basetime[12]; struct timens_offset offset[12]; }; s32 tz_minuteswest; s32 tz_dsttime; u32 hrtimer_res; u32 __unused; struct arch_vdso_data arch_data; }; struct vm_special_mapping; struct vdso_abi_info { const char *name; const char *vdso_code_start; const char *vdso_code_end; unsigned long vdso_pages; struct vm_special_mapping *dm; struct vm_special_mapping *cm; }; struct vm_special_mapping { const char *name; struct page **pages; vm_fault_t (*fault)(const struct vm_special_mapping *, struct vm_area_struct *, struct vm_fault *); int (*mremap)(const struct vm_special_mapping *, struct vm_area_struct *); }; struct timens_offsets { struct timespec64 monotonic; struct timespec64 boottime; }; struct time_namespace { struct user_namespace *user_ns; struct ucounts *ucounts; struct ns_common ns; struct timens_offsets offsets; struct page *vvar_page; bool frozen_offsets; }; enum vdso_abi { VDSO_ABI_AA64 = 0, VDSO_ABI_AA32 = 1, }; enum aarch32_map { AA32_MAP_VECTORS = 0, AA32_MAP_SIGPAGE = 1, AA32_MAP_VVAR = 2, AA32_MAP_VDSO = 3, }; enum vvar_pages { VVAR_DATA_PAGE_OFFSET = 0, VVAR_TIMENS_PAGE_OFFSET = 1, VVAR_NR_PAGES = 2, }; enum vm_fault_reason { VM_FAULT_OOM = 1, VM_FAULT_SIGBUS = 2, VM_FAULT_MAJOR = 4, VM_FAULT_HWPOISON = 16, VM_FAULT_HWPOISON_LARGE = 32, VM_FAULT_SIGSEGV = 64, VM_FAULT_NOPAGE = 256, VM_FAULT_LOCKED = 512, VM_FAULT_RETRY = 1024, VM_FAULT_FALLBACK = 2048, VM_FAULT_DONE_COW = 4096, VM_FAULT_NEEDDSYNC = 8192, VM_FAULT_COMPLETED = 16384, VM_FAULT_HINDEX_MASK = 983040, }; enum aarch64_map { AA64_MAP_VVAR = 0, AA64_MAP_VDSO = 1, }; struct maple_enode; struct maple_alloc; struct ma_state { struct maple_tree *tree; unsigned long index; unsigned long last; struct maple_enode *node; unsigned long min; unsigned long max; struct maple_alloc *alloc; unsigned char depth; 
unsigned char offset; unsigned char mas_flags; }; struct vma_iterator { struct ma_state mas; }; struct maple_alloc { unsigned long total; unsigned char node_count; unsigned int request_count; struct maple_alloc *slot[30]; }; typedef unsigned int zap_flags_t; struct zap_details { struct folio *single_folio; bool even_cows; zap_flags_t zap_flags; }; struct return_address_data { unsigned int level; void *addr; }; struct cpuinfo_32bit { u32 reg_id_dfr0; u32 reg_id_dfr1; u32 reg_id_isar0; u32 reg_id_isar1; u32 reg_id_isar2; u32 reg_id_isar3; u32 reg_id_isar4; u32 reg_id_isar5; u32 reg_id_isar6; u32 reg_id_mmfr0; u32 reg_id_mmfr1; u32 reg_id_mmfr2; u32 reg_id_mmfr3; u32 reg_id_mmfr4; u32 reg_id_mmfr5; u32 reg_id_pfr0; u32 reg_id_pfr1; u32 reg_id_pfr2; u32 reg_mvfr0; u32 reg_mvfr1; u32 reg_mvfr2; }; struct cpuinfo_arm64 { struct cpu cpu; struct kobject kobj; u64 reg_ctr; u64 reg_cntfrq; u64 reg_dczid; u64 reg_midr; u64 reg_revidr; u64 reg_gmid; u64 reg_smidr; u64 reg_id_aa64dfr0; u64 reg_id_aa64dfr1; u64 reg_id_aa64isar0; u64 reg_id_aa64isar1; u64 reg_id_aa64isar2; u64 reg_id_aa64mmfr0; u64 reg_id_aa64mmfr1; u64 reg_id_aa64mmfr2; u64 reg_id_aa64mmfr3; u64 reg_id_aa64pfr0; u64 reg_id_aa64pfr1; u64 reg_id_aa64zfr0; u64 reg_id_aa64smfr0; struct cpuinfo_32bit aarch32; u64 reg_zcr; u64 reg_smcr; }; struct kobj_attribute { struct attribute attr; ssize_t (*show)(struct kobject *, struct kobj_attribute *, char *); ssize_t (*store)(struct kobject *, struct kobj_attribute *, const char *, size_t); }; struct __ftr_reg_entry { u32 sys_id; struct arm64_ftr_reg *reg; }; struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t); }; enum aarch64_insn_imm_type { AARCH64_INSN_IMM_ADR = 0, AARCH64_INSN_IMM_26 = 1, AARCH64_INSN_IMM_19 = 2, AARCH64_INSN_IMM_16 = 3, AARCH64_INSN_IMM_14 = 4, AARCH64_INSN_IMM_12 = 5, AARCH64_INSN_IMM_9 = 6, AARCH64_INSN_IMM_7 = 7, AARCH64_INSN_IMM_6 = 8, AARCH64_INSN_IMM_S = 9, AARCH64_INSN_IMM_R = 10, AARCH64_INSN_IMM_N = 11, AARCH64_INSN_IMM_MAX = 12, }; enum aarch64_insn_register_type { AARCH64_INSN_REGTYPE_RT = 0, AARCH64_INSN_REGTYPE_RN = 1, AARCH64_INSN_REGTYPE_RT2 = 2, AARCH64_INSN_REGTYPE_RM = 3, AARCH64_INSN_REGTYPE_RD = 4, AARCH64_INSN_REGTYPE_RA = 5, AARCH64_INSN_REGTYPE_RS = 6, }; enum mitigation_state { SPECTRE_UNAFFECTED = 0, SPECTRE_MITIGATED = 1, SPECTRE_VULNERABLE = 2, }; enum kvm_mode { KVM_MODE_DEFAULT = 0, KVM_MODE_PROTECTED = 1, KVM_MODE_NV = 2, KVM_MODE_NONE = 3, }; enum arm64_bp_harden_el1_vectors { EL1_VECTOR_BHB_LOOP = 0, EL1_VECTOR_BHB_FW = 1, EL1_VECTOR_BHB_CLEAR_INSN = 2, EL1_VECTOR_KPTI = 3, }; enum fixed_addresses { FIX_HOLE = 0, FIX_FDT_END = 1, FIX_FDT = 514, FIX_EARLYCON_MEM_BASE = 515, FIX_TEXT_POKE0 = 516, FIX_ENTRY_TRAMP_TEXT4 = 517, FIX_ENTRY_TRAMP_TEXT3 = 518, FIX_ENTRY_TRAMP_TEXT2 = 519, FIX_ENTRY_TRAMP_TEXT1 = 520, __end_of_permanent_fixed_addresses = 521, FIX_BTMAP_END = 521, FIX_BTMAP_BEGIN = 968, FIX_PTE = 969, FIX_PMD = 970, FIX_PUD = 971, FIX_PGD = 972, __end_of_fixed_addresses = 973, }; enum pageflags { PG_locked = 0, PG_writeback = 1, PG_referenced = 2, PG_uptodate = 3, PG_dirty = 4, PG_lru = 5, PG_head = 6, PG_waiters = 7, PG_active = 8, PG_workingset = 9, PG_error = 10, PG_slab = 11, PG_owner_priv_1 = 12, PG_arch_1 = 13, PG_reserved = 14, PG_private = 15, PG_private_2 = 16, PG_mappedtodisk = 17, PG_reclaim = 18, PG_swapbacked = 19, PG_unevictable = 20, PG_mlocked = 21, PG_arch_2 = 22, PG_arch_3 = 
23, PG_oem_reserved_1 = 24, PG_oem_reserved_2 = 25, PG_oem_reserved_3 = 26, PG_oem_reserved_4 = 27, __NR_PAGEFLAGS = 28, PG_readahead = 18, PG_anon_exclusive = 17, PG_checked = 12, PG_swapcache = 12, PG_fscache = 16, PG_pinned = 12, PG_savepinned = 4, PG_foreign = 12, PG_xen_remapped = 12, PG_isolated = 18, PG_reported = 3, PG_vmemmap_self_hosted = 12, PG_has_hwpoisoned = 10, PG_large_rmappable = 9, }; enum { CAP_HWCAP = 1, CAP_COMPAT_HWCAP = 2, CAP_COMPAT_HWCAP2 = 3, }; typedef int (*cmp_func_t)(const void *, const void *); typedef void kpti_remap_fn(int, int, phys_addr_t, unsigned long); typedef void ttbr_replace_func(phys_addr_t); typedef int (*cpu_stop_fn_t)(void *); struct alt_instr; struct alt_region { struct alt_instr *begin; struct alt_instr *end; }; struct alt_instr { s32 orig_offset; s32 alt_offset; u16 cpucap; u8 orig_len; u8 alt_len; }; enum aarch64_insn_hint_cr_op { AARCH64_INSN_HINT_NOP = 0, AARCH64_INSN_HINT_YIELD = 32, AARCH64_INSN_HINT_WFE = 64, AARCH64_INSN_HINT_WFI = 96, AARCH64_INSN_HINT_SEV = 128, AARCH64_INSN_HINT_SEVL = 160, AARCH64_INSN_HINT_XPACLRI = 224, AARCH64_INSN_HINT_PACIA_1716 = 256, AARCH64_INSN_HINT_PACIB_1716 = 320, AARCH64_INSN_HINT_AUTIA_1716 = 384, AARCH64_INSN_HINT_AUTIB_1716 = 448, AARCH64_INSN_HINT_PACIAZ = 768, AARCH64_INSN_HINT_PACIASP = 800, AARCH64_INSN_HINT_PACIBZ = 832, AARCH64_INSN_HINT_PACIBSP = 864, AARCH64_INSN_HINT_AUTIAZ = 896, AARCH64_INSN_HINT_AUTIASP = 928, AARCH64_INSN_HINT_AUTIBZ = 960, AARCH64_INSN_HINT_AUTIBSP = 992, AARCH64_INSN_HINT_ESB = 512, AARCH64_INSN_HINT_PSB = 544, AARCH64_INSN_HINT_TSB = 576, AARCH64_INSN_HINT_CSDB = 640, AARCH64_INSN_HINT_CLEARBHB = 704, AARCH64_INSN_HINT_BTI = 1024, AARCH64_INSN_HINT_BTIC = 1088, AARCH64_INSN_HINT_BTIJ = 1152, AARCH64_INSN_HINT_BTIJC = 1216, }; typedef __u64 Elf64_Off; struct elf64_hdr { unsigned char e_ident[16]; Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; Elf64_Addr e_entry; Elf64_Off e_phoff; Elf64_Off e_shoff; Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; }; struct elf64_shdr { Elf64_Word sh_name; Elf64_Word sh_type; Elf64_Xword sh_flags; Elf64_Addr sh_addr; Elf64_Off sh_offset; Elf64_Xword sh_size; Elf64_Word sh_link; Elf64_Word sh_info; Elf64_Xword sh_addralign; Elf64_Xword sh_entsize; }; typedef struct elf64_shdr Elf64_Shdr; typedef struct elf64_hdr Elf64_Ehdr; typedef void (*alternative_cb_t)(struct alt_instr *, __le32 *, __le32 *, int); enum cache_type { CACHE_TYPE_NOCACHE = 0, CACHE_TYPE_INST = 1, CACHE_TYPE_DATA = 2, CACHE_TYPE_SEPARATE = 3, CACHE_TYPE_UNIFIED = 4, }; struct cacheinfo; struct cpu_cacheinfo { struct cacheinfo *info_list; unsigned int num_levels; unsigned int num_leaves; bool cpu_map_populated; bool early_ci_levels; }; struct cacheinfo { unsigned int id; enum cache_type type; unsigned int level; unsigned int coherency_line_size; unsigned int number_of_sets; unsigned int ways_of_associativity; unsigned int physical_line_partition; unsigned int size; cpumask_t shared_cpu_map; unsigned int attributes; void *fw_token; bool disable_sysfs; void *priv; }; struct secondary_data { struct task_struct *task; long status; }; struct kvm_hyp_req { u8 type; union { struct { u8 dest; int nr_pages; int sz_alloc; } mem; struct { unsigned long guest_ipa; size_t size; } map; }; }; struct msi_dev_domain { struct xarray store; struct irq_domain *domain; }; struct platform_msi_priv_data; struct msi_device_data { unsigned long properties; struct 
platform_msi_priv_data *platform_data; struct mutex mutex; struct msi_dev_domain __domains[2]; unsigned long __iter_idx; }; struct msi_desc; struct irq_common_data { unsigned int state_use_accessors; void *handler_data; struct msi_desc *msi_desc; cpumask_var_t affinity; cpumask_var_t effective_affinity; unsigned int ipi_offset; }; struct irq_chip; struct irq_data { u32 mask; unsigned int irq; unsigned long hwirq; struct irq_common_data *common; struct irq_chip *chip; struct irq_domain *domain; struct irq_data *parent_data; void *chip_data; }; struct irq_desc; typedef void (*irq_flow_handler_t)(struct irq_desc *); struct irqaction; struct irq_affinity_notify; struct irq_desc { struct irq_common_data irq_common_data; struct irq_data irq_data; unsigned int __attribute__((btf_type_tag("percpu"))) *kstat_irqs; irq_flow_handler_t handle_irq; struct irqaction *action; unsigned int status_use_accessors; unsigned int core_internal_state__do_not_mess_with_it; unsigned int depth; unsigned int wake_depth; unsigned int tot_count; unsigned int irq_count; unsigned long last_unhandled; unsigned int irqs_unhandled; atomic_t threads_handled; int threads_handled_last; raw_spinlock_t lock; struct cpumask *percpu_enabled; const struct cpumask *percpu_affinity; const struct cpumask *affinity_hint; struct irq_affinity_notify *affinity_notify; unsigned long threads_oneshot; atomic_t threads_active; wait_queue_head_t wait_for_threads; unsigned int nr_actions; unsigned int no_suspend_depth; unsigned int cond_suspend_depth; unsigned int force_resume_depth; struct proc_dir_entry *dir; struct callback_head rcu; struct kobject kobj; struct mutex request_mutex; int parent_irq; struct module *owner; const char *name; struct hlist_node resend_node; u64 android_vendor_data1; long: 64; long: 64; long: 64; long: 64; }; struct pci_msi_desc { union { u32 msi_mask; u32 msix_ctrl; }; struct { u8 is_msix: 1; u8 multiple: 3; u8 multi_cap: 3; u8 can_mask: 1; u8 is_64: 1; u8 is_virtual: 1; unsigned int default_irq; } msi_attrib; union { u8 mask_pos; void *mask_base; }; }; union msi_domain_cookie { u64 value; void *ptr; void *iobase; }; union msi_instance_cookie { u64 value; void *ptr; }; struct msi_desc_data { union msi_domain_cookie dcookie; union msi_instance_cookie icookie; }; struct arch_msi_msg_addr_lo { u32 address_lo; }; typedef struct arch_msi_msg_addr_lo arch_msi_msg_addr_lo_t; struct arch_msi_msg_addr_hi { u32 address_hi; }; typedef struct arch_msi_msg_addr_hi arch_msi_msg_addr_hi_t; struct arch_msi_msg_data { u32 data; }; typedef struct arch_msi_msg_data arch_msi_msg_data_t; struct msi_msg { union { u32 address_lo; arch_msi_msg_addr_lo_t arch_addr_lo; }; union { u32 address_hi; arch_msi_msg_addr_hi_t arch_addr_hi; }; union { u32 data; arch_msi_msg_data_t arch_data; }; }; struct irq_affinity_desc; struct msi_desc { unsigned int irq; unsigned int nvec_used; struct device *dev; struct msi_msg msg; struct irq_affinity_desc *affinity; const void *iommu_cookie; struct device_attribute *sysfs_attrs; void (*write_msi_msg)(struct msi_desc *, void *); void *write_msi_msg_data; u16 msi_index; union { struct pci_msi_desc pci; struct msi_desc_data data; }; }; struct irq_affinity_desc { struct cpumask mask; unsigned int is_managed: 1; }; enum irqchip_irq_state { IRQCHIP_STATE_PENDING = 0, IRQCHIP_STATE_ACTIVE = 1, IRQCHIP_STATE_MASKED = 2, IRQCHIP_STATE_LINE_LEVEL = 3, }; struct irq_chip { const char *name; unsigned int (*irq_startup)(struct irq_data *); void (*irq_shutdown)(struct irq_data *); void (*irq_enable)(struct irq_data *); 
void (*irq_disable)(struct irq_data *); void (*irq_ack)(struct irq_data *); void (*irq_mask)(struct irq_data *); void (*irq_mask_ack)(struct irq_data *); void (*irq_unmask)(struct irq_data *); void (*irq_eoi)(struct irq_data *); int (*irq_set_affinity)(struct irq_data *, const struct cpumask *, bool); int (*irq_retrigger)(struct irq_data *); int (*irq_set_type)(struct irq_data *, unsigned int); int (*irq_set_wake)(struct irq_data *, unsigned int); void (*irq_bus_lock)(struct irq_data *); void (*irq_bus_sync_unlock)(struct irq_data *); void (*irq_suspend)(struct irq_data *); void (*irq_resume)(struct irq_data *); void (*irq_pm_shutdown)(struct irq_data *); void (*irq_calc_mask)(struct irq_data *); void (*irq_print_chip)(struct irq_data *, struct seq_file *); int (*irq_request_resources)(struct irq_data *); void (*irq_release_resources)(struct irq_data *); void (*irq_compose_msi_msg)(struct irq_data *, struct msi_msg *); void (*irq_write_msi_msg)(struct irq_data *, struct msi_msg *); int (*irq_get_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool *); int (*irq_set_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool); int (*irq_set_vcpu_affinity)(struct irq_data *, void *); void (*ipi_send_single)(struct irq_data *, unsigned int); void (*ipi_send_mask)(struct irq_data *, const struct cpumask *); int (*irq_nmi_setup)(struct irq_data *); void (*irq_nmi_teardown)(struct irq_data *); unsigned long flags; }; enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2, }; typedef enum irqreturn irqreturn_t; typedef irqreturn_t (*irq_handler_t)(int, void *); struct irqaction { irq_handler_t handler; void *dev_id; void __attribute__((btf_type_tag("percpu"))) *percpu_dev_id; struct irqaction *next; irq_handler_t thread_fn; struct task_struct *thread; struct irqaction *secondary; unsigned int irq; unsigned int flags; unsigned long thread_flags; unsigned long thread_mask; const char *name; struct proc_dir_entry *dir; long: 64; long: 64; long: 64; long: 64; }; struct irq_affinity_notify { unsigned int irq; struct kref kref; struct work_struct work; void (*notify)(struct irq_affinity_notify *, const cpumask_t *); void (*release)(struct kref *); }; enum ipi_msg_type { IPI_RESCHEDULE = 0, IPI_CALL_FUNC = 1, IPI_CPU_STOP = 2, IPI_CPU_CRASH_STOP = 3, IPI_TIMER = 4, IPI_IRQ_WORK = 5, IPI_WAKEUP = 6, NR_IPI = 7, }; enum { IRQ_TYPE_NONE = 0, IRQ_TYPE_EDGE_RISING = 1, IRQ_TYPE_EDGE_FALLING = 2, IRQ_TYPE_EDGE_BOTH = 3, IRQ_TYPE_LEVEL_HIGH = 4, IRQ_TYPE_LEVEL_LOW = 8, IRQ_TYPE_LEVEL_MASK = 12, IRQ_TYPE_SENSE_MASK = 15, IRQ_TYPE_DEFAULT = 15, IRQ_TYPE_PROBE = 16, IRQ_LEVEL = 256, IRQ_PER_CPU = 512, IRQ_NOPROBE = 1024, IRQ_NOREQUEST = 2048, IRQ_NOAUTOEN = 4096, IRQ_NO_BALANCING = 8192, IRQ_MOVE_PCNTXT = 16384, IRQ_NESTED_THREAD = 32768, IRQ_NOTHREAD = 65536, IRQ_PER_CPU_DEVID = 131072, IRQ_IS_POLLED = 262144, IRQ_DISABLE_UNLAZY = 524288, IRQ_HIDDEN = 1048576, IRQ_NO_DEBUG = 2097152, }; enum scale_freq_source { SCALE_FREQ_SOURCE_CPUFREQ = 0, SCALE_FREQ_SOURCE_ARCH = 1, SCALE_FREQ_SOURCE_CPPC = 2, SCALE_FREQ_SOURCE_VIRT = 3, }; struct scale_freq_data { enum scale_freq_source source; void (*set_freq_scale)(); }; enum cpufreq_table_sorting { CPUFREQ_TABLE_UNSORTED = 0, CPUFREQ_TABLE_SORTED_ASCENDING = 1, CPUFREQ_TABLE_SORTED_DESCENDING = 2, }; struct cpufreq_cpuinfo { unsigned int max_freq; unsigned int min_freq; unsigned int transition_latency; }; struct clk; struct cpufreq_governor; struct cpufreq_frequency_table; struct cpufreq_stats; struct thermal_cooling_device; struct cpufreq_policy 
{ cpumask_var_t cpus; cpumask_var_t related_cpus; cpumask_var_t real_cpus; unsigned int shared_type; unsigned int cpu; struct clk *clk; struct cpufreq_cpuinfo cpuinfo; unsigned int min; unsigned int max; unsigned int cur; unsigned int suspend_freq; unsigned int policy; unsigned int last_policy; struct cpufreq_governor *governor; void *governor_data; char last_governor[16]; struct work_struct update; struct freq_constraints constraints; struct freq_qos_request *min_freq_req; struct freq_qos_request *max_freq_req; struct cpufreq_frequency_table *freq_table; enum cpufreq_table_sorting freq_table_sorted; struct list_head policy_list; struct kobject kobj; struct completion kobj_unregister; struct rw_semaphore rwsem; bool fast_switch_possible; bool fast_switch_enabled; bool strict_target; bool efficiencies_available; unsigned int transition_delay_us; bool dvfs_possible_from_any_cpu; bool boost_enabled; unsigned int cached_target_freq; unsigned int cached_resolved_idx; bool transition_ongoing; spinlock_t transition_lock; wait_queue_head_t transition_wait; struct task_struct *transition_task; struct cpufreq_stats *stats; void *driver_data; struct thermal_cooling_device *cdev; struct notifier_block nb_min; struct notifier_block nb_max; }; struct cpufreq_governor { char name[16]; int (*init)(struct cpufreq_policy *); void (*exit)(struct cpufreq_policy *); int (*start)(struct cpufreq_policy *); void (*stop)(struct cpufreq_policy *); void (*limits)(struct cpufreq_policy *); ssize_t (*show_setspeed)(struct cpufreq_policy *, char *); int (*store_setspeed)(struct cpufreq_policy *, unsigned int); struct list_head governor_list; struct module *owner; u8 flags; }; struct cpufreq_frequency_table { unsigned int flags; unsigned int driver_data; unsigned int frequency; }; enum spectre_v4_policy { SPECTRE_V4_POLICY_MITIGATION_DYNAMIC = 0, SPECTRE_V4_POLICY_MITIGATION_ENABLED = 1, SPECTRE_V4_POLICY_MITIGATION_DISABLED = 2, }; struct spectre_v4_param { const char *str; enum spectre_v4_policy policy; }; enum bpf_reg_type { NOT_INIT = 0, SCALAR_VALUE = 1, PTR_TO_CTX = 2, CONST_PTR_TO_MAP = 3, PTR_TO_MAP_VALUE = 4, PTR_TO_MAP_KEY = 5, PTR_TO_STACK = 6, PTR_TO_PACKET_META = 7, PTR_TO_PACKET = 8, PTR_TO_PACKET_END = 9, PTR_TO_FLOW_KEYS = 10, PTR_TO_SOCKET = 11, PTR_TO_SOCK_COMMON = 12, PTR_TO_TCP_SOCK = 13, PTR_TO_TP_BUFFER = 14, PTR_TO_XDP_SOCK = 15, PTR_TO_BTF_ID = 16, PTR_TO_MEM = 17, PTR_TO_BUF = 18, PTR_TO_FUNC = 19, CONST_PTR_TO_DYNPTR = 20, __BPF_REG_TYPE_MAX = 21, PTR_TO_MAP_VALUE_OR_NULL = 260, PTR_TO_SOCKET_OR_NULL = 267, PTR_TO_SOCK_COMMON_OR_NULL = 268, PTR_TO_TCP_SOCK_OR_NULL = 269, PTR_TO_BTF_ID_OR_NULL = 272, __BPF_REG_TYPE_LIMIT = 33554431, }; enum bpf_cgroup_iter_order { BPF_CGROUP_ITER_ORDER_UNSPEC = 0, BPF_CGROUP_ITER_SELF_ONLY = 1, BPF_CGROUP_ITER_DESCENDANTS_PRE = 2, BPF_CGROUP_ITER_DESCENDANTS_POST = 3, BPF_CGROUP_ITER_ANCESTORS_UP = 4, }; enum bpf_iter_task_type { BPF_TASK_ITER_ALL = 0, BPF_TASK_ITER_TID = 1, BPF_TASK_ITER_TGID = 2, }; enum bpf_map_type { BPF_MAP_TYPE_UNSPEC = 0, BPF_MAP_TYPE_HASH = 1, BPF_MAP_TYPE_ARRAY = 2, BPF_MAP_TYPE_PROG_ARRAY = 3, BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4, BPF_MAP_TYPE_PERCPU_HASH = 5, BPF_MAP_TYPE_PERCPU_ARRAY = 6, BPF_MAP_TYPE_STACK_TRACE = 7, BPF_MAP_TYPE_CGROUP_ARRAY = 8, BPF_MAP_TYPE_LRU_HASH = 9, BPF_MAP_TYPE_LRU_PERCPU_HASH = 10, BPF_MAP_TYPE_LPM_TRIE = 11, BPF_MAP_TYPE_ARRAY_OF_MAPS = 12, BPF_MAP_TYPE_HASH_OF_MAPS = 13, BPF_MAP_TYPE_DEVMAP = 14, BPF_MAP_TYPE_SOCKMAP = 15, BPF_MAP_TYPE_CPUMAP = 16, BPF_MAP_TYPE_XSKMAP = 17, BPF_MAP_TYPE_SOCKHASH = 18, 
BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED = 19, BPF_MAP_TYPE_CGROUP_STORAGE = 19, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 21, BPF_MAP_TYPE_QUEUE = 22, BPF_MAP_TYPE_STACK = 23, BPF_MAP_TYPE_SK_STORAGE = 24, BPF_MAP_TYPE_DEVMAP_HASH = 25, BPF_MAP_TYPE_STRUCT_OPS = 26, BPF_MAP_TYPE_RINGBUF = 27, BPF_MAP_TYPE_INODE_STORAGE = 28, BPF_MAP_TYPE_TASK_STORAGE = 29, BPF_MAP_TYPE_BLOOM_FILTER = 30, BPF_MAP_TYPE_USER_RINGBUF = 31, BPF_MAP_TYPE_CGRP_STORAGE = 32, }; enum btf_field_type { BPF_SPIN_LOCK = 1, BPF_TIMER = 2, BPF_KPTR_UNREF = 4, BPF_KPTR_REF = 8, BPF_KPTR = 12, BPF_LIST_HEAD = 16, BPF_LIST_NODE = 32, BPF_RB_ROOT = 64, BPF_RB_NODE = 128, BPF_GRAPH_NODE_OR_ROOT = 240, BPF_REFCOUNT = 256, }; enum arm_smccc_conduit { SMCCC_CONDUIT_NONE = 0, SMCCC_CONDUIT_SMC = 1, SMCCC_CONDUIT_HVC = 2, }; enum bhb_mitigation_bits { BHB_LOOP = 0, BHB_FW = 1, BHB_HW = 2, BHB_INSN = 3, }; enum aarch64_insn_variant { AARCH64_INSN_VARIANT_32BIT = 0, AARCH64_INSN_VARIANT_64BIT = 1, }; enum aarch64_insn_movewide_type { AARCH64_INSN_MOVEWIDE_ZERO = 0, AARCH64_INSN_MOVEWIDE_KEEP = 1, AARCH64_INSN_MOVEWIDE_INVERSE = 2, }; enum aarch64_insn_register { AARCH64_INSN_REG_0 = 0, AARCH64_INSN_REG_1 = 1, AARCH64_INSN_REG_2 = 2, AARCH64_INSN_REG_3 = 3, AARCH64_INSN_REG_4 = 4, AARCH64_INSN_REG_5 = 5, AARCH64_INSN_REG_6 = 6, AARCH64_INSN_REG_7 = 7, AARCH64_INSN_REG_8 = 8, AARCH64_INSN_REG_9 = 9, AARCH64_INSN_REG_10 = 10, AARCH64_INSN_REG_11 = 11, AARCH64_INSN_REG_12 = 12, AARCH64_INSN_REG_13 = 13, AARCH64_INSN_REG_14 = 14, AARCH64_INSN_REG_15 = 15, AARCH64_INSN_REG_16 = 16, AARCH64_INSN_REG_17 = 17, AARCH64_INSN_REG_18 = 18, AARCH64_INSN_REG_19 = 19, AARCH64_INSN_REG_20 = 20, AARCH64_INSN_REG_21 = 21, AARCH64_INSN_REG_22 = 22, AARCH64_INSN_REG_23 = 23, AARCH64_INSN_REG_24 = 24, AARCH64_INSN_REG_25 = 25, AARCH64_INSN_REG_26 = 26, AARCH64_INSN_REG_27 = 27, AARCH64_INSN_REG_28 = 28, AARCH64_INSN_REG_29 = 29, AARCH64_INSN_REG_FP = 29, AARCH64_INSN_REG_30 = 30, AARCH64_INSN_REG_LR = 30, AARCH64_INSN_REG_ZR = 31, AARCH64_INSN_REG_SP = 31, }; enum aarch64_insn_logic_type { AARCH64_INSN_LOGIC_AND = 0, AARCH64_INSN_LOGIC_BIC = 1, AARCH64_INSN_LOGIC_ORR = 2, AARCH64_INSN_LOGIC_ORN = 3, AARCH64_INSN_LOGIC_EOR = 4, AARCH64_INSN_LOGIC_EON = 5, AARCH64_INSN_LOGIC_AND_SETFLAGS = 6, AARCH64_INSN_LOGIC_BIC_SETFLAGS = 7, }; struct bpf_prog_stats { u64_stats_t cnt; u64_stats_t nsecs; u64_stats_t misses; struct u64_stats_sync syncp; long: 64; }; struct bpf_ksym { unsigned long start; unsigned long end; char name[512]; struct list_head lnode; struct latch_tree_node tnode; bool prog; }; struct btf; struct bpf_ctx_arg_aux; struct bpf_trampoline; struct btf_type; struct bpf_jit_poke_descriptor; struct bpf_kfunc_desc_tab; struct bpf_kfunc_btf_tab; struct bpf_prog_ops; struct bpf_map; struct btf_mod_pair; struct bpf_prog_offload; struct bpf_func_info; struct bpf_func_info_aux; struct bpf_line_info; struct bpf_prog_aux { atomic64_t refcnt; u32 used_map_cnt; u32 used_btf_cnt; u32 max_ctx_offset; u32 max_pkt_offset; u32 max_tp_access; u32 stack_depth; u32 id; u32 func_cnt; u32 func_idx; u32 attach_btf_id; u32 ctx_arg_info_size; u32 max_rdonly_access; u32 max_rdwr_access; struct btf *attach_btf; const struct bpf_ctx_arg_aux *ctx_arg_info; struct mutex dst_mutex; struct bpf_prog *dst_prog; struct bpf_trampoline *dst_trampoline; enum bpf_prog_type saved_dst_prog_type; enum bpf_attach_type saved_dst_attach_type; bool verifier_zext; bool dev_bound; bool offload_requested; bool attach_btf_trace; bool func_proto_unreliable; bool 
sleepable; bool tail_call_reachable; bool xdp_has_frags; const struct btf_type *attach_func_proto; const char *attach_func_name; struct bpf_prog **func; void *jit_data; struct bpf_jit_poke_descriptor *poke_tab; struct bpf_kfunc_desc_tab *kfunc_tab; struct bpf_kfunc_btf_tab *kfunc_btf_tab; u32 size_poke_tab; struct bpf_ksym ksym; const struct bpf_prog_ops *ops; struct bpf_map **used_maps; struct mutex used_maps_mutex; struct btf_mod_pair *used_btfs; struct bpf_prog *prog; struct user_struct *user; u64 load_time; u32 verified_insns; int cgroup_atype; struct bpf_map *cgroup_storage[2]; char name[16]; void *security; struct bpf_prog_offload *offload; struct btf *btf; struct bpf_func_info *func_info; struct bpf_func_info_aux *func_info_aux; struct bpf_line_info *linfo; void **jited_linfo; u32 func_info_cnt; u32 nr_linfo; u32 linfo_idx; struct module *mod; u32 num_exentries; struct exception_table_entry *extable; union { struct work_struct work; struct callback_head rcu; }; u64 android_kabi_reserved1; }; struct bpf_ctx_arg_aux { u32 offset; enum bpf_reg_type reg_type; u32 btf_id; }; struct btf_func_model { u8 ret_size; u8 ret_flags; u8 nr_args; u8 arg_size[12]; u8 arg_flags[12]; }; struct ftrace_ops; struct bpf_tramp_image; struct bpf_trampoline { struct hlist_node hlist; struct ftrace_ops *fops; struct mutex mutex; refcount_t refcnt; u32 flags; u64 key; struct { struct btf_func_model model; void *addr; bool ftrace_managed; } func; struct bpf_prog *extension_prog; struct hlist_head progs_hlist[3]; int progs_cnt[3]; struct bpf_tramp_image *cur_image; struct module *mod; u64 android_kabi_reserved1; }; struct bpf_tramp_image { void *image; struct bpf_ksym ksym; struct percpu_ref pcref; void *ip_after_call; void *ip_epilogue; union { struct callback_head rcu; struct work_struct work; }; }; struct btf_type { __u32 name_off; __u32 info; union { __u32 size; __u32 type; }; }; struct bpf_jit_poke_descriptor { void *tailcall_target; void *tailcall_bypass; void *bypass_addr; void *aux; union { struct { struct bpf_map *map; u32 key; } tail_call; }; bool tailcall_target_stable; u8 adj_off; u16 reason; u32 insn_idx; }; struct bpf_map_ops; struct btf_record; struct bpf_map { const struct bpf_map_ops *ops; struct bpf_map *inner_map_meta; void *security; enum bpf_map_type map_type; u32 key_size; u32 value_size; u32 max_entries; u64 map_extra; u32 map_flags; u32 id; struct btf_record *record; int numa_node; u32 btf_key_type_id; u32 btf_value_type_id; u32 btf_vmlinux_value_type_id; struct btf *btf; struct obj_cgroup *objcg; char name[16]; long: 64; long: 64; atomic64_t refcnt; atomic64_t usercnt; union { struct work_struct work; struct callback_head rcu; }; struct mutex freeze_mutex; atomic64_t writecnt; struct { spinlock_t lock; enum bpf_prog_type type; bool jited; bool xdp_has_frags; } owner; bool bypass_spec_v1; bool frozen; bool free_after_mult_rcu_gp; s64 __attribute__((btf_type_tag("percpu"))) *elem_count; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64); union bpf_attr; struct bpf_verifier_env; struct bpf_func_state; struct bpf_iter_seq_info; struct bpf_map_ops { int (*map_alloc_check)(union bpf_attr *); struct bpf_map * (*map_alloc)(union bpf_attr *); void (*map_release)(struct bpf_map *, struct file *); void (*map_free)(struct bpf_map *); int (*map_get_next_key)(struct bpf_map *, void *, void *); void (*map_release_uref)(struct bpf_map *); void * (*map_lookup_elem_sys_only)(struct bpf_map *, void *); int (*map_lookup_batch)(struct 
bpf_map *, const union bpf_attr *, union bpf_attr __attribute__((btf_type_tag("user"))) *); int (*map_lookup_and_delete_elem)(struct bpf_map *, void *, void *, u64); int (*map_lookup_and_delete_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr __attribute__((btf_type_tag("user"))) *); int (*map_update_batch)(struct bpf_map *, struct file *, const union bpf_attr *, union bpf_attr __attribute__((btf_type_tag("user"))) *); int (*map_delete_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr __attribute__((btf_type_tag("user"))) *); void * (*map_lookup_elem)(struct bpf_map *, void *); long (*map_update_elem)(struct bpf_map *, void *, void *, u64); long (*map_delete_elem)(struct bpf_map *, void *); long (*map_push_elem)(struct bpf_map *, void *, u64); long (*map_pop_elem)(struct bpf_map *, void *); long (*map_peek_elem)(struct bpf_map *, void *); void * (*map_lookup_percpu_elem)(struct bpf_map *, void *, u32); void * (*map_fd_get_ptr)(struct bpf_map *, struct file *, int); void (*map_fd_put_ptr)(struct bpf_map *, void *, bool); int (*map_gen_lookup)(struct bpf_map *, struct bpf_insn *); u32 (*map_fd_sys_lookup_elem)(void *); void (*map_seq_show_elem)(struct bpf_map *, void *, struct seq_file *); int (*map_check_btf)(const struct bpf_map *, const struct btf *, const struct btf_type *, const struct btf_type *); int (*map_poke_track)(struct bpf_map *, struct bpf_prog_aux *); void (*map_poke_untrack)(struct bpf_map *, struct bpf_prog_aux *); void (*map_poke_run)(struct bpf_map *, u32, struct bpf_prog *, struct bpf_prog *); int (*map_direct_value_addr)(const struct bpf_map *, u64 *, u32); int (*map_direct_value_meta)(const struct bpf_map *, u64, u32 *); int (*map_mmap)(struct bpf_map *, struct vm_area_struct *); __poll_t (*map_poll)(struct bpf_map *, struct file *, struct poll_table_struct *); int (*map_local_storage_charge)(struct bpf_local_storage_map *, void *, u32); void (*map_local_storage_uncharge)(struct bpf_local_storage_map *, void *, u32); struct bpf_local_storage __attribute__((btf_type_tag("rcu"))) ** (*map_owner_storage_ptr)(void *); long (*map_redirect)(struct bpf_map *, u64, u64); bool (*map_meta_equal)(const struct bpf_map *, const struct bpf_map *); int (*map_set_for_each_callback_args)(struct bpf_verifier_env *, struct bpf_func_state *, struct bpf_func_state *); long (*map_for_each_callback)(struct bpf_map *, bpf_callback_t, void *, u64); u64 (*map_mem_usage)(const struct bpf_map *); int *map_btf_id; const struct bpf_iter_seq_info *iter_seq_info; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; union bpf_attr { struct { __u32 map_type; __u32 key_size; __u32 value_size; __u32 max_entries; __u32 map_flags; __u32 inner_map_fd; __u32 numa_node; char map_name[16]; __u32 map_ifindex; __u32 btf_fd; __u32 btf_key_type_id; __u32 btf_value_type_id; __u32 btf_vmlinux_value_type_id; __u64 map_extra; }; struct { __u32 map_fd; __u64 key; union { __u64 value; __u64 next_key; }; __u64 flags; }; struct { __u64 in_batch; __u64 out_batch; __u64 keys; __u64 values; __u32 count; __u32 map_fd; __u64 elem_flags; __u64 flags; } batch; struct { __u32 prog_type; __u32 insn_cnt; __u64 insns; __u64 license; __u32 log_level; __u32 log_size; __u64 log_buf; __u32 kern_version; __u32 prog_flags; char prog_name[16]; __u32 prog_ifindex; __u32 expected_attach_type; __u32 prog_btf_fd; __u32 func_info_rec_size; __u64 func_info; __u32 func_info_cnt; __u32 line_info_rec_size; __u64 line_info; __u32 line_info_cnt; __u32 attach_btf_id; union { __u32 attach_prog_fd; __u32 
attach_btf_obj_fd; }; __u32 core_relo_cnt; __u64 fd_array; __u64 core_relos; __u32 core_relo_rec_size; __u32 log_true_size; }; struct { __u64 pathname; __u32 bpf_fd; __u32 file_flags; __s32 path_fd; }; struct { union { __u32 target_fd; __u32 target_ifindex; }; __u32 attach_bpf_fd; __u32 attach_type; __u32 attach_flags; __u32 replace_bpf_fd; union { __u32 relative_fd; __u32 relative_id; }; __u64 expected_revision; }; struct { __u32 prog_fd; __u32 retval; __u32 data_size_in; __u32 data_size_out; __u64 data_in; __u64 data_out; __u32 repeat; __u32 duration; __u32 ctx_size_in; __u32 ctx_size_out; __u64 ctx_in; __u64 ctx_out; __u32 flags; __u32 cpu; __u32 batch_size; } test; struct { union { __u32 start_id; __u32 prog_id; __u32 map_id; __u32 btf_id; __u32 link_id; }; __u32 next_id; __u32 open_flags; }; struct { __u32 bpf_fd; __u32 info_len; __u64 info; } info; struct { union { __u32 target_fd; __u32 target_ifindex; }; __u32 attach_type; __u32 query_flags; __u32 attach_flags; __u64 prog_ids; union { __u32 prog_cnt; __u32 count; }; __u64 prog_attach_flags; __u64 link_ids; __u64 link_attach_flags; __u64 revision; } query; struct { __u64 name; __u32 prog_fd; } raw_tracepoint; struct { __u64 btf; __u64 btf_log_buf; __u32 btf_size; __u32 btf_log_size; __u32 btf_log_level; __u32 btf_log_true_size; }; struct { __u32 pid; __u32 fd; __u32 flags; __u32 buf_len; __u64 buf; __u32 prog_id; __u32 fd_type; __u64 probe_offset; __u64 probe_addr; } task_fd_query; struct { union { __u32 prog_fd; __u32 map_fd; }; union { __u32 target_fd; __u32 target_ifindex; }; __u32 attach_type; __u32 flags; union { __u32 target_btf_id; struct { __u64 iter_info; __u32 iter_info_len; }; struct { __u64 bpf_cookie; } perf_event; struct { __u32 flags; __u32 cnt; __u64 syms; __u64 addrs; __u64 cookies; } kprobe_multi; struct { __u32 target_btf_id; __u64 cookie; } tracing; struct { __u32 pf; __u32 hooknum; __s32 priority; __u32 flags; } netfilter; struct { union { __u32 relative_fd; __u32 relative_id; }; __u64 expected_revision; } tcx; struct { __u64 path; __u64 offsets; __u64 ref_ctr_offsets; __u64 cookies; __u32 cnt; __u32 flags; __u32 pid; } uprobe_multi; }; } link_create; struct { __u32 link_fd; union { __u32 new_prog_fd; __u32 new_map_fd; }; __u32 flags; union { __u32 old_prog_fd; __u32 old_map_fd; }; } link_update; struct { __u32 link_fd; } link_detach; struct { __u32 type; } enable_stats; struct { __u32 link_fd; __u32 flags; } iter_create; struct { __u32 prog_fd; __u32 map_fd; __u32 flags; } prog_bind_map; }; struct btf_header { __u16 magic; __u8 version; __u8 flags; __u32 hdr_len; __u32 type_off; __u32 type_len; __u32 str_off; __u32 str_len; }; struct btf_kfunc_set_tab; struct btf_id_dtor_kfunc_tab; struct btf_struct_metas; struct btf { void *data; struct btf_type **types; u32 *resolved_ids; u32 *resolved_sizes; const char *strings; void *nohdr_data; struct btf_header hdr; u32 nr_types; u32 types_size; u32 data_size; refcount_t refcnt; u32 id; struct callback_head rcu; struct btf_kfunc_set_tab *kfunc_set_tab; struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab; struct btf_struct_metas *struct_meta_tab; struct btf *base_btf; u32 start_id; u32 start_str_off; char name[56]; bool kernel_btf; }; struct bpf_iter_aux_info; typedef int (*bpf_iter_init_seq_priv_t)(void *, struct bpf_iter_aux_info *); typedef void (*bpf_iter_fini_seq_priv_t)(void *); struct bpf_iter_seq_info { const struct seq_operations *seq_ops; bpf_iter_init_seq_priv_t init_seq_private; bpf_iter_fini_seq_priv_t fini_seq_private; u32 seq_priv_size; }; struct bpf_iter_aux_info 
{ struct bpf_map *map; struct { struct cgroup *start; enum bpf_cgroup_iter_order order; } cgroup; struct { enum bpf_iter_task_type type; u32 pid; } task; }; typedef void (*btf_dtor_kfunc_t)(void *); struct btf_field_kptr { struct btf *btf; struct module *module; btf_dtor_kfunc_t dtor; u32 btf_id; }; struct btf_field_graph_root { struct btf *btf; u32 value_btf_id; u32 node_offset; struct btf_record *value_rec; }; struct btf_field { u32 offset; u32 size; enum btf_field_type type; union { struct btf_field_kptr kptr; struct btf_field_graph_root graph_root; }; }; struct btf_record { u32 cnt; u32 field_mask; int spin_lock_off; int timer_off; int refcount_off; struct btf_field fields[0]; }; struct bpf_prog_ops { int (*test_run)(struct bpf_prog *, const union bpf_attr *, union bpf_attr __attribute__((btf_type_tag("user"))) *); }; struct btf_mod_pair { struct btf *btf; struct module *module; }; struct bpf_offload_dev; struct bpf_prog_offload { struct bpf_prog *prog; struct net_device *netdev; struct bpf_offload_dev *offdev; void *dev_priv; struct list_head offloads; bool dev_state; bool opt_failed; void *jited_image; u32 jited_len; u64 android_kabi_reserved1; }; struct bpf_func_info { __u32 insn_off; __u32 type_id; }; struct bpf_func_info_aux { u16 linkage; bool unreliable; }; struct bpf_line_info { __u32 insn_off; __u32 file_name_off; __u32 line_off; __u32 line_col; }; struct bpf_run_ctx {}; struct arm_smccc_res { unsigned long a0; unsigned long a1; unsigned long a2; unsigned long a3; }; struct ftr_set_desc { char name[20]; struct arm64_ftr_override *override; struct { char name[10]; u8 shift; u8 width; bool (*filter)(u64); } fields[0]; }; struct arm_cpuidle_irq_context {}; struct aarch64_insn_patch { void **text_addrs; u32 *new_insns; int insn_cnt; atomic_t cpu_count; }; typedef u32 compat_size_t; struct compat_statfs64 { __u32 f_type; __u32 f_bsize; __u64 f_blocks; __u64 f_bfree; __u64 f_bavail; __u64 f_files; __u64 f_ffree; __kernel_fsid_t f_fsid; __u32 f_namelen; __u32 f_frsize; __u32 f_flags; __u32 f_spare[4]; } __attribute__((packed)); struct compat_sigaltstack { compat_uptr_t ss_sp; int ss_flags; compat_size_t ss_size; }; typedef struct compat_sigaltstack compat_stack_t; struct compat_sigcontext { compat_ulong_t trap_no; compat_ulong_t error_code; compat_ulong_t oldmask; compat_ulong_t arm_r0; compat_ulong_t arm_r1; compat_ulong_t arm_r2; compat_ulong_t arm_r3; compat_ulong_t arm_r4; compat_ulong_t arm_r5; compat_ulong_t arm_r6; compat_ulong_t arm_r7; compat_ulong_t arm_r8; compat_ulong_t arm_r9; compat_ulong_t arm_r10; compat_ulong_t arm_fp; compat_ulong_t arm_ip; compat_ulong_t arm_sp; compat_ulong_t arm_lr; compat_ulong_t arm_pc; compat_ulong_t arm_cpsr; compat_ulong_t fault_address; }; typedef u32 compat_sigset_word; typedef struct { compat_sigset_word sig[2]; } compat_sigset_t; struct compat_ucontext { compat_ulong_t uc_flags; compat_uptr_t uc_link; compat_stack_t uc_stack; struct compat_sigcontext uc_mcontext; compat_sigset_t uc_sigmask; int __unused[30]; compat_ulong_t uc_regspace[128]; }; struct compat_sigframe { struct compat_ucontext uc; compat_ulong_t retcode[2]; }; typedef s32 compat_pid_t; typedef u32 __compat_uid32_t; typedef s32 compat_timer_t; typedef s32 compat_int_t; union compat_sigval { compat_int_t sival_int; compat_uptr_t sival_ptr; }; typedef union compat_sigval compat_sigval_t; typedef s32 compat_clock_t; struct compat_siginfo { int si_signo; int si_errno; int si_code; union { int _pad[29]; struct { compat_pid_t _pid; __compat_uid32_t _uid; } _kill; struct { 
compat_timer_t _tid; int _overrun; compat_sigval_t _sigval; } _timer; struct { compat_pid_t _pid; __compat_uid32_t _uid; compat_sigval_t _sigval; } _rt; struct { compat_pid_t _pid; __compat_uid32_t _uid; int _status; compat_clock_t _utime; compat_clock_t _stime; } _sigchld; struct { compat_uptr_t _addr; union { int _trapno; short _addr_lsb; struct { char _dummy_bnd[4]; compat_uptr_t _lower; compat_uptr_t _upper; } _addr_bnd; struct { char _dummy_pkey[4]; u32 _pkey; } _addr_pkey; struct { compat_ulong_t _data; u32 _type; u32 _flags; } _perf; }; } _sigfault; struct { compat_long_t _band; int _fd; } _sigpoll; struct { compat_uptr_t _call_addr; int _syscall; unsigned int _arch; } _sigsys; } _sifields; }; struct compat_rt_sigframe { struct compat_siginfo info; struct compat_sigframe sig; }; typedef u64 compat_u64; struct compat_user_vfp { compat_u64 fpregs[32]; compat_ulong_t fpscr; }; struct compat_user_vfp_exc { compat_ulong_t fpexc; compat_ulong_t fpinst; compat_ulong_t fpinst2; }; struct compat_vfp_sigframe { compat_ulong_t magic; compat_ulong_t size; struct compat_user_vfp ufp; struct compat_user_vfp_exc ufp_exc; }; struct compat_aux_sigframe { struct compat_vfp_sigframe vfp; unsigned long end_magic; }; union __fpsimd_vreg { __uint128_t raw; struct { u64 lo; u64 hi; }; }; enum aarch64_reloc_op { RELOC_OP_NONE = 0, RELOC_OP_ABS = 1, RELOC_OP_PREL = 2, RELOC_OP_PAGE = 3, }; enum aarch64_insn_movw_imm_type { AARCH64_INSN_IMM_MOVNZ = 0, AARCH64_INSN_IMM_MOVKZ = 1, }; enum aarch64_insn_branch_type { AARCH64_INSN_BRANCH_NOLINK = 0, AARCH64_INSN_BRANCH_LINK = 1, AARCH64_INSN_BRANCH_RETURN = 2, AARCH64_INSN_BRANCH_COMP_ZERO = 3, AARCH64_INSN_BRANCH_COMP_NONZERO = 4, }; struct ht_iterator; struct hyp_event { char name[32]; bool *enabled; char *print_fmt; struct trace_event_fields *fields; void (*trace_func)(struct ht_iterator *); int id; }; struct hyp_trace_buffer; struct hyp_entry_hdr; struct ht_iterator { struct hyp_trace_buffer *hyp_buffer; int cpu; struct hyp_entry_hdr *ent; unsigned long lost_events; int ent_cpu; size_t ent_size; u64 ts; void *spare; size_t copy_leftover; struct trace_seq seq; struct delayed_work poll_work; }; struct hyp_entry_hdr { unsigned short id; }; typedef __s64 Elf64_Sxword; struct elf64_rela { Elf64_Addr r_offset; Elf64_Xword r_info; Elf64_Sxword r_addend; }; typedef struct elf64_rela Elf64_Rela; enum aarch64_insn_adr_type { AARCH64_INSN_ADR_TYPE_ADRP = 0, AARCH64_INSN_ADR_TYPE_ADR = 1, }; enum aarch64_insn_adsb_type { AARCH64_INSN_ADSB_ADD = 0, AARCH64_INSN_ADSB_SUB = 1, AARCH64_INSN_ADSB_ADD_SETFLAGS = 2, AARCH64_INSN_ADSB_SUB_SETFLAGS = 3, }; enum mod_mem_type { MOD_TEXT = 0, MOD_DATA = 1, MOD_RODATA = 2, MOD_RO_AFTER_INIT = 3, MOD_INIT_TEXT = 4, MOD_INIT_DATA = 5, MOD_INIT_RODATA = 6, MOD_MEM_NUM_TYPES = 7, MOD_INVALID = -1, }; typedef void (*swap_func_t)(void *, void *, int); enum perf_event_arm_regs { PERF_REG_ARM64_X0 = 0, PERF_REG_ARM64_X1 = 1, PERF_REG_ARM64_X2 = 2, PERF_REG_ARM64_X3 = 3, PERF_REG_ARM64_X4 = 4, PERF_REG_ARM64_X5 = 5, PERF_REG_ARM64_X6 = 6, PERF_REG_ARM64_X7 = 7, PERF_REG_ARM64_X8 = 8, PERF_REG_ARM64_X9 = 9, PERF_REG_ARM64_X10 = 10, PERF_REG_ARM64_X11 = 11, PERF_REG_ARM64_X12 = 12, PERF_REG_ARM64_X13 = 13, PERF_REG_ARM64_X14 = 14, PERF_REG_ARM64_X15 = 15, PERF_REG_ARM64_X16 = 16, PERF_REG_ARM64_X17 = 17, PERF_REG_ARM64_X18 = 18, PERF_REG_ARM64_X19 = 19, PERF_REG_ARM64_X20 = 20, PERF_REG_ARM64_X21 = 21, PERF_REG_ARM64_X22 = 22, PERF_REG_ARM64_X23 = 23, PERF_REG_ARM64_X24 = 24, PERF_REG_ARM64_X25 = 25, PERF_REG_ARM64_X26 = 26, PERF_REG_ARM64_X27 
= 27, PERF_REG_ARM64_X28 = 28, PERF_REG_ARM64_X29 = 29, PERF_REG_ARM64_LR = 30, PERF_REG_ARM64_SP = 31, PERF_REG_ARM64_PC = 32, PERF_REG_ARM64_MAX = 33, PERF_REG_ARM64_VG = 46, PERF_REG_ARM64_EXTENDED_MAX = 47, }; enum perf_sample_regs_abi { PERF_SAMPLE_REGS_ABI_NONE = 0, PERF_SAMPLE_REGS_ABI_32 = 1, PERF_SAMPLE_REGS_ABI_64 = 2, }; struct perf_guest_info_callbacks { unsigned int (*state)(); unsigned long (*get_ip)(); unsigned int (*handle_intel_pt_intr)(); }; struct perf_callchain_entry_ctx { struct perf_callchain_entry *entry; u32 max_stack; u32 nr; short contexts; bool contexts_maxed; }; enum hw_breakpoint_ops { HW_BREAKPOINT_INSTALL = 0, HW_BREAKPOINT_UNINSTALL = 1, HW_BREAKPOINT_RESTORE = 2, }; struct cpu_suspend_ctx { u64 ctx_regs[13]; u64 sp; }; struct sleep_stack_data { struct cpu_suspend_ctx system_regs; unsigned long callee_saved_regs[12]; }; enum jump_label_type { JUMP_LABEL_NOP = 0, JUMP_LABEL_JMP = 1, }; struct screen_info { __u8 orig_x; __u8 orig_y; __u16 ext_mem_k; __u16 orig_video_page; __u8 orig_video_mode; __u8 orig_video_cols; __u8 flags; __u8 unused2; __u16 orig_video_ega_bx; __u16 unused3; __u8 orig_video_lines; __u8 orig_video_isVGA; __u16 orig_video_points; __u16 lfb_width; __u16 lfb_height; __u16 lfb_depth; __u32 lfb_base; __u32 lfb_size; __u16 cl_magic; __u16 cl_offset; __u16 lfb_linelength; __u8 red_size; __u8 red_pos; __u8 green_size; __u8 green_pos; __u8 blue_size; __u8 blue_pos; __u8 rsvd_size; __u8 rsvd_pos; __u16 vesapm_seg; __u16 vesapm_off; __u16 pages; __u16 vesa_attributes; __u32 capabilities; __u32 ext_lfb_base; __u8 _reserved[2]; } __attribute__((packed)); typedef int (*pte_fn_t)(pte_t *, unsigned long, void *); typedef unsigned long efi_status_t; typedef struct { u32 type; u32 pad; u64 phys_addr; u64 virt_addr; u64 num_pages; u64 attribute; } efi_memory_desc_t; struct set_perm_data { const efi_memory_desc_t *md; bool has_bti; }; typedef unsigned short pci_bus_flags_t; struct pci_dev; struct pci_ops; struct pci_bus { struct list_head node; struct pci_bus *parent; struct list_head children; struct list_head devices; struct pci_dev *self; struct list_head slots; struct resource *resource[4]; struct list_head resources; struct resource busn_res; struct pci_ops *ops; void *sysdata; struct proc_dir_entry *procdir; unsigned char number; unsigned char primary; unsigned char max_bus_speed; unsigned char cur_bus_speed; int domain_nr; char name[48]; unsigned short bridge_ctl; pci_bus_flags_t bus_flags; struct device *bridge; struct device dev; struct bin_attribute *legacy_io; struct bin_attribute *legacy_mem; unsigned int is_added: 1; unsigned int unsafe_warn: 1; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; typedef int pci_power_t; typedef unsigned int pci_channel_state_t; typedef unsigned short pci_dev_flags_t; struct pci_vpd { struct mutex lock; unsigned int len; u8 cap; }; struct pci_slot; struct aer_stats; struct rcec_ea; struct pci_driver; struct pcie_link_state; struct pci_sriov; struct pci_dev { struct list_head bus_list; struct pci_bus *bus; struct pci_bus *subordinate; void *sysdata; struct proc_dir_entry *procent; struct pci_slot *slot; unsigned int devfn; unsigned short vendor; unsigned short device; unsigned short subsystem_vendor; unsigned short subsystem_device; unsigned int class; u8 revision; u8 hdr_type; u16 aer_cap; struct aer_stats *aer_stats; struct rcec_ea *rcec_ea; struct pci_dev *rcec; u32 devcap; u8 pcie_cap; u8 msi_cap; u8 msix_cap; u8 pcie_mpss: 3; u8 rom_base_reg; u8 
pin; u16 pcie_flags_reg; unsigned long *dma_alias_mask; struct pci_driver *driver; u64 dma_mask; struct device_dma_parameters dma_parms; pci_power_t current_state; u8 pm_cap; unsigned int imm_ready: 1; unsigned int pme_support: 5; unsigned int pme_poll: 1; unsigned int d1_support: 1; unsigned int d2_support: 1; unsigned int no_d1d2: 1; unsigned int no_d3cold: 1; unsigned int bridge_d3: 1; unsigned int d3cold_allowed: 1; unsigned int mmio_always_on: 1; unsigned int wakeup_prepared: 1; unsigned int skip_bus_pm: 1; unsigned int ignore_hotplug: 1; unsigned int hotplug_user_indicators: 1; unsigned int clear_retrain_link: 1; unsigned int d3hot_delay; unsigned int d3cold_delay; struct pcie_link_state *link_state; u16 l1ss; unsigned int ltr_path: 1; unsigned int pasid_no_tlp: 1; unsigned int eetlp_prefix_path: 1; pci_channel_state_t error_state; struct device dev; int cfg_size; unsigned int irq; struct resource resource[17]; struct resource driver_exclusive_resource; bool match_driver; unsigned int transparent: 1; unsigned int io_window: 1; unsigned int pref_window: 1; unsigned int pref_64_window: 1; unsigned int multifunction: 1; unsigned int is_busmaster: 1; unsigned int no_msi: 1; unsigned int no_64bit_msi: 1; unsigned int block_cfg_access: 1; unsigned int broken_parity_status: 1; unsigned int irq_reroute_variant: 2; unsigned int msi_enabled: 1; unsigned int msix_enabled: 1; unsigned int ari_enabled: 1; unsigned int ats_enabled: 1; unsigned int pasid_enabled: 1; unsigned int pri_enabled: 1; unsigned int is_managed: 1; unsigned int is_msi_managed: 1; unsigned int needs_freset: 1; unsigned int state_saved: 1; unsigned int is_physfn: 1; unsigned int is_virtfn: 1; unsigned int is_hotplug_bridge: 1; unsigned int shpc_managed: 1; unsigned int is_thunderbolt: 1; unsigned int untrusted: 1; unsigned int external_facing: 1; unsigned int broken_intx_masking: 1; unsigned int io_window_1k: 1; unsigned int irq_managed: 1; unsigned int non_compliant_bars: 1; unsigned int is_probed: 1; unsigned int link_active_reporting: 1; unsigned int no_vf_scan: 1; unsigned int no_command_memory: 1; unsigned int rom_bar_overlap: 1; unsigned int rom_attr_enabled: 1; pci_dev_flags_t dev_flags; atomic_t enable_cnt; spinlock_t pcie_cap_lock; u32 saved_config_space[16]; struct hlist_head saved_cap_space; struct bin_attribute *res_attr[17]; struct bin_attribute *res_attr_wc[17]; void *msix_base; raw_spinlock_t msi_lock; struct pci_vpd vpd; union { struct pci_sriov *sriov; struct pci_dev *physfn; }; u16 ats_cap; u8 ats_stu; u16 acs_cap; phys_addr_t rom; size_t romlen; const char *driver_override; unsigned long priv_flags; u8 reset_methods[7]; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct hotplug_slot; struct pci_slot { struct pci_bus *bus; struct list_head list; struct hotplug_slot *hotplug; unsigned char number; struct kobject kobj; }; struct pci_dynids { spinlock_t lock; struct list_head list; }; struct pci_device_id; struct pci_error_handlers; struct pci_driver { struct list_head node; const char *name; const struct pci_device_id *id_table; int (*probe)(struct pci_dev *, const struct pci_device_id *); void (*remove)(struct pci_dev *); int (*suspend)(struct pci_dev *, pm_message_t); int (*resume)(struct pci_dev *); void (*shutdown)(struct pci_dev *); int (*sriov_configure)(struct pci_dev *, int); int (*sriov_set_msix_vec_count)(struct pci_dev *, int); u32 (*sriov_get_vf_total_msix)(struct pci_dev *); const struct pci_error_handlers *err_handler; const 
struct attribute_group **groups; const struct attribute_group **dev_groups; struct device_driver driver; struct pci_dynids dynids; bool driver_managed_dma; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct pci_device_id { __u32 vendor; __u32 device; __u32 subvendor; __u32 subdevice; __u32 class; __u32 class_mask; kernel_ulong_t driver_data; __u32 override_only; }; typedef unsigned int pci_ers_result_t; struct pci_error_handlers { pci_ers_result_t (*error_detected)(struct pci_dev *, pci_channel_state_t); pci_ers_result_t (*mmio_enabled)(struct pci_dev *); pci_ers_result_t (*slot_reset)(struct pci_dev *); void (*reset_prepare)(struct pci_dev *); void (*reset_done)(struct pci_dev *); void (*resume)(struct pci_dev *); void (*cor_error_detected)(struct pci_dev *); u64 android_kabi_reserved1; }; struct pci_ops { int (*add_bus)(struct pci_bus *); void (*remove_bus)(struct pci_bus *); void * (*map_bus)(struct pci_bus *, unsigned int, int); int (*read)(struct pci_bus *, unsigned int, int, int, u32 *); int (*write)(struct pci_bus *, unsigned int, int, int, u32); u64 android_kabi_reserved1; }; typedef void (*btf_trace_instruction_emulation)(void *, const char *, u64); enum legacy_insn_status { INSN_DEPRECATED = 0, INSN_OBSOLETE = 1, INSN_UNAVAILABLE = 2, }; struct insn_emulation { const char *name; enum legacy_insn_status status; bool (*try_emulate)(struct pt_regs *, u32); int (*set_hw_mode)(bool); int current_mode; int min; int max; struct ctl_table sysctl[2]; }; enum insn_emulation_mode { INSN_UNDEF = 0, INSN_EMULATE = 1, INSN_HW = 2, }; enum perf_sw_ids { PERF_COUNT_SW_CPU_CLOCK = 0, PERF_COUNT_SW_TASK_CLOCK = 1, PERF_COUNT_SW_PAGE_FAULTS = 2, PERF_COUNT_SW_CONTEXT_SWITCHES = 3, PERF_COUNT_SW_CPU_MIGRATIONS = 4, PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, PERF_COUNT_SW_EMULATION_FAULTS = 8, PERF_COUNT_SW_DUMMY = 9, PERF_COUNT_SW_BPF_OUTPUT = 10, PERF_COUNT_SW_CGROUP_SWITCHES = 11, PERF_COUNT_SW_MAX = 12, }; struct trace_event_raw_instruction_emulation { struct trace_entry ent; u32 __data_loc_instr; u64 addr; char __data[0]; }; struct trace_event_data_offsets_instruction_emulation { u32 instr; }; typedef void (*smp_call_func_t)(void *); typedef bool (*smp_cond_func_t)(int, void *); struct pvclock_vcpu_stolen_time; struct pv_time_stolen_time_region { struct pvclock_vcpu_stolen_time __attribute__((btf_type_tag("rcu"))) *kaddr; }; struct pvclock_vcpu_stolen_time { __le32 revision; __le32 attributes; __le64 stolen_time; u8 padding[48]; }; enum { MEMREMAP_WB = 1, MEMREMAP_WT = 2, MEMREMAP_WC = 4, MEMREMAP_ENC = 8, MEMREMAP_DEC = 16, }; typedef __be64 fdt64_t; enum { ASSUME_PERFECT = 255, ASSUME_VALID_DTB = 1, ASSUME_VALID_INPUT = 2, ASSUME_LATEST = 4, ASSUME_NO_ROLLBACK = 8, ASSUME_LIBFDT_ORDER = 16, ASSUME_LIBFDT_FLAWLESS = 32, }; typedef __be32 fdt32_t; struct fdt_header { fdt32_t magic; fdt32_t totalsize; fdt32_t off_dt_struct; fdt32_t off_dt_strings; fdt32_t off_mem_rsvmap; fdt32_t version; fdt32_t last_comp_version; fdt32_t boot_cpuid_phys; fdt32_t size_dt_strings; fdt32_t size_dt_struct; }; typedef u8 uint8_t; struct fdt_reserve_entry { fdt64_t address; fdt64_t size; }; typedef u64 uint64_t; struct fdt_property { fdt32_t tag; fdt32_t len; fdt32_t nameoff; char data[0]; }; struct fdt_node_header { fdt32_t tag; char name[0]; }; struct core_vma_metadata { unsigned long start; unsigned long end; unsigned long flags; unsigned long dump_size; unsigned long pgoff; struct 
file *file; }; struct coredump_params { const kernel_siginfo_t *siginfo; struct file *file; unsigned long limit; unsigned long mm_flags; int cpu; loff_t written; loff_t pos; loff_t to_skip; int vma_count; size_t vma_data_size; struct core_vma_metadata *vma_meta; }; struct elf64_phdr { Elf64_Word p_type; Elf64_Word p_flags; Elf64_Off p_offset; Elf64_Addr p_vaddr; Elf64_Addr p_paddr; Elf64_Xword p_filesz; Elf64_Xword p_memsz; Elf64_Xword p_align; }; enum { FOLL_WRITE = 1, FOLL_GET = 2, FOLL_DUMP = 4, FOLL_FORCE = 8, FOLL_NOWAIT = 16, FOLL_NOFAULT = 32, FOLL_HWPOISON = 64, FOLL_ANON = 128, FOLL_LONGTERM = 256, FOLL_SPLIT_PMD = 512, FOLL_PCI_P2PDMA = 1024, FOLL_INTERRUPTIBLE = 2048, FOLL_HONOR_NUMA_FAULT = 4096, }; enum { PACIASP = 3573752639, AUTIASP = 3573752767, SCS_PUSH = 4160783966, SCS_POP = 4167011934, }; struct eh_frame { u32 size; u32 cie_id_or_pointer; union { struct { u8 version; u8 augmentation_string[0]; }; struct { s32 initial_loc; s32 range; u8 opcodes[0]; }; }; }; typedef u32 kprobe_opcode_t; struct kprobe; typedef int (*kprobe_pre_handler_t)(struct kprobe *, struct pt_regs *); typedef void (*kprobe_post_handler_t)(struct kprobe *, struct pt_regs *, unsigned long); typedef u32 probe_opcode_t; typedef void probes_handler_t(u32, long, struct pt_regs *); struct arch_probe_insn { probe_opcode_t *insn; pstate_check_t *pstate_cc; probes_handler_t *handler; unsigned long restore; }; struct arch_specific_insn { struct arch_probe_insn api; }; struct kprobe { struct hlist_node hlist; struct list_head list; unsigned long nmissed; kprobe_opcode_t *addr; const char *symbol_name; unsigned int offset; kprobe_pre_handler_t pre_handler; kprobe_post_handler_t post_handler; kprobe_opcode_t opcode; struct arch_specific_insn ainsn; u32 flags; }; struct prev_kprobe { struct kprobe *kp; unsigned int status; }; struct kprobe_ctlblk { unsigned int kprobe_status; unsigned long saved_irqflag; struct prev_kprobe prev_kprobe; }; enum probe_insn { INSN_REJECTED = 0, INSN_GOOD_NO_SLOT = 1, INSN_GOOD = 2, }; struct kprobe_insn_cache { struct mutex mutex; void * (*alloc)(); void (*free)(void *); const char *sym; struct list_head pages; size_t insn_size; int nr_garbage; }; struct freelist_node { atomic_t refs; struct freelist_node *next; }; struct kretprobe_holder; struct kretprobe_instance { union { struct freelist_node freelist; struct callback_head rcu; }; struct llist_node llist; struct kretprobe_holder *rph; kprobe_opcode_t *ret_addr; void *fp; char data[0]; }; struct kretprobe; struct kretprobe_holder { struct kretprobe __attribute__((btf_type_tag("rcu"))) *rp; refcount_t ref; }; typedef int (*kretprobe_handler_t)(struct kretprobe_instance *, struct pt_regs *); struct freelist_head { struct freelist_node *head; }; struct kretprobe { struct kprobe kp; kretprobe_handler_t handler; kretprobe_handler_t entry_handler; int maxactive; int nmissed; size_t data_size; struct freelist_head freelist; struct kretprobe_holder *rph; }; enum aarch64_insn_special_register { AARCH64_INSN_SPCLREG_SPSR_EL1 = 49664, AARCH64_INSN_SPCLREG_ELR_EL1 = 49665, AARCH64_INSN_SPCLREG_SP_EL0 = 49672, AARCH64_INSN_SPCLREG_SPSEL = 49680, AARCH64_INSN_SPCLREG_CURRENTEL = 49682, AARCH64_INSN_SPCLREG_DAIF = 55825, AARCH64_INSN_SPCLREG_NZCV = 55824, AARCH64_INSN_SPCLREG_FPCR = 55840, AARCH64_INSN_SPCLREG_DSPSR_EL0 = 55848, AARCH64_INSN_SPCLREG_DLR_EL0 = 55849, AARCH64_INSN_SPCLREG_SPSR_EL2 = 57856, AARCH64_INSN_SPCLREG_ELR_EL2 = 57857, AARCH64_INSN_SPCLREG_SP_EL1 = 57864, AARCH64_INSN_SPCLREG_SPSR_INQ = 57880, AARCH64_INSN_SPCLREG_SPSR_ABT = 
57881, AARCH64_INSN_SPCLREG_SPSR_UND = 57882, AARCH64_INSN_SPCLREG_SPSR_FIQ = 57883, AARCH64_INSN_SPCLREG_SPSR_EL3 = 61952, AARCH64_INSN_SPCLREG_ELR_EL3 = 61953, AARCH64_INSN_SPCLREG_SP_EL2 = 61968, }; enum rp_check { RP_CHECK_CALL = 0, RP_CHECK_CHAIN_CALL = 1, RP_CHECK_RET = 2, }; struct arch_uprobe { union { u8 insn[4]; u8 ixol[4]; }; struct arch_probe_insn api; bool simulate; }; enum iommu_page_response_code { IOMMU_PAGE_RESP_SUCCESS = 0, IOMMU_PAGE_RESP_INVALID = 1, IOMMU_PAGE_RESP_FAILURE = 2, }; typedef int (*iommu_fault_handler_t)(struct iommu_domain *, struct device *, unsigned long, int, void *); struct iommu_domain_geometry { dma_addr_t aperture_start; dma_addr_t aperture_end; bool force_aperture; }; struct iommu_dma_cookie; struct iommu_fault; struct iommu_domain { unsigned int type; const struct iommu_domain_ops *ops; unsigned long pgsize_bitmap; struct iommu_domain_geometry geometry; struct iommu_dma_cookie *iova_cookie; enum iommu_page_response_code (*iopf_handler)(struct iommu_fault *, void *); void *fault_data; union { struct { iommu_fault_handler_t handler; void *handler_token; }; struct { struct mm_struct *mm; int users; }; }; }; struct iommu_domain_ops { int (*attach_dev)(struct iommu_domain *, struct device *); int (*set_dev_pasid)(struct iommu_domain *, struct device *, ioasid_t); int (*map)(struct iommu_domain *, unsigned long, phys_addr_t, size_t, int, gfp_t); int (*map_pages)(struct iommu_domain *, unsigned long, phys_addr_t, size_t, size_t, int, gfp_t, size_t *); size_t (*unmap)(struct iommu_domain *, unsigned long, size_t, struct iommu_iotlb_gather *); size_t (*unmap_pages)(struct iommu_domain *, unsigned long, size_t, size_t, struct iommu_iotlb_gather *); void (*flush_iotlb_all)(struct iommu_domain *); void (*iotlb_sync_map)(struct iommu_domain *, unsigned long, size_t); void (*iotlb_sync)(struct iommu_domain *, struct iommu_iotlb_gather *); phys_addr_t (*iova_to_phys)(struct iommu_domain *, dma_addr_t); bool (*enforce_cache_coherency)(struct iommu_domain *); int (*enable_nesting)(struct iommu_domain *); int (*set_pgtable_quirks)(struct iommu_domain *, unsigned long); void (*free)(struct iommu_domain *); }; struct iommu_iotlb_gather { unsigned long start; unsigned long end; size_t pgsize; struct list_head freelist; bool queued; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct iommu_fault_unrecoverable { __u32 reason; __u32 flags; __u32 pasid; __u32 perm; __u64 addr; __u64 fetch_addr; }; struct iommu_fault_page_request { __u32 flags; __u32 pasid; __u32 grpid; __u32 perm; __u64 addr; __u64 private_data[2]; }; struct iommu_fault { __u32 type; __u32 padding; union { struct iommu_fault_unrecoverable event; struct iommu_fault_page_request prm; __u8 padding2[56]; }; }; struct iommu_device { struct list_head list; const struct iommu_ops *ops; struct fwnode_handle *fwnode; struct device *dev; u32 max_pasids; }; struct of_phandle_args { struct device_node *np; int args_count; uint32_t args[16]; }; struct iommu_fault_event { struct iommu_fault fault; struct list_head list; }; struct iommu_page_response { __u32 argsz; __u32 version; __u32 flags; __u32 pasid; __u32 grpid; __u32 code; }; struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; }; struct scatterlist { unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; unsigned int dma_flags; }; struct io_tlb_area; struct io_tlb_slot; struct io_tlb_pool { phys_addr_t start; phys_addr_t end; void *vaddr; unsigned long 
nslabs; bool late_alloc; unsigned int nareas; unsigned int area_nslabs; struct io_tlb_area *areas; struct io_tlb_slot *slots; struct list_head node; struct callback_head rcu; bool transient; }; struct io_tlb_mem { struct io_tlb_pool defpool; unsigned long nslabs; struct dentry *debugfs; bool force_bounce; bool for_alloc; bool can_grow; u64 phys_limit; spinlock_t lock; struct list_head pools; struct work_struct dyn_alloc; atomic_long_t total_used; atomic_long_t used_hiwater; }; struct iopf_device_param; struct iommu_fault_param; struct iommu_fwspec; struct dev_iommu { struct mutex lock; struct iommu_fault_param *fault_param; struct iopf_device_param *iopf_param; struct iommu_fwspec *fwspec; struct iommu_device *iommu_dev; void *priv; u32 max_pasids; u32 attach_deferred: 1; u32 pci_32bit_workaround: 1; u32 require_direct: 1; }; typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *); struct iommu_fault_param { iommu_dev_fault_handler_t handler; void *data; struct list_head faults; struct mutex lock; }; struct iommu_fwspec { const struct iommu_ops *ops; struct fwnode_handle *iommu_fwnode; u32 flags; unsigned int num_ids; u32 ids[0]; }; struct fault_info { int (*fn)(unsigned long, unsigned long, struct pt_regs *); int sig; int code; const char *name; }; typedef u64 pudval_t; enum node_stat_item { NR_LRU_BASE = 0, NR_INACTIVE_ANON = 0, NR_ACTIVE_ANON = 1, NR_INACTIVE_FILE = 2, NR_ACTIVE_FILE = 3, NR_UNEVICTABLE = 4, NR_SLAB_RECLAIMABLE_B = 5, NR_SLAB_UNRECLAIMABLE_B = 6, NR_ISOLATED_ANON = 7, NR_ISOLATED_FILE = 8, WORKINGSET_NODES = 9, WORKINGSET_REFAULT_BASE = 10, WORKINGSET_REFAULT_ANON = 10, WORKINGSET_REFAULT_FILE = 11, WORKINGSET_ACTIVATE_BASE = 12, WORKINGSET_ACTIVATE_ANON = 12, WORKINGSET_ACTIVATE_FILE = 13, WORKINGSET_RESTORE_BASE = 14, WORKINGSET_RESTORE_ANON = 14, WORKINGSET_RESTORE_FILE = 15, WORKINGSET_NODERECLAIM = 16, NR_ANON_MAPPED = 17, NR_FILE_MAPPED = 18, NR_FILE_PAGES = 19, NR_FILE_DIRTY = 20, NR_WRITEBACK = 21, NR_WRITEBACK_TEMP = 22, NR_SHMEM = 23, NR_SHMEM_THPS = 24, NR_SHMEM_PMDMAPPED = 25, NR_FILE_THPS = 26, NR_FILE_PMDMAPPED = 27, NR_ANON_THPS = 28, NR_VMSCAN_WRITE = 29, NR_VMSCAN_IMMEDIATE = 30, NR_DIRTIED = 31, NR_WRITTEN = 32, NR_THROTTLED_WRITTEN = 33, NR_KERNEL_MISC_RECLAIMABLE = 34, NR_FOLL_PIN_ACQUIRED = 35, NR_FOLL_PIN_RELEASED = 36, NR_KERNEL_STACK_KB = 37, NR_KERNEL_SCS_KB = 38, NR_PAGETABLE = 39, NR_SECONDARY_PAGETABLE = 40, NR_SWAPCACHE = 41, NR_VM_NODE_STAT_ITEMS = 42, }; enum { SECTION_MARKED_PRESENT_BIT = 0, SECTION_HAS_MEM_MAP_BIT = 1, SECTION_IS_ONLINE_BIT = 2, SECTION_IS_EARLY_BIT = 3, SECTION_MAP_LAST_BIT = 4, }; typedef u64 p4dval_t; struct ptdesc { unsigned long __page_flags; union { struct callback_head pt_rcu_head; struct list_head pt_list; struct { unsigned long _pt_pad_1; pgtable_t pmd_huge_pte; }; }; unsigned long __page_mapping; union { struct mm_struct *pt_mm; atomic_t pt_frag_refcount; }; union { unsigned long _pt_pad_2; spinlock_t ptl; }; unsigned int __page_type; atomic_t _refcount; unsigned long pt_memcg_data; }; struct mhp_params { struct vmem_altmap *altmap; pgprot_t pgprot; struct dev_pagemap *pgmap; }; struct mem_section_usage; struct page_ext; struct mem_section { unsigned long section_mem_map; struct mem_section_usage *usage; struct page_ext *page_ext; unsigned long pad; }; struct mem_section_usage { struct callback_head rcu; unsigned long subsection_map[1]; unsigned long pageblock_flags[0]; }; struct page_ext { unsigned long flags; }; struct memory_notify { unsigned long start_pfn; unsigned long nr_pages; int 
status_change_nid_normal; int status_change_nid; }; struct page_change_data { pgprot_t set_mask; pgprot_t clear_mask; }; typedef int cydp_t; struct prot_bits; struct pg_level { const struct prot_bits *bits; const char *name; size_t num; u64 mask; }; struct prot_bits { u64 mask; u64 val; const char *set; const char *clear; }; struct addr_marker { unsigned long start_address; char *name; }; struct ptdump_info { struct mm_struct *mm; const struct addr_marker *markers; unsigned long base_addr; }; enum address_markers_idx { PAGE_OFFSET_NR = 0, PAGE_END_NR = 1, }; struct ptdump_range; struct ptdump_state { void (*note_page)(struct ptdump_state *, unsigned long, int, u64); void (*effective_prot)(struct ptdump_state *, int, u64); const struct ptdump_range *range; }; struct pg_state { struct ptdump_state ptdump; struct pg_level *pg_level; struct seq_file *seq; const struct addr_marker *marker; unsigned long start_address; int level; u64 current_prot; bool check_wx; unsigned long wx_pages; unsigned long uxn_pages; }; struct ptdump_range { unsigned long start; unsigned long end; }; struct xa_node { unsigned char shift; unsigned char offset; unsigned char count; unsigned char nr_values; struct xa_node __attribute__((btf_type_tag("rcu"))) *parent; struct xarray *array; union { struct list_head private_list; struct callback_head callback_head; }; void __attribute__((btf_type_tag("rcu"))) *slots[64]; union { unsigned long tags[3]; unsigned long marks[3]; }; }; typedef void (*xa_update_node_t)(struct xa_node *); struct xa_state { struct xarray *xa; unsigned long xa_index; unsigned char xa_shift; unsigned char xa_sibs; unsigned char xa_offset; unsigned char xa_pad; struct xa_node *xa_node; struct xa_node *xa_alloc; xa_update_node_t xa_update; struct list_lru *xa_lru; }; struct sock_fprog_kern { u16 len; struct sock_filter *filter; }; struct bpf_map_dev_ops; struct bpf_offloaded_map { struct bpf_map map; struct net_device *netdev; const struct bpf_map_dev_ops *dev_ops; void *dev_priv; struct list_head offloads; long: 64; long: 64; long: 64; }; struct bpf_map_dev_ops { int (*map_get_next_key)(struct bpf_offloaded_map *, void *, void *); int (*map_lookup_elem)(struct bpf_offloaded_map *, void *, void *); int (*map_update_elem)(struct bpf_offloaded_map *, void *, void *, u64); int (*map_delete_elem)(struct bpf_offloaded_map *, void *); u64 android_kabi_reserved1; }; struct Qdisc_class_ops; struct gnet_dump; struct Qdisc_ops { struct Qdisc_ops *next; const struct Qdisc_class_ops *cl_ops; char id[16]; int priv_size; unsigned int static_flags; int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); struct sk_buff * (*dequeue)(struct Qdisc *); struct sk_buff * (*peek)(struct Qdisc *); int (*init)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *); void (*reset)(struct Qdisc *); void (*destroy)(struct Qdisc *); int (*change)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *); void (*attach)(struct Qdisc *); int (*change_tx_queue_len)(struct Qdisc *, unsigned int); void (*change_real_num_tx)(struct Qdisc *, unsigned int); int (*dump)(struct Qdisc *, struct sk_buff *); int (*dump_stats)(struct Qdisc *, struct gnet_dump *); void (*ingress_block_set)(struct Qdisc *, u32); void (*egress_block_set)(struct Qdisc *, u32); u32 (*ingress_block_get)(struct Qdisc *); u32 (*egress_block_get)(struct Qdisc *); struct module *owner; u64 android_kabi_reserved1; }; struct tcmsg; struct qdisc_walker; struct tcf_block; struct Qdisc_class_ops { unsigned int flags; struct netdev_queue * 
(*select_queue)(struct Qdisc *, struct tcmsg *); int (*graft)(struct Qdisc *, unsigned long, struct Qdisc *, struct Qdisc **, struct netlink_ext_ack *); struct Qdisc * (*leaf)(struct Qdisc *, unsigned long); void (*qlen_notify)(struct Qdisc *, unsigned long); unsigned long (*find)(struct Qdisc *, u32); int (*change)(struct Qdisc *, u32, u32, struct nlattr **, unsigned long *, struct netlink_ext_ack *); int (*delete)(struct Qdisc *, unsigned long, struct netlink_ext_ack *); void (*walk)(struct Qdisc *, struct qdisc_walker *); struct tcf_block * (*tcf_block)(struct Qdisc *, unsigned long, struct netlink_ext_ack *); unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, u32); void (*unbind_tcf)(struct Qdisc *, unsigned long); int (*dump)(struct Qdisc *, unsigned long, struct sk_buff *, struct tcmsg *); int (*dump_stats)(struct Qdisc *, unsigned long, struct gnet_dump *); u64 android_kabi_reserved1; }; struct tcmsg { unsigned char tcm_family; unsigned char tcm__pad1; unsigned short tcm__pad2; int tcm_ifindex; __u32 tcm_handle; __u32 tcm_parent; __u32 tcm_info; }; struct flow_block { struct list_head cb_list; }; struct tcf_chain; struct tcf_block { struct mutex lock; struct list_head chain_list; u32 index; u32 classid; refcount_t refcnt; struct net *net; struct Qdisc *q; struct rw_semaphore cb_lock; struct flow_block flow_block; struct list_head owner_list; bool keep_dst; atomic_t offloadcnt; unsigned int nooffloaddevcnt; unsigned int lockeddevcnt; struct { struct tcf_chain *chain; struct list_head filter_chain_list; } chain0; struct callback_head rcu; struct hlist_head proto_destroy_ht[128]; struct mutex proto_destroy_lock; }; struct tcf_proto; struct tcf_proto_ops; struct tcf_chain { struct mutex filter_chain_lock; struct tcf_proto __attribute__((btf_type_tag("rcu"))) *filter_chain; struct list_head list; struct tcf_block *block; u32 index; unsigned int refcnt; unsigned int action_refcnt; bool explicitly_created; bool flushing; const struct tcf_proto_ops *tmplt_ops; void *tmplt_priv; struct callback_head rcu; }; struct tcf_result; struct tcf_proto { struct tcf_proto __attribute__((btf_type_tag("rcu"))) *next; void __attribute__((btf_type_tag("rcu"))) *root; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); __be16 protocol; u32 prio; void *data; const struct tcf_proto_ops *ops; struct tcf_chain *chain; spinlock_t lock; bool deleting; refcount_t refcnt; struct callback_head rcu; struct hlist_node destroy_ht_node; }; struct tcf_result { union { struct { unsigned long class; u32 classid; }; const struct tcf_proto *goto_tp; }; }; typedef int flow_setup_cb_t(enum tc_setup_type, void *, void *); struct tcf_walker; struct tcf_exts; struct tcf_proto_ops { struct list_head head; char kind[16]; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); int (*init)(struct tcf_proto *); void (*destroy)(struct tcf_proto *, bool, struct netlink_ext_ack *); void * (*get)(struct tcf_proto *, u32); void (*put)(struct tcf_proto *, void *); int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, unsigned long, u32, struct nlattr **, void **, u32, struct netlink_ext_ack *); int (*delete)(struct tcf_proto *, void *, bool *, bool, struct netlink_ext_ack *); bool (*delete_empty)(struct tcf_proto *); void (*walk)(struct tcf_proto *, struct tcf_walker *, bool); int (*reoffload)(struct tcf_proto *, bool, flow_setup_cb_t *, void *, struct netlink_ext_ack *); void (*hw_add)(struct tcf_proto *, void *); void (*hw_del)(struct tcf_proto *, void *); void 
(*bind_class)(void *, u32, unsigned long, void *, unsigned long); void * (*tmplt_create)(struct net *, struct tcf_chain *, struct nlattr **, struct netlink_ext_ack *); void (*tmplt_destroy)(void *); void (*tmplt_reoffload)(struct tcf_chain *, bool, flow_setup_cb_t *, void *); struct tcf_exts * (*get_exts)(const struct tcf_proto *, u32); int (*dump)(struct net *, struct tcf_proto *, void *, struct sk_buff *, struct tcmsg *, bool); int (*terse_dump)(struct net *, struct tcf_proto *, void *, struct sk_buff *, struct tcmsg *, bool); int (*tmplt_dump)(struct sk_buff *, struct net *, void *); struct module *owner; int flags; }; struct tc_stats { __u64 bytes; __u32 packets; __u32 drops; __u32 overlimits; __u32 bps; __u32 pps; __u32 qlen; __u32 backlog; }; struct gnet_dump { spinlock_t *lock; struct sk_buff *skb; struct nlattr *tail; int compat_tc_stats; int compat_xstats; int padattr; void *xstats; int xstats_len; struct tc_stats tc_stats; }; struct tc_sizespec { unsigned char cell_log; unsigned char size_log; short cell_align; int overhead; unsigned int linklayer; unsigned int mpu; unsigned int mtu; unsigned int tsize; }; struct qdisc_size_table { struct callback_head rcu; struct list_head list; struct tc_sizespec szopts; int refcnt; u16 data[0]; }; struct net_rate_estimator { struct gnet_stats_basic_sync *bstats; spinlock_t *stats_lock; bool running; struct gnet_stats_basic_sync __attribute__((btf_type_tag("percpu"))) *cpu_bstats; u8 ewma_log; u8 intvl_log; seqcount_t seq; u64 last_packets; u64 last_bytes; u64 avpps; u64 avbps; unsigned long next_jiffies; struct timer_list timer; struct callback_head rcu; }; enum bpf_link_type { BPF_LINK_TYPE_UNSPEC = 0, BPF_LINK_TYPE_RAW_TRACEPOINT = 1, BPF_LINK_TYPE_TRACING = 2, BPF_LINK_TYPE_CGROUP = 3, BPF_LINK_TYPE_ITER = 4, BPF_LINK_TYPE_NETNS = 5, BPF_LINK_TYPE_XDP = 6, BPF_LINK_TYPE_PERF_EVENT = 7, BPF_LINK_TYPE_KPROBE_MULTI = 8, BPF_LINK_TYPE_STRUCT_OPS = 9, BPF_LINK_TYPE_NETFILTER = 10, BPF_LINK_TYPE_TCX = 11, BPF_LINK_TYPE_UPROBE_MULTI = 12, MAX_BPF_LINK_TYPE = 13, }; enum bpf_text_poke_type { BPF_MOD_CALL = 0, BPF_MOD_JUMP = 1, }; enum { BPF_REG_0 = 0, BPF_REG_1 = 1, BPF_REG_2 = 2, BPF_REG_3 = 3, BPF_REG_4 = 4, BPF_REG_5 = 5, BPF_REG_6 = 6, BPF_REG_7 = 7, BPF_REG_8 = 8, BPF_REG_9 = 9, BPF_REG_10 = 10, __MAX_BPF_REG = 11, }; enum aarch64_insn_ldst_type { AARCH64_INSN_LDST_LOAD_REG_OFFSET = 0, AARCH64_INSN_LDST_STORE_REG_OFFSET = 1, AARCH64_INSN_LDST_LOAD_IMM_OFFSET = 2, AARCH64_INSN_LDST_STORE_IMM_OFFSET = 3, AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX = 4, AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX = 5, AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX = 6, AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX = 7, AARCH64_INSN_LDST_LOAD_EX = 8, AARCH64_INSN_LDST_LOAD_ACQ_EX = 9, AARCH64_INSN_LDST_STORE_EX = 10, AARCH64_INSN_LDST_STORE_REL_EX = 11, AARCH64_INSN_LDST_SIGNED_LOAD_IMM_OFFSET = 12, AARCH64_INSN_LDST_SIGNED_LOAD_REG_OFFSET = 13, }; enum aarch64_insn_bitfield_type { AARCH64_INSN_BITFIELD_MOVE = 0, AARCH64_INSN_BITFIELD_MOVE_UNSIGNED = 1, AARCH64_INSN_BITFIELD_MOVE_SIGNED = 2, }; enum aarch64_insn_data3_type { AARCH64_INSN_DATA3_MADD = 0, AARCH64_INSN_DATA3_MSUB = 1, }; enum aarch64_insn_data2_type { AARCH64_INSN_DATA2_UDIV = 0, AARCH64_INSN_DATA2_SDIV = 1, AARCH64_INSN_DATA2_LSLV = 2, AARCH64_INSN_DATA2_LSRV = 3, AARCH64_INSN_DATA2_ASRV = 4, AARCH64_INSN_DATA2_RORV = 5, }; enum aarch64_insn_data1_type { AARCH64_INSN_DATA1_REVERSE_16 = 0, AARCH64_INSN_DATA1_REVERSE_32 = 1, AARCH64_INSN_DATA1_REVERSE_64 = 2, }; enum aarch64_insn_condition { AARCH64_INSN_COND_EQ = 0, 
AARCH64_INSN_COND_NE = 1, AARCH64_INSN_COND_CS = 2, AARCH64_INSN_COND_CC = 3, AARCH64_INSN_COND_MI = 4, AARCH64_INSN_COND_PL = 5, AARCH64_INSN_COND_VS = 6, AARCH64_INSN_COND_VC = 7, AARCH64_INSN_COND_HI = 8, AARCH64_INSN_COND_LS = 9, AARCH64_INSN_COND_GE = 10, AARCH64_INSN_COND_LT = 11, AARCH64_INSN_COND_GT = 12, AARCH64_INSN_COND_LE = 13, AARCH64_INSN_COND_AL = 14, }; enum aarch64_insn_size_type { AARCH64_INSN_SIZE_8 = 0, AARCH64_INSN_SIZE_16 = 1, AARCH64_INSN_SIZE_32 = 2, AARCH64_INSN_SIZE_64 = 3, }; enum aarch64_insn_mem_atomic_op { AARCH64_INSN_MEM_ATOMIC_ADD = 0, AARCH64_INSN_MEM_ATOMIC_CLR = 1, AARCH64_INSN_MEM_ATOMIC_EOR = 2, AARCH64_INSN_MEM_ATOMIC_SET = 3, AARCH64_INSN_MEM_ATOMIC_SWP = 4, }; enum aarch64_insn_mem_order_type { AARCH64_INSN_MEM_ORDER_NONE = 0, AARCH64_INSN_MEM_ORDER_ACQ = 1, AARCH64_INSN_MEM_ORDER_REL = 2, AARCH64_INSN_MEM_ORDER_ACQREL = 3, }; enum aarch64_insn_mb_type { AARCH64_INSN_MB_SY = 0, AARCH64_INSN_MB_ST = 1, AARCH64_INSN_MB_LD = 2, AARCH64_INSN_MB_ISH = 3, AARCH64_INSN_MB_ISHST = 4, AARCH64_INSN_MB_ISHLD = 5, AARCH64_INSN_MB_NSH = 6, AARCH64_INSN_MB_NSHST = 7, AARCH64_INSN_MB_NSHLD = 8, AARCH64_INSN_MB_OSH = 9, AARCH64_INSN_MB_OSHST = 10, AARCH64_INSN_MB_OSHLD = 11, }; enum { DUMP_PREFIX_NONE = 0, DUMP_PREFIX_ADDRESS = 1, DUMP_PREFIX_OFFSET = 2, }; enum bpf_tramp_prog_type { BPF_TRAMP_FENTRY = 0, BPF_TRAMP_FEXIT = 1, BPF_TRAMP_MODIFY_RETURN = 2, BPF_TRAMP_MAX = 3, BPF_TRAMP_REPLACE = 4, }; struct bpf_plt { u32 insn_ldr; u32 insn_br; u64 target; }; struct jit_ctx { const struct bpf_prog *prog; int idx; int epilogue_offset; int *offset; int exentry_idx; __le32 *image; u32 stack_size; int fpb_offset; }; struct bpf_binary_header { u32 size; long: 0; u8 image[0]; }; typedef void (*bpf_jit_fill_hole_t)(void *, unsigned int); typedef __kernel_long_t __kernel_off_t; typedef __kernel_off_t off_t; struct bpf_tramp_link; struct bpf_tramp_links { struct bpf_tramp_link *links[38]; int nr_links; }; struct bpf_link_ops; struct bpf_link { atomic64_t refcnt; u32 id; enum bpf_link_type type; const struct bpf_link_ops *ops; struct bpf_prog *prog; union { struct callback_head rcu; struct work_struct work; }; }; struct bpf_tramp_link { struct bpf_link link; struct hlist_node tramp_hlist; u64 cookie; }; struct bpf_link_info; struct bpf_link_ops { void (*release)(struct bpf_link *); void (*dealloc)(struct bpf_link *); void (*dealloc_deferred)(struct bpf_link *); int (*detach)(struct bpf_link *); int (*update_prog)(struct bpf_link *, struct bpf_prog *, struct bpf_prog *); void (*show_fdinfo)(const struct bpf_link *, struct seq_file *); int (*fill_link_info)(const struct bpf_link *, struct bpf_link_info *); int (*update_map)(struct bpf_link *, struct bpf_map *, struct bpf_map *); u64 android_kabi_reserved1; }; struct bpf_link_info { __u32 type; __u32 id; __u32 prog_id; union { struct { __u64 tp_name; __u32 tp_name_len; } raw_tracepoint; struct { __u32 attach_type; __u32 target_obj_id; __u32 target_btf_id; } tracing; struct { __u64 cgroup_id; __u32 attach_type; } cgroup; struct { __u64 target_name; __u32 target_name_len; union { struct { __u32 map_id; } map; }; union { struct { __u64 cgroup_id; __u32 order; } cgroup; struct { __u32 tid; __u32 pid; } task; }; } iter; struct { __u32 netns_ino; __u32 attach_type; } netns; struct { __u32 ifindex; } xdp; struct { __u32 map_id; } struct_ops; struct { __u32 pf; __u32 hooknum; __s32 priority; __u32 flags; } netfilter; struct { __u64 addrs; __u32 count; __u32 flags; } kprobe_multi; struct { __u32 type; union { struct { __u64 file_name; 
__u32 name_len; __u32 offset; } uprobe; struct { __u64 func_name; __u32 name_len; __u32 offset; __u64 addr; } kprobe; struct { __u64 tp_name; __u32 name_len; } tracepoint; struct { __u64 config; __u32 type; } event; }; } perf_event; struct { __u32 ifindex; __u32 attach_type; } tcx; }; }; struct bpf_tramp_run_ctx; typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *, struct bpf_tramp_run_ctx *); struct bpf_tramp_run_ctx { struct bpf_run_ctx run_ctx; u64 bpf_cookie; struct bpf_run_ctx *saved_run_ctx; }; typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *, u64, struct bpf_tramp_run_ctx *); struct arm64_jit_data { struct bpf_binary_header *header; u8 *image; struct jit_ctx ctx; }; typedef void (*btf_trace_kvm_userspace_exit)(void *, __u32, int); typedef void (*btf_trace_kvm_vcpu_wakeup)(void *, __u64, bool, bool); typedef void (*btf_trace_kvm_set_irq)(void *, unsigned int, int, int); typedef void (*btf_trace_kvm_ack_irq)(void *, unsigned int, unsigned int); typedef void (*btf_trace_kvm_mmio)(void *, int, int, u64, void *); typedef void (*btf_trace_kvm_fpu)(void *, int); typedef void (*btf_trace_kvm_halt_poll_ns)(void *, bool, unsigned int, unsigned int, unsigned int); struct kvm_dirty_ring; typedef void (*btf_trace_kvm_dirty_ring_push)(void *, struct kvm_dirty_ring *, u32, u64); struct kvm_dirty_gfn; struct kvm_dirty_ring { u32 dirty_index; u32 reset_index; u32 size; u32 soft_limit; struct kvm_dirty_gfn *dirty_gfns; int index; }; struct kvm_dirty_gfn { __u32 flags; __u32 slot; __u64 offset; }; typedef void (*btf_trace_kvm_dirty_ring_reset)(void *, struct kvm_dirty_ring *); struct kvm_vcpu; typedef void (*btf_trace_kvm_dirty_ring_exit)(void *, struct kvm_vcpu *); struct preempt_ops; struct preempt_notifier { struct hlist_node link; struct preempt_ops *ops; }; typedef u64 gpa_t; struct kvm_mmio_fragment { gpa_t gpa; void *data; unsigned int len; }; struct kvm_cpu_context { struct user_pt_regs regs; u64 spsr_abt; u64 spsr_und; u64 spsr_irq; u64 spsr_fiq; struct user_fpsimd_state fp_regs; u64 sys_regs[167]; struct kvm_vcpu *__hyp_running_vcpu; }; struct kvm_vcpu_fault_info { u64 esr_el2; u64 far_el2; u64 hpfar_el2; u64 disr_el1; }; struct kvm_guest_debug_arch { __u64 dbg_bcr[16]; __u64 dbg_bvr[16]; __u64 dbg_wcr[16]; __u64 dbg_wvr[16]; }; struct vgic_v2_cpu_if { u32 vgic_hcr; u32 vgic_vmcr; u32 vgic_apr; u32 vgic_lr[64]; unsigned int used_lrs; }; typedef unsigned long irq_hw_number_t; struct its_vm; struct its_vpe { struct page *vpt_page; struct its_vm *its_vm; atomic_t vlpi_count; int irq; irq_hw_number_t vpe_db_lpi; bool resident; bool ready; union { struct { int vpe_proxy_event; bool idai; }; struct { struct fwnode_handle *fwnode; struct irq_domain *sgi_domain; struct { u8 priority; bool enabled; bool group; } sgi_config[16]; atomic_t vmapp_count; }; }; raw_spinlock_t vpe_lock; u16 col_idx; u16 vpe_id; bool pending_last; }; struct vgic_v3_cpu_if { u32 vgic_hcr; u32 vgic_vmcr; u32 vgic_sre; u32 vgic_ap0r[4]; u32 vgic_ap1r[4]; u64 vgic_lr[16]; struct its_vpe its_vpe; unsigned int used_lrs; }; enum vgic_irq_config { VGIC_CONFIG_EDGE = 0, VGIC_CONFIG_LEVEL = 1, }; struct irq_ops; struct vgic_irq { raw_spinlock_t irq_lock; struct list_head lpi_list; struct list_head ap_list; struct kvm_vcpu *vcpu; struct kvm_vcpu *target_vcpu; u32 intid; bool line_level; bool pending_latch; bool active; bool enabled; bool hw; struct kref refcount; u32 hwintid; unsigned int host_irq; union { u8 targets; u32 mpidr; }; u8 source; u8 active_source; u8 priority; u8 group; enum vgic_irq_config config; struct 
irq_ops *ops; void *owner; }; enum iodev_type { IODEV_CPUIF = 0, IODEV_DIST = 1, IODEV_REDIST = 2, IODEV_ITS = 3, }; struct kvm_io_device_ops; struct kvm_io_device { const struct kvm_io_device_ops *ops; }; struct vgic_its; struct vgic_register_region; struct vgic_io_device { gpa_t base_addr; union { struct kvm_vcpu *redist_vcpu; struct vgic_its *its; }; const struct vgic_register_region *regions; enum iodev_type iodev_type; int nr_regions; struct kvm_io_device dev; }; struct vgic_redist_region; struct vgic_cpu { union { struct vgic_v2_cpu_if vgic_v2; struct vgic_v3_cpu_if vgic_v3; }; struct vgic_irq private_irqs[32]; raw_spinlock_t ap_list_lock; struct list_head ap_list_head; struct vgic_io_device rd_iodev; struct vgic_redist_region *rdreg; u32 rdreg_index; atomic_t syncr_busy; u64 pendbaser; atomic_t ctlr; u32 num_pri_bits; u32 num_id_bits; }; struct arch_timer_offset { u64 *vm_offset; u64 *vcpu_offset; }; struct arch_timer_context { struct kvm_vcpu *vcpu; struct hrtimer hrtimer; u64 ns_frac; struct arch_timer_offset offset; bool loaded; struct { bool level; } irq; u32 host_timer_irq; }; struct arch_timer_cpu { struct arch_timer_context timers[4]; struct hrtimer bg_timer; bool enabled; }; struct kvm_pmu_events { u32 events_host; u32 events_guest; }; struct kvm_pmc { u8 idx; struct perf_event *perf_event; }; struct kvm_pmu { struct irq_work overflow_work; struct kvm_pmu_events events; struct kvm_pmc pmc[32]; int irq_num; bool created; bool irq_level; }; struct kvm_mp_state { __u32 mp_state; }; struct kvm_mmu_memory_cache { gfp_t gfp_zero; gfp_t gfp_custom; struct kmem_cache *kmem_cache; int capacity; int nobjs; void **objects; }; struct kvm_hyp_memcache { phys_addr_t head; unsigned long nr_pages; unsigned long flags; }; struct vcpu_reset_state { unsigned long pc; unsigned long r0; bool be; bool reset; }; struct kvm_s2_mmu; struct kvm_vcpu_arch { struct kvm_cpu_context ctxt; void *sve_state; enum fp_type fp_type; unsigned int sve_max_vl; u64 svcr; struct kvm_s2_mmu *hw_mmu; u64 hcr_el2; u64 mdcr_el2; u64 cptr_el2; u64 mdcr_el2_host; struct kvm_vcpu_fault_info fault; enum { FP_STATE_FREE = 0, FP_STATE_HOST_OWNED = 1, FP_STATE_GUEST_OWNED = 2, } fp_state; u8 cflags; u8 iflags; u8 sflags; bool pause; struct kvm_guest_debug_arch *debug_ptr; struct kvm_guest_debug_arch vcpu_debug_state; struct kvm_guest_debug_arch external_debug_state; struct user_fpsimd_state *host_fpsimd_state; struct { struct kvm_guest_debug_arch regs; u64 pmscr_el1; u64 trfcr_el1; } host_debug_state; struct vgic_cpu vgic_cpu; struct arch_timer_cpu timer_cpu; struct kvm_pmu pmu; struct { u32 mdscr_el1; bool pstate_ss; } guest_debug_preserved; struct kvm_mp_state mp_state; spinlock_t mp_state_lock; union { struct kvm_mmu_memory_cache mmu_page_cache; struct kvm_hyp_memcache stage2_mc; }; unsigned long features[1]; u64 vsesr_el2; struct vcpu_reset_state reset_state; struct { u64 last_steal; gpa_t base; } steal; u32 *ccsidr; struct kvm_hyp_req *hyp_reqs; }; struct kvm_vcpu_stat_generic { u64 halt_successful_poll; u64 halt_attempted_poll; u64 halt_poll_invalid; u64 halt_wakeup; u64 halt_poll_success_ns; u64 halt_poll_fail_ns; u64 halt_wait_ns; u64 halt_poll_success_hist[32]; u64 halt_poll_fail_hist[32]; u64 halt_wait_hist[32]; u64 blocking; }; struct kvm_vcpu_stat { struct kvm_vcpu_stat_generic generic; u64 hvc_exit_stat; u64 wfe_exit_stat; u64 wfi_exit_stat; u64 mmio_exit_user; u64 mmio_exit_kernel; u64 signal_exits; u64 exits; }; struct kvm; struct kvm_run; struct kvm_memory_slot; struct kvm_vcpu { struct kvm *kvm; struct 
preempt_notifier preempt_notifier; int cpu; int vcpu_id; int vcpu_idx; int ____srcu_idx; int mode; u64 requests; unsigned long guest_debug; struct mutex mutex; struct kvm_run *run; struct rcuwait wait; struct pid __attribute__((btf_type_tag("rcu"))) *pid; int sigset_active; sigset_t sigset; unsigned int halt_poll_ns; bool valid_wakeup; int mmio_needed; int mmio_read_completed; int mmio_is_write; int mmio_cur_fragment; int mmio_nr_fragments; struct kvm_mmio_fragment mmio_fragments[2]; struct { bool in_spin_loop; bool dy_eligible; } spin_loop; bool preempted; bool ready; long: 64; struct kvm_vcpu_arch arch; struct kvm_vcpu_stat stat; char stats_id[48]; struct kvm_dirty_ring dirty_ring; struct kvm_memory_slot *last_used_slot; u64 last_used_slot_gen; long: 64; }; struct kvm_memslots { u64 generation; atomic_long_t last_used_slot; struct rb_root_cached hva_tree; struct rb_root gfn_tree; struct hlist_head id_hash[128]; int node_idx; }; struct kvm_vm_stat_generic { u64 remote_tlb_flush; u64 remote_tlb_flush_requests; }; struct kvm_vm_stat { struct kvm_vm_stat_generic generic; atomic64_t protected_hyp_mem; atomic64_t protected_shared_mem; atomic64_t protected_pgtable_mem; }; struct kvm_vmid { atomic64_t id; }; struct kvm_pgtable; struct kvm_arch; struct kvm_s2_mmu { struct kvm_vmid vmid; phys_addr_t pgd_phys; struct kvm_pgtable *pgt; int __attribute__((btf_type_tag("percpu"))) *last_vcpu_ran; struct kvm_mmu_memory_cache split_page_cache; uint64_t split_page_chunk_size; struct kvm_arch *arch; }; struct its_vm { struct fwnode_handle *fwnode; struct irq_domain *domain; struct page *vprop_page; struct its_vpe **vpes; int nr_vpes; irq_hw_number_t db_lpi_base; unsigned long *db_bitmap; int nr_db_lpis; u32 vlpi_count[16]; }; struct vgic_state_iter; struct vgic_dist { bool in_kernel; bool ready; bool initialized; u32 vgic_model; u32 implementation_rev; bool v2_groups_user_writable; bool msis_require_devid; int nr_spis; gpa_t vgic_dist_base; union { gpa_t vgic_cpu_base; struct list_head rd_regions; }; bool enabled; bool nassgireq; struct vgic_irq *spis; struct vgic_io_device dist_iodev; bool has_its; bool table_write_in_progress; u64 propbaser; raw_spinlock_t lpi_list_lock; struct list_head lpi_list_head; int lpi_list_count; struct list_head lpi_translation_cache; struct vgic_state_iter *iter; struct its_vm its_vm; }; struct arch_timer_vm_data { u64 voffset; u64 poffset; u8 ppi[4]; }; struct kvm_smccc_features { unsigned long std_bmap; unsigned long std_hyp_bmap; unsigned long vendor_hyp_bmap; }; typedef unsigned int pkvm_handle_t; struct kvm_protected_vm { pkvm_handle_t handle; struct kvm_hyp_memcache stage2_teardown_mc; struct maple_tree pinned_pages; gpa_t pvmfw_load_addr; bool enabled; }; struct arm_pmu; struct kvm_arch { struct kvm_s2_mmu mmu; u64 vtcr; struct vgic_dist vgic; struct arch_timer_vm_data timer_data; u32 psci_version; struct mutex config_lock; unsigned long flags; unsigned long vcpu_features[1]; unsigned long *pmu_filter; struct arm_pmu *arm_pmu; cpumask_var_t supported_cpus; struct kvm_smccc_features smccc_feat; struct maple_tree smccc_filter; u64 id_regs[56]; struct kvm_protected_vm pkvm; }; struct mmu_notifier_ops; struct mmu_notifier { struct hlist_node hlist; const struct mmu_notifier_ops *ops; struct mm_struct *mm; struct callback_head rcu; unsigned int users; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct kvm_io_bus; struct kvm_coalesced_mmio_ring; struct kvm_irq_routing_table; struct kvm_stat_data; struct kvm { rwlock_t mmu_lock; struct mutex slots_lock; struct 
mutex slots_arch_lock; struct mm_struct *mm; unsigned long nr_memslot_pages; struct kvm_memslots __memslots[2]; struct kvm_memslots __attribute__((btf_type_tag("rcu"))) *memslots[1]; struct xarray vcpu_array; atomic_t nr_memslots_dirty_logging; spinlock_t mn_invalidate_lock; unsigned long mn_active_invalidate_count; struct rcuwait mn_memslots_update_rcuwait; spinlock_t gpc_lock; struct list_head gpc_list; atomic_t online_vcpus; int max_vcpus; int created_vcpus; int last_boosted_vcpu; struct list_head vm_list; struct mutex lock; struct kvm_io_bus __attribute__((btf_type_tag("rcu"))) *buses[4]; struct { spinlock_t lock; struct list_head items; struct list_head resampler_list; struct mutex resampler_lock; } irqfds; struct list_head ioeventfds; struct kvm_vm_stat stat; struct kvm_arch arch; refcount_t users_count; struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; spinlock_t ring_lock; struct list_head coalesced_zones; struct mutex irq_lock; struct kvm_irq_routing_table __attribute__((btf_type_tag("rcu"))) *irq_routing; struct hlist_head irq_ack_notifier_list; struct mmu_notifier mmu_notifier; unsigned long mmu_invalidate_seq; long mmu_invalidate_in_progress; unsigned long mmu_invalidate_range_start; unsigned long mmu_invalidate_range_end; struct list_head devices; u64 manual_dirty_log_protect; struct dentry *debugfs_dentry; struct kvm_stat_data **debugfs_stat_data; struct srcu_struct srcu; struct srcu_struct irq_srcu; pid_t userspace_pid; bool override_halt_poll_ns; unsigned int max_halt_poll_ns; u32 dirty_ring_size; bool dirty_ring_with_bitmap; bool vm_bugged; bool vm_dead; char stats_id[48]; }; struct kvm_io_range { gpa_t addr; int len; struct kvm_io_device *dev; }; struct kvm_io_bus { int dev_count; int ioeventfd_count; struct kvm_io_range range[0]; }; struct kvm_io_device_ops { int (*read)(struct kvm_vcpu *, struct kvm_io_device *, gpa_t, int, void *); int (*write)(struct kvm_vcpu *, struct kvm_io_device *, gpa_t, int, const void *); void (*destructor)(struct kvm_io_device *); }; struct irq_ops { unsigned long flags; bool (*get_input_level)(int); }; struct kvm_device; struct vgic_its { gpa_t vgic_its_base; bool enabled; struct vgic_io_device iodev; struct kvm_device *dev; u64 baser_device_table; u64 baser_coll_table; struct mutex cmd_lock; u64 cbaser; u32 creadr; u32 cwriter; u32 abi_rev; struct mutex its_lock; struct list_head device_list; struct list_head collection_list; }; struct kvm_device_ops; struct kvm_device { const struct kvm_device_ops *ops; struct kvm *kvm; void *private; struct list_head vm_node; }; struct kvm_device_attr; struct kvm_device_ops { const char *name; int (*create)(struct kvm_device *, u32); void (*init)(struct kvm_device *); void (*destroy)(struct kvm_device *); void (*release)(struct kvm_device *); int (*set_attr)(struct kvm_device *, struct kvm_device_attr *); int (*get_attr)(struct kvm_device *, struct kvm_device_attr *); int (*has_attr)(struct kvm_device *, struct kvm_device_attr *); long (*ioctl)(struct kvm_device *, unsigned int, unsigned long); int (*mmap)(struct kvm_device *, struct vm_area_struct *); }; struct kvm_device_attr { __u32 flags; __u32 group; __u64 attr; __u64 addr; }; struct vgic_register_region { unsigned int reg_offset; unsigned int len; unsigned int bits_per_irq; unsigned int access_flags; union { unsigned long (*read)(struct kvm_vcpu *, gpa_t, unsigned int); unsigned long (*its_read)(struct kvm *, struct vgic_its *, gpa_t, unsigned int); }; union { void (*write)(struct kvm_vcpu *, gpa_t, unsigned int, unsigned long); void 
(*its_write)(struct kvm *, struct vgic_its *, gpa_t, unsigned int, unsigned long); }; unsigned long (*uaccess_read)(struct kvm_vcpu *, gpa_t, unsigned int); union { int (*uaccess_write)(struct kvm_vcpu *, gpa_t, unsigned int, unsigned long); int (*uaccess_its_write)(struct kvm *, struct vgic_its *, gpa_t, unsigned int, unsigned long); }; }; struct kvm_coalesced_mmio { __u64 phys_addr; __u32 len; union { __u32 pad; __u32 pio; }; __u8 data[8]; }; struct kvm_coalesced_mmio_ring { __u32 first; __u32 last; struct kvm_coalesced_mmio coalesced_mmio[0]; }; struct kvm_irq_routing_table { int chip[988]; u32 nr_rt_entries; struct hlist_head map[0]; }; struct mmu_notifier_range; struct mmu_notifier_ops { void (*release)(struct mmu_notifier *, struct mm_struct *); int (*clear_flush_young)(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long); int (*clear_young)(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long); int (*test_young)(struct mmu_notifier *, struct mm_struct *, unsigned long); void (*change_pte)(struct mmu_notifier *, struct mm_struct *, unsigned long, pte_t); int (*invalidate_range_start)(struct mmu_notifier *, const struct mmu_notifier_range *); void (*invalidate_range_end)(struct mmu_notifier *, const struct mmu_notifier_range *); void (*arch_invalidate_secondary_tlbs)(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long); struct mmu_notifier * (*alloc_notifier)(struct mm_struct *); void (*free_notifier)(struct mmu_notifier *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; enum mmu_notifier_event { MMU_NOTIFY_UNMAP = 0, MMU_NOTIFY_CLEAR = 1, MMU_NOTIFY_PROTECTION_VMA = 2, MMU_NOTIFY_PROTECTION_PAGE = 3, MMU_NOTIFY_SOFT_DIRTY = 4, MMU_NOTIFY_RELEASE = 5, MMU_NOTIFY_MIGRATE = 6, MMU_NOTIFY_EXCLUSIVE = 7, }; struct mmu_notifier_range { struct mm_struct *mm; unsigned long start; unsigned long end; unsigned int flags; enum mmu_notifier_event event; void *owner; }; enum kvm_stat_kind { KVM_STAT_VM = 0, KVM_STAT_VCPU = 1, }; struct _kvm_stats_desc; struct kvm_stat_data { struct kvm *kvm; const struct _kvm_stats_desc *desc; enum kvm_stat_kind kind; }; struct kvm_stats_desc { __u32 flags; __s16 exponent; __u16 size; __u32 offset; __u32 bucket_size; char name[0]; }; struct _kvm_stats_desc { struct kvm_stats_desc desc; char name[48]; }; struct preempt_ops { void (*sched_in)(struct preempt_notifier *, int); void (*sched_out)(struct preempt_notifier *, struct task_struct *); }; struct kvm_debug_exit_arch { __u32 hsr; __u32 hsr_high; __u64 far; }; struct kvm_hyperv_exit { __u32 type; __u32 pad1; union { struct { __u32 msr; __u32 pad2; __u64 control; __u64 evt_page; __u64 msg_page; } synic; struct { __u64 input; __u64 result; __u64 params[2]; } hcall; struct { __u32 msr; __u32 pad2; __u64 control; __u64 status; __u64 send_page; __u64 recv_page; __u64 pending_page; } syndbg; } u; }; struct kvm_xen_exit { __u32 type; union { struct { __u32 longmode; __u32 cpl; __u64 input; __u64 result; __u64 params[6]; } hcall; } u; }; struct kvm_sync_regs { __u64 device_irq_level; }; struct kvm_run { __u8 request_interrupt_window; __u8 immediate_exit; __u8 padding1[6]; __u32 exit_reason; __u8 ready_for_interrupt_injection; __u8 if_flag; __u16 flags; __u64 cr8; __u64 apic_base; union { struct { __u64 hardware_exit_reason; } hw; struct { __u64 hardware_entry_failure_reason; __u32 cpu; } fail_entry; struct { __u32 exception; __u32 error_code; } ex; struct { __u8 direction; __u8 size; __u16 port; 
__u32 count; __u64 data_offset; } io; struct { struct kvm_debug_exit_arch arch; } debug; struct { __u64 phys_addr; __u8 data[8]; __u32 len; __u8 is_write; } mmio; struct { __u64 nr; __u64 args[6]; __u64 ret; union { __u64 flags; }; } hypercall; struct { __u64 rip; __u32 is_write; __u32 pad; } tpr_access; struct { __u8 icptcode; __u16 ipa; __u32 ipb; } s390_sieic; __u64 s390_reset_flags; struct { __u64 trans_exc_code; __u32 pgm_code; } s390_ucontrol; struct { __u32 dcrn; __u32 data; __u8 is_write; } dcr; struct { __u32 suberror; __u32 ndata; __u64 data[16]; } internal; struct { __u32 suberror; __u32 ndata; __u64 flags; union { struct { __u8 insn_size; __u8 insn_bytes[15]; }; }; } emulation_failure; struct { __u64 gprs[32]; } osi; struct { __u64 nr; __u64 ret; __u64 args[9]; } papr_hcall; struct { __u16 subchannel_id; __u16 subchannel_nr; __u32 io_int_parm; __u32 io_int_word; __u32 ipb; __u8 dequeued; } s390_tsch; struct { __u32 epr; } epr; struct { __u32 type; __u32 ndata; union { __u64 data[16]; }; } system_event; struct { __u64 addr; __u8 ar; __u8 reserved; __u8 fc; __u8 sel1; __u16 sel2; } s390_stsi; struct { __u8 vector; } eoi; struct kvm_hyperv_exit hyperv; struct { __u64 esr_iss; __u64 fault_ipa; } arm_nisv; struct { __u8 error; __u8 pad[7]; __u32 reason; __u32 index; __u64 data; } msr; struct kvm_xen_exit xen; struct { unsigned long extension_id; unsigned long function_id; unsigned long args[6]; unsigned long ret[2]; } riscv_sbi; struct { unsigned long csr_num; unsigned long new_value; unsigned long write_mask; unsigned long ret_value; } riscv_csr; struct { __u32 flags; } notify; char padding[256]; }; __u64 kvm_valid_regs; __u64 kvm_dirty_regs; union { struct kvm_sync_regs regs; char padding[2048]; } s; }; struct vgic_redist_region { u32 index; gpa_t base; u32 count; u32 free_index; struct list_head list; }; struct interval_tree_node { struct rb_node rb; unsigned long start; unsigned long last; unsigned long __subtree_last; }; typedef u64 gfn_t; struct kvm_arch_memory_slot {}; struct kvm_memory_slot { struct hlist_node id_node[2]; struct interval_tree_node hva_node[2]; struct rb_node gfn_node[2]; gfn_t base_gfn; unsigned long npages; unsigned long *dirty_bitmap; struct kvm_arch_memory_slot arch; unsigned long userspace_addr; u32 flags; short id; u16 as_id; }; typedef void (*btf_trace_kvm_unmap_hva_range)(void *, unsigned long, unsigned long); typedef void (*btf_trace_kvm_set_spte_hva)(void *, unsigned long); typedef void (*btf_trace_kvm_age_hva)(void *, unsigned long, unsigned long); typedef void (*btf_trace_kvm_test_age_hva)(void *, unsigned long); struct trace_print_flags { unsigned long mask; const char *name; }; struct syscore_ops { struct list_head node; int (*suspend)(); void (*resume)(); void (*shutdown)(); }; union kvm_mmu_notifier_arg { pte_t pte; }; struct miscdevice { int minor; const char *name; const struct file_operations *fops; struct list_head list; struct device *parent; struct device *this_device; const struct attribute_group **groups; const char *nodename; umode_t mode; }; enum kvm_mr_change { KVM_MR_CREATE = 0, KVM_MR_DELETE = 1, KVM_MR_MOVE = 2, KVM_MR_FLAGS_ONLY = 3, }; enum { OUTSIDE_GUEST_MODE = 0, IN_GUEST_MODE = 1, EXITING_GUEST_MODE = 2, READING_SHADOW_PAGE_TABLES = 3, }; enum kvm_bus { KVM_MMIO_BUS = 0, KVM_PIO_BUS = 1, KVM_VIRTIO_CCW_NOTIFY_BUS = 2, KVM_FAST_MMIO_BUS = 3, KVM_NR_BUSES = 4, }; enum kobject_action { KOBJ_ADD = 0, KOBJ_REMOVE = 1, KOBJ_CHANGE = 2, KOBJ_MOVE = 3, KOBJ_ONLINE = 4, KOBJ_OFFLINE = 5, KOBJ_BIND = 6, KOBJ_UNBIND = 7, }; typedef 
unsigned int xa_mark_t; struct trace_event_raw_kvm_userspace_exit { struct trace_entry ent; __u32 reason; int errno; char __data[0]; }; struct trace_event_raw_kvm_vcpu_wakeup { struct trace_entry ent; __u64 ns; bool waited; bool valid; char __data[0]; }; struct trace_event_raw_kvm_set_irq { struct trace_entry ent; unsigned int gsi; int level; int irq_source_id; char __data[0]; }; struct trace_event_raw_kvm_ack_irq { struct trace_entry ent; unsigned int irqchip; unsigned int pin; char __data[0]; }; struct trace_event_raw_kvm_mmio { struct trace_entry ent; u32 type; u32 len; u64 gpa; u64 val; char __data[0]; }; struct trace_event_raw_kvm_fpu { struct trace_entry ent; u32 load; char __data[0]; }; struct trace_event_raw_kvm_halt_poll_ns { struct trace_entry ent; bool grow; unsigned int vcpu_id; unsigned int new; unsigned int old; char __data[0]; }; struct trace_event_raw_kvm_dirty_ring_push { struct trace_entry ent; int index; u32 dirty_index; u32 reset_index; u32 slot; u64 offset; char __data[0]; }; struct trace_event_raw_kvm_dirty_ring_reset { struct trace_entry ent; int index; u32 dirty_index; u32 reset_index; char __data[0]; }; struct trace_event_raw_kvm_dirty_ring_exit { struct trace_entry ent; int vcpu_id; char __data[0]; }; struct trace_event_raw_kvm_unmap_hva_range { struct trace_entry ent; unsigned long start; unsigned long end; char __data[0]; }; struct trace_event_raw_kvm_set_spte_hva { struct trace_entry ent; unsigned long hva; char __data[0]; }; struct trace_event_raw_kvm_age_hva { struct trace_entry ent; unsigned long start; unsigned long end; char __data[0]; }; struct trace_event_raw_kvm_test_age_hva { struct trace_entry ent; unsigned long hva; char __data[0]; }; typedef u64 hpa_t; struct kvm_userspace_memory_region { __u32 slot; __u32 flags; __u64 guest_phys_addr; __u64 memory_size; __u64 userspace_addr; }; struct kvm_memslot_iter { struct kvm_memslots *slots; struct rb_node *node; struct kvm_memory_slot *slot; }; typedef u64 hfn_t; typedef hfn_t kvm_pfn_t; typedef unsigned long hva_t; struct kvm_coalesced_mmio_zone { __u64 addr; __u32 size; union { __u32 pad; __u32 pio; }; }; struct kvm_irqfd { __u32 fd; __u32 gsi; __u32 flags; __u32 resamplefd; __u8 pad[16]; }; struct kvm_irq_level { union { __u32 irq; __s32 status; }; __u32 level; }; struct kvm_msi { __u32 address_lo; __u32 address_hi; __u32 data; __u32 flags; __u32 devid; __u8 pad[12]; }; struct kvm_ioeventfd { __u64 datamatch; __u64 addr; __u32 len; __s32 fd; __u32 flags; __u8 pad[36]; }; struct kvm_irq_routing_irqchip { __u32 irqchip; __u32 pin; }; struct kvm_irq_routing_msi { __u32 address_lo; __u32 address_hi; __u32 data; union { __u32 pad; __u32 devid; }; }; struct kvm_irq_routing_s390_adapter { __u64 ind_addr; __u64 summary_addr; __u64 ind_offset; __u32 summary_offset; __u32 adapter_id; }; struct kvm_irq_routing_hv_sint { __u32 vcpu; __u32 sint; }; struct kvm_irq_routing_xen_evtchn { __u32 port; __u32 vcpu; __u32 priority; }; struct kvm_irq_routing_entry { __u32 gsi; __u32 type; __u32 flags; __u32 pad; union { struct kvm_irq_routing_irqchip irqchip; struct kvm_irq_routing_msi msi; struct kvm_irq_routing_s390_adapter adapter; struct kvm_irq_routing_hv_sint hv_sint; struct kvm_irq_routing_xen_evtchn xen_evtchn; __u32 pad[8]; } u; }; struct kvm_dirty_log { __u32 slot; __u32 padding1; union { void __attribute__((btf_type_tag("user"))) *dirty_bitmap; __u64 padding2; }; }; struct kvm_clear_dirty_log { __u32 slot; __u32 num_pages; __u64 first_page; union { void __attribute__((btf_type_tag("user"))) *dirty_bitmap; __u64 
padding2; }; }; struct kvm_sregs {}; struct kvm_fpu {}; struct kvm_regs { struct user_pt_regs regs; __u64 sp_el1; __u64 elr_el1; __u64 spsr[5]; long: 64; struct user_fpsimd_state fp_regs; }; struct kvm_translation { __u64 linear_address; __u64 physical_address; __u8 valid; __u8 writeable; __u8 usermode; __u8 pad[5]; }; struct kvm_guest_debug { __u32 control; __u32 pad; struct kvm_guest_debug_arch arch; }; struct kvm_stats_header { __u32 flags; __u32 name_size; __u32 num_desc; __u32 id_offset; __u32 desc_offset; __u32 data_offset; }; struct kvm_gfn_range; typedef bool (*hva_handler_t)(struct kvm *, struct kvm_gfn_range *); typedef void (*on_lock_fn_t)(struct kvm *, unsigned long, unsigned long); typedef void (*on_unlock_fn_t)(struct kvm *); struct kvm_hva_range { unsigned long start; unsigned long end; union kvm_mmu_notifier_arg arg; hva_handler_t handler; on_lock_fn_t on_lock; on_unlock_fn_t on_unlock; bool flush_on_ret; bool may_block; }; struct kvm_gfn_range { struct kvm_memory_slot *slot; gfn_t start; gfn_t end; union kvm_mmu_notifier_arg arg; bool may_block; }; struct trace_event_data_offsets_kvm_userspace_exit {}; struct trace_event_data_offsets_kvm_vcpu_wakeup {}; struct trace_event_data_offsets_kvm_set_irq {}; struct trace_event_data_offsets_kvm_ack_irq {}; struct trace_event_data_offsets_kvm_mmio {}; struct trace_event_data_offsets_kvm_fpu {}; struct trace_event_data_offsets_kvm_halt_poll_ns {}; struct trace_event_data_offsets_kvm_dirty_ring_push {}; struct trace_event_data_offsets_kvm_dirty_ring_reset {}; struct trace_event_data_offsets_kvm_dirty_ring_exit {}; struct trace_event_data_offsets_kvm_unmap_hva_range {}; struct trace_event_data_offsets_kvm_set_spte_hva {}; struct trace_event_data_offsets_kvm_age_hva {}; struct trace_event_data_offsets_kvm_test_age_hva {}; struct kvm_host_map { struct page *page; void *hva; kvm_pfn_t pfn; kvm_pfn_t gfn; }; struct gfn_to_hva_cache { u64 generation; gpa_t gpa; unsigned long hva; unsigned long len; struct kvm_memory_slot *memslot; }; struct kvm_enable_cap { __u32 cap; __u32 flags; __u64 args[4]; __u8 pad[64]; }; typedef int (*kvm_vm_thread_fn_t)(struct kvm *, uintptr_t); struct kvm_vm_worker_thread_context { struct kvm *kvm; struct task_struct *parent; struct completion init_done; kvm_vm_thread_fn_t thread_fn; uintptr_t data; int err; }; struct kvm_irq_routing { __u32 nr; __u32 flags; struct kvm_irq_routing_entry entries[0]; }; struct kvm_create_device { __u32 type; __u32 fd; __u32 flags; }; struct kvm_signal_mask { __u32 len; __u8 sigset[0]; }; enum { kvm_ioeventfd_flag_nr_datamatch = 0, kvm_ioeventfd_flag_nr_pio = 1, kvm_ioeventfd_flag_nr_deassign = 2, kvm_ioeventfd_flag_nr_virtio_ccw_notify = 3, kvm_ioeventfd_flag_nr_fast_mmio = 4, kvm_ioeventfd_flag_nr_max = 5, }; enum { WORK_STRUCT_PENDING_BIT = 0, WORK_STRUCT_INACTIVE_BIT = 1, WORK_STRUCT_PWQ_BIT = 2, WORK_STRUCT_LINKED_BIT = 3, WORK_STRUCT_COLOR_SHIFT = 4, WORK_STRUCT_COLOR_BITS = 4, WORK_STRUCT_PENDING = 1, WORK_STRUCT_INACTIVE = 2, WORK_STRUCT_PWQ = 4, WORK_STRUCT_LINKED = 8, WORK_STRUCT_STATIC = 0, WORK_NR_COLORS = 16, WORK_CPU_UNBOUND = 32, WORK_STRUCT_FLAG_BITS = 8, WORK_OFFQ_FLAG_BASE = 4, __WORK_OFFQ_CANCELING = 4, WORK_OFFQ_FLAG_BITS = 1, WORK_OFFQ_POOL_SHIFT = 5, WORK_OFFQ_LEFT = 59, WORK_OFFQ_POOL_BITS = 31, WORK_BUSY_PENDING = 1, WORK_BUSY_RUNNING = 2, WORKER_DESC_LEN = 24, }; struct kvm_irq_ack_notifier { struct hlist_node link; unsigned int gsi; void (*irq_acked)(struct kvm_irq_ack_notifier *); }; struct kvm_s390_adapter_int { u64 ind_addr; u64 summary_addr; u64 
ind_offset; u32 summary_offset; u32 adapter_id; }; struct kvm_hv_sint { u32 vcpu; u32 sint; }; struct kvm_xen_evtchn { u32 port; u32 vcpu_id; int vcpu_idx; u32 priority; }; struct kvm_kernel_irq_routing_entry { u32 gsi; u32 type; int (*set)(struct kvm_kernel_irq_routing_entry *, struct kvm *, int, int, bool); union { struct { unsigned int irqchip; unsigned int pin; } irqchip; struct { u32 address_lo; u32 address_hi; u32 data; u32 flags; u32 devid; } msi; struct kvm_s390_adapter_int adapter; struct kvm_hv_sint hv_sint; struct kvm_xen_evtchn xen_evtchn; }; struct hlist_node link; }; typedef struct poll_table_struct poll_table; struct irq_bypass_producer; struct irq_bypass_consumer { struct list_head node; void *token; int (*add_producer)(struct irq_bypass_consumer *, struct irq_bypass_producer *); void (*del_producer)(struct irq_bypass_consumer *, struct irq_bypass_producer *); void (*stop)(struct irq_bypass_consumer *); void (*start)(struct irq_bypass_consumer *); }; struct kvm_kernel_irqfd_resampler; struct kvm_kernel_irqfd { struct kvm *kvm; wait_queue_entry_t wait; struct kvm_kernel_irq_routing_entry irq_entry; seqcount_spinlock_t irq_entry_sc; int gsi; struct work_struct inject; struct kvm_kernel_irqfd_resampler *resampler; struct eventfd_ctx *resamplefd; struct list_head resampler_link; struct eventfd_ctx *eventfd; struct list_head list; poll_table pt; struct work_struct shutdown; struct irq_bypass_consumer consumer; struct irq_bypass_producer *producer; }; struct kvm_kernel_irqfd_resampler { struct kvm *kvm; struct list_head list; struct kvm_irq_ack_notifier notifier; struct list_head link; }; struct irq_bypass_producer { struct list_head node; void *token; int irq; int (*add_consumer)(struct irq_bypass_producer *, struct irq_bypass_consumer *); void (*del_consumer)(struct irq_bypass_producer *, struct irq_bypass_consumer *); void (*stop)(struct irq_bypass_producer *); void (*start)(struct irq_bypass_producer *); }; struct _ioeventfd { struct list_head list; u64 addr; int length; struct eventfd_ctx *eventfd; u64 datamatch; struct kvm_io_device dev; u8 bus_idx; bool wildcard; }; struct fd { struct file *file; unsigned int flags; }; enum kvm_device_type { KVM_DEV_TYPE_FSL_MPIC_20 = 1, KVM_DEV_TYPE_FSL_MPIC_42 = 2, KVM_DEV_TYPE_XICS = 3, KVM_DEV_TYPE_VFIO = 4, KVM_DEV_TYPE_ARM_VGIC_V2 = 5, KVM_DEV_TYPE_FLIC = 6, KVM_DEV_TYPE_ARM_VGIC_V3 = 7, KVM_DEV_TYPE_ARM_VGIC_ITS = 8, KVM_DEV_TYPE_XIVE = 9, KVM_DEV_TYPE_ARM_PV_TIME = 10, KVM_DEV_TYPE_RISCV_AIA = 11, KVM_DEV_TYPE_MAX = 12, }; struct kvm_vfio_file { struct list_head node; struct file *file; }; struct kvm_vfio { struct list_head file_list; struct mutex lock; bool noncoherent; }; struct kvm_coalesced_mmio_dev { struct list_head list; struct kvm_io_device dev; struct kvm *kvm; struct kvm_coalesced_mmio_zone zone; }; typedef void (*btf_trace_kvm_entry)(void *, unsigned long); typedef void (*btf_trace_kvm_exit)(void *, int, unsigned int, unsigned long); typedef void (*btf_trace_kvm_guest_fault)(void *, unsigned long, unsigned long, unsigned long, unsigned long long); typedef void (*btf_trace_kvm_access_fault)(void *, unsigned long); typedef void (*btf_trace_kvm_irq_line)(void *, unsigned int, int, int, int); typedef void (*btf_trace_kvm_mmio_emulate)(void *, unsigned long, unsigned long, unsigned long); typedef void (*btf_trace_kvm_set_way_flush)(void *, unsigned long, bool); typedef void (*btf_trace_kvm_toggle_cache)(void *, unsigned long, bool, bool); typedef void (*btf_trace_kvm_timer_update_irq)(void *, unsigned long, __u32, int); 
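/*
 * Illustrative usage sketch (not part of the generated header): the
 * btf_trace_kvm_* typedefs above describe the argument layout of the arm64
 * KVM tracepoints, so a BPF program can attach a typed "tp_btf" hook and
 * receive those arguments directly. This is only a minimal sketch; it
 * assumes libbpf's <bpf/bpf_helpers.h> and <bpf/bpf_tracing.h> are available
 * next to this header, and the program/section names are examples only.
 *
 *   #include "vmlinux.h"
 *   #include <bpf/bpf_helpers.h>
 *   #include <bpf/bpf_tracing.h>
 *
 *   // Arguments mirror btf_trace_kvm_exit(void *, int, unsigned int, unsigned long).
 *   SEC("tp_btf/kvm_exit")
 *   int BPF_PROG(on_kvm_exit, int ret, unsigned int esr_ec, unsigned long vcpu_pc)
 *   {
 *           bpf_printk("kvm_exit: ret=%d esr_ec=%u pc=%lx", ret, esr_ec, vcpu_pc);
 *           return 0;
 *   }
 *
 *   char LICENSE[] SEC("license") = "GPL";
 */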
struct timer_map; typedef void (*btf_trace_kvm_get_timer_map)(void *, unsigned long, struct timer_map *); struct timer_map { struct arch_timer_context *direct_vtimer; struct arch_timer_context *direct_ptimer; struct arch_timer_context *emul_vtimer; struct arch_timer_context *emul_ptimer; }; typedef kvm_pte_t __attribute__((btf_type_tag("rcu"))) *kvm_pteref_t; enum kvm_pgtable_stage2_flags { KVM_PGTABLE_S2_NOFWB = 1, KVM_PGTABLE_S2_IDMAP = 2, KVM_PGTABLE_S2_PREFAULT_BLOCK = 4, }; struct kvm_pgtable_mm_ops; struct kvm_pgtable_pte_ops; struct kvm_pgtable { u32 ia_bits; u32 start_level; kvm_pteref_t pgd; struct kvm_pgtable_mm_ops *mm_ops; struct kvm_s2_mmu *mmu; enum kvm_pgtable_stage2_flags flags; struct kvm_pgtable_pte_ops *pte_ops; }; struct kvm_pgtable_mm_ops { void * (*zalloc_page)(void *); void * (*zalloc_pages_exact)(size_t); void (*free_pages_exact)(void *, size_t); void (*free_unlinked_table)(void *, u32); void (*get_page)(void *); void (*put_page)(void *); int (*page_count)(void *); void * (*phys_to_virt)(phys_addr_t); phys_addr_t (*virt_to_phys)(void *); void (*dcache_clean_inval_poc)(void *, size_t); void (*icache_inval_pou)(void *, size_t); }; typedef bool (*kvm_pgtable_force_pte_cb_t)(u64, u64, enum kvm_pgtable_prot); typedef bool (*kvm_pgtable_pte_is_counted_cb_t)(kvm_pte_t, u32); struct kvm_pgtable_pte_ops { kvm_pgtable_force_pte_cb_t force_pte_cb; kvm_pgtable_pte_is_counted_cb_t pte_is_counted_cb; }; typedef void (*btf_trace_kvm_timer_save_state)(void *, struct arch_timer_context *); typedef void (*btf_trace_kvm_timer_restore_state)(void *, struct arch_timer_context *); typedef void (*btf_trace_kvm_timer_hrtimer_expire)(void *, struct arch_timer_context *); typedef void (*btf_trace_kvm_timer_emulate)(void *, struct arch_timer_context *, bool); typedef void (*btf_trace_kvm_nested_eret)(void *, struct kvm_vcpu *, unsigned long, unsigned long); typedef void (*btf_trace_kvm_inject_nested_exception)(void *, struct kvm_vcpu *, u64, int); typedef void (*btf_trace_kvm_forward_sysreg_trap)(void *, struct kvm_vcpu *, u32, bool); enum __kvm_host_smccc_func { __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 = 1, __KVM_HOST_SMCCC_FUNC___pkvm_init = 2, __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping = 3, __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector = 4, __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs = 5, __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs = 6, __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config = 7, __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context = 8, __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa = 9, __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa_nsh = 10, __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid = 11, __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range = 12, __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context = 13, __KVM_HOST_SMCCC_FUNC___pkvm_alloc_module_va = 14, __KVM_HOST_SMCCC_FUNC___pkvm_map_module_page = 15, __KVM_HOST_SMCCC_FUNC___pkvm_unmap_module_page = 16, __KVM_HOST_SMCCC_FUNC___pkvm_init_module = 17, __KVM_HOST_SMCCC_FUNC___pkvm_register_hcall = 18, __KVM_HOST_SMCCC_FUNC___pkvm_iommu_init = 19, __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize = 20, __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp = 21, __KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp = 22, __KVM_HOST_SMCCC_FUNC___pkvm_host_map_guest = 23, __KVM_HOST_SMCCC_FUNC___pkvm_host_unmap_guest = 24, __KVM_HOST_SMCCC_FUNC___pkvm_relax_perms = 25, __KVM_HOST_SMCCC_FUNC___pkvm_wrprotect = 26, __KVM_HOST_SMCCC_FUNC___pkvm_dirty_log = 27, __KVM_HOST_SMCCC_FUNC___pkvm_tlb_flush_vmid = 28, __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc = 29, 
__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run = 30, __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff = 31, __KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs = 32, __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs = 33, __KVM_HOST_SMCCC_FUNC___pkvm_init_vm = 34, __KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu = 35, __KVM_HOST_SMCCC_FUNC___pkvm_start_teardown_vm = 36, __KVM_HOST_SMCCC_FUNC___pkvm_finalize_teardown_vm = 37, __KVM_HOST_SMCCC_FUNC___pkvm_reclaim_dying_guest_page = 38, __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load = 39, __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put = 40, __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_sync_state = 41, __KVM_HOST_SMCCC_FUNC___pkvm_load_tracing = 42, __KVM_HOST_SMCCC_FUNC___pkvm_teardown_tracing = 43, __KVM_HOST_SMCCC_FUNC___pkvm_enable_tracing = 44, __KVM_HOST_SMCCC_FUNC___pkvm_swap_reader_tracing = 45, __KVM_HOST_SMCCC_FUNC___pkvm_enable_event = 46, __KVM_HOST_SMCCC_FUNC___pkvm_hyp_alloc_mgt_refill = 47, __KVM_HOST_SMCCC_FUNC___pkvm_hyp_alloc_mgt_reclaimable = 48, __KVM_HOST_SMCCC_FUNC___pkvm_hyp_alloc_mgt_reclaim = 49, __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_alloc_domain = 50, __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_free_domain = 51, __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_attach_dev = 52, __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_detach_dev = 53, __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_map_pages = 54, __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_unmap_pages = 55, __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_iova_to_phys = 56, __KVM_HOST_SMCCC_FUNC___pkvm_host_hvc_pd = 57, __KVM_HOST_SMCCC_FUNC___pkvm_stage2_snapshot = 58, __KVM_HOST_SMCCC_FUNC___dynamic_hcalls = 128, }; enum vcpu_sysreg { __INVALID_SYSREG__ = 0, MPIDR_EL1 = 1, CLIDR_EL1 = 2, CSSELR_EL1 = 3, SCTLR_EL1 = 4, ACTLR_EL1 = 5, CPACR_EL1 = 6, ZCR_EL1 = 7, TTBR0_EL1 = 8, TTBR1_EL1 = 9, TCR_EL1 = 10, TCR2_EL1 = 11, ESR_EL1 = 12, AFSR0_EL1 = 13, AFSR1_EL1 = 14, FAR_EL1 = 15, MAIR_EL1 = 16, VBAR_EL1 = 17, CONTEXTIDR_EL1 = 18, TPIDR_EL0 = 19, TPIDRRO_EL0 = 20, TPIDR_EL1 = 21, AMAIR_EL1 = 22, CNTKCTL_EL1 = 23, PAR_EL1 = 24, MDSCR_EL1 = 25, MDCCINT_EL1 = 26, OSLSR_EL1 = 27, DISR_EL1 = 28, PMCR_EL0 = 29, PMSELR_EL0 = 30, PMEVCNTR0_EL0 = 31, PMEVCNTR30_EL0 = 61, PMCCNTR_EL0 = 62, PMEVTYPER0_EL0 = 63, PMEVTYPER30_EL0 = 93, PMCCFILTR_EL0 = 94, PMCNTENSET_EL0 = 95, PMINTENSET_EL1 = 96, PMOVSSET_EL0 = 97, PMUSERENR_EL0 = 98, APIAKEYLO_EL1 = 99, APIAKEYHI_EL1 = 100, APIBKEYLO_EL1 = 101, APIBKEYHI_EL1 = 102, APDAKEYLO_EL1 = 103, APDAKEYHI_EL1 = 104, APDBKEYLO_EL1 = 105, APDBKEYHI_EL1 = 106, APGAKEYLO_EL1 = 107, APGAKEYHI_EL1 = 108, ELR_EL1 = 109, SP_EL1 = 110, SPSR_EL1 = 111, CNTVOFF_EL2 = 112, CNTV_CVAL_EL0 = 113, CNTV_CTL_EL0 = 114, CNTP_CVAL_EL0 = 115, CNTP_CTL_EL0 = 116, RGSR_EL1 = 117, GCR_EL1 = 118, TFSR_EL1 = 119, TFSRE0_EL1 = 120, PIR_EL1 = 121, PIRE0_EL1 = 122, DACR32_EL2 = 123, IFSR32_EL2 = 124, FPEXC32_EL2 = 125, DBGVCR32_EL2 = 126, VPIDR_EL2 = 127, VMPIDR_EL2 = 128, SCTLR_EL2 = 129, ACTLR_EL2 = 130, HCR_EL2 = 131, MDCR_EL2 = 132, CPTR_EL2 = 133, HSTR_EL2 = 134, HACR_EL2 = 135, HCRX_EL2 = 136, TTBR0_EL2 = 137, TTBR1_EL2 = 138, TCR_EL2 = 139, VTTBR_EL2 = 140, VTCR_EL2 = 141, SPSR_EL2 = 142, ELR_EL2 = 143, AFSR0_EL2 = 144, AFSR1_EL2 = 145, ESR_EL2 = 146, FAR_EL2 = 147, HPFAR_EL2 = 148, MAIR_EL2 = 149, AMAIR_EL2 = 150, VBAR_EL2 = 151, RVBAR_EL2 = 152, CONTEXTIDR_EL2 = 153, TPIDR_EL2 = 154, CNTHCTL_EL2 = 155, SP_EL2 = 156, HFGRTR_EL2 = 157, HFGWTR_EL2 = 158, HFGITR_EL2 = 159, HDFGRTR_EL2 = 160, HDFGWTR_EL2 = 161, HAFGRTR_EL2 = 162, CNTHP_CTL_EL2 = 163, CNTHP_CVAL_EL2 = 164, CNTHV_CTL_EL2 = 165, CNTHV_CVAL_EL2 = 166, NR_SYS_REGS = 167, }; struct 
trace_event_raw_kvm_entry { struct trace_entry ent; unsigned long vcpu_pc; char __data[0]; }; struct trace_event_raw_kvm_exit { struct trace_entry ent; int ret; unsigned int esr_ec; unsigned long vcpu_pc; char __data[0]; }; struct trace_event_raw_kvm_guest_fault { struct trace_entry ent; unsigned long vcpu_pc; unsigned long hsr; unsigned long hxfar; unsigned long long ipa; char __data[0]; }; struct trace_event_raw_kvm_access_fault { struct trace_entry ent; unsigned long ipa; char __data[0]; }; struct trace_event_raw_kvm_irq_line { struct trace_entry ent; unsigned int type; int vcpu_idx; int irq_num; int level; char __data[0]; }; struct trace_event_raw_kvm_mmio_emulate { struct trace_entry ent; unsigned long vcpu_pc; unsigned long instr; unsigned long cpsr; char __data[0]; }; struct trace_event_raw_kvm_set_way_flush { struct trace_entry ent; unsigned long vcpu_pc; bool cache; char __data[0]; }; struct trace_event_raw_kvm_toggle_cache { struct trace_entry ent; unsigned long vcpu_pc; bool was; bool now; char __data[0]; }; struct trace_event_raw_kvm_timer_update_irq { struct trace_entry ent; unsigned long vcpu_id; __u32 irq; int level; char __data[0]; }; struct trace_event_raw_kvm_get_timer_map { struct trace_entry ent; unsigned long vcpu_id; int direct_vtimer; int direct_ptimer; int emul_vtimer; int emul_ptimer; char __data[0]; }; struct trace_event_raw_kvm_timer_save_state { struct trace_entry ent; unsigned long ctl; unsigned long long cval; int timer_idx; char __data[0]; }; struct trace_event_raw_kvm_timer_restore_state { struct trace_entry ent; unsigned long ctl; unsigned long long cval; int timer_idx; char __data[0]; }; struct trace_event_raw_kvm_timer_hrtimer_expire { struct trace_entry ent; int timer_idx; char __data[0]; }; struct trace_event_raw_kvm_timer_emulate { struct trace_entry ent; int timer_idx; bool should_fire; char __data[0]; }; struct trace_event_raw_kvm_nested_eret { struct trace_entry ent; struct kvm_vcpu *vcpu; unsigned long elr_el2; unsigned long spsr_el2; unsigned long target_mode; unsigned long hcr_el2; char __data[0]; }; struct trace_event_raw_kvm_inject_nested_exception { struct trace_entry ent; struct kvm_vcpu *vcpu; unsigned long esr_el2; int type; unsigned long spsr_el2; unsigned long pc; unsigned long source_mode; unsigned long hcr_el2; char __data[0]; }; struct trace_event_raw_kvm_forward_sysreg_trap { struct trace_entry ent; u64 pc; u32 sysreg; bool is_read; char __data[0]; }; struct kvm_host_data { struct kvm_cpu_context host_ctxt; }; struct kvm_nvhe_init_params { unsigned long mair_el2; unsigned long tcr_el2; unsigned long tpidr_el2; unsigned long stack_hyp_va; unsigned long stack_pa; phys_addr_t pgd_pa; unsigned long hcr_el2; unsigned long hfgwtr_el2; unsigned long vttbr; unsigned long vtcr; }; struct kvm_vcpu_init { __u32 target; __u32 features[7]; }; struct kvm_vcpu_events { struct { __u8 serror_pending; __u8 serror_has_esr; __u8 ext_dabt_pending; __u8 pad[5]; __u64 serror_esr; } exception; __u32 reserved[12]; }; struct kvm_one_reg { __u64 id; __u64 addr; }; struct kvm_arm_device_addr { __u64 id; __u64 addr; }; struct kvm_arm_counter_offset { __u64 counter_offset; __u64 reserved; }; struct kvm_arm_copy_mte_tags { __u64 guest_ipa; __u64 length; void __attribute__((btf_type_tag("user"))) *addr; __u64 flags; __u64 reserved[2]; }; struct psci_0_1_function_ids { u32 cpu_suspend; u32 cpu_on; u32 cpu_off; u32 migrate; }; struct trace_event_data_offsets_kvm_entry {}; struct trace_event_data_offsets_kvm_exit {}; struct trace_event_data_offsets_kvm_guest_fault {}; 
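/*
 * Illustrative usage sketch (not part of the generated header): the
 * trace_event_raw_* structs above mirror the on-disk record layout of the
 * corresponding tracepoints, so a classic "tracepoint" BPF program can use
 * them as its context type and read the event fields by name. A minimal
 * sketch, assuming libbpf's <bpf/bpf_helpers.h>; the program name below is
 * an example, not an existing symbol.
 *
 *   #include "vmlinux.h"
 *   #include <bpf/bpf_helpers.h>
 *
 *   SEC("tracepoint/kvm/kvm_entry")
 *   int on_kvm_entry(struct trace_event_raw_kvm_entry *ctx)
 *   {
 *           // vcpu_pc is declared in struct trace_event_raw_kvm_entry above.
 *           unsigned long pc = ctx->vcpu_pc;
 *           bpf_printk("vcpu entry at pc=%lx", pc);
 *           return 0;
 *   }
 *
 *   char LICENSE[] SEC("license") = "GPL";
 */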
struct trace_event_data_offsets_kvm_access_fault {}; struct trace_event_data_offsets_kvm_irq_line {}; struct trace_event_data_offsets_kvm_mmio_emulate {}; struct trace_event_data_offsets_kvm_set_way_flush {}; struct trace_event_data_offsets_kvm_toggle_cache {}; struct trace_event_data_offsets_kvm_timer_update_irq {}; struct trace_event_data_offsets_kvm_get_timer_map {}; struct trace_event_data_offsets_kvm_timer_save_state {}; struct trace_event_data_offsets_kvm_timer_restore_state {}; struct trace_event_data_offsets_kvm_timer_hrtimer_expire {}; struct trace_event_data_offsets_kvm_timer_emulate {}; struct trace_event_data_offsets_kvm_nested_eret {}; struct trace_event_data_offsets_kvm_inject_nested_exception {}; struct trace_event_data_offsets_kvm_forward_sysreg_trap {}; struct kvm_reg_list { __u64 n; __u64 reg[0]; }; enum kvm_pgtable_walk_flags { KVM_PGTABLE_WALK_LEAF = 1, KVM_PGTABLE_WALK_TABLE_PRE = 2, KVM_PGTABLE_WALK_TABLE_POST = 4, KVM_PGTABLE_WALK_SHARED = 8, KVM_PGTABLE_WALK_HANDLE_FAULT = 16, KVM_PGTABLE_WALK_SKIP_BBM_TLBI = 32, KVM_PGTABLE_WALK_SKIP_CMO = 64, }; struct hyp_shared_pfn { u64 pfn; int count; struct rb_node node; }; struct kvm_pinned_page { struct page *page; u64 ipa; u8 order; u16 pins; }; typedef void (*rcu_callback_t)(struct callback_head *); enum kvm_smccc_filter_action { KVM_SMCCC_FILTER_HANDLE = 0, KVM_SMCCC_FILTER_DENY = 1, KVM_SMCCC_FILTER_FWD_TO_USER = 2, NR_SMCCC_FILTER_ACTIONS = 3, }; enum { KVM_REG_ARM_STD_HYP_BIT_PV_TIME = 0, KVM_REG_ARM_STD_HYP_BMAP_BIT_COUNT = 1, }; enum { KVM_REG_ARM_STD_BIT_TRNG_V1_0 = 0, KVM_REG_ARM_STD_BMAP_BIT_COUNT = 1, }; enum { KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT = 0, KVM_REG_ARM_VENDOR_HYP_BIT_PTP = 1, KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT = 2, }; enum clocksource_ids { CSID_GENERIC = 0, CSID_ARM_ARCH_COUNTER = 1, CSID_MAX = 2, }; struct system_time_snapshot { u64 cycles; ktime_t real; ktime_t boot; ktime_t raw; enum clocksource_ids cs_id; unsigned int clock_was_set_seq; u8 cs_was_changed_seq; u32 mono_shift; u32 mono_mult; }; struct kvm_smccc_filter { __u32 base; __u32 nr_functions; __u8 action; __u8 pad[15]; }; typedef void (*btf_trace_kvm_wfx_arm64)(void *, unsigned long, bool); typedef void (*btf_trace_kvm_hvc_arm64)(void *, unsigned long, unsigned long, unsigned long); typedef void (*btf_trace_kvm_arm_setup_debug)(void *, struct kvm_vcpu *, __u32); typedef void (*btf_trace_kvm_arm_clear_debug)(void *, __u32); typedef void (*btf_trace_kvm_arm_set_dreg32)(void *, const char *, __u64); typedef void (*btf_trace_kvm_arm_set_regset)(void *, const char *, int, __u64 *, __u64 *); typedef void (*btf_trace_trap_reg)(void *, const char *, int, bool, u64); typedef void (*btf_trace_kvm_handle_sys_reg)(void *, unsigned long); struct sys_reg_params; struct sys_reg_desc; typedef void (*btf_trace_kvm_sys_access)(void *, unsigned long, struct sys_reg_params *, const struct sys_reg_desc *); struct sys_reg_params { u8 Op0; u8 Op1; u8 CRn; u8 CRm; u8 Op2; u64 regval; bool is_write; }; struct sys_reg_desc { const char *name; enum { AA32_DIRECT = 0, AA32_LO = 1, AA32_HI = 2, } aarch32_map; u8 Op0; u8 Op1; u8 CRn; u8 CRm; u8 Op2; bool (*access)(struct kvm_vcpu *, struct sys_reg_params *, const struct sys_reg_desc *); u64 (*reset)(struct kvm_vcpu *, const struct sys_reg_desc *); int reg; u64 val; int (*__get_user)(struct kvm_vcpu *, const struct sys_reg_desc *, u64 *); int (*set_user)(struct kvm_vcpu *, const struct sys_reg_desc *, u64); unsigned int (*visibility)(const struct kvm_vcpu *, const struct sys_reg_desc *); }; typedef void 
(*btf_trace_kvm_set_guest_debug)(void *, struct kvm_vcpu *, __u32); typedef int (*exit_handle_fn)(struct kvm_vcpu *); struct trace_event_raw_kvm_wfx_arm64 { struct trace_entry ent; unsigned long vcpu_pc; bool is_wfe; char __data[0]; }; struct trace_event_raw_kvm_hvc_arm64 { struct trace_entry ent; unsigned long vcpu_pc; unsigned long r0; unsigned long imm; char __data[0]; }; struct trace_event_raw_kvm_arm_setup_debug { struct trace_entry ent; struct kvm_vcpu *vcpu; __u32 guest_debug; char __data[0]; }; struct trace_event_raw_kvm_arm_clear_debug { struct trace_entry ent; __u32 guest_debug; char __data[0]; }; struct trace_event_raw_kvm_arm_set_dreg32 { struct trace_entry ent; const char *name; __u64 value; char __data[0]; }; struct trace_event_raw_kvm_arm_set_regset { struct trace_entry ent; const char *name; int len; u64 ctrls[16]; u64 values[16]; char __data[0]; }; struct trace_event_raw_trap_reg { struct trace_entry ent; const char *fn; int reg; bool is_write; u64 write_value; char __data[0]; }; struct trace_event_raw_kvm_handle_sys_reg { struct trace_entry ent; unsigned long hsr; char __data[0]; }; struct trace_event_raw_kvm_sys_access { struct trace_entry ent; unsigned long vcpu_pc; bool is_write; const char *name; u8 Op0; u8 Op1; u8 CRn; u8 CRm; u8 Op2; char __data[0]; }; struct trace_event_raw_kvm_set_guest_debug { struct trace_entry ent; struct kvm_vcpu *vcpu; __u32 guest_debug; char __data[0]; }; struct trace_event_data_offsets_kvm_wfx_arm64 {}; struct trace_event_data_offsets_kvm_hvc_arm64 {}; struct trace_event_data_offsets_kvm_arm_setup_debug {}; struct trace_event_data_offsets_kvm_arm_clear_debug {}; struct trace_event_data_offsets_kvm_arm_set_dreg32 {}; struct trace_event_data_offsets_kvm_arm_set_regset {}; struct trace_event_data_offsets_trap_reg {}; struct trace_event_data_offsets_kvm_handle_sys_reg {}; struct trace_event_data_offsets_kvm_sys_access {}; struct trace_event_data_offsets_kvm_set_guest_debug {}; struct sve_state_reg_region { unsigned int koffset; unsigned int klen; unsigned int upad; }; enum kvm_arch_timers { TIMER_PTIMER = 0, TIMER_VTIMER = 1, NR_KVM_EL0_TIMERS = 2, TIMER_HVTIMER = 2, TIMER_HPTIMER = 3, NR_KVM_TIMERS = 4, }; enum kvm_arch_timer_regs { TIMER_REG_CNT = 0, TIMER_REG_CVAL = 1, TIMER_REG_TVAL = 2, TIMER_REG_CTL = 3, TIMER_REG_VOFF = 4, }; enum vgic_type { VGIC_V2 = 0, VGIC_V3 = 1, }; struct __va_list { void *__stack; void *__gr_top; void *__vr_top; int __gr_offs; int __vr_offs; }; typedef __builtin_va_list va_list; struct kvm_nvhe_stacktrace_info { unsigned long stack_base; unsigned long overflow_stack_base; unsigned long fp; unsigned long pc; }; struct vgic_vmcr { u32 grpen0; u32 grpen1; u32 ackctl; u32 fiqen; u32 cbpr; u32 eoim; u32 abpr; u32 bpr; u32 pmr; }; enum pkvm_moveable_reg_type { PKVM_MREG_MEMORY = 0, PKVM_MREG_PROTECTED_RANGE = 1, }; struct pkvm_moveable_reg { phys_addr_t start; u64 size; enum pkvm_moveable_reg_type type; }; struct reserved_mem_ops; struct reserved_mem { const char *name; unsigned long fdt_node; unsigned long phandle; const struct reserved_mem_ops *ops; phys_addr_t base; phys_addr_t size; void *priv; }; struct reserved_mem_ops { int (*device_init)(struct reserved_mem *, struct device *); void (*device_release)(struct reserved_mem *, struct device *); }; struct pkvm_mod_sec_mapping { struct pkvm_module_section *sec; enum kvm_pgtable_prot prot; }; struct hyp_event_id { unsigned short id; void *data; }; struct kvm_protected_vm_info { __u64 firmware_size; __u64 __reserved[7]; }; struct cyclecounter; struct timecounter { const 
struct cyclecounter *cc; u64 cycle_last; u64 nsec; u64 mask; u64 frac; }; struct cyclecounter { u64 (*read)(const struct cyclecounter *); u64 mask; u32 mult; u32 shift; }; enum irq_domain_bus_token { DOMAIN_BUS_ANY = 0, DOMAIN_BUS_WIRED = 1, DOMAIN_BUS_GENERIC_MSI = 2, DOMAIN_BUS_PCI_MSI = 3, DOMAIN_BUS_PLATFORM_MSI = 4, DOMAIN_BUS_NEXUS = 5, DOMAIN_BUS_IPI = 6, DOMAIN_BUS_FSL_MC_MSI = 7, DOMAIN_BUS_TI_SCI_INTA_MSI = 8, DOMAIN_BUS_WAKEUP = 9, DOMAIN_BUS_VMD_MSI = 10, DOMAIN_BUS_PCI_DEVICE_MSI = 11, DOMAIN_BUS_PCI_DEVICE_MSIX = 12, DOMAIN_BUS_DMAR = 13, DOMAIN_BUS_AMDVI = 14, DOMAIN_BUS_PCI_DEVICE_IMS = 15, }; struct irq_fwspec; struct irq_domain_ops { int (*match)(struct irq_domain *, struct device_node *, enum irq_domain_bus_token); int (*select)(struct irq_domain *, struct irq_fwspec *, enum irq_domain_bus_token); int (*map)(struct irq_domain *, unsigned int, irq_hw_number_t); void (*unmap)(struct irq_domain *, unsigned int); int (*xlate)(struct irq_domain *, struct device_node *, const u32 *, unsigned int, unsigned long *, unsigned int *); int (*alloc)(struct irq_domain *, unsigned int, unsigned int, void *); void (*free)(struct irq_domain *, unsigned int, unsigned int); int (*activate)(struct irq_domain *, struct irq_data *, bool); void (*deactivate)(struct irq_domain *, struct irq_data *); int (*translate)(struct irq_domain *, struct irq_fwspec *, unsigned long *, unsigned int *); }; struct irq_domain_chip_generic; struct msi_parent_ops; struct irq_domain { struct list_head link; const char *name; const struct irq_domain_ops *ops; void *host_data; unsigned int flags; unsigned int mapcount; struct mutex mutex; struct irq_domain *root; struct fwnode_handle *fwnode; enum irq_domain_bus_token bus_token; struct irq_domain_chip_generic *gc; struct device *dev; struct device *pm_dev; struct irq_domain *parent; const struct msi_parent_ops *msi_parent_ops; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; irq_hw_number_t hwirq_max; unsigned int revmap_size; struct xarray revmap_tree; struct irq_data __attribute__((btf_type_tag("rcu"))) *revmap[0]; }; enum irq_gc_flags { IRQ_GC_INIT_MASK_CACHE = 1, IRQ_GC_INIT_NESTED_LOCK = 2, IRQ_GC_MASK_CACHE_PER_TYPE = 4, IRQ_GC_NO_MASK = 8, IRQ_GC_BE_IO = 16, }; struct irq_chip_generic; struct irq_domain_chip_generic { unsigned int irqs_per_chip; unsigned int num_chips; unsigned int irq_flags_to_clear; unsigned int irq_flags_to_set; enum irq_gc_flags gc_flags; struct irq_chip_generic *gc[0]; }; struct irq_chip_regs { unsigned long enable; unsigned long disable; unsigned long mask; unsigned long ack; unsigned long eoi; unsigned long type; unsigned long polarity; }; struct irq_chip_type { struct irq_chip chip; struct irq_chip_regs regs; irq_flow_handler_t handler; u32 type; u32 mask_cache_priv; u32 *mask_cache; }; struct irq_chip_generic { raw_spinlock_t lock; void *reg_base; u32 (*reg_readl)(void *); void (*reg_writel)(u32, void *); void (*suspend)(struct irq_chip_generic *); void (*resume)(struct irq_chip_generic *); unsigned int irq_base; unsigned int irq_cnt; u32 mask_cache; u32 type_cache; u32 polarity_cache; u32 wake_enabled; u32 wake_active; unsigned int num_ct; void *private; unsigned long installed; unsigned long unused; struct irq_domain *domain; struct list_head list; struct irq_chip_type chip_types[0]; }; struct msi_domain_info; struct msi_parent_ops { u32 supported_flags; const char *prefix; bool (*init_dev_msi_info)(struct device *, struct irq_domain *, struct irq_domain *, struct 
msi_domain_info *); }; struct msi_domain_ops; struct msi_domain_info { u32 flags; enum irq_domain_bus_token bus_token; unsigned int hwsize; struct msi_domain_ops *ops; struct irq_chip *chip; void *chip_data; irq_flow_handler_t handler; void *handler_data; const char *handler_name; void *data; }; struct msi_alloc_info; typedef struct msi_alloc_info msi_alloc_info_t; struct msi_domain_ops { irq_hw_number_t (*get_hwirq)(struct msi_domain_info *, msi_alloc_info_t *); int (*msi_init)(struct irq_domain *, struct msi_domain_info *, unsigned int, irq_hw_number_t, msi_alloc_info_t *); void (*msi_free)(struct irq_domain *, struct msi_domain_info *, unsigned int); int (*msi_prepare)(struct irq_domain *, struct device *, int, msi_alloc_info_t *); void (*prepare_desc)(struct irq_domain *, msi_alloc_info_t *, struct msi_desc *); void (*set_desc)(msi_alloc_info_t *, struct msi_desc *); int (*domain_alloc_irqs)(struct irq_domain *, struct device *, int); void (*domain_free_irqs)(struct irq_domain *, struct device *); void (*msi_post_free)(struct irq_domain *, struct device *); }; struct msi_alloc_info { struct msi_desc *desc; irq_hw_number_t hwirq; unsigned long flags; union { unsigned long ul; void *ptr; } scratchpad[2]; }; struct irq_fwspec { struct fwnode_handle *fwnode; int param_count; u32 param[16]; }; enum hrtimer_mode { HRTIMER_MODE_ABS = 0, HRTIMER_MODE_REL = 1, HRTIMER_MODE_PINNED = 2, HRTIMER_MODE_SOFT = 4, HRTIMER_MODE_HARD = 8, HRTIMER_MODE_ABS_PINNED = 2, HRTIMER_MODE_REL_PINNED = 3, HRTIMER_MODE_ABS_SOFT = 4, HRTIMER_MODE_REL_SOFT = 5, HRTIMER_MODE_ABS_PINNED_SOFT = 6, HRTIMER_MODE_REL_PINNED_SOFT = 7, HRTIMER_MODE_ABS_HARD = 8, HRTIMER_MODE_REL_HARD = 9, HRTIMER_MODE_ABS_PINNED_HARD = 10, HRTIMER_MODE_REL_PINNED_HARD = 11, }; enum { IRQD_TRIGGER_MASK = 15, IRQD_SETAFFINITY_PENDING = 256, IRQD_ACTIVATED = 512, IRQD_NO_BALANCING = 1024, IRQD_PER_CPU = 2048, IRQD_AFFINITY_SET = 4096, IRQD_LEVEL = 8192, IRQD_WAKEUP_STATE = 16384, IRQD_MOVE_PCNTXT = 32768, IRQD_IRQ_DISABLED = 65536, IRQD_IRQ_MASKED = 131072, IRQD_IRQ_INPROGRESS = 262144, IRQD_WAKEUP_ARMED = 524288, IRQD_FORWARDED_TO_VCPU = 1048576, IRQD_AFFINITY_MANAGED = 2097152, IRQD_IRQ_STARTED = 4194304, IRQD_MANAGED_SHUTDOWN = 8388608, IRQD_SINGLE_TARGET = 16777216, IRQD_DEFAULT_TRIGGER_SET = 33554432, IRQD_CAN_RESERVE = 67108864, IRQD_HANDLE_ENFORCE_IRQCTX = 134217728, IRQD_AFFINITY_ON_ACTIVATE = 268435456, IRQD_IRQ_ENABLED_ON_SUSPEND = 536870912, IRQD_RESEND_WHEN_IN_PROGRESS = 1073741824, }; enum { IRQCHIP_FWNODE_REAL = 0, IRQCHIP_FWNODE_NAMED = 1, IRQCHIP_FWNODE_NAMED_ID = 2, }; struct arch_timer_kvm_info { struct timecounter timecounter; int virtual_irq; int physical_irq; }; union trap_config { u64 val; struct { unsigned long cgt: 10; unsigned long fgt: 4; unsigned long bit: 6; unsigned long pol: 1; unsigned long fgf: 5; unsigned long unused: 37; unsigned long mbz: 1; }; }; struct encoding_to_trap_config { const u32 encoding; const u32 end; const union trap_config tc; const unsigned int line; }; enum cgt_group_id { __RESERVED__ = 0, CGT_HCR_TID1 = 1, CGT_HCR_TID2 = 2, CGT_HCR_TID3 = 3, CGT_HCR_IMO = 4, CGT_HCR_FMO = 5, CGT_HCR_TIDCP = 6, CGT_HCR_TACR = 7, CGT_HCR_TSW = 8, CGT_HCR_TPC = 9, CGT_HCR_TPU = 10, CGT_HCR_TTLB = 11, CGT_HCR_TVM = 12, CGT_HCR_TDZ = 13, CGT_HCR_TRVM = 14, CGT_HCR_TLOR = 15, CGT_HCR_TERR = 16, CGT_HCR_APK = 17, CGT_HCR_NV = 18, CGT_HCR_NV_nNV2 = 19, CGT_HCR_NV1_nNV2 = 20, CGT_HCR_AT = 21, CGT_HCR_nFIEN = 22, CGT_HCR_TID4 = 23, CGT_HCR_TICAB = 24, CGT_HCR_TOCU = 25, CGT_HCR_ENSCXT = 26, CGT_HCR_TTLBIS = 27, 
CGT_HCR_TTLBOS = 28, CGT_MDCR_TPMCR = 29, CGT_MDCR_TPM = 30, CGT_MDCR_TDE = 31, CGT_MDCR_TDA = 32, CGT_MDCR_TDOSA = 33, CGT_MDCR_TDRA = 34, CGT_MDCR_E2PB = 35, CGT_MDCR_TPMS = 36, CGT_MDCR_TTRF = 37, CGT_MDCR_E2TB = 38, CGT_MDCR_TDCC = 39, __MULTIPLE_CONTROL_BITS__ = 40, CGT_HCR_IMO_FMO = 40, CGT_HCR_TID2_TID4 = 41, CGT_HCR_TTLB_TTLBIS = 42, CGT_HCR_TTLB_TTLBOS = 43, CGT_HCR_TVM_TRVM = 44, CGT_HCR_TPU_TICAB = 45, CGT_HCR_TPU_TOCU = 46, CGT_HCR_NV1_nNV2_ENSCXT = 47, CGT_MDCR_TPM_TPMCR = 48, CGT_MDCR_TDE_TDA = 49, CGT_MDCR_TDE_TDOSA = 50, CGT_MDCR_TDE_TDRA = 51, CGT_MDCR_TDCC_TDE_TDA = 52, __COMPLEX_CONDITIONS__ = 53, CGT_CNTHCTL_EL1PCTEN = 53, CGT_CNTHCTL_EL1PTEN = 54, __NR_CGT_GROUP_IDS__ = 55, }; enum trap_behaviour { BEHAVE_HANDLE_LOCALLY = 0, BEHAVE_FORWARD_READ = 1, BEHAVE_FORWARD_WRITE = 2, BEHAVE_FORWARD_ANY = 3, }; struct trap_bits { const enum vcpu_sysreg index; const enum trap_behaviour behaviour; const u64 value; const u64 mask; }; typedef enum trap_behaviour (*complex_condition_check)(struct kvm_vcpu *); enum fgt_group_id { __NO_FGT_GROUP__ = 0, HFGxTR_GROUP = 1, HDFGRTR_GROUP = 2, HDFGWTR_GROUP = 3, HFGITR_GROUP = 4, HAFGRTR_GROUP = 5, __NR_FGT_GROUP_IDS__ = 6, }; enum fg_filter_id { __NO_FGF__ = 0, HCRX_FGTnXS = 1, __NR_FG_FILTER_IDS__ = 2, }; enum exception_type { except_type_sync = 0, except_type_irq = 128, except_type_fiq = 256, except_type_serror = 384, }; typedef void (*btf_trace_vgic_update_irq_pending)(void *, unsigned long, __u32, bool); struct vgic_global { enum vgic_type type; phys_addr_t vcpu_base; void *vcpu_base_va; void *vcpu_hyp_va; void *vctrl_base; void *vctrl_hyp; int nr_lr; unsigned int maint_irq; int max_gic_vcpus; bool can_emulate_gicv2; bool has_gicv4; bool has_gicv4_1; bool no_hw_deactivation; struct static_key_false gicv3_cpuif; u32 ich_vtr_el2; }; struct trace_event_raw_vgic_update_irq_pending { struct trace_entry ent; unsigned long vcpu_id; __u32 irq; bool level; char __data[0]; }; typedef int (*list_cmp_func_t)(void *, const struct list_head *, const struct list_head *); struct trace_event_data_offsets_vgic_update_irq_pending {}; enum gic_type { GIC_V2 = 0, GIC_V3 = 1, }; struct gic_kvm_info { enum gic_type type; struct resource vcpu; unsigned int maint_irq; bool no_maint_irq_mask; struct resource vctrl; bool has_v4; bool has_v4_1; bool no_hw_deactivation; }; struct its_vlpi_map { struct its_vm *vm; struct its_vpe *vpe; u32 vintid; u8 properties; bool db_enabled; }; struct vgic_reg_attr { struct kvm_vcpu *vcpu; gpa_t addr; }; struct vgic_its_abi { int cte_esz; int dte_esz; int ite_esz; int (*save_tables)(struct vgic_its *); int (*restore_tables)(struct vgic_its *); int (*commit)(struct vgic_its *); }; struct vgic_translation_cache_entry { struct list_head entry; phys_addr_t db; u32 devid; u32 eventid; struct vgic_irq *irq; }; struct its_device { struct list_head dev_list; struct list_head itt_head; u32 num_eventid_bits; gpa_t itt_addr; u32 device_id; }; struct its_collection; struct its_ite { struct list_head ite_list; struct vgic_irq *irq; struct its_collection *collection; u32 event_id; }; struct its_collection { struct list_head coll_list; u32 collection_id; u32 target_addr; }; typedef int (*entry_fn_t)(struct vgic_its *, u32, void *, void *); struct vgic_state_iter { int nr_cpus; int nr_spis; int nr_lpis; int dist_id; int vcpu_id; int intid; int lpi_idx; u32 *lpi_array; }; struct kvm_iommu_driver { int (*init_driver)(); void (*remove_driver)(); pkvm_handle_t (*get_iommu_id)(struct device *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; 
u64 android_kabi_reserved3; u64 android_kabi_reserved4; u64 android_kabi_reserved5; u64 android_kabi_reserved6; u64 android_kabi_reserved7; u64 android_kabi_reserved8; }; enum kvm_power_domain_type { KVM_POWER_DOMAIN_NONE = 0, KVM_POWER_DOMAIN_HOST_HVC = 1, }; struct kvm_power_domain { enum kvm_power_domain_type type; union { u64 device_id; }; }; struct kvm_hyp_iommu { struct kvm_power_domain power_domain; u32 lock; bool power_is_off; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct kvm_hyp_iommu_domain { atomic_t refs; pkvm_handle_t domain_id; void *priv; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct pmu_hw_events; struct platform_device; struct arm_pmu { struct pmu pmu; cpumask_t supported_cpus; char *name; int pmuver; irqreturn_t (*handle_irq)(struct arm_pmu *); void (*enable)(struct perf_event *); void (*disable)(struct perf_event *); int (*get_event_idx)(struct pmu_hw_events *, struct perf_event *); void (*clear_event_idx)(struct pmu_hw_events *, struct perf_event *); int (*set_event_filter)(struct hw_perf_event *, struct perf_event_attr *); u64 (*read_counter)(struct perf_event *); void (*write_counter)(struct perf_event *, u64); void (*start)(struct arm_pmu *); void (*stop)(struct arm_pmu *); void (*reset)(void *); int (*map_event)(struct perf_event *); int num_events; bool secure_access; unsigned long pmceid_bitmap[1]; unsigned long pmceid_ext_bitmap[1]; struct platform_device *plat_device; struct pmu_hw_events __attribute__((btf_type_tag("percpu"))) *hw_events; struct hlist_node node; struct notifier_block cpu_pm_nb; const struct attribute_group *attr_groups[5]; u64 reg_pmmir; unsigned long acpi_cpuid; }; struct pmu_hw_events { struct perf_event *events[32]; unsigned long used_mask[1]; raw_spinlock_t pmu_lock; struct arm_pmu *percpu_pmu; int irq; }; struct pdev_archdata {}; struct platform_device_id; struct mfd_cell; struct platform_device { const char *name; int id; bool id_auto; struct device dev; u64 platform_dma_mask; struct device_dma_parameters dma_parms; u32 num_resources; struct resource *resource; const struct platform_device_id *id_entry; const char *driver_override; struct mfd_cell *mfd_cell; struct pdev_archdata archdata; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct platform_device_id { char name[20]; kernel_ulong_t driver_data; }; struct kvm_pmu_event_filter { __u16 base_event; __u16 nevents; __u8 action; __u8 pad[3]; }; struct arm_pmu_entry { struct list_head entry; struct arm_pmu *arm_pmu; }; struct kvm_pgtable_snapshot { struct kvm_pgtable pgtable; struct kvm_hyp_memcache mc; void *pgd_hva; size_t pgd_pages; phys_addr_t *used_pages_hva; size_t num_used_pages; size_t used_pages_idx; }; struct kvm_pgtable_visit_ctx; typedef int (*kvm_pgtable_visitor_fn_t)(const struct kvm_pgtable_visit_ctx *, enum kvm_pgtable_walk_flags); struct kvm_pgtable_walker { const kvm_pgtable_visitor_fn_t cb; void * const arg; const enum kvm_pgtable_walk_flags flags; }; struct kvm_pgtable_visit_ctx { kvm_pte_t *ptep; kvm_pte_t old; void *arg; struct kvm_pgtable_mm_ops *mm_ops; u64 start; struct kvm_pgtable_pte_ops *pte_ops; u64 addr; u64 end; u32 level; enum kvm_pgtable_walk_flags flags; }; struct kvm_ptdump_guest_state { union { struct kvm *kvm; struct kvm_pgtable_snapshot *snap; }; struct pg_state parser_state; struct addr_marker ipa_marker[2]; struct pg_level level[4]; struct ptdump_range range[2]; }; struct hyp_event_table; struct hyp_event_mod_tables { struct hyp_event_table 
*tables; unsigned long nr_tables; }; struct hyp_event_table { struct hyp_event *start; unsigned long nr_events; }; struct trace_hyp_format_hyp_enter { struct hyp_entry_hdr hdr; }; struct trace_hyp_format_hyp_exit { struct hyp_entry_hdr hdr; }; struct trace_hyp_format_host_hcall { struct hyp_entry_hdr hdr; unsigned int id; u8 invalid; } __attribute__((packed)); struct trace_hyp_format_host_smc { struct hyp_entry_hdr hdr; u64 id; u8 forwarded; } __attribute__((packed)); struct trace_hyp_format_host_mem_abort { struct hyp_entry_hdr hdr; u64 esr; u64 addr; } __attribute__((packed)); struct trace_hyp_format___hyp_printk { struct hyp_entry_hdr hdr; u8 fmt_id; u64 a; u64 b; u64 c; u64 d; } __attribute__((packed)); struct trace_hyp_format_host_ffa_call { struct hyp_entry_hdr hdr; u64 func_id; u64 res_a1; u64 res_a2; u64 res_a3; u64 res_a4; int handled; int err; } __attribute__((packed)); struct trace_hyp_format_psci_mem_protect { struct hyp_entry_hdr hdr; u64 count; u64 was; } __attribute__((packed)); struct trace_hyp_format_iommu_idmap { struct hyp_entry_hdr hdr; u64 from; u64 to; int prot; } __attribute__((packed)); struct trace_page_desc; struct ring_buffer_writer { struct trace_page_desc *pdesc; int (*get_reader_page)(int); }; struct hyp_trace_desc; struct hyp_trace_buffer { struct hyp_trace_desc *desc; struct ring_buffer_writer writer; struct trace_buffer *trace_buffer; size_t desc_size; bool tracing_on; int nr_readers; struct mutex lock; struct ht_iterator *printk_iter; bool printk_on; }; struct kvm_nvhe_clock_data { u32 mult; u32 shift; u64 epoch_ns; u64 epoch_cyc; }; struct trace_page_desc { int nr_cpus; char __data[0]; }; struct hyp_trace_desc { struct kvm_nvhe_clock_data clock_data; struct trace_page_desc page_desc; }; enum ring_buffer_flags { RB_FL_OVERWRITE = 1, }; struct rb_page_desc { int cpu; int nr_page_va; unsigned long meta_va; unsigned long page_va[0]; }; struct kvm_vcpu___2; struct kvm_cpu_context___2 { struct user_pt_regs regs; u64 spsr_abt; u64 spsr_und; u64 spsr_irq; u64 spsr_fiq; struct user_fpsimd_state fp_regs; u64 sys_regs[167]; struct kvm_vcpu___2 *__hyp_running_vcpu; }; struct vgic_irq___2 { raw_spinlock_t irq_lock; struct list_head lpi_list; struct list_head ap_list; struct kvm_vcpu___2 *vcpu; struct kvm_vcpu___2 *target_vcpu; u32 intid; bool line_level; bool pending_latch; bool active; bool enabled; bool hw; struct kref refcount; u32 hwintid; unsigned int host_irq; union { u8 targets; u32 mpidr; }; u8 source; u8 active_source; u8 priority; u8 group; enum vgic_irq_config config; struct irq_ops *ops; void *owner; }; struct kvm_io_device_ops___2; struct kvm_io_device___2 { const struct kvm_io_device_ops___2 *ops; }; struct vgic_its___2; struct vgic_io_device___2 { gpa_t base_addr; union { struct kvm_vcpu___2 *redist_vcpu; struct vgic_its___2 *its; }; const struct vgic_register_region *regions; enum iodev_type iodev_type; int nr_regions; struct kvm_io_device___2 dev; }; struct vgic_cpu___2 { union { struct vgic_v2_cpu_if vgic_v2; struct vgic_v3_cpu_if vgic_v3; }; struct vgic_irq___2 private_irqs[32]; raw_spinlock_t ap_list_lock; struct list_head ap_list_head; struct vgic_io_device___2 rd_iodev; struct vgic_redist_region *rdreg; u32 rdreg_index; atomic_t syncr_busy; u64 pendbaser; atomic_t ctlr; u32 num_pri_bits; u32 num_id_bits; }; struct arch_timer_context___2 { struct kvm_vcpu___2 *vcpu; struct hrtimer hrtimer; u64 ns_frac; struct arch_timer_offset offset; bool loaded; struct { bool level; } irq; u32 host_timer_irq; }; struct arch_timer_cpu___2 { struct 
arch_timer_context___2 timers[4]; struct hrtimer bg_timer; bool enabled; }; struct kvm_s2_mmu___2; struct kvm_vcpu_arch___2 { struct kvm_cpu_context___2 ctxt; void *sve_state; enum fp_type fp_type; unsigned int sve_max_vl; u64 svcr; struct kvm_s2_mmu___2 *hw_mmu; u64 hcr_el2; u64 mdcr_el2; u64 cptr_el2; u64 mdcr_el2_host; struct kvm_vcpu_fault_info fault; enum { FP_STATE_FREE___2 = 0, FP_STATE_HOST_OWNED___2 = 1, FP_STATE_GUEST_OWNED___2 = 2, } fp_state; u8 cflags; u8 iflags; u8 sflags; bool pause; struct kvm_guest_debug_arch *debug_ptr; struct kvm_guest_debug_arch vcpu_debug_state; struct kvm_guest_debug_arch external_debug_state; struct user_fpsimd_state *host_fpsimd_state; struct { struct kvm_guest_debug_arch regs; u64 pmscr_el1; u64 trfcr_el1; } host_debug_state; struct vgic_cpu___2 vgic_cpu; struct arch_timer_cpu___2 timer_cpu; struct kvm_pmu pmu; struct { u32 mdscr_el1; bool pstate_ss; } guest_debug_preserved; struct kvm_mp_state mp_state; spinlock_t mp_state_lock; union { struct kvm_mmu_memory_cache mmu_page_cache; struct kvm_hyp_memcache stage2_mc; }; unsigned long features[1]; u64 vsesr_el2; struct vcpu_reset_state reset_state; struct { u64 last_steal; gpa_t base; } steal; u32 *ccsidr; struct kvm_hyp_req *hyp_reqs; }; struct kvm___2; struct kvm_vcpu___2 { struct kvm___2 *kvm; struct preempt_notifier preempt_notifier; int cpu; int vcpu_id; int vcpu_idx; int ____srcu_idx; int mode; u64 requests; unsigned long guest_debug; struct mutex mutex; struct kvm_run *run; struct rcuwait wait; struct pid __attribute__((btf_type_tag("rcu"))) *pid; int sigset_active; sigset_t sigset; unsigned int halt_poll_ns; bool valid_wakeup; int mmio_needed; int mmio_read_completed; int mmio_is_write; int mmio_cur_fragment; int mmio_nr_fragments; struct kvm_mmio_fragment mmio_fragments[2]; struct { bool in_spin_loop; bool dy_eligible; } spin_loop; bool preempted; bool ready; long: 64; struct kvm_vcpu_arch___2 arch; struct kvm_vcpu_stat stat; char stats_id[48]; struct kvm_dirty_ring dirty_ring; struct kvm_memory_slot *last_used_slot; u64 last_used_slot_gen; long: 64; }; struct kvm_pgtable___2; struct kvm_arch___2; struct kvm_s2_mmu___2 { struct kvm_vmid vmid; phys_addr_t pgd_phys; struct kvm_pgtable___2 *pgt; int __attribute__((btf_type_tag("percpu"))) *last_vcpu_ran; struct kvm_mmu_memory_cache split_page_cache; uint64_t split_page_chunk_size; struct kvm_arch___2 *arch; }; struct vgic_dist___2 { bool in_kernel; bool ready; bool initialized; u32 vgic_model; u32 implementation_rev; bool v2_groups_user_writable; bool msis_require_devid; int nr_spis; gpa_t vgic_dist_base; union { gpa_t vgic_cpu_base; struct list_head rd_regions; }; bool enabled; bool nassgireq; struct vgic_irq___2 *spis; struct vgic_io_device___2 dist_iodev; bool has_its; bool table_write_in_progress; u64 propbaser; raw_spinlock_t lpi_list_lock; struct list_head lpi_list_head; int lpi_list_count; struct list_head lpi_translation_cache; struct vgic_state_iter *iter; struct its_vm its_vm; }; struct kvm_arch___2 { struct kvm_s2_mmu___2 mmu; u64 vtcr; struct vgic_dist___2 vgic; struct arch_timer_vm_data timer_data; u32 psci_version; struct mutex config_lock; unsigned long flags; unsigned long vcpu_features[1]; unsigned long *pmu_filter; struct arm_pmu *arm_pmu; cpumask_var_t supported_cpus; struct kvm_smccc_features smccc_feat; struct maple_tree smccc_filter; u64 id_regs[56]; struct kvm_protected_vm pkvm; }; struct kvm_io_bus___2; struct kvm_stat_data___2; struct kvm___2 { rwlock_t mmu_lock; struct mutex slots_lock; struct mutex slots_arch_lock; 
struct mm_struct *mm; unsigned long nr_memslot_pages; struct kvm_memslots __memslots[2]; struct kvm_memslots __attribute__((btf_type_tag("rcu"))) *memslots[1]; struct xarray vcpu_array; atomic_t nr_memslots_dirty_logging; spinlock_t mn_invalidate_lock; unsigned long mn_active_invalidate_count; struct rcuwait mn_memslots_update_rcuwait; spinlock_t gpc_lock; struct list_head gpc_list; atomic_t online_vcpus; int max_vcpus; int created_vcpus; int last_boosted_vcpu; struct list_head vm_list; struct mutex lock; struct kvm_io_bus___2 __attribute__((btf_type_tag("rcu"))) *buses[4]; struct { spinlock_t lock; struct list_head items; struct list_head resampler_list; struct mutex resampler_lock; } irqfds; struct list_head ioeventfds; struct kvm_vm_stat stat; struct kvm_arch___2 arch; refcount_t users_count; struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; spinlock_t ring_lock; struct list_head coalesced_zones; struct mutex irq_lock; struct kvm_irq_routing_table __attribute__((btf_type_tag("rcu"))) *irq_routing; struct hlist_head irq_ack_notifier_list; struct mmu_notifier mmu_notifier; unsigned long mmu_invalidate_seq; long mmu_invalidate_in_progress; unsigned long mmu_invalidate_range_start; unsigned long mmu_invalidate_range_end; struct list_head devices; u64 manual_dirty_log_protect; struct dentry *debugfs_dentry; struct kvm_stat_data___2 **debugfs_stat_data; struct srcu_struct srcu; struct srcu_struct irq_srcu; pid_t userspace_pid; bool override_halt_poll_ns; unsigned int max_halt_poll_ns; u32 dirty_ring_size; bool dirty_ring_with_bitmap; bool vm_bugged; bool vm_dead; char stats_id[48]; }; struct kvm_io_range___2 { gpa_t addr; int len; struct kvm_io_device___2 *dev; }; struct kvm_io_bus___2 { int dev_count; int ioeventfd_count; struct kvm_io_range___2 range[0]; }; struct kvm_io_device_ops___2 { int (*read)(struct kvm_vcpu___2 *, struct kvm_io_device___2 *, gpa_t, int, void *); int (*write)(struct kvm_vcpu___2 *, struct kvm_io_device___2 *, gpa_t, int, const void *); void (*destructor)(struct kvm_io_device___2 *); }; typedef kvm_pte_t *kvm_pteref_t___2; struct kvm_pgtable___2 { u32 ia_bits; u32 start_level; kvm_pteref_t___2 pgd; struct kvm_pgtable_mm_ops *mm_ops; struct kvm_s2_mmu___2 *mmu; enum kvm_pgtable_stage2_flags flags; struct kvm_pgtable_pte_ops *pte_ops; }; struct kvm_device___2; struct vgic_its___2 { gpa_t vgic_its_base; bool enabled; struct vgic_io_device___2 iodev; struct kvm_device___2 *dev; u64 baser_device_table; u64 baser_coll_table; struct mutex cmd_lock; u64 cbaser; u32 creadr; u32 cwriter; u32 abi_rev; struct mutex its_lock; struct list_head device_list; struct list_head collection_list; }; struct kvm_device_ops___2; struct kvm_device___2 { const struct kvm_device_ops___2 *ops; struct kvm___2 *kvm; void *private; struct list_head vm_node; }; struct kvm_device_ops___2 { const char *name; int (*create)(struct kvm_device___2 *, u32); void (*init)(struct kvm_device___2 *); void (*destroy)(struct kvm_device___2 *); void (*release)(struct kvm_device___2 *); int (*set_attr)(struct kvm_device___2 *, struct kvm_device_attr *); int (*get_attr)(struct kvm_device___2 *, struct kvm_device_attr *); int (*has_attr)(struct kvm_device___2 *, struct kvm_device_attr *); long (*ioctl)(struct kvm_device___2 *, unsigned int, unsigned long); int (*mmap)(struct kvm_device___2 *, struct vm_area_struct *); }; struct kvm_stat_data___2 { struct kvm___2 *kvm; const struct _kvm_stats_desc *desc; enum kvm_stat_kind kind; }; struct kvm_host_data___2 { struct kvm_cpu_context___2 host_ctxt; }; typedef bool 
(*exit_handler_fn)(struct kvm_vcpu___2 *, u64 *); struct timer_map___2 { struct arch_timer_context___2 *direct_vtimer; struct arch_timer_context___2 *direct_ptimer; struct arch_timer_context___2 *emul_vtimer; struct arch_timer_context___2 *emul_ptimer; }; struct kvm_exception_table_entry { int insn; int fixup; }; struct tlb_inv_context { unsigned long flags; u64 tcr; u64 sctlr; }; struct kvm_host_sve_state { u64 zcr_el1; u32 fpsr; u32 fpcr; char sve_regs[0]; }; struct tlb_inv_context___2 { struct kvm_s2_mmu___2 *mmu; u64 tcr; u64 sctlr; }; struct kvm_iommu_ops { int (*init)(unsigned long); struct kvm_hyp_iommu * (*get_iommu_by_id)(pkvm_handle_t); int (*alloc_domain)(struct kvm_hyp_iommu_domain *, u32); void (*free_domain)(struct kvm_hyp_iommu_domain *); int (*attach_dev)(struct kvm_hyp_iommu *, struct kvm_hyp_iommu_domain *, u32, u32, u32); int (*detach_dev)(struct kvm_hyp_iommu *, struct kvm_hyp_iommu_domain *, u32, u32); bool (*dabt_handler)(struct kvm_cpu_context___2 *, u64, u64); int (*suspend)(struct kvm_hyp_iommu *); int (*resume)(struct kvm_hyp_iommu *); void (*iotlb_sync)(struct kvm_hyp_iommu_domain *, struct iommu_iotlb_gather *); void (*host_stage2_idmap)(struct kvm_hyp_iommu_domain *, phys_addr_t, phys_addr_t, int); int (*map_pages)(struct kvm_hyp_iommu_domain *, unsigned long, phys_addr_t, size_t, size_t, int, size_t *); size_t (*unmap_pages)(struct kvm_hyp_iommu_domain *, unsigned long, size_t, size_t, struct iommu_iotlb_gather *, struct kvm_iommu_paddr_cache *); phys_addr_t (*iova_to_phys)(struct kvm_hyp_iommu_domain *, unsigned long); u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; u64 android_kabi_reserved5; u64 android_kabi_reserved6; u64 android_kabi_reserved7; u64 android_kabi_reserved8; }; struct kvm_iommu_paddr_cache { unsigned short ptr; u64 paddr[511]; size_t pgsize[511]; }; typedef void (*hcall_t)(struct kvm_cpu_context___2 *); struct pkvm_hyp_vcpu; typedef void (*hyp_entry_exit_handler_fn)(struct pkvm_hyp_vcpu *); struct pkvm_hyp_vcpu { struct kvm_vcpu___2 vcpu; struct kvm_vcpu___2 *host_vcpu; struct pkvm_hyp_vcpu **loaded_hyp_vcpu; u32 exit_code; int power_state; long: 64; }; union hyp_spinlock { u32 __val; struct { u16 owner; u16 next; }; }; typedef union hyp_spinlock hyp_spinlock_t; struct hyp_pool { hyp_spinlock_t lock; struct list_head free_area[11]; phys_addr_t range_start; phys_addr_t range_end; u64 free_pages; u8 max_order; }; struct pkvm_hyp_vm { struct kvm___2 kvm; struct kvm___2 *host_kvm; struct kvm_pgtable___2 pgt; struct kvm_pgtable_mm_ops mm_ops; struct hyp_pool pool; hyp_spinlock_t pgtable_lock; struct pkvm_hyp_vcpu *pvmfw_entry_vcpu; unsigned short refcount; unsigned int nr_vcpus; hyp_spinlock_t vcpus_lock; bool is_dying; struct pkvm_hyp_vcpu *vcpus[0]; }; struct kvm_host_psci_config { u32 version; u32 smccc_version; struct psci_0_1_function_ids function_ids_0_1; bool psci_0_1_cpu_suspend_implemented; bool psci_0_1_cpu_on_implemented; bool psci_0_1_cpu_off_implemented; bool psci_0_1_migrate_implemented; }; struct psci_boot_args { atomic_t lock; unsigned long pc; unsigned long r0; }; struct hyp_mgt_allocator_ops { int (*refill)(struct kvm_hyp_memcache *); int (*reclaimable)(); void (*reclaim)(struct kvm_hyp_memcache *, int); }; struct hyp_allocator { struct list_head chunks; unsigned long start; u32 size; hyp_spinlock_t lock; }; struct chunk_hdr { u32 alloc_size; u32 mapped_size; struct list_head node; u32 hash; long: 0; char data; }; enum pkvm_page_state { PKVM_PAGE_OWNED = 0, 
PKVM_PAGE_SHARED_OWNED = 1, PKVM_PAGE_SHARED_BORROWED = 2, PKVM_PAGE_MMIO_DMA = 3, PKVM_MODULE_OWNED_PAGE = 4, PKVM_NOPAGE = 8, PKVM_PAGE_RESTRICTED_PROT = 16, PKVM_MMIO = 32, }; struct hyp_page { unsigned short refcount; u8 order; enum pkvm_page_state host_state: 8; }; enum pkvm_component_id { PKVM_ID_HOST = 0, PKVM_ID_HYP = 1, PKVM_ID_FFA = 2, PKVM_ID_GUEST = 3, PKVM_ID_PROTECTED = 4, PKVM_ID_MAX = 4, }; struct hyp_fixmap_slot { u64 addr; kvm_pte_t *ptep; }; struct host_mmu { struct kvm_arch___2 arch; struct kvm_pgtable___2 pgt; struct kvm_pgtable_mm_ops mm_ops; hyp_spinlock_t lock; }; struct guest_request_walker_data { unsigned long ipa_start; phys_addr_t phys_start; u64 size; enum pkvm_page_state desired_state; enum pkvm_page_state desired_mask; int max_ptes; }; struct kvm_mem_range { u64 start; u64 end; }; struct pkvm_mem_transition; struct pkvm_checked_mem_transition { const struct pkvm_mem_transition *tx; u64 completer_addr; u64 nr_pages; }; struct pkvm_mem_transition { u64 nr_pages; struct { enum pkvm_component_id id; u64 addr; union { struct { u64 completer_addr; } host; struct { u64 completer_addr; } hyp; struct { struct pkvm_hyp_vm *hyp_vm; struct kvm_hyp_memcache *mc; } guest; }; } initiator; struct { enum pkvm_component_id id; union { struct { struct pkvm_hyp_vm *hyp_vm; struct kvm_hyp_memcache *mc; phys_addr_t phys; } guest; }; const enum kvm_pgtable_prot prot; } completer; }; struct check_walk_data { enum pkvm_page_state desired; enum pkvm_page_state (*get_page_state)(kvm_pte_t, u64); }; struct relinquish_data { enum pkvm_page_state expected_state; u64 pa; }; struct sys_reg_desc_reset { int reg; void (*reset)(struct kvm_vcpu___2 *, const struct sys_reg_desc_reset *); u64 value; }; struct sys_reg_desc___2 { const char *name; enum { AA32_DIRECT___2 = 0, AA32_LO___2 = 1, AA32_HI___2 = 2, } aarch32_map; u8 Op0; u8 Op1; u8 CRn; u8 CRm; u8 Op2; bool (*access)(struct kvm_vcpu___2 *, struct sys_reg_params *, const struct sys_reg_desc___2 *); u64 (*reset)(struct kvm_vcpu___2 *, const struct sys_reg_desc___2 *); int reg; u64 val; int (*__get_user)(struct kvm_vcpu___2 *, const struct sys_reg_desc___2 *, u64 *); int (*set_user)(struct kvm_vcpu___2 *, const struct sys_reg_desc___2 *, u64); unsigned int (*visibility)(const struct kvm_vcpu___2 *, const struct sys_reg_desc___2 *); }; typedef struct { u32 __val; } hyp_rwlock_t; struct kvm_ffa_descriptor_buffer { void *buf; size_t len; }; struct kvm_ffa_buffers { hyp_spinlock_t lock; void *tx; void *rx; }; struct ffa_mem_region { u16 sender_id; u16 attributes; u32 flags; u64 handle; u64 tag; u32 ep_mem_size; u32 ep_count; u32 ep_mem_offset; u32 reserved[3]; }; struct ffa_mem_region_addr_range { u64 address; u32 pg_cnt; u32 reserved; }; struct ffa_composite_mem_region { u32 total_pg_cnt; u32 addr_range_cnt; u64 reserved; struct ffa_mem_region_addr_range constituents[0]; }; struct ffa_mem_region_attributes { u16 receiver; u8 attrs; u8 flag; u32 composite_off; u64 reserved; }; struct kvm_pgtable_walk_data { struct kvm_pgtable_walker *walker; const u64 start; u64 addr; const u64 end; }; struct hyp_map_data { const u64 phys; kvm_pte_t attr; }; struct stage2_map_data { const u64 phys; kvm_pte_t attr; u64 annotation; kvm_pte_t *anchor; kvm_pte_t *childp; struct kvm_s2_mmu___2 *mmu; void *memcache; bool force_pte; }; struct leaf_walk_data { kvm_pte_t pte; u32 level; }; struct stage2_attr_data { kvm_pte_t attr_set; kvm_pte_t attr_clr; kvm_pte_t pte; u32 level; }; struct stage2_age_data { bool mkold; bool young; }; struct kvm_power_domain_ops { int 
(*power_on)(struct kvm_power_domain *); int (*power_off)(struct kvm_power_domain *); }; struct ring_buffer_meta; struct hyp_buffer_page; struct hyp_rb_per_cpu { struct ring_buffer_meta *meta; struct hyp_buffer_page *tail_page; struct hyp_buffer_page *reader_page; struct hyp_buffer_page *head_page; struct hyp_buffer_page *bpages; unsigned long nr_pages; unsigned long last_overrun; u64 write_stamp; atomic_t status; }; struct reader_page { __u32 id; __u32 read; unsigned long lost_events; }; struct ring_buffer_meta { unsigned long entries; unsigned long overrun; unsigned long read; unsigned long pages_touched; unsigned long pages_lost; unsigned long pages_read; __u32 meta_page_size; __u32 nr_data_pages; struct reader_page reader_page; }; struct buffer_data_page; struct hyp_buffer_page { struct list_head list; struct buffer_data_page *page; unsigned long write; unsigned long entries; u32 id; }; struct buffer_data_page { u64 time_stamp; local_t commit; unsigned char data[0]; }; enum ring_buffer_type { RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28, RINGBUF_TYPE_PADDING = 29, RINGBUF_TYPE_TIME_EXTEND = 30, RINGBUF_TYPE_TIME_STAMP = 31, }; typedef void (*dyn_hcall_t)(struct user_pt_regs *); struct hvc_power_domain { struct kvm_power_domain *pd; const struct kvm_power_domain_ops *ops; }; struct stage2_map_data___2 { const u64 phys; kvm_pte_t attr; u64 annotation; kvm_pte_t *anchor; kvm_pte_t *childp; struct kvm_s2_mmu *mmu; void *memcache; bool force_pte; }; enum gunyah_error { GUNYAH_ERROR_OK = 0, GUNYAH_ERROR_UNIMPLEMENTED = -1, GUNYAH_ERROR_RETRY = -2, GUNYAH_ERROR_ARG_INVAL = 1, GUNYAH_ERROR_ARG_SIZE = 2, GUNYAH_ERROR_ARG_ALIGN = 3, GUNYAH_ERROR_NOMEM = 10, GUNYAH_ERROR_ADDR_OVFL = 20, GUNYAH_ERROR_ADDR_UNFL = 21, GUNYAH_ERROR_ADDR_INVAL = 22, GUNYAH_ERROR_DENIED = 30, GUNYAH_ERROR_BUSY = 31, GUNYAH_ERROR_IDLE = 32, GUNYAH_ERROR_IRQ_BOUND = 40, GUNYAH_ERROR_IRQ_UNBOUND = 41, GUNYAH_ERROR_CSPACE_CAP_NULL = 50, GUNYAH_ERROR_CSPACE_CAP_REVOKED = 51, GUNYAH_ERROR_CSPACE_WRONG_OBJ_TYPE = 52, GUNYAH_ERROR_CSPACE_INSUF_RIGHTS = 53, GUNYAH_ERROR_CSPACE_FULL = 54, GUNYAH_ERROR_MSGQUEUE_EMPTY = 60, GUNYAH_ERROR_MSGQUEUE_FULL = 61, }; struct gunyah_hypercall_vcpu_run_resp { union { enum { GUNYAH_VCPU_STATE_READY = 0, GUNYAH_VCPU_STATE_EXPECTS_WAKEUP = 1, GUNYAH_VCPU_STATE_POWERED_OFF = 2, GUNYAH_VCPU_STATE_BLOCKED = 3, GUNYAH_VCPU_ADDRSPACE_VMMIO_READ = 4, GUNYAH_VCPU_ADDRSPACE_VMMIO_WRITE = 5, GUNYAH_VCPU_ADDRSPACE_PAGE_FAULT = 7, } state; u64 sized_state; }; u64 state_data[3]; }; struct arm_smccc_1_2_regs { unsigned long a0; unsigned long a1; unsigned long a2; unsigned long a3; unsigned long a4; unsigned long a5; unsigned long a6; unsigned long a7; unsigned long a8; unsigned long a9; unsigned long a10; unsigned long a11; unsigned long a12; unsigned long a13; unsigned long a14; unsigned long a15; unsigned long a16; unsigned long a17; }; struct gunyah_hypercall_hyp_identify_resp { u64 api_info; u64 flags[3]; }; typedef void (*exitcall_t)(); struct crypto_tfm; struct cipher_alg { unsigned int cia_min_keysize; unsigned int cia_max_keysize; int (*cia_setkey)(struct crypto_tfm *, const u8 *, unsigned int); void (*cia_encrypt)(struct crypto_tfm *, u8 *, const u8 *); void (*cia_decrypt)(struct crypto_tfm *, u8 *, const u8 *); }; struct compress_alg { int (*coa_compress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); int (*coa_decompress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); }; struct crypto_type; struct crypto_alg { struct list_head cra_list; struct list_head 
cra_users; u32 cra_flags; unsigned int cra_blocksize; unsigned int cra_ctxsize; unsigned int cra_alignmask; int cra_priority; refcount_t cra_refcnt; char cra_name[128]; char cra_driver_name[128]; const struct crypto_type *cra_type; union { struct cipher_alg cipher; struct compress_alg compress; } cra_u; int (*cra_init)(struct crypto_tfm *); void (*cra_exit)(struct crypto_tfm *); void (*cra_destroy)(struct crypto_alg *); struct module *cra_module; }; struct hash_alg_common { unsigned int digestsize; unsigned int statesize; struct crypto_alg base; }; struct shash_desc; struct crypto_shash; struct shash_alg { int (*init)(struct shash_desc *); int (*update)(struct shash_desc *, const u8 *, unsigned int); int (*final)(struct shash_desc *, u8 *); int (*finup)(struct shash_desc *, const u8 *, unsigned int, u8 *); int (*digest)(struct shash_desc *, const u8 *, unsigned int, u8 *); int (*export)(struct shash_desc *, void *); int (*import)(struct shash_desc *, const void *); int (*setkey)(struct crypto_shash *, const u8 *, unsigned int); int (*init_tfm)(struct crypto_shash *); void (*exit_tfm)(struct crypto_shash *); int (*clone_tfm)(struct crypto_shash *, struct crypto_shash *); unsigned int descsize; union { int (*finup_mb)(struct shash_desc *, const u8 * const *, unsigned int, u8 * const *, unsigned int); struct { u64 android_backport_reserved1; }; union {}; }; union { unsigned int mb_max_msgs; struct { u64 android_backport_reserved2; }; union {}; }; union { struct { unsigned int digestsize; unsigned int statesize; struct crypto_alg base; }; struct hash_alg_common halg; }; }; struct shash_desc { struct crypto_shash *tfm; void *__ctx[0]; }; struct crypto_tfm { refcount_t refcnt; u32 crt_flags; int node; void (*exit)(struct crypto_tfm *); struct crypto_alg *__crt_alg; void *__crt_ctx[0]; }; struct crypto_shash { unsigned int descsize; struct crypto_tfm base; }; struct crypto_instance; struct crypto_type { unsigned int (*ctxsize)(struct crypto_alg *, u32, u32); unsigned int (*extsize)(struct crypto_alg *); int (*init_tfm)(struct crypto_tfm *); void (*show)(struct seq_file *, struct crypto_alg *); int (*report)(struct sk_buff *, struct crypto_alg *); void (*free)(struct crypto_instance *); unsigned int type; unsigned int maskclear; unsigned int maskset; unsigned int tfmsize; }; struct crypto_template; struct crypto_spawn; struct crypto_instance { struct crypto_alg alg; struct crypto_template *tmpl; union { struct hlist_node list; struct crypto_spawn *spawns; }; struct work_struct free_work; void *__ctx[0]; }; struct rtattr; struct crypto_template { struct list_head list; struct hlist_head instances; struct module *module; int (*create)(struct crypto_template *, struct rtattr **); char name[128]; }; struct crypto_spawn { struct list_head list; struct crypto_alg *alg; union { struct crypto_instance *inst; struct crypto_spawn *next; }; const struct crypto_type *frontend; u32 mask; bool dead; bool registered; }; struct sha256_state { u32 state[8]; u64 count; u8 buf[64]; }; struct sha256_ce_state { struct sha256_state sst; u32 finalize; }; typedef void sha256_block_fn(struct sha256_state *, const u8 *, int); struct sha512_state; typedef void sha512_block_fn(struct sha512_state *, const u8 *, int); struct sha512_state { u64 state[8]; u64 count[2]; u8 buf[128]; }; struct crypto_aead; struct aead_request; struct aead_alg { int (*setkey)(struct crypto_aead *, const u8 *, unsigned int); int (*setauthsize)(struct crypto_aead *, unsigned int); int (*encrypt)(struct aead_request *); int (*decrypt)(struct 
aead_request *); int (*init)(struct crypto_aead *); void (*exit)(struct crypto_aead *); unsigned int ivsize; unsigned int maxauthsize; unsigned int chunksize; struct crypto_alg base; }; struct crypto_aead { unsigned int authsize; unsigned int reqsize; struct crypto_tfm base; }; typedef void (*crypto_completion_t)(void *, int); struct crypto_async_request { struct list_head list; crypto_completion_t complete; void *data; struct crypto_tfm *tfm; u32 flags; }; struct aead_request { struct crypto_async_request base; unsigned int assoclen; unsigned int cryptlen; u8 *iv; struct scatterlist *src; struct scatterlist *dst; void *__ctx[0]; }; typedef struct { __be64 a; __be64 b; } be128; struct crypto_aes_ctx { u32 key_enc[60]; u32 key_dec[60]; u32 key_length; }; struct scatter_walk { struct scatterlist *sg; unsigned int offset; }; struct skcipher_walk { union { struct { struct page *page; unsigned long offset; } phys; struct { u8 *page; void *addr; } virt; } src; union { struct { struct page *page; unsigned long offset; } phys; struct { u8 *page; void *addr; } virt; } dst; struct scatter_walk in; unsigned int nbytes; struct scatter_walk out; unsigned int total; struct list_head buffers; u8 *page; u8 *buffer; u8 *oiv; void *iv; unsigned int ivsize; int flags; unsigned int blocksize; unsigned int stride; unsigned int alignmask; }; struct ghash_key { be128 k; u64 h[0]; }; struct gcm_aes_ctx { struct crypto_aes_ctx aes_key; u8 nonce[4]; struct ghash_key ghash_key; }; struct ghash_desc_ctx { u64 digest[2]; u8 buf[16]; u32 count; }; struct polyval_tfm_ctx { u8 key_powers[128]; }; struct polyval_desc_ctx { u8 buffer[16]; u32 bytes; }; struct aes_block { u8 b[16]; }; struct crypto_skcipher; struct skcipher_request; struct skcipher_alg { int (*setkey)(struct crypto_skcipher *, const u8 *, unsigned int); int (*encrypt)(struct skcipher_request *); int (*decrypt)(struct skcipher_request *); int (*init)(struct crypto_skcipher *); void (*exit)(struct crypto_skcipher *); unsigned int min_keysize; unsigned int max_keysize; unsigned int ivsize; unsigned int chunksize; unsigned int walksize; struct crypto_alg base; }; struct crypto_skcipher { unsigned int reqsize; struct crypto_tfm base; }; struct skcipher_request { unsigned int cryptlen; u8 *iv; struct scatterlist *src; struct scatterlist *dst; struct crypto_async_request base; void *__ctx[0]; }; struct mac_tfm_ctx { struct crypto_aes_ctx key; long: 0; u8 consts[0]; }; struct crypto_aes_xts_ctx { struct crypto_aes_ctx key1; long: 0; struct crypto_aes_ctx key2; long: 0; }; struct crypto_aes_essiv_cbc_ctx { struct crypto_aes_ctx key1; long: 0; struct crypto_aes_ctx key2; struct crypto_shash *hash; }; struct mac_desc_ctx { unsigned int len; u8 dg[16]; }; enum chacha_constants { CHACHA_CONSTANT_EXPA = 1634760805, CHACHA_CONSTANT_ND_3 = 857760878, CHACHA_CONSTANT_2_BY = 2036477234, CHACHA_CONSTANT_TE_K = 1797285236, }; struct chacha_ctx { u32 key[8]; int nrounds; }; struct poly1305_key { union { u32 r[5]; u64 r64[3]; }; }; struct poly1305_core_key { struct poly1305_key key; struct poly1305_key precomputed_s; }; struct poly1305_state { union { u32 h[5]; u64 h64[3]; }; }; struct poly1305_desc_ctx { u8 buf[16]; unsigned int buflen; unsigned short rset; bool sset; u32 s[4]; struct poly1305_state h; union { struct poly1305_key opaque_r[9]; struct poly1305_core_key core_r; }; }; struct fdtable { unsigned int max_fds; struct file __attribute__((btf_type_tag("rcu"))) **fd; unsigned long *close_on_exec; unsigned long *open_fds; unsigned long *full_fds_bits; struct callback_head 
rcu; }; struct files_struct { atomic_t count; bool resize_in_progress; wait_queue_head_t resize_wait; struct fdtable __attribute__((btf_type_tag("rcu"))) *fdt; struct fdtable fdtab; long: 64; long: 64; long: 64; long: 64; spinlock_t file_lock; unsigned int next_fd; unsigned long close_on_exec_init[1]; unsigned long open_fds_init[1]; unsigned long full_fds_bits_init[1]; struct file __attribute__((btf_type_tag("rcu"))) *fd_array[64]; long: 64; long: 64; long: 64; long: 64; }; struct io_uring_sqe; struct io_uring_cmd { struct file *file; const struct io_uring_sqe *sqe; union { void (*task_work_cb)(struct io_uring_cmd *, unsigned int); void *cookie; }; u32 cmd_op; u32 flags; u8 pdu[32]; }; typedef int __kernel_rwf_t; struct io_uring_sqe { __u8 opcode; __u8 flags; __u16 ioprio; __s32 fd; union { __u64 off; __u64 addr2; struct { __u32 cmd_op; __u32 __pad1; }; }; union { __u64 addr; __u64 splice_off_in; }; __u32 len; union { __kernel_rwf_t rw_flags; __u32 fsync_flags; __u16 poll_events; __u32 poll32_events; __u32 sync_range_flags; __u32 msg_flags; __u32 timeout_flags; __u32 accept_flags; __u32 cancel_flags; __u32 open_flags; __u32 statx_flags; __u32 fadvise_advice; __u32 splice_flags; __u32 rename_flags; __u32 unlink_flags; __u32 hardlink_flags; __u32 xattr_flags; __u32 msg_ring_flags; __u32 uring_cmd_flags; }; __u64 user_data; union { __u16 buf_index; __u16 buf_group; }; __u16 personality; union { __s32 splice_fd_in; __u32 file_index; struct { __u16 addr_len; __u16 __pad3[1]; }; }; union { struct { __u64 addr3; __u64 __pad2[1]; }; __u8 cmd[0]; }; }; typedef void (*btf_trace_task_newtask)(void *, struct task_struct *, unsigned long); typedef void (*btf_trace_task_rename)(void *, struct task_struct *, const char *); enum { MM_FILEPAGES = 0, MM_ANONPAGES = 1, MM_SWAPENTS = 2, MM_SHMEMPAGES = 3, NR_MM_COUNTERS = 4, }; enum ucount_type { UCOUNT_USER_NAMESPACES = 0, UCOUNT_PID_NAMESPACES = 1, UCOUNT_UTS_NAMESPACES = 2, UCOUNT_IPC_NAMESPACES = 3, UCOUNT_NET_NAMESPACES = 4, UCOUNT_MNT_NAMESPACES = 5, UCOUNT_CGROUP_NAMESPACES = 6, UCOUNT_TIME_NAMESPACES = 7, UCOUNT_INOTIFY_INSTANCES = 8, UCOUNT_INOTIFY_WATCHES = 9, UCOUNT_COUNTS = 10, }; enum rlimit_type { UCOUNT_RLIMIT_NPROC = 0, UCOUNT_RLIMIT_MSGQUEUE = 1, UCOUNT_RLIMIT_SIGPENDING = 2, UCOUNT_RLIMIT_MEMLOCK = 3, UCOUNT_RLIMIT_COUNTS = 4, }; enum { TASK_COMM_LEN = 16, }; enum proc_hidepid { HIDEPID_OFF = 0, HIDEPID_NO_ACCESS = 1, HIDEPID_INVISIBLE = 2, HIDEPID_NOT_PTRACEABLE = 4, }; enum proc_pidonly { PROC_PIDONLY_OFF = 0, PROC_PIDONLY_ON = 1, }; enum { FUTEX_STATE_OK = 0, FUTEX_STATE_EXITING = 1, FUTEX_STATE_DEAD = 2, }; enum tk_offsets { TK_OFFS_REAL = 0, TK_OFFS_BOOT = 1, TK_OFFS_TAI = 2, TK_OFFS_MAX = 3, }; struct trace_event_raw_task_newtask { struct trace_entry ent; pid_t pid; char comm[16]; unsigned long clone_flags; short oom_score_adj; char __data[0]; }; struct trace_event_raw_task_rename { struct trace_entry ent; pid_t pid; char oldcomm[16]; char newcomm[16]; short oom_score_adj; char __data[0]; }; struct vm_stack { struct callback_head rcu; struct vm_struct *stack_vm_area; }; struct clone_args { __u64 flags; __u64 pidfd; __u64 child_tid; __u64 parent_tid; __u64 exit_signal; __u64 stack; __u64 stack_size; __u64 tls; __u64 set_tid; __u64 set_tid_size; __u64 cgroup; }; struct proc_fs_info { struct pid_namespace *pid_ns; struct dentry *proc_self; struct dentry *proc_thread_self; kgid_t pid_gid; enum proc_hidepid hide_pid; enum proc_pidonly pidonly; struct callback_head rcu; }; struct trace_event_data_offsets_task_newtask {}; struct 
trace_event_data_offsets_task_rename {}; struct multiprocess_signals { sigset_t signal; struct hlist_node node; }; typedef int (*proc_visitor)(struct task_struct *, void *); struct taint_flag { char c_true; char c_false; bool module; }; enum kmsg_dump_reason { KMSG_DUMP_UNDEF = 0, KMSG_DUMP_PANIC = 1, KMSG_DUMP_OOPS = 2, KMSG_DUMP_EMERG = 3, KMSG_DUMP_SHUTDOWN = 4, KMSG_DUMP_MAX = 5, }; enum con_flush_mode { CONSOLE_FLUSH_PENDING = 0, CONSOLE_REPLAY_ALL = 1, }; enum error_detector { ERROR_DETECTOR_KFENCE = 0, ERROR_DETECTOR_KASAN = 1, ERROR_DETECTOR_WARN = 2, }; enum ftrace_dump_mode { DUMP_NONE = 0, DUMP_ALL = 1, DUMP_ORIG = 2, }; enum ctx_state { CONTEXT_DISABLED = -1, CONTEXT_KERNEL = 0, CONTEXT_IDLE = 1, CONTEXT_USER = 2, CONTEXT_GUEST = 3, CONTEXT_MAX = 4, }; struct warn_args { const char *fmt; va_list args; }; typedef void (*btf_trace_cpuhp_enter)(void *, unsigned int, int, int, int (*)(unsigned int)); typedef void (*btf_trace_cpuhp_multi_enter)(void *, unsigned int, int, int, int (*)(unsigned int, struct hlist_node *), struct hlist_node *); typedef void (*btf_trace_cpuhp_exit)(void *, unsigned int, int, int, int); struct smp_hotplug_thread { struct task_struct * __attribute__((btf_type_tag("percpu"))) *store; struct list_head list; int (*thread_should_run)(unsigned int); void (*thread_fn)(unsigned int); void (*create)(unsigned int); void (*setup)(unsigned int); void (*cleanup)(unsigned int, bool); void (*park)(unsigned int); void (*unpark)(unsigned int); bool selfparking; const char *thread_comm; }; struct cpuhp_cpu_state { enum cpuhp_state state; enum cpuhp_state target; enum cpuhp_state fail; struct task_struct *thread; bool should_run; bool rollback; bool single; bool bringup; struct hlist_node *node; struct hlist_node *last; enum cpuhp_state cb_state; int result; atomic_t ap_sync_state; struct completion done_up; struct completion done_down; }; struct cpuhp_step { const char *name; union { int (*single)(unsigned int); int (*multi)(unsigned int, struct hlist_node *); } startup; union { int (*single)(unsigned int); int (*multi)(unsigned int, struct hlist_node *); } teardown; struct hlist_head list; bool cant_stop; bool multi_instance; }; enum cpu_mitigations { CPU_MITIGATIONS_OFF = 0, CPU_MITIGATIONS_AUTO = 1, CPU_MITIGATIONS_AUTO_NOSMT = 2, }; enum cpuhp_sync_state { SYNC_STATE_DEAD = 0, SYNC_STATE_KICKED = 1, SYNC_STATE_SHOULD_DIE = 2, SYNC_STATE_ALIVE = 3, SYNC_STATE_SHOULD_ONLINE = 4, SYNC_STATE_ONLINE = 5, }; enum hk_type { HK_TYPE_TIMER = 0, HK_TYPE_RCU = 1, HK_TYPE_MISC = 2, HK_TYPE_SCHED = 3, HK_TYPE_TICK = 4, HK_TYPE_DOMAIN = 5, HK_TYPE_WQ = 6, HK_TYPE_MANAGED_IRQ = 7, HK_TYPE_KTHREAD = 8, HK_TYPE_MAX = 9, }; enum cc_attr { CC_ATTR_MEM_ENCRYPT = 0, CC_ATTR_HOST_MEM_ENCRYPT = 1, CC_ATTR_GUEST_MEM_ENCRYPT = 2, CC_ATTR_GUEST_STATE_ENCRYPT = 3, CC_ATTR_GUEST_UNROLL_STRING_IO = 4, CC_ATTR_GUEST_SEV_SNP = 5, CC_ATTR_HOTPLUG_DISABLED = 6, }; enum cpuhp_smt_control { CPU_SMT_ENABLED = 0, CPU_SMT_DISABLED = 1, CPU_SMT_FORCE_DISABLED = 2, CPU_SMT_NOT_SUPPORTED = 3, CPU_SMT_NOT_IMPLEMENTED = 4, }; struct trace_event_raw_cpuhp_enter { struct trace_entry ent; unsigned int cpu; int target; int idx; void *fun; char __data[0]; }; struct trace_event_raw_cpuhp_multi_enter { struct trace_entry ent; unsigned int cpu; int target; int idx; void *fun; char __data[0]; }; struct trace_event_raw_cpuhp_exit { struct trace_entry ent; unsigned int cpu; int state; int idx; int ret; char __data[0]; }; struct cpu_down_work { unsigned int cpu; enum cpuhp_state target; }; struct 
trace_event_data_offsets_cpuhp_enter {}; struct trace_event_data_offsets_cpuhp_multi_enter {}; struct trace_event_data_offsets_cpuhp_exit {}; struct pipe_buffer; struct pipe_inode_info { struct mutex mutex; wait_queue_head_t rd_wait; wait_queue_head_t wr_wait; unsigned int head; unsigned int tail; unsigned int max_usage; unsigned int ring_size; unsigned int nr_accounted; unsigned int readers; unsigned int writers; unsigned int files; unsigned int r_counter; unsigned int w_counter; bool poll_usage; struct page *tmp_page; struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; struct pipe_buffer *bufs; struct user_struct *user; }; struct pipe_buf_operations; struct pipe_buffer { struct page *page; unsigned int offset; unsigned int len; const struct pipe_buf_operations *ops; unsigned int flags; unsigned long private; }; struct pipe_buf_operations { int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); void (*release)(struct pipe_inode_info *, struct pipe_buffer *); bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *); bool (*get)(struct pipe_inode_info *, struct pipe_buffer *); }; struct __kernel_old_timeval { __kernel_long_t tv_sec; __kernel_long_t tv_usec; }; struct rusage { struct __kernel_old_timeval ru_utime; struct __kernel_old_timeval ru_stime; __kernel_long_t ru_maxrss; __kernel_long_t ru_ixrss; __kernel_long_t ru_idrss; __kernel_long_t ru_isrss; __kernel_long_t ru_minflt; __kernel_long_t ru_majflt; __kernel_long_t ru_nswap; __kernel_long_t ru_inblock; __kernel_long_t ru_oublock; __kernel_long_t ru_msgsnd; __kernel_long_t ru_msgrcv; __kernel_long_t ru_nsignals; __kernel_long_t ru_nvcsw; __kernel_long_t ru_nivcsw; }; struct waitid_info; struct wait_opts { enum pid_type wo_type; int wo_flags; struct pid *wo_pid; struct waitid_info *wo_info; int wo_stat; struct rusage *wo_rusage; wait_queue_entry_t child_wait; int notask_error; }; struct waitid_info { pid_t pid; uid_t uid; int status; int cause; }; typedef u32 compat_uint_t; struct old_timeval32 { old_time32_t tv_sec; s32 tv_usec; }; struct compat_rusage { struct old_timeval32 ru_utime; struct old_timeval32 ru_stime; compat_long_t ru_maxrss; compat_long_t ru_ixrss; compat_long_t ru_idrss; compat_long_t ru_isrss; compat_long_t ru_minflt; compat_long_t ru_majflt; compat_long_t ru_nswap; compat_long_t ru_inblock; compat_long_t ru_oublock; compat_long_t ru_msgsnd; compat_long_t ru_msgrcv; compat_long_t ru_nsignals; compat_long_t ru_nvcsw; compat_long_t ru_nivcsw; }; typedef void (*btf_trace_irq_handler_entry)(void *, int, struct irqaction *); typedef void (*btf_trace_irq_handler_exit)(void *, int, struct irqaction *, int); typedef void (*btf_trace_softirq_entry)(void *, unsigned int); typedef void (*btf_trace_softirq_exit)(void *, unsigned int); typedef void (*btf_trace_softirq_raise)(void *, unsigned int); struct tasklet_struct; typedef void (*btf_trace_tasklet_entry)(void *, struct tasklet_struct *, void *); struct tasklet_struct { struct tasklet_struct *next; unsigned long state; atomic_t count; bool use_callback; union { void (*func)(unsigned long); void (*callback)(struct tasklet_struct *); }; unsigned long data; }; typedef void (*btf_trace_tasklet_exit)(void *, struct tasklet_struct *, void *); typedef struct { unsigned int __softirq_pending; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; } irq_cpustat_t; struct softirq_action { void (*action)(struct softirq_action *); }; struct tasklet_head { struct tasklet_struct *head; struct tasklet_struct **tail; }; enum { 
HI_SOFTIRQ = 0, TIMER_SOFTIRQ = 1, NET_TX_SOFTIRQ = 2, NET_RX_SOFTIRQ = 3, BLOCK_SOFTIRQ = 4, IRQ_POLL_SOFTIRQ = 5, TASKLET_SOFTIRQ = 6, SCHED_SOFTIRQ = 7, HRTIMER_SOFTIRQ = 8, RCU_SOFTIRQ = 9, NR_SOFTIRQS = 10, }; enum { TASKLET_STATE_SCHED = 0, TASKLET_STATE_RUN = 1, }; struct trace_event_raw_irq_handler_entry { struct trace_entry ent; int irq; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_irq_handler_exit { struct trace_entry ent; int irq; int ret; char __data[0]; }; struct trace_event_raw_softirq { struct trace_entry ent; unsigned int vec; char __data[0]; }; struct trace_event_raw_tasklet { struct trace_entry ent; void *tasklet; void *func; char __data[0]; }; struct trace_event_data_offsets_irq_handler_entry { u32 name; }; struct wait_bit_key { void *flags; int bit_nr; unsigned long timeout; }; struct wait_bit_queue_entry { struct wait_bit_key key; struct wait_queue_entry wq_entry; }; struct trace_event_data_offsets_irq_handler_exit {}; struct trace_event_data_offsets_softirq {}; struct trace_event_data_offsets_tasklet {}; enum { IORES_DESC_NONE = 0, IORES_DESC_CRASH_KERNEL = 1, IORES_DESC_ACPI_TABLES = 2, IORES_DESC_ACPI_NV_STORAGE = 3, IORES_DESC_PERSISTENT_MEMORY = 4, IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5, IORES_DESC_DEVICE_PRIVATE_MEMORY = 6, IORES_DESC_RESERVED = 7, IORES_DESC_SOFT_RESERVED = 8, IORES_DESC_CXL = 9, }; enum { MAX_IORES_LEVEL = 5, }; enum { REGION_INTERSECTS = 0, REGION_DISJOINT = 1, REGION_MIXED = 2, }; struct resource_entry { struct list_head node; struct resource *res; resource_size_t offset; struct resource __res; }; struct resource_constraint { resource_size_t min; resource_size_t max; resource_size_t align; resource_size_t (*alignf)(void *, const struct resource *, resource_size_t, resource_size_t); void *alignf_data; }; typedef void (*dr_release_t)(struct device *, void *); typedef int (*dr_match_t)(struct device *, void *, void *); struct pseudo_fs_context { const struct super_operations *ops; const struct xattr_handler **xattr; const struct dentry_operations *dops; unsigned long magic; }; struct region_devres { struct resource *parent; resource_size_t start; resource_size_t n; }; enum sysctl_writes_mode { SYSCTL_WRITES_LEGACY = -1, SYSCTL_WRITES_WARN = 0, SYSCTL_WRITES_STRICT = 1, }; typedef __kernel_clock_t clock_t; struct do_proc_dointvec_minmax_conv_param { int *min; int *max; }; struct do_proc_douintvec_minmax_conv_param { unsigned int *min; unsigned int *max; }; struct __user_cap_header_struct; typedef struct __user_cap_header_struct *cap_user_header_t; struct __user_cap_header_struct { __u32 version; int pid; }; struct __user_cap_data_struct; typedef struct __user_cap_data_struct __attribute__((btf_type_tag("user"))) *cap_user_data_t; struct __user_cap_data_struct { __u32 effective; __u32 permitted; __u32 inheritable; }; struct compat_iovec { compat_uptr_t iov_base; compat_size_t iov_len; }; struct sigqueue { struct list_head list; int flags; kernel_siginfo_t info; struct ucounts *ucounts; }; struct ptrace_peeksiginfo_args { __u64 off; __u32 flags; __s32 nr; }; typedef struct compat_siginfo compat_siginfo_t; struct ptrace_syscall_info { __u8 op; __u8 pad[3]; __u32 arch; __u64 instruction_pointer; __u64 stack_pointer; union { struct { __u64 nr; __u64 args[6]; } entry; struct { __s64 rval; __u8 is_error; } exit; struct { __u64 nr; __u64 args[6]; __u32 ret_data; } seccomp; }; }; typedef int wait_bit_action_f(struct wait_bit_key *, int); typedef void (*btf_trace_signal_generate)(void *, int, struct kernel_siginfo *, struct task_struct 
*, int, int); typedef void (*btf_trace_signal_deliver)(void *, int, struct kernel_siginfo *, struct k_sigaction *); enum sig_handler { HANDLER_CURRENT = 0, HANDLER_SIG_DFL = 1, HANDLER_EXIT = 2, }; enum { TRACE_SIGNAL_DELIVERED = 0, TRACE_SIGNAL_IGNORED = 1, TRACE_SIGNAL_ALREADY_PENDING = 2, TRACE_SIGNAL_OVERFLOW_FAIL = 3, TRACE_SIGNAL_LOSE_INFO = 4, }; struct trace_event_raw_signal_generate { struct trace_entry ent; int sig; int errno; int code; char comm[16]; pid_t pid; int group; int result; char __data[0]; }; struct trace_event_raw_signal_deliver { struct trace_entry ent; int sig; int errno; int code; unsigned long sa_handler; unsigned long sa_flags; char __data[0]; }; typedef unsigned long old_sigset_t; typedef u32 compat_old_sigset_t; struct compat_sigaction { compat_uptr_t sa_handler; compat_ulong_t sa_flags; compat_uptr_t sa_restorer; compat_sigset_t sa_mask; }; struct compat_old_sigaction { compat_uptr_t sa_handler; compat_old_sigset_t sa_mask; compat_ulong_t sa_flags; compat_uptr_t sa_restorer; }; struct trace_event_data_offsets_signal_generate {}; struct trace_event_data_offsets_signal_deliver {}; enum uts_proc { UTS_PROC_ARCH = 0, UTS_PROC_OSTYPE = 1, UTS_PROC_OSRELEASE = 2, UTS_PROC_VERSION = 3, UTS_PROC_HOSTNAME = 4, UTS_PROC_DOMAINNAME = 5, }; struct tms { __kernel_clock_t tms_utime; __kernel_clock_t tms_stime; __kernel_clock_t tms_cutime; __kernel_clock_t tms_cstime; }; struct compat_tms { compat_clock_t tms_utime; compat_clock_t tms_stime; compat_clock_t tms_cutime; compat_clock_t tms_cstime; }; struct compat_rlimit { compat_ulong_t rlim_cur; compat_ulong_t rlim_max; }; struct rlimit64 { __u64 rlim_cur; __u64 rlim_max; }; struct getcpu_cache { unsigned long blob[16]; }; struct sysinfo { __kernel_long_t uptime; __kernel_ulong_t loads[3]; __kernel_ulong_t totalram; __kernel_ulong_t freeram; __kernel_ulong_t sharedram; __kernel_ulong_t bufferram; __kernel_ulong_t totalswap; __kernel_ulong_t freeswap; __u16 procs; __u16 pad; __kernel_ulong_t totalhigh; __kernel_ulong_t freehigh; __u32 mem_unit; char _f[0]; }; struct compat_sysinfo { s32 uptime; u32 loads[3]; u32 totalram; u32 freeram; u32 sharedram; u32 bufferram; u32 totalswap; u32 freeswap; u16 procs; u16 pad; u32 totalhigh; u32 freehigh; u32 mem_unit; char _f[8]; }; struct prctl_mm_map { __u64 start_code; __u64 end_code; __u64 start_data; __u64 end_data; __u64 start_brk; __u64 brk; __u64 start_stack; __u64 arg_start; __u64 arg_end; __u64 env_start; __u64 env_end; __u64 *auxv; __u32 auxv_size; __u32 exe_fd; }; struct wq_flusher; struct worker; struct workqueue_attrs; struct pool_workqueue; struct wq_device; struct workqueue_struct { struct list_head pwqs; struct list_head list; struct mutex mutex; int work_color; int flush_color; atomic_t nr_pwqs_to_flush; struct wq_flusher *first_flusher; struct list_head flusher_queue; struct list_head flusher_overflow; struct list_head maydays; struct worker *rescuer; int nr_drainers; int saved_max_active; struct workqueue_attrs *unbound_attrs; struct pool_workqueue *dfl_pwq; struct wq_device *wq_dev; char name[24]; struct callback_head rcu; long: 64; long: 64; long: 64; unsigned int flags; struct pool_workqueue __attribute__((btf_type_tag("percpu"))) __attribute__((btf_type_tag("rcu"))) **cpu_pwq; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct wq_flusher { struct list_head list; int flush_color; struct completion done; }; struct worker_pool; struct worker { union { struct list_head entry; struct hlist_node hentry; }; struct work_struct *current_work; work_func_t 
current_func; struct pool_workqueue *current_pwq; u64 current_at; unsigned int current_color; int sleeping; work_func_t last_func; struct list_head scheduled; struct task_struct *task; struct worker_pool *pool; struct list_head node; unsigned long last_active; unsigned int flags; int id; char desc[24]; struct workqueue_struct *rescue_wq; }; struct kthread_work; typedef void (*kthread_work_func_t)(struct kthread_work *); struct kthread_worker; struct kthread_work { struct list_head node; kthread_work_func_t func; struct kthread_worker *worker; int canceling; }; struct pool_workqueue { struct worker_pool *pool; struct workqueue_struct *wq; int work_color; int flush_color; int refcnt; int nr_in_flight[16]; int nr_active; int max_active; struct list_head inactive_works; struct list_head pwqs_node; struct list_head mayday_node; u64 stats[8]; struct kthread_work release_work; struct callback_head rcu; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct ida { struct xarray xa; }; struct worker_pool { raw_spinlock_t lock; int cpu; int node; int id; unsigned int flags; unsigned long watchdog_ts; bool cpu_stall; int nr_running; struct list_head worklist; int nr_workers; int nr_idle; struct list_head idle_list; struct timer_list idle_timer; struct work_struct idle_cull_work; struct timer_list mayday_timer; struct hlist_head busy_hash[64]; struct worker *manager; struct list_head workers; struct list_head dying_workers; struct completion *detach_completion; struct ida worker_ida; struct workqueue_attrs *attrs; struct hlist_node hash_node; int refcnt; struct callback_head rcu; }; enum wq_affn_scope { WQ_AFFN_DFL = 0, WQ_AFFN_CPU = 1, WQ_AFFN_SMT = 2, WQ_AFFN_CACHE = 3, WQ_AFFN_NUMA = 4, WQ_AFFN_SYSTEM = 5, WQ_AFFN_NR_TYPES = 6, }; struct workqueue_attrs { int nice; cpumask_var_t cpumask; cpumask_var_t __pod_cpumask; bool affn_strict; enum wq_affn_scope affn_scope; bool ordered; }; struct kthread_worker { unsigned int flags; raw_spinlock_t lock; struct list_head work_list; struct list_head delayed_work_list; struct task_struct *task; struct kthread_work *current_work; }; struct wq_device { struct workqueue_struct *wq; struct device dev; }; typedef void (*btf_trace_workqueue_queue_work)(void *, int, struct pool_workqueue *, struct work_struct *); typedef void (*btf_trace_workqueue_activate_work)(void *, struct work_struct *); typedef void (*btf_trace_workqueue_execute_start)(void *, struct work_struct *); typedef void (*btf_trace_workqueue_execute_end)(void *, struct work_struct *, work_func_t); struct wq_pod_type { int nr_pods; cpumask_var_t *pod_cpus; int *pod_node; int *cpu_pod; }; enum { POOL_MANAGER_ACTIVE = 1, POOL_DISASSOCIATED = 4, WORKER_DIE = 2, WORKER_IDLE = 4, WORKER_PREP = 8, WORKER_CPU_INTENSIVE = 64, WORKER_UNBOUND = 128, WORKER_REBOUND = 256, WORKER_NOT_RUNNING = 456, NR_STD_WORKER_POOLS = 2, UNBOUND_POOL_HASH_ORDER = 6, BUSY_WORKER_HASH_ORDER = 6, MAX_IDLE_WORKERS_RATIO = 4, IDLE_WORKER_TIMEOUT = 75000, MAYDAY_INITIAL_TIMEOUT = 2, MAYDAY_INTERVAL = 25, CREATE_COOLDOWN = 250, RESCUER_NICE_LEVEL = -20, HIGHPRI_NICE_LEVEL = -20, WQ_NAME_LEN = 24, }; enum pool_workqueue_stats { PWQ_STAT_STARTED = 0, PWQ_STAT_COMPLETED = 1, PWQ_STAT_CPU_TIME = 2, PWQ_STAT_CPU_INTENSIVE = 3, PWQ_STAT_CM_WAKEUP = 4, PWQ_STAT_REPATRIATED = 5, PWQ_STAT_MAYDAY 
= 6, PWQ_STAT_RESCUED = 7, PWQ_NR_STATS = 8, }; enum { WQ_UNBOUND = 2, WQ_FREEZABLE = 4, WQ_MEM_RECLAIM = 8, WQ_HIGHPRI = 16, WQ_CPU_INTENSIVE = 32, WQ_SYSFS = 64, WQ_POWER_EFFICIENT = 128, __WQ_DESTROYING = 32768, __WQ_DRAINING = 65536, __WQ_ORDERED = 131072, __WQ_LEGACY = 262144, __WQ_ORDERED_EXPLICIT = 524288, WQ_MAX_ACTIVE = 512, WQ_UNBOUND_MAX_ACTIVE = 512, WQ_DFL_ACTIVE = 256, }; enum xa_lock_type { XA_LOCK_IRQ = 1, XA_LOCK_BH = 2, }; struct trace_event_raw_workqueue_queue_work { struct trace_entry ent; void *work; void *function; u32 __data_loc_workqueue; int req_cpu; int cpu; char __data[0]; }; struct trace_event_raw_workqueue_activate_work { struct trace_entry ent; void *work; char __data[0]; }; struct trace_event_raw_workqueue_execute_start { struct trace_entry ent; void *work; void *function; char __data[0]; }; struct trace_event_raw_workqueue_execute_end { struct trace_entry ent; void *work; void *function; char __data[0]; }; struct wq_barrier { struct work_struct work; struct completion done; struct task_struct *task; }; struct cwt_wait { wait_queue_entry_t wait; struct work_struct *work; }; struct __una_u32 { u32 x; }; struct work_for_cpu { struct work_struct work; long (*fn)(void *); void *arg; long ret; }; struct apply_wqattrs_ctx { struct workqueue_struct *wq; struct workqueue_attrs *attrs; struct list_head list; struct pool_workqueue *dfl_pwq; struct pool_workqueue *pwq_tbl[0]; }; struct trace_event_data_offsets_workqueue_queue_work { u32 workqueue; }; struct pr_cont_work_struct { bool comma; work_func_t func; long ctr; }; struct trace_event_data_offsets_workqueue_activate_work {}; struct trace_event_data_offsets_workqueue_execute_start {}; struct trace_event_data_offsets_workqueue_execute_end {}; struct execute_work { struct work_struct work; }; typedef struct {} local_lock_t; enum task_work_notify_mode { TWA_NONE = 0, TWA_RESUME = 1, TWA_SIGNAL = 2, TWA_SIGNAL_NO_IPI = 3, }; typedef void (*task_work_func_t)(struct callback_head *); struct param_attribute { struct module_attribute mattr; const struct kernel_param *param; }; struct module_param_attrs { unsigned int num; struct attribute_group grp; struct param_attribute attrs[0]; }; enum { KERNEL_PARAM_OPS_FL_NOARG = 1, }; enum { KERNEL_PARAM_FL_UNSAFE = 1, KERNEL_PARAM_FL_HWPARAM = 2, }; enum lockdown_reason { LOCKDOWN_NONE = 0, LOCKDOWN_MODULE_SIGNATURE = 1, LOCKDOWN_DEV_MEM = 2, LOCKDOWN_EFI_TEST = 3, LOCKDOWN_KEXEC = 4, LOCKDOWN_HIBERNATION = 5, LOCKDOWN_PCI_ACCESS = 6, LOCKDOWN_IOPORT = 7, LOCKDOWN_MSR = 8, LOCKDOWN_ACPI_TABLES = 9, LOCKDOWN_DEVICE_TREE = 10, LOCKDOWN_PCMCIA_CIS = 11, LOCKDOWN_TIOCSSERIAL = 12, LOCKDOWN_MODULE_PARAMETERS = 13, LOCKDOWN_MMIOTRACE = 14, LOCKDOWN_DEBUGFS = 15, LOCKDOWN_XMON_WR = 16, LOCKDOWN_BPF_WRITE_USER = 17, LOCKDOWN_DBG_WRITE_KERNEL = 18, LOCKDOWN_RTAS_ERROR_INJECTION = 19, LOCKDOWN_INTEGRITY_MAX = 20, LOCKDOWN_KCORE = 21, LOCKDOWN_KPROBES = 22, LOCKDOWN_BPF_READ_KERNEL = 23, LOCKDOWN_DBG_READ_KERNEL = 24, LOCKDOWN_PERF = 25, LOCKDOWN_TRACEFS = 26, LOCKDOWN_XMON_RW = 27, LOCKDOWN_XFRM_SECRET = 28, LOCKDOWN_CONFIDENTIALITY_MAX = 29, }; struct module_version_attribute { struct module_attribute mattr; const char *module_name; const char *version; }; struct kmalloced_param { struct list_head list; char val[0]; }; struct sched_param { int sched_priority; }; enum KTHREAD_BITS { KTHREAD_IS_PER_CPU = 0, KTHREAD_SHOULD_STOP = 1, KTHREAD_SHOULD_PARK = 2, }; enum { KTW_FREEZABLE = 1, }; enum { CSS_NO_REF = 1, CSS_ONLINE = 2, CSS_RELEASED = 4, CSS_VISIBLE = 8, CSS_DYING = 16, }; enum { 
__PERCPU_REF_ATOMIC = 1, __PERCPU_REF_DEAD = 2, __PERCPU_REF_ATOMIC_DEAD = 3, __PERCPU_REF_FLAG_BITS = 2, }; struct kthread_create_info { char *full_name; int (*threadfn)(void *); void *data; int node; struct task_struct *result; struct completion *done; struct list_head list; }; struct kthread_delayed_work { struct kthread_work work; struct timer_list timer; }; struct kthread_flush_work { struct kthread_work work; struct completion done; }; struct kthread { unsigned long flags; unsigned int cpu; int result; int (*threadfn)(void *); void *data; struct completion parked; struct completion exited; struct cgroup_subsys_state *blkcg_css; char *full_name; }; struct ipc_ids { int in_use; unsigned short seq; struct rw_semaphore rwsem; struct idr ipcs_idr; int max_idx; int last_idx; struct rhashtable key_ht; }; struct ipc_namespace { struct ipc_ids ids[3]; int sem_ctls[4]; int used_sems; unsigned int msg_ctlmax; unsigned int msg_ctlmnb; unsigned int msg_ctlmni; struct percpu_counter percpu_msg_bytes; struct percpu_counter percpu_msg_hdrs; size_t shm_ctlmax; size_t shm_ctlall; unsigned long shm_tot; int shm_ctlmni; int shm_rmid_forced; struct notifier_block ipcns_nb; struct vfsmount *mq_mnt; unsigned int mq_queues_count; unsigned int mq_queues_max; unsigned int mq_msg_max; unsigned int mq_msgsize_max; unsigned int mq_msg_default; unsigned int mq_msgsize_default; struct ctl_table_set mq_set; struct ctl_table_header *mq_sysctls; struct ctl_table_set ipc_set; struct ctl_table_header *ipc_sysctls; struct user_namespace *user_ns; struct ucounts *ucounts; struct llist_node mnt_llist; struct ns_common ns; }; typedef void (*btf_trace_notifier_register)(void *, void *); typedef void (*btf_trace_notifier_unregister)(void *, void *); typedef void (*btf_trace_notifier_run)(void *, void *); struct trace_event_raw_notifier_info { struct trace_entry ent; void *cb; char __data[0]; }; struct trace_event_data_offsets_notifier_info {}; struct srcu_notifier_head { struct mutex mutex; struct srcu_usage srcuu; struct srcu_struct srcu; struct notifier_block __attribute__((btf_type_tag("rcu"))) *head; }; struct die_args { struct pt_regs *regs; const char *str; long err; int trapnr; int signr; }; enum proc_cn_event { PROC_EVENT_NONE = 0, PROC_EVENT_FORK = 1, PROC_EVENT_EXEC = 2, PROC_EVENT_UID = 4, PROC_EVENT_GID = 64, PROC_EVENT_SID = 128, PROC_EVENT_PTRACE = 256, PROC_EVENT_COMM = 512, PROC_EVENT_NONZERO_EXIT = 536870912, PROC_EVENT_COREDUMP = 1073741824, PROC_EVENT_EXIT = 2147483648, }; enum reboot_type { BOOT_TRIPLE = 116, BOOT_KBD = 107, BOOT_BIOS = 98, BOOT_ACPI = 97, BOOT_EFI = 101, BOOT_CF9_FORCE = 112, BOOT_CF9_SAFE = 113, }; enum sys_off_mode { SYS_OFF_MODE_POWER_OFF_PREPARE = 0, SYS_OFF_MODE_POWER_OFF = 1, SYS_OFF_MODE_RESTART_PREPARE = 2, SYS_OFF_MODE_RESTART = 3, }; struct sys_off_data; struct sys_off_handler { struct notifier_block nb; int (*sys_off_cb)(struct sys_off_data *); void *cb_data; enum sys_off_mode mode; bool blocking; void *list; }; struct sys_off_data { int mode; void *cb_data; const char *cmd; }; struct async_entry { struct list_head domain_list; struct list_head global_list; struct work_struct work; async_cookie_t cookie; async_func_t func; void *data; struct async_domain *domain; }; enum { HP_THREAD_NONE = 0, HP_THREAD_ACTIVE = 1, HP_THREAD_PARKED = 2, }; struct smpboot_thread_data { unsigned int cpu; unsigned int status; struct smp_hotplug_thread *ht; }; enum vhost_task_flags { VHOST_TASK_FLAGS_STOP = 0, }; struct vhost_task { bool (*fn)(void *); void *data; struct completion exited; unsigned 
long flags; struct task_struct *task; }; struct cfs_rq { struct load_weight load; unsigned int nr_running; unsigned int h_nr_running; unsigned int idle_nr_running; unsigned int idle_h_nr_running; s64 avg_vruntime; u64 avg_load; u64 exec_clock; u64 min_vruntime; struct rb_root_cached tasks_timeline; struct sched_entity *curr; struct sched_entity *next; unsigned int nr_spread_over; long: 64; long: 64; long: 64; struct sched_avg avg; struct { raw_spinlock_t lock; int nr; unsigned long load_avg; unsigned long util_avg; unsigned long runnable_avg; long: 64; long: 64; long: 64; long: 64; } removed; unsigned long tg_load_avg_contrib; long propagate; long prop_runnable_sum; unsigned long h_load; u64 last_h_load_update; struct sched_entity *h_load_next; bool decayed; struct rq *rq; int on_list; struct list_head leaf_cfs_rq_list; struct task_group *tg; int idle; long: 64; long: 64; long: 64; }; struct __call_single_data { struct __call_single_node node; smp_call_func_t func; void *info; }; typedef struct __call_single_data call_single_data_t; struct uclamp_bucket { unsigned long value: 11; unsigned long tasks: 53; }; struct uclamp_rq { unsigned int value; struct uclamp_bucket bucket[20]; }; struct rt_prio_array { unsigned long bitmap[2]; struct list_head queue[100]; }; struct rt_rq { struct rt_prio_array active; unsigned int rt_nr_running; unsigned int rr_nr_running; struct { int curr; int next; } highest_prio; unsigned int rt_nr_migratory; unsigned int rt_nr_total; int overloaded; struct plist_head pushable_tasks; int rt_queued; int rt_throttled; u64 rt_time; u64 rt_runtime; raw_spinlock_t rt_runtime_lock; }; struct dl_rq { struct rb_root_cached root; unsigned int dl_nr_running; struct { u64 curr; u64 next; } earliest_dl; unsigned int dl_nr_migratory; int overloaded; struct rb_root_cached pushable_dl_tasks_root; u64 running_bw; u64 this_bw; u64 extra_bw; u64 max_bw; u64 bw_ratio; }; struct cpu_stop_done; struct cpu_stop_work { struct list_head list; cpu_stop_fn_t fn; unsigned long caller; void *arg; struct cpu_stop_done *done; }; enum misfit_reason { MISFIT_PERF = 0, }; typedef enum misfit_reason misfit_reason_t; struct root_domain; struct sched_domain; struct balance_callback; struct cpuidle_state; struct rq { raw_spinlock_t __lock; unsigned int nr_running; unsigned long last_blocked_load_update_tick; unsigned int has_blocked_load; long: 64; call_single_data_t nohz_csd; unsigned int nohz_tick_stopped; atomic_t nohz_flags; unsigned int ttwu_pending; u64 nr_switches; long: 64; long: 64; long: 64; long: 64; long: 64; struct uclamp_rq uclamp[2]; unsigned int uclamp_flags; long: 64; long: 64; long: 64; long: 64; long: 64; struct cfs_rq cfs; struct rt_rq rt; struct dl_rq dl; struct list_head leaf_cfs_rq_list; struct list_head *tmp_alone_branch; unsigned int nr_uninterruptible; struct task_struct __attribute__((btf_type_tag("rcu"))) *curr; struct task_struct *idle; struct task_struct *stop; unsigned long next_balance; struct mm_struct *prev_mm; unsigned int clock_update_flags; u64 clock; long: 64; long: 64; long: 64; long: 64; u64 clock_task; u64 clock_task_mult; u64 clock_pelt; unsigned long lost_idle_time; u64 clock_pelt_idle; u64 clock_idle; atomic_t nr_iowait; u64 last_seen_need_resched_ns; int ticks_without_resched; int membarrier_state; struct root_domain *rd; struct sched_domain __attribute__((btf_type_tag("rcu"))) *sd; unsigned long cpu_capacity; unsigned long cpu_capacity_orig; struct balance_callback *balance_callback; unsigned char nohz_idle_balance; unsigned char idle_balance; unsigned long 
misfit_task_load; int active_balance; int push_cpu; struct cpu_stop_work active_balance_work; int cpu; int online; struct list_head cfs_tasks; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct sched_avg avg_rt; struct sched_avg avg_dl; struct sched_avg avg_irq; struct sched_avg avg_thermal; u64 idle_stamp; u64 avg_idle; unsigned long wake_stamp; u64 wake_avg_idle; u64 max_idle_balance_cost; struct rcuwait hotplug_wait; u64 prev_irq_time; u64 prev_steal_time; u64 prev_steal_time_rq; unsigned long calc_load_update; long calc_load_active; long: 64; call_single_data_t hrtick_csd; struct hrtimer hrtick_timer; ktime_t hrtick_time; struct sched_info rq_sched_info; unsigned long long rq_cpu_time; unsigned int yld_count; unsigned int sched_count; unsigned int sched_goidle; unsigned int ttwu_count; unsigned int ttwu_local; struct cpuidle_state *idle_state; unsigned int nr_pinned; unsigned int push_busy; struct cpu_stop_work push_work; cpumask_var_t scratch_mask; misfit_reason_t misfit_reason; u64 android_oem_data1[16]; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct dl_bw { raw_spinlock_t lock; u64 bw; u64 total_bw; }; struct cpudl_item; struct cpudl { raw_spinlock_t lock; int size; cpumask_var_t free_cpus; struct cpudl_item *elements; }; struct cpupri_vec { atomic_t count; cpumask_var_t mask; }; struct cpupri { struct cpupri_vec pri_to_cpu[101]; int *cpu_to_pri; }; struct perf_domain; struct root_domain { atomic_t refcount; atomic_t rto_count; struct callback_head rcu; cpumask_var_t span; cpumask_var_t online; int overload; int overutilized; cpumask_var_t dlo_mask; atomic_t dlo_count; struct dl_bw dl_bw; struct cpudl cpudl; u64 visit_gen; struct irq_work rto_push_work; raw_spinlock_t rto_lock; int rto_loop; int rto_cpu; atomic_t rto_loop_next; atomic_t rto_loop_start; cpumask_var_t rto_mask; struct cpupri cpupri; unsigned long max_cpu_capacity; struct perf_domain __attribute__((btf_type_tag("rcu"))) *pd; u64 android_vendor_data1; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct cpudl_item { u64 dl; int cpu; int idx; }; struct perf_domain { struct em_perf_domain *em_pd; struct perf_domain *next; struct callback_head rcu; }; struct sched_group; struct sched_domain_shared; struct sched_domain { struct sched_domain __attribute__((btf_type_tag("rcu"))) *parent; struct sched_domain __attribute__((btf_type_tag("rcu"))) *child; struct sched_group *groups; unsigned long min_interval; unsigned long max_interval; unsigned int busy_factor; unsigned int imbalance_pct; unsigned int cache_nice_tries; unsigned int imb_numa_nr; int nohz_idle; int flags; int level; unsigned long last_balance; unsigned int balance_interval; unsigned int nr_balance_failed; u64 max_newidle_lb_cost; unsigned long last_decay_max_lb_cost; u64 avg_scan_cost; unsigned int lb_count[3]; unsigned int lb_failed[3]; unsigned int lb_balanced[3]; unsigned int lb_imbalance[3]; unsigned int lb_gained[3]; unsigned int lb_hot_gained[3]; unsigned int lb_nobusyg[3]; unsigned int lb_nobusyq[3]; unsigned int alb_count; unsigned int alb_failed; unsigned int alb_pushed; unsigned int sbe_count; unsigned int sbe_balanced; unsigned int sbe_pushed; unsigned int sbf_count; unsigned int sbf_balanced; unsigned int sbf_pushed; unsigned int ttwu_wake_remote; unsigned int ttwu_move_affine; unsigned int ttwu_move_balance; char *name; union { void *private; struct callback_head rcu; }; struct 
sched_domain_shared *shared; unsigned int span_weight; u64 android_kabi_reserved1; u64 android_kabi_reserved2; unsigned long span[0]; }; struct sched_group_capacity; struct sched_group { struct sched_group *next; atomic_t ref; unsigned int group_weight; unsigned int cores; struct sched_group_capacity *sgc; int asym_prefer_cpu; int flags; unsigned long cpumask[0]; }; struct sched_group_capacity { atomic_t ref; unsigned long capacity; unsigned long min_capacity; unsigned long max_capacity; unsigned long next_update; int imbalance; int id; unsigned long cpumask[0]; }; struct sched_domain_shared { atomic_t ref; atomic_t nr_busy_cpus; int has_idle_cores; int nr_idle_scan; }; struct balance_callback { struct balance_callback *next; void (*func)(struct rq *); }; struct cfs_bandwidth {}; struct task_group { struct cgroup_subsys_state css; struct sched_entity **se; struct cfs_rq **cfs_rq; unsigned long shares; int idle; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; atomic_long_t load_avg; struct callback_head rcu; struct list_head list; struct task_group *parent; struct list_head siblings; struct list_head children; struct cfs_bandwidth cfs_bandwidth; unsigned int uclamp_pct[2]; struct uclamp_se uclamp_req[2]; struct uclamp_se uclamp[2]; unsigned int latency_sensitive; u64 android_vendor_data1[4]; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; long: 64; long: 64; }; struct pin_cookie {}; struct rq_flags { unsigned long flags; struct pin_cookie cookie; unsigned int clock_update_flags; }; struct affinity_context { const struct cpumask *new_mask; struct cpumask *user_mask; unsigned int flags; }; struct io_ring_ctx; struct io_wq; struct io_uring_task { int cached_refs; const struct io_ring_ctx *last; struct io_wq *io_wq; struct file *registered_rings[16]; struct xarray xa; struct wait_queue_head wait; atomic_t in_cancel; atomic_t inflight_tracked; struct percpu_counter inflight; long: 64; long: 64; struct { struct llist_head task_list; struct callback_head task_work; long: 64; long: 64; long: 64; long: 64; long: 64; }; }; struct io_fixed_file; struct io_file_table { struct io_fixed_file *files; unsigned long *bitmap; unsigned int alloc_hint; }; struct io_wq_work_node { struct io_wq_work_node *next; }; struct io_wq_work_list { struct io_wq_work_node *first; struct io_wq_work_node *last; }; struct io_kiocb; struct io_submit_link { struct io_kiocb *head; struct io_kiocb *last; }; struct io_submit_state { struct io_wq_work_node free_list; struct io_wq_work_list compl_reqs; struct io_submit_link link; bool plug_started; bool need_plug; unsigned short submit_nr; unsigned int cqes_count; struct blk_plug plug; }; struct io_hash_bucket; struct io_hash_table { struct io_hash_bucket *hbs; unsigned int hash_bits; }; struct io_alloc_cache { struct io_wq_work_node list; unsigned int nr_cached; unsigned int max_cached; size_t elem_size; }; struct io_uring_cqe { __u64 user_data; __s32 res; __u32 flags; __u64 big_cqe[0]; }; struct io_restriction { unsigned long register_op[1]; unsigned long sqe_op[1]; u8 sqe_flags_allowed; u8 sqe_flags_required; bool registered; }; struct io_rings; struct io_rsrc_node; struct io_mapped_ubuf; struct io_ev_fd; struct io_sq_data; struct io_rsrc_data; struct io_wq_hash; struct io_ring_ctx { struct { unsigned int flags; unsigned int drain_next: 1; unsigned int restricted: 1; unsigned int off_timeout_used: 1; unsigned int drain_active: 1; unsigned int has_evfd: 1; unsigned int task_complete: 1; unsigned 
int lockless_cq: 1; unsigned int syscall_iopoll: 1; unsigned int poll_activated: 1; unsigned int drain_disabled: 1; unsigned int compat: 1; struct task_struct *submitter_task; struct io_rings *rings; struct percpu_ref refs; enum task_work_notify_mode notify_method; long: 64; long: 64; }; struct { struct mutex uring_lock; u32 *sq_array; struct io_uring_sqe *sq_sqes; unsigned int cached_sq_head; unsigned int sq_entries; struct io_rsrc_node *rsrc_node; atomic_t cancel_seq; struct io_file_table file_table; unsigned int nr_user_files; unsigned int nr_user_bufs; struct io_mapped_ubuf **user_bufs; struct io_submit_state submit_state; struct xarray io_bl_xa; struct io_hash_table cancel_table_locked; struct io_alloc_cache apoll_cache; struct io_alloc_cache netmsg_cache; struct io_wq_work_list iopoll_list; bool poll_multi_queue; }; struct { struct io_uring_cqe *cqe_cached; struct io_uring_cqe *cqe_sentinel; unsigned int cached_cq_tail; unsigned int cq_entries; struct io_ev_fd __attribute__((btf_type_tag("rcu"))) *io_ev_fd; unsigned int cq_extra; long: 64; long: 64; long: 64; }; struct { struct llist_head work_llist; unsigned long check_cq; atomic_t cq_wait_nr; atomic_t cq_timeouts; struct wait_queue_head cq_wait; long: 64; long: 64; }; struct { spinlock_t timeout_lock; struct list_head timeout_list; struct list_head ltimeout_list; unsigned int cq_last_tm_flush; long: 64; long: 64; }; struct io_uring_cqe completion_cqes[16]; spinlock_t completion_lock; struct io_wq_work_list locked_free_list; unsigned int locked_free_nr; struct list_head io_buffers_comp; struct list_head cq_overflow_list; struct io_hash_table cancel_table; const struct cred *sq_creds; struct io_sq_data *sq_data; struct wait_queue_head sqo_sq_wait; struct list_head sqd_list; unsigned int file_alloc_start; unsigned int file_alloc_end; struct xarray personalities; u32 pers_next; struct list_head io_buffers_cache; struct hlist_head io_buf_list; struct wait_queue_head poll_wq; struct io_restriction restrictions; struct io_mapped_ubuf *dummy_ubuf; struct io_rsrc_data *file_data; struct io_rsrc_data *buf_data; struct list_head rsrc_ref_list; struct io_alloc_cache rsrc_node_cache; struct wait_queue_head rsrc_quiesce_wq; unsigned int rsrc_quiesce; struct list_head io_buffers_pages; struct io_wq_hash *hash_map; struct user_struct *user; struct mm_struct *mm_account; struct llist_head fallback_llist; struct delayed_work fallback_work; struct work_struct exit_work; struct list_head tctx_list; struct completion ref_comp; u32 iowq_limits[2]; bool iowq_limits_set; struct callback_head poll_wq_task_work; struct list_head defer_list; unsigned int sq_thread_idle; unsigned int evfd_last_cq_tail; unsigned short n_ring_pages; unsigned short n_sqe_pages; struct page **ring_pages; struct page **sqe_pages; long: 64; }; struct io_uring { u32 head; u32 tail; }; struct io_rings { struct io_uring sq; struct io_uring cq; u32 sq_ring_mask; u32 cq_ring_mask; u32 sq_ring_entries; u32 cq_ring_entries; u32 sq_dropped; atomic_t sq_flags; u32 cq_flags; u32 cq_overflow; long: 64; long: 64; struct io_uring_cqe cqes[0]; }; struct io_fixed_file { unsigned long file_ptr; }; struct io_cmd_data { struct file *file; __u8 data[56]; }; struct io_cqe { __u64 user_data; __s32 res; union { __u32 flags; int fd; }; }; struct io_tw_state; typedef void (*io_req_tw_func_t)(struct io_kiocb *, struct io_tw_state *); struct io_task_work { struct llist_node node; io_req_tw_func_t func; }; struct io_wq_work { struct io_wq_work_node list; unsigned int flags; int cancel_seq; }; struct 
io_buffer; struct io_buffer_list; struct async_poll; struct io_kiocb { union { struct file *file; struct io_cmd_data cmd; }; u8 opcode; u8 iopoll_completed; u16 buf_index; unsigned int flags; struct io_cqe cqe; struct io_ring_ctx *ctx; struct task_struct *task; struct io_rsrc_node *rsrc_node; union { struct io_mapped_ubuf *imu; struct io_buffer *kbuf; struct io_buffer_list *buf_list; }; union { struct io_wq_work_node comp_list; __poll_t apoll_events; }; atomic_t refs; atomic_t poll_refs; struct io_task_work io_task_work; unsigned int nr_tw; struct hlist_node hash_node; struct async_poll *apoll; void *async_data; struct io_kiocb *link; const struct cred *creds; struct io_wq_work work; struct { u64 extra1; u64 extra2; } big_cqe; }; struct io_tw_state { bool locked; }; struct io_hash_bucket { spinlock_t lock; struct hlist_head list; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct io_ev_fd { struct eventfd_ctx *cq_ev_fd; unsigned int eventfd_async: 1; struct callback_head rcu; atomic_t refs; atomic_t ops; }; struct io_wq_hash { refcount_t refs; unsigned long map; struct wait_queue_head wait; }; typedef void (*btf_trace_sched_kthread_stop)(void *, struct task_struct *); typedef void (*btf_trace_sched_kthread_stop_ret)(void *, int); typedef void (*btf_trace_sched_kthread_work_queue_work)(void *, struct kthread_worker *, struct kthread_work *); typedef void (*btf_trace_sched_kthread_work_execute_start)(void *, struct kthread_work *); typedef void (*btf_trace_sched_kthread_work_execute_end)(void *, struct kthread_work *, kthread_work_func_t); typedef void (*btf_trace_sched_waking)(void *, struct task_struct *); typedef void (*btf_trace_sched_wakeup)(void *, struct task_struct *); typedef void (*btf_trace_sched_wakeup_new)(void *, struct task_struct *); typedef void (*btf_trace_sched_switch)(void *, bool, struct task_struct *, struct task_struct *, unsigned int); typedef void (*btf_trace_sched_migrate_task)(void *, struct task_struct *, int); typedef void (*btf_trace_sched_process_free)(void *, struct task_struct *); typedef void (*btf_trace_sched_process_exit)(void *, struct task_struct *); typedef void (*btf_trace_sched_wait_task)(void *, struct task_struct *); typedef void (*btf_trace_sched_process_wait)(void *, struct pid *); typedef void (*btf_trace_sched_process_fork)(void *, struct task_struct *, struct task_struct *); typedef void (*btf_trace_sched_process_exec)(void *, struct task_struct *, pid_t, struct linux_binprm *); typedef void (*btf_trace_sched_stat_wait)(void *, struct task_struct *, u64); typedef void (*btf_trace_sched_stat_sleep)(void *, struct task_struct *, u64); typedef void (*btf_trace_sched_stat_iowait)(void *, struct task_struct *, u64); typedef void (*btf_trace_sched_stat_blocked)(void *, struct task_struct *, u64); typedef void (*btf_trace_sched_blocked_reason)(void *, struct task_struct *); typedef void (*btf_trace_sched_stat_runtime)(void *, struct task_struct *, u64, u64); typedef void (*btf_trace_sched_pi_setprio)(void *, struct task_struct *, struct task_struct *); typedef void (*btf_trace_sched_process_hang)(void *, struct task_struct *); typedef void (*btf_trace_sched_move_numa)(void *, struct task_struct *, int, int); typedef void (*btf_trace_sched_stick_numa)(void *, struct task_struct *, int, struct task_struct *, int); typedef void (*btf_trace_sched_swap_numa)(void *, struct task_struct *, int, struct task_struct *, int); typedef void (*btf_trace_sched_wake_idle_without_ipi)(void *, int); typedef void (*btf_trace_pelt_cfs_tp)(void *, 
struct cfs_rq *); typedef void (*btf_trace_pelt_rt_tp)(void *, struct rq *); typedef void (*btf_trace_pelt_dl_tp)(void *, struct rq *); typedef void (*btf_trace_pelt_thermal_tp)(void *, struct rq *); typedef void (*btf_trace_pelt_irq_tp)(void *, struct rq *); typedef void (*btf_trace_pelt_se_tp)(void *, struct sched_entity *); typedef void (*btf_trace_sched_cpu_capacity_tp)(void *, struct rq *); typedef void (*btf_trace_sched_overutilized_tp)(void *, struct root_domain *, bool); typedef void (*btf_trace_sched_util_est_cfs_tp)(void *, struct cfs_rq *); typedef void (*btf_trace_sched_util_est_se_tp)(void *, struct sched_entity *); typedef void (*btf_trace_sched_update_nr_running_tp)(void *, struct rq *, int); typedef void (*btf_trace_ipi_raise)(void *, const struct cpumask *, const char *); typedef void (*btf_trace_ipi_send_cpu)(void *, const unsigned int, unsigned long, void *); typedef void (*btf_trace_ipi_send_cpumask)(void *, const struct cpumask *, unsigned long, void *); typedef void (*btf_trace_ipi_entry)(void *, const char *); typedef void (*btf_trace_ipi_exit)(void *, const char *); struct kernel_stat { unsigned long irqs_sum; unsigned int softirqs[10]; }; struct kernel_cpustat { u64 cpustat[10]; }; enum { __SCHED_FEAT_PLACE_LAG = 0, __SCHED_FEAT_PLACE_DEADLINE_INITIAL = 1, __SCHED_FEAT_RUN_TO_PARITY = 2, __SCHED_FEAT_NEXT_BUDDY = 3, __SCHED_FEAT_CACHE_HOT_BUDDY = 4, __SCHED_FEAT_WAKEUP_PREEMPTION = 5, __SCHED_FEAT_HRTICK = 6, __SCHED_FEAT_HRTICK_DL = 7, __SCHED_FEAT_DOUBLE_TICK = 8, __SCHED_FEAT_NONTASK_CAPACITY = 9, __SCHED_FEAT_TTWU_QUEUE = 10, __SCHED_FEAT_SIS_PROP = 11, __SCHED_FEAT_SIS_UTIL = 12, __SCHED_FEAT_WARN_DOUBLE_CLOCK = 13, __SCHED_FEAT_RT_PUSH_IPI = 14, __SCHED_FEAT_RT_RUNTIME_SHARE = 15, __SCHED_FEAT_LB_MIN = 16, __SCHED_FEAT_ATTACH_AGE_LOAD = 17, __SCHED_FEAT_WA_IDLE = 18, __SCHED_FEAT_WA_WEIGHT = 19, __SCHED_FEAT_WA_BIAS = 20, __SCHED_FEAT_UTIL_EST = 21, __SCHED_FEAT_LATENCY_WARN = 22, __SCHED_FEAT_HZ_BW = 23, __SCHED_FEAT_NR = 24, }; enum uclamp_id { UCLAMP_MIN = 0, UCLAMP_MAX = 1, UCLAMP_CNT = 2, }; enum { cpuset = 0, possible = 1, fail = 2, }; enum cgroup_subsys_id { cpuset_cgrp_id = 0, cpu_cgrp_id = 1, cpuacct_cgrp_id = 2, io_cgrp_id = 3, memory_cgrp_id = 4, freezer_cgrp_id = 5, net_prio_cgrp_id = 6, CGROUP_SUBSYS_COUNT = 7, }; enum cpu_util_type { FREQUENCY_UTIL = 0, ENERGY_UTIL = 1, }; enum psi_task_count { NR_IOWAIT = 0, NR_MEMSTALL = 1, NR_RUNNING = 2, NR_MEMSTALL_RUNNING = 3, NR_PSI_TASK_COUNTS = 4, }; enum { CSD_FLAG_LOCK = 1, IRQ_WORK_PENDING = 1, IRQ_WORK_BUSY = 2, IRQ_WORK_LAZY = 4, IRQ_WORK_HARD_IRQ = 8, IRQ_WORK_CLAIMED = 3, CSD_TYPE_ASYNC = 0, CSD_TYPE_SYNC = 16, CSD_TYPE_IRQ_WORK = 32, CSD_TYPE_TTWU = 48, CSD_FLAG_TYPE_MASK = 240, }; enum { MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = 1, MEMBARRIER_STATE_PRIVATE_EXPEDITED = 2, MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = 4, MEMBARRIER_STATE_GLOBAL_EXPEDITED = 8, MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = 16, MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = 32, MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = 64, MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = 128, }; union cpumask_rcuhead { cpumask_t cpumask; struct callback_head rcu; }; struct trace_event_raw_sched_kthread_stop { struct trace_entry ent; char comm[16]; pid_t pid; char __data[0]; }; struct trace_event_raw_sched_kthread_stop_ret { struct trace_entry ent; int ret; char __data[0]; }; struct trace_event_raw_sched_kthread_work_queue_work { struct trace_entry ent; void *work; void *function; void *worker; char __data[0]; }; struct 
trace_event_raw_sched_kthread_work_execute_start { struct trace_entry ent; void *work; void *function; char __data[0]; }; struct trace_event_raw_sched_kthread_work_execute_end { struct trace_entry ent; void *work; void *function; char __data[0]; }; struct trace_event_raw_sched_wakeup_template { struct trace_entry ent; char comm[16]; pid_t pid; int prio; int target_cpu; char __data[0]; }; struct trace_event_raw_sched_switch { struct trace_entry ent; char prev_comm[16]; pid_t prev_pid; int prev_prio; long prev_state; char next_comm[16]; pid_t next_pid; int next_prio; char __data[0]; }; struct trace_event_raw_sched_migrate_task { struct trace_entry ent; char comm[16]; pid_t pid; int prio; int orig_cpu; int dest_cpu; char __data[0]; }; struct trace_event_raw_sched_process_template { struct trace_entry ent; char comm[16]; pid_t pid; int prio; char __data[0]; }; struct trace_event_raw_sched_process_wait { struct trace_entry ent; char comm[16]; pid_t pid; int prio; char __data[0]; }; struct trace_event_raw_sched_process_fork { struct trace_entry ent; char parent_comm[16]; pid_t parent_pid; char child_comm[16]; pid_t child_pid; char __data[0]; }; struct trace_event_raw_sched_process_exec { struct trace_entry ent; u32 __data_loc_filename; pid_t pid; pid_t old_pid; char __data[0]; }; struct trace_event_raw_sched_stat_template { struct trace_entry ent; char comm[16]; pid_t pid; u64 delay; char __data[0]; }; struct trace_event_raw_sched_blocked_reason { struct trace_entry ent; pid_t pid; void *caller; bool io_wait; char __data[0]; }; struct trace_event_raw_sched_stat_runtime { struct trace_entry ent; char comm[16]; pid_t pid; u64 runtime; u64 vruntime; char __data[0]; }; struct trace_event_raw_sched_pi_setprio { struct trace_entry ent; char comm[16]; pid_t pid; int oldprio; int newprio; char __data[0]; }; struct trace_event_raw_sched_process_hang { struct trace_entry ent; char comm[16]; pid_t pid; char __data[0]; }; struct trace_event_raw_sched_move_numa { struct trace_entry ent; pid_t pid; pid_t tgid; pid_t ngid; int src_cpu; int src_nid; int dst_cpu; int dst_nid; char __data[0]; }; struct trace_event_raw_sched_numa_pair_template { struct trace_entry ent; pid_t src_pid; pid_t src_tgid; pid_t src_ngid; int src_cpu; int src_nid; pid_t dst_pid; pid_t dst_tgid; pid_t dst_ngid; int dst_cpu; int dst_nid; char __data[0]; }; struct trace_event_raw_sched_wake_idle_without_ipi { struct trace_entry ent; int cpu; char __data[0]; }; struct trace_event_raw_ipi_raise { struct trace_entry ent; u32 __data_loc_target_cpus; const char *reason; char __data[0]; }; struct trace_event_raw_ipi_send_cpu { struct trace_entry ent; unsigned int cpu; void *callsite; void *callback; char __data[0]; }; struct trace_event_raw_ipi_send_cpumask { struct trace_entry ent; u32 __data_loc_cpumask; void *callsite; void *callback; char __data[0]; }; struct trace_event_raw_ipi_handler { struct trace_entry ent; const char *reason; char __data[0]; }; struct irqtime { u64 total; u64 tick_delta; u64 irq_start_time; struct u64_stats_sync sync; }; struct sched_attr { __u32 size; __u32 sched_policy; __u64 sched_flags; __s32 sched_nice; __u32 sched_priority; __u64 sched_runtime; __u64 sched_deadline; __u64 sched_period; __u32 sched_util_min; __u32 sched_util_max; }; struct trace_event_data_offsets_sched_process_exec { u32 filename; }; struct trace_event_data_offsets_ipi_raise { u32 target_cpus; }; struct trace_event_data_offsets_ipi_send_cpumask { u32 cpumask; }; struct wake_q_head { struct wake_q_node *first; struct wake_q_node **lastp; int count; 
}; typedef struct { void *lock; } class_rcu_t; typedef struct { raw_spinlock_t *lock; raw_spinlock_t *lock2; } class_double_raw_spinlock_t; typedef struct { struct rq *lock; struct rq *lock2; } class_double_rq_lock_t; typedef struct { struct rq *lock; struct rq_flags rf; } class_rq_lock_irqsave_t; typedef struct { void *lock; } class_preempt_t; typedef struct { raw_spinlock_t *lock; unsigned long flags; } class_raw_spinlock_irqsave_t; struct sched_domain_attr { int relax_domain_level; }; struct rt_bandwidth { raw_spinlock_t rt_runtime_lock; ktime_t rt_period; u64 rt_runtime; struct hrtimer rt_period_timer; unsigned int rt_period_active; }; struct set_affinity_pending; struct migration_arg { struct task_struct *task; int dest_cpu; struct set_affinity_pending *pending; }; struct set_affinity_pending { refcount_t refs; unsigned int stop_pending; struct completion done; struct cpu_stop_work stop_work; struct migration_arg arg; }; struct css_task_iter { struct cgroup_subsys *ss; unsigned int flags; struct list_head *cset_pos; struct list_head *cset_head; struct list_head *tcset_pos; struct list_head *tcset_head; struct list_head *task_pos; struct list_head *cur_tasks_head; struct css_set *cur_cset; struct css_set *cur_dcset; struct task_struct *cur_task; struct list_head iters_node; u64 android_kabi_reserved1; }; struct uclamp_request { s64 percent; u64 util; int ret; }; struct trace_event_data_offsets_sched_kthread_stop {}; struct trace_event_data_offsets_sched_kthread_stop_ret {}; struct trace_event_data_offsets_sched_kthread_work_queue_work {}; struct trace_event_data_offsets_sched_kthread_work_execute_start {}; struct trace_event_data_offsets_sched_kthread_work_execute_end {}; struct trace_event_data_offsets_sched_wakeup_template {}; struct trace_event_data_offsets_sched_switch {}; struct trace_event_data_offsets_sched_migrate_task {}; struct trace_event_data_offsets_sched_process_template {}; struct trace_event_data_offsets_sched_process_wait {}; struct trace_event_data_offsets_sched_process_fork {}; struct trace_event_data_offsets_sched_stat_template {}; struct trace_event_data_offsets_sched_blocked_reason {}; struct trace_event_data_offsets_sched_stat_runtime {}; struct trace_event_data_offsets_sched_pi_setprio {}; struct trace_event_data_offsets_sched_process_hang {}; struct trace_event_data_offsets_sched_move_numa {}; struct trace_event_data_offsets_sched_numa_pair_template {}; struct trace_event_data_offsets_sched_wake_idle_without_ipi {}; struct trace_event_data_offsets_ipi_send_cpu {}; struct trace_event_data_offsets_ipi_handler {}; typedef int (*tg_visitor)(struct task_group *, void *); struct migration_swap_arg { struct task_struct *src_task; struct task_struct *dst_task; int src_cpu; int dst_cpu; }; typedef int (*task_call_f)(struct task_struct *, void *); typedef struct mutex *class_mutex_t; struct cpuidle_device; struct cpuidle_driver; struct cpuidle_state { char name[16]; char desc[32]; s64 exit_latency_ns; s64 target_residency_ns; unsigned int flags; unsigned int exit_latency; int power_usage; unsigned int target_residency; int (*enter)(struct cpuidle_device *, struct cpuidle_driver *, int); int (*enter_dead)(struct cpuidle_device *, int); int (*enter_s2idle)(struct cpuidle_device *, struct cpuidle_driver *, int); }; struct cpuidle_state_usage { unsigned long long disable; unsigned long long usage; u64 time_ns; unsigned long long above; unsigned long long below; unsigned long long rejected; unsigned long long s2idle_usage; unsigned long long s2idle_time; }; struct 
cpuidle_state_kobj; struct cpuidle_driver_kobj; struct cpuidle_device_kobj; struct cpuidle_device { unsigned int registered: 1; unsigned int enabled: 1; unsigned int poll_time_limit: 1; unsigned int cpu; ktime_t next_hrtimer; int last_state_idx; u64 last_residency_ns; u64 poll_limit_ns; u64 forced_idle_latency_limit_ns; struct cpuidle_state_usage states_usage[10]; struct cpuidle_state_kobj *kobjs[10]; struct cpuidle_driver_kobj *kobj_driver; struct cpuidle_device_kobj *kobj_dev; struct list_head device_list; u64 android_kabi_reserved1; }; struct cpuidle_driver { const char *name; struct module *owner; unsigned int bctimer: 1; struct cpuidle_state states[10]; int state_count; int safe_state_index; struct cpumask *cpumask; const char *governor; u64 android_kabi_reserved1; }; struct rb_augment_callbacks { void (*propagate)(struct rb_node *, struct rb_node *); void (*copy)(struct rb_node *, struct rb_node *); void (*rotate)(struct rb_node *, struct rb_node *); }; enum { SD_BALANCE_NEWIDLE = 1, SD_BALANCE_EXEC = 2, SD_BALANCE_FORK = 4, SD_BALANCE_WAKE = 8, SD_WAKE_AFFINE = 16, SD_ASYM_CPUCAPACITY = 32, SD_ASYM_CPUCAPACITY_FULL = 64, SD_SHARE_CPUCAPACITY = 128, SD_SHARE_PKG_RESOURCES = 256, SD_SERIALIZE = 512, SD_ASYM_PACKING = 1024, SD_PREFER_SIBLING = 2048, SD_OVERLAP = 4096, SD_NUMA = 8192, }; enum sched_tunable_scaling { SCHED_TUNABLESCALING_NONE = 0, SCHED_TUNABLESCALING_LOG = 1, SCHED_TUNABLESCALING_LINEAR = 2, SCHED_TUNABLESCALING_END = 3, }; enum cpu_idle_type { CPU_IDLE = 0, CPU_NOT_IDLE = 1, CPU_NEWLY_IDLE = 2, CPU_MAX_IDLE_TYPES = 3, }; enum fbq_type { regular = 0, remote = 1, all = 2, }; enum migration_type { migrate_load = 0, migrate_util = 1, migrate_task = 2, migrate_misfit = 3, }; enum group_type { group_has_spare = 0, group_fully_busy = 1, group_misfit_task = 2, group_smt_balance = 3, group_asym_packing = 4, group_imbalanced = 5, group_overloaded = 6, }; struct update_util_data { void (*func)(struct update_util_data *, u64, unsigned int); }; struct sched_entity_stats { struct sched_entity se; struct sched_statistics stats; }; struct energy_env { unsigned long task_busy_time; unsigned long pd_busy_time; unsigned long cpu_cap; unsigned long pd_cap; }; struct lb_env { struct sched_domain *sd; struct rq *src_rq; int src_cpu; int dst_cpu; struct rq *dst_rq; struct cpumask *dst_grpmask; int new_dst_cpu; enum cpu_idle_type idle; long imbalance; struct cpumask *cpus; unsigned int flags; unsigned int loop; unsigned int loop_break; unsigned int loop_max; enum fbq_type fbq_type; enum migration_type migration_type; struct list_head tasks; struct rq_flags *src_rq_rf; }; struct sg_lb_stats { unsigned long avg_load; unsigned long group_load; unsigned long group_capacity; unsigned long group_util; unsigned long group_runnable; unsigned int sum_nr_running; unsigned int sum_h_nr_running; unsigned int idle_cpus; unsigned int group_weight; enum group_type group_type; unsigned int group_asym_packing; unsigned int group_smt_balance; unsigned long group_misfit_task_load; misfit_reason_t group_misfit_reason; }; struct sd_lb_stats { struct sched_group *busiest; struct sched_group *local; unsigned long total_load; unsigned long total_capacity; unsigned long avg_load; unsigned int prefer_sibling; struct sg_lb_stats busiest_stat; struct sg_lb_stats local_stat; }; enum cpu_usage_stat { CPUTIME_USER = 0, CPUTIME_NICE = 1, CPUTIME_SYSTEM = 2, CPUTIME_SOFTIRQ = 3, CPUTIME_IRQ = 4, CPUTIME_IDLE = 5, CPUTIME_IOWAIT = 6, CPUTIME_STEAL = 7, CPUTIME_GUEST = 8, CPUTIME_GUEST_NICE = 9, NR_STATS = 10, }; enum 
dl_bw_request { dl_bw_req_check_overflow = 0, dl_bw_req_alloc = 1, dl_bw_req_free = 2, }; enum s2idle_states { S2IDLE_STATE_NONE = 0, S2IDLE_STATE_ENTER = 1, S2IDLE_STATE_WAKE = 2, }; struct idle_timer { struct hrtimer timer; int done; }; typedef struct rt_rq *rt_rq_iter_t; struct sd_flag_debug { unsigned int meta_flags; char *name; }; typedef const struct cpumask * (*sched_domain_mask_f)(int); typedef int (*sched_domain_flags_f)(); struct sd_data { struct sched_domain * __attribute__((btf_type_tag("percpu"))) *sd; struct sched_domain_shared * __attribute__((btf_type_tag("percpu"))) *sds; struct sched_group * __attribute__((btf_type_tag("percpu"))) *sg; struct sched_group_capacity * __attribute__((btf_type_tag("percpu"))) *sgc; }; struct sched_domain_topology_level { sched_domain_mask_f mask; sched_domain_flags_f sd_flags; int flags; int numa_level; struct sd_data data; char *name; }; struct housekeeping { cpumask_var_t cpumasks[9]; unsigned long flags; }; struct cpuacct { struct cgroup_subsys_state css; u64 __attribute__((btf_type_tag("percpu"))) *cpuusage; struct kernel_cpustat __attribute__((btf_type_tag("percpu"))) *cpustat; }; struct gov_attr_set { struct kobject kobj; struct list_head policy_list; struct mutex update_lock; int usage_count; }; struct sugov_tunables { struct gov_attr_set attr_set; unsigned int rate_limit_us; }; struct governor_attr { struct attribute attr; ssize_t (*show)(struct gov_attr_set *, char *); ssize_t (*store)(struct gov_attr_set *, const char *, size_t); }; struct sugov_policy; struct sugov_cpu { struct update_util_data update_util; struct sugov_policy *sg_policy; unsigned int cpu; bool iowait_boost_pending; unsigned int iowait_boost; u64 last_update; unsigned long util; unsigned long bw_dl; unsigned long saved_idle_calls; }; struct sugov_policy { struct cpufreq_policy *policy; struct sugov_tunables *tunables; struct list_head tunables_hook; raw_spinlock_t update_lock; u64 last_freq_update_time; s64 freq_update_delay_ns; unsigned int next_freq; unsigned int cached_raw_freq; struct irq_work irq_work; struct kthread_work work; struct mutex work_lock; struct kthread_worker worker; struct task_struct *thread; bool work_in_progress; bool limits_changed; bool need_freq_update; }; struct proc_ops { unsigned int proc_flags; int (*proc_open)(struct inode *, struct file *); ssize_t (*proc_read)(struct file *, char __attribute__((btf_type_tag("user"))) *, size_t, loff_t *); ssize_t (*proc_read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*proc_write)(struct file *, const char __attribute__((btf_type_tag("user"))) *, size_t, loff_t *); loff_t (*proc_lseek)(struct file *, loff_t, int); int (*proc_release)(struct inode *, struct file *); __poll_t (*proc_poll)(struct file *, struct poll_table_struct *); long (*proc_ioctl)(struct file *, unsigned int, unsigned long); long (*proc_compat_ioctl)(struct file *, unsigned int, unsigned long); int (*proc_mmap)(struct file *, struct vm_area_struct *); unsigned long (*proc_get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); }; enum psi_states { PSI_IO_SOME = 0, PSI_IO_FULL = 1, PSI_MEM_SOME = 2, PSI_MEM_FULL = 3, PSI_CPU_SOME = 4, PSI_CPU_FULL = 5, PSI_IRQ_FULL = 6, PSI_NONIDLE = 7, NR_PSI_STATES = 8, }; enum psi_res { PSI_IO = 0, PSI_MEM = 1, PSI_CPU = 2, PSI_IRQ = 3, NR_PSI_RESOURCES = 4, }; enum psi_aggregators { PSI_AVGS = 0, PSI_POLL = 1, NR_PSI_AGGREGATORS = 2, }; enum hk_flags { HK_FLAG_TIMER = 1, HK_FLAG_RCU = 2, HK_FLAG_MISC = 4, HK_FLAG_SCHED = 8, HK_FLAG_TICK = 16, 
HK_FLAG_DOMAIN = 32, HK_FLAG_WQ = 64, HK_FLAG_MANAGED_IRQ = 128, HK_FLAG_KTHREAD = 256, }; enum cpuacct_stat_index { CPUACCT_STAT_USER = 0, CPUACCT_STAT_SYSTEM = 1, CPUACCT_STAT_NSTATS = 2, }; enum { __SD_BALANCE_NEWIDLE = 0, __SD_BALANCE_EXEC = 1, __SD_BALANCE_FORK = 2, __SD_BALANCE_WAKE = 3, __SD_WAKE_AFFINE = 4, __SD_ASYM_CPUCAPACITY = 5, __SD_ASYM_CPUCAPACITY_FULL = 6, __SD_SHARE_CPUCAPACITY = 7, __SD_SHARE_PKG_RESOURCES = 8, __SD_SERIALIZE = 9, __SD_ASYM_PACKING = 10, __SD_PREFER_SIBLING = 11, __SD_OVERLAP = 12, __SD_NUMA = 13, __SD_FLAG_CNT = 14, }; enum s_alloc { sa_rootdomain = 0, sa_sd = 1, sa_sd_storage = 2, sa_none = 3, }; enum membarrier_cmd { MEMBARRIER_CMD_QUERY = 0, MEMBARRIER_CMD_GLOBAL = 1, MEMBARRIER_CMD_GLOBAL_EXPEDITED = 2, MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = 4, MEMBARRIER_CMD_PRIVATE_EXPEDITED = 8, MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = 16, MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = 32, MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = 64, MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ = 128, MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ = 256, MEMBARRIER_CMD_GET_REGISTRATIONS = 512, MEMBARRIER_CMD_SHARED = 1, }; enum membarrier_cmd_flag { MEMBARRIER_CMD_FLAG_CPU = 1, }; enum { MEMBARRIER_FLAG_SYNC_CORE = 1, MEMBARRIER_FLAG_RSEQ = 2, }; struct swait_queue { struct task_struct *task; struct list_head task_list; }; struct psi_window { u64 size; u64 start_time; u64 start_value; u64 prev_growth; }; struct psi_trigger { enum psi_states state; u64 threshold; struct list_head node; struct psi_group *group; wait_queue_head_t event_wait; struct kernfs_open_file *of; int event; struct psi_window win; u64 last_event_time; bool pending_event; enum psi_aggregators aggregator; }; struct asym_cap_data { struct list_head link; unsigned long capacity; unsigned long cpus[0]; }; struct s_data { struct sched_domain * __attribute__((btf_type_tag("percpu"))) *sd; struct root_domain *rd; }; typedef __kernel_ulong_t ino_t; typedef void (*btf_trace_contention_begin)(void *, void *, unsigned int); typedef void (*btf_trace_contention_end)(void *, void *, int); struct trace_event_raw_contention_begin { struct trace_entry ent; void *lock_addr; unsigned int flags; char __data[0]; }; struct trace_event_raw_contention_end { struct trace_entry ent; void *lock_addr; int ret; char __data[0]; }; struct ww_acquire_ctx; struct mutex_waiter { struct list_head list; struct task_struct *task; struct ww_acquire_ctx *ww_ctx; }; struct ww_acquire_ctx { struct task_struct *task; unsigned long stamp; unsigned int acquired; unsigned short wounded; unsigned short is_wait_die; }; struct ww_mutex { struct mutex base; struct ww_acquire_ctx *ctx; }; struct trace_event_data_offsets_contention_begin {}; struct trace_event_data_offsets_contention_end {}; struct semaphore_waiter { struct list_head list; struct task_struct *task; bool up; }; struct semaphore { raw_spinlock_t lock; unsigned int count; struct list_head wait_list; }; enum rwsem_waiter_type { RWSEM_WAITING_FOR_WRITE = 0, RWSEM_WAITING_FOR_READ = 1, }; enum rwsem_wake_type { RWSEM_WAKE_ANY = 0, RWSEM_WAKE_READERS = 1, RWSEM_WAKE_READ_OWNED = 2, }; enum owner_state { OWNER_NULL = 1, OWNER_WRITER = 2, OWNER_READER = 4, OWNER_NONSPINNABLE = 8, }; struct rwsem_waiter { struct list_head list; struct task_struct *task; enum rwsem_waiter_type type; unsigned long timeout; bool handoff_set; }; struct optimistic_spin_node { struct optimistic_spin_node *next; struct optimistic_spin_node *prev; int locked; int cpu; }; struct mcs_spinlock { struct mcs_spinlock 
*next; int locked; int count; }; struct qnode { struct mcs_spinlock mcs; }; enum rtmutex_chainwalk { RT_MUTEX_MIN_CHAINWALK = 0, RT_MUTEX_FULL_CHAINWALK = 1, }; struct rt_waiter_node { struct rb_node entry; int prio; u64 deadline; }; struct rt_mutex_base; struct rt_mutex_waiter { struct rt_waiter_node tree; struct rt_waiter_node pi_tree; struct task_struct *task; struct rt_mutex_base *lock; unsigned int wake_state; struct ww_acquire_ctx *ww_ctx; }; struct rt_mutex_base { raw_spinlock_t wait_lock; struct rb_root_cached waiters; struct task_struct *owner; }; struct rt_mutex { struct rt_mutex_base rtmutex; }; struct rt_wake_q_head { struct wake_q_head head; struct task_struct *rtlock_task; }; struct hrtimer_sleeper { struct hrtimer timer; struct task_struct *task; }; enum pm_qos_req_action { PM_QOS_ADD_REQ = 0, PM_QOS_UPDATE_REQ = 1, PM_QOS_REMOVE_REQ = 2, }; struct pm_qos_request { struct plist_node node; struct pm_qos_constraints *qos; }; enum suspend_stat_step { SUSPEND_FREEZE = 1, SUSPEND_PREPARE = 2, SUSPEND_SUSPEND = 3, SUSPEND_SUSPEND_LATE = 4, SUSPEND_SUSPEND_NOIRQ = 5, SUSPEND_RESUME_NOIRQ = 6, SUSPEND_RESUME_EARLY = 7, SUSPEND_RESUME = 8, }; enum { TEST_NONE = 0, TEST_CORE = 1, TEST_CPUS = 2, TEST_PLATFORM = 3, TEST_DEVICES = 4, TEST_FREEZER = 5, __TEST_AFTER_LAST = 6, }; typedef int suspend_state_t; struct platform_suspend_ops { int (*valid)(suspend_state_t); int (*begin)(suspend_state_t); int (*prepare)(); int (*prepare_late)(); int (*enter)(suspend_state_t); void (*wake)(); void (*finish)(); bool (*suspend_again)(); void (*end)(); void (*recover)(); u64 android_kabi_reserved1; }; struct platform_s2idle_ops { int (*begin)(); int (*prepare)(); int (*prepare_late)(); void (*check)(); bool (*wake)(); void (*restore_early)(); void (*restore)(); void (*end)(); u64 android_kabi_reserved1; }; struct wakelock { char *name; struct rb_node node; struct wakeup_source *ws; }; struct sysrq_key_op { void (* const handler)(u8); const char * const help_msg; const char * const action_msg; const int enable_mask; }; enum wakeup_reason_flag { RESUME_NONE = 0, RESUME_IRQ = 1, RESUME_ABORT = 2, RESUME_ABNORMAL = 3, }; struct wakeup_irq_node { struct list_head siblings; int irq; const char *irq_name; }; struct em_data_callback { int (*active_power)(struct device *, unsigned long *, unsigned long *); int (*get_cost)(struct device *, unsigned long, unsigned long *); }; struct em_dbg_info { struct em_perf_domain *pd; int ps_id; }; typedef void (*btf_trace_console)(void *, const char *, size_t); struct prb_desc; struct printk_info; struct prb_desc_ring { unsigned int count_bits; struct prb_desc *descs; struct printk_info *infos; atomic_long_t head_id; atomic_long_t tail_id; atomic_long_t last_finalized_id; }; struct prb_data_ring { unsigned int size_bits; char *data; atomic_long_t head_lpos; atomic_long_t tail_lpos; }; struct printk_ringbuffer { struct prb_desc_ring desc_ring; struct prb_data_ring text_data_ring; atomic_long_t fail; }; struct prb_data_blk_lpos { unsigned long begin; unsigned long next; }; struct prb_desc { atomic_long_t state_var; struct prb_data_blk_lpos text_blk_lpos; }; struct dev_printk_info { char subsystem[16]; char device[48]; }; struct printk_info { u64 seq; u64 ts_nsec; u16 text_len; u8 facility; u8 flags: 5; u8 level: 3; u32 caller_id; struct dev_printk_info dev_info; }; struct console_cmdline { char name[16]; int index; bool user_specified; char *options; }; struct printk_buffers { char outbuf[2048]; char scratchbuf[1024]; }; typedef struct { seqcount_t seqcount; } 
seqcount_latch_t; struct latched_seq { seqcount_latch_t latch; u64 val[2]; }; enum devkmsg_log_masks { DEVKMSG_LOG_MASK_ON = 1, DEVKMSG_LOG_MASK_OFF = 2, DEVKMSG_LOG_MASK_LOCK = 4, }; enum printk_info_flags { LOG_NEWLINE = 2, LOG_CONT = 8, }; enum cons_flags { CON_PRINTBUFFER = 1, CON_CONSDEV = 2, CON_ENABLED = 4, CON_BOOT = 8, CON_ANYTIME = 16, CON_BRL = 32, CON_EXTENDED = 64, CON_SUSPENDED = 128, }; enum con_msg_format_flags { MSG_FORMAT_DEFAULT = 0, MSG_FORMAT_SYSLOG = 1, }; typedef unsigned int uint; struct console { char name[16]; void (*write)(struct console *, const char *, unsigned int); int (*read)(struct console *, char *, unsigned int); struct tty_driver * (*device)(struct console *, int *); void (*unblank)(); int (*setup)(struct console *, char *); int (*exit)(struct console *); int (*match)(struct console *, char *, int, char *); short flags; short index; int cflag; uint ispeed; uint ospeed; u64 seq; unsigned long dropped; void *data; struct hlist_node node; }; struct kmsg_dumper { struct list_head list; void (*dump)(struct kmsg_dumper *, enum kmsg_dump_reason); enum kmsg_dump_reason max_reason; bool registered; }; struct trace_event_raw_console { struct trace_entry ent; u32 __data_loc_msg; char __data[0]; }; struct trace_event_data_offsets_console { u32 msg; }; struct printk_record { struct printk_info *info; char *text_buf; unsigned int text_buf_size; }; struct prb_reserved_entry { struct printk_ringbuffer *rb; unsigned long irqflags; unsigned long id; unsigned int text_space; }; struct printk_message { struct printk_buffers *pbufs; unsigned int outbuf_len; u64 seq; unsigned long dropped; }; struct devkmsg_user { atomic64_t seq; struct ratelimit_state rs; struct mutex lock; struct printk_buffers pbufs; }; struct kmsg_dump_iter { u64 cur_seq; u64 next_seq; }; enum desc_state { desc_miss = -1, desc_reserved = 0, desc_committed = 1, desc_finalized = 2, desc_reusable = 3, }; struct prb_data_block { unsigned long id; char data[0]; }; enum { IRQS_AUTODETECT = 1, IRQS_SPURIOUS_DISABLED = 2, IRQS_POLL_INPROGRESS = 8, IRQS_ONESHOT = 32, IRQS_REPLAY = 64, IRQS_WAITING = 128, IRQS_PENDING = 512, IRQS_SUSPENDED = 2048, IRQS_TIMINGS = 4096, IRQS_NMI = 8192, IRQS_SYSFS = 16384, }; enum { _IRQ_DEFAULT_INIT_FLAGS = 0, _IRQ_PER_CPU = 512, _IRQ_LEVEL = 256, _IRQ_NOPROBE = 1024, _IRQ_NOREQUEST = 2048, _IRQ_NOTHREAD = 65536, _IRQ_NOAUTOEN = 4096, _IRQ_MOVE_PCNTXT = 16384, _IRQ_NO_BALANCING = 8192, _IRQ_NESTED_THREAD = 32768, _IRQ_PER_CPU_DEVID = 131072, _IRQ_IS_POLLED = 262144, _IRQ_DISABLE_UNLAZY = 524288, _IRQ_HIDDEN = 1048576, _IRQ_NO_DEBUG = 2097152, _IRQF_MODIFY_MASK = 2096911, }; enum { IRQTF_RUNTHREAD = 0, IRQTF_WARNED = 1, IRQTF_AFFINITY = 2, IRQTF_FORCED_THREAD = 3, IRQTF_READY = 4, }; enum { IRQ_SET_MASK_OK = 0, IRQ_SET_MASK_OK_NOCOPY = 1, IRQ_SET_MASK_OK_DONE = 2, }; enum { IRQCHIP_SET_TYPE_MASKED = 1, IRQCHIP_EOI_IF_HANDLED = 2, IRQCHIP_MASK_ON_SUSPEND = 4, IRQCHIP_ONOFFLINE_ENABLED = 8, IRQCHIP_SKIP_SET_WAKE = 16, IRQCHIP_ONESHOT_SAFE = 32, IRQCHIP_EOI_THREADED = 64, IRQCHIP_SUPPORTS_LEVEL_MSI = 128, IRQCHIP_SUPPORTS_NMI = 256, IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = 512, IRQCHIP_AFFINITY_PRE_STARTUP = 1024, IRQCHIP_IMMUTABLE = 2048, }; enum { IRQC_IS_HARDIRQ = 0, IRQC_IS_NESTED = 1, }; enum { IRQ_STARTUP_NORMAL = 0, IRQ_STARTUP_MANAGED = 1, IRQ_STARTUP_ABORT = 2, }; struct irq_devres { unsigned int irq; void *dev_id; }; struct irq_desc_devres { unsigned int from; unsigned int cnt; }; struct irq_generic_chip_devres { struct irq_chip_generic *gc; u32 msk; unsigned int clr; unsigned int 
set; }; enum { IRQ_DOMAIN_FLAG_HIERARCHY = 1, IRQ_DOMAIN_NAME_ALLOCATED = 2, IRQ_DOMAIN_FLAG_IPI_PER_CPU = 4, IRQ_DOMAIN_FLAG_IPI_SINGLE = 8, IRQ_DOMAIN_FLAG_MSI = 16, IRQ_DOMAIN_FLAG_ISOLATED_MSI = 32, IRQ_DOMAIN_FLAG_NO_MAP = 64, IRQ_DOMAIN_FLAG_MSI_PARENT = 256, IRQ_DOMAIN_FLAG_MSI_DEVICE = 512, IRQ_DOMAIN_FLAG_NONCORE = 65536, }; struct irqchip_fwid { struct fwnode_handle fwnode; unsigned int type; char *name; phys_addr_t *pa; }; enum { AFFINITY = 0, AFFINITY_LIST = 1, EFFECTIVE = 2, EFFECTIVE_LIST = 3, }; enum msi_domain_ids { MSI_DEFAULT_DOMAIN = 0, MSI_SECONDARY_DOMAIN = 1, MSI_MAX_DEVICE_IRQDOMAINS = 2, }; enum msi_desc_filter { MSI_DESC_ALL = 0, MSI_DESC_NOTASSOCIATED = 1, MSI_DESC_ASSOCIATED = 2, }; enum { MSI_FLAG_USE_DEF_DOM_OPS = 1, MSI_FLAG_USE_DEF_CHIP_OPS = 2, MSI_FLAG_ACTIVATE_EARLY = 4, MSI_FLAG_MUST_REACTIVATE = 8, MSI_FLAG_DEV_SYSFS = 16, MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS = 32, MSI_FLAG_FREE_MSI_DESCS = 64, MSI_GENERIC_FLAGS_MASK = 65535, MSI_DOMAIN_FLAGS_MASK = 4294901760, MSI_FLAG_MULTI_PCI_MSI = 65536, MSI_FLAG_PCI_MSIX = 131072, MSI_FLAG_LEVEL_CAPABLE = 262144, MSI_FLAG_MSIX_CONTIGUOUS = 524288, MSI_FLAG_PCI_MSIX_ALLOC_DYN = 1048576, MSI_FLAG_PCI_IMS = 2097152, }; struct msi_domain_template { char name[48]; struct irq_chip chip; struct msi_domain_ops ops; struct msi_domain_info info; }; struct xa_limit { u32 max; u32 min; }; struct msi_ctrl { unsigned int domid; unsigned int first; unsigned int last; unsigned int nirqs; }; struct msi_map { int index; int virq; }; struct irq_affinity { unsigned int pre_vectors; unsigned int post_vectors; unsigned int nr_sets; unsigned int set_size[4]; void (*calc_sets)(struct irq_affinity *, unsigned int); void *priv; }; typedef void (*btf_trace_rcu_utilization)(void *, const char *); typedef void (*btf_trace_rcu_grace_period)(void *, const char *, unsigned long, const char *); typedef void (*btf_trace_rcu_future_grace_period)(void *, const char *, unsigned long, unsigned long, u8, int, int, const char *); typedef void (*btf_trace_rcu_grace_period_init)(void *, const char *, unsigned long, u8, int, int, unsigned long); typedef void (*btf_trace_rcu_exp_grace_period)(void *, const char *, unsigned long, const char *); typedef void (*btf_trace_rcu_exp_funnel_lock)(void *, const char *, u8, int, int, const char *); typedef void (*btf_trace_rcu_nocb_wake)(void *, const char *, int, const char *); typedef void (*btf_trace_rcu_preempt_task)(void *, const char *, int, unsigned long); typedef void (*btf_trace_rcu_unlock_preempted_task)(void *, const char *, unsigned long, int); typedef void (*btf_trace_rcu_quiescent_state_report)(void *, const char *, unsigned long, unsigned long, unsigned long, u8, int, int, int); typedef void (*btf_trace_rcu_fqs)(void *, const char *, unsigned long, int, const char *); typedef void (*btf_trace_rcu_stall_warning)(void *, const char *, const char *); typedef void (*btf_trace_rcu_dyntick)(void *, const char *, long, long, int); typedef void (*btf_trace_rcu_callback)(void *, const char *, struct callback_head *, long); typedef void (*btf_trace_rcu_segcb_stats)(void *, struct rcu_segcblist *, const char *); typedef void (*btf_trace_rcu_kvfree_callback)(void *, const char *, struct callback_head *, unsigned long, long); typedef void (*btf_trace_rcu_batch_start)(void *, const char *, long, long); typedef void (*btf_trace_rcu_invoke_callback)(void *, const char *, struct callback_head *); typedef void (*btf_trace_rcu_invoke_kvfree_callback)(void *, const char *, struct callback_head *, unsigned long); typedef void 
(*btf_trace_rcu_invoke_kfree_bulk_callback)(void *, const char *, unsigned long, void **); typedef void (*btf_trace_rcu_batch_end)(void *, const char *, int, char, char, char, char); typedef void (*btf_trace_rcu_torture_read)(void *, const char *, struct callback_head *, unsigned long, unsigned long, unsigned long); typedef void (*btf_trace_rcu_barrier)(void *, const char *, const char *, int, int, unsigned long); struct rcu_tasks; typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *); typedef void (*pregp_func_t)(struct list_head *); typedef void (*pertask_func_t)(struct task_struct *, struct list_head *); typedef void (*postscan_func_t)(struct list_head *); typedef void (*holdouts_func_t)(struct list_head *, bool, bool *); typedef void (*postgp_func_t)(struct rcu_tasks *); typedef void (*call_rcu_func_t)(struct callback_head *, rcu_callback_t); struct rcu_tasks_percpu; struct rcu_tasks { struct rcuwait cbs_wait; raw_spinlock_t cbs_gbl_lock; struct mutex tasks_gp_mutex; int gp_state; int gp_sleep; int init_fract; unsigned long gp_jiffies; unsigned long gp_start; unsigned long tasks_gp_seq; unsigned long n_ipis; unsigned long n_ipis_fails; struct task_struct *kthread_ptr; unsigned long lazy_jiffies; rcu_tasks_gp_func_t gp_func; pregp_func_t pregp_func; pertask_func_t pertask_func; postscan_func_t postscan_func; holdouts_func_t holdouts_func; postgp_func_t postgp_func; call_rcu_func_t call_func; struct rcu_tasks_percpu __attribute__((btf_type_tag("percpu"))) *rtpcpu; int percpu_enqueue_shift; int percpu_enqueue_lim; int percpu_dequeue_lim; unsigned long percpu_dequeue_gpseq; struct mutex barrier_q_mutex; atomic_t barrier_q_count; struct completion barrier_q_completion; unsigned long barrier_q_seq; char *name; char *kname; }; struct rcu_tasks_percpu { struct rcu_segcblist cblist; raw_spinlock_t lock; unsigned long rtp_jiffies; unsigned long rtp_n_lock_retries; struct timer_list lazy_timer; unsigned int urgent_gp; struct work_struct rtp_work; struct irq_work rtp_irq_work; struct callback_head barrier_q_head; struct list_head rtp_blkd_tasks; int cpu; struct rcu_tasks *rtpp; }; struct rcu_synchronize { struct callback_head head; struct completion completion; }; struct trace_event_raw_rcu_utilization { struct trace_entry ent; const char *s; char __data[0]; }; struct trace_event_raw_rcu_grace_period { struct trace_entry ent; const char *rcuname; long gp_seq; const char *gpevent; char __data[0]; }; struct trace_event_raw_rcu_future_grace_period { struct trace_entry ent; const char *rcuname; long gp_seq; long gp_seq_req; u8 level; int grplo; int grphi; const char *gpevent; char __data[0]; }; struct trace_event_raw_rcu_grace_period_init { struct trace_entry ent; const char *rcuname; long gp_seq; u8 level; int grplo; int grphi; unsigned long qsmask; char __data[0]; }; struct trace_event_raw_rcu_exp_grace_period { struct trace_entry ent; const char *rcuname; long gpseq; const char *gpevent; char __data[0]; }; struct trace_event_raw_rcu_exp_funnel_lock { struct trace_entry ent; const char *rcuname; u8 level; int grplo; int grphi; const char *gpevent; char __data[0]; }; struct trace_event_raw_rcu_nocb_wake { struct trace_entry ent; const char *rcuname; int cpu; const char *reason; char __data[0]; }; struct trace_event_raw_rcu_preempt_task { struct trace_entry ent; const char *rcuname; long gp_seq; int pid; char __data[0]; }; struct trace_event_raw_rcu_unlock_preempted_task { struct trace_entry ent; const char *rcuname; long gp_seq; int pid; char __data[0]; }; struct 
trace_event_raw_rcu_quiescent_state_report { struct trace_entry ent; const char *rcuname; long gp_seq; unsigned long mask; unsigned long qsmask; u8 level; int grplo; int grphi; u8 gp_tasks; char __data[0]; }; struct trace_event_raw_rcu_fqs { struct trace_entry ent; const char *rcuname; long gp_seq; int cpu; const char *qsevent; char __data[0]; }; struct trace_event_raw_rcu_stall_warning { struct trace_entry ent; const char *rcuname; const char *msg; char __data[0]; }; struct trace_event_raw_rcu_dyntick { struct trace_entry ent; const char *polarity; long oldnesting; long newnesting; int dynticks; char __data[0]; }; struct trace_event_raw_rcu_callback { struct trace_entry ent; const char *rcuname; void *rhp; void *func; long qlen; char __data[0]; }; struct trace_event_raw_rcu_segcb_stats { struct trace_entry ent; const char *ctx; unsigned long gp_seq[4]; long seglen[4]; char __data[0]; }; struct trace_event_raw_rcu_kvfree_callback { struct trace_entry ent; const char *rcuname; void *rhp; unsigned long offset; long qlen; char __data[0]; }; struct trace_event_raw_rcu_batch_start { struct trace_entry ent; const char *rcuname; long qlen; long blimit; char __data[0]; }; struct trace_event_raw_rcu_invoke_callback { struct trace_entry ent; const char *rcuname; void *rhp; void *func; char __data[0]; }; struct trace_event_raw_rcu_invoke_kvfree_callback { struct trace_entry ent; const char *rcuname; void *rhp; unsigned long offset; char __data[0]; }; struct trace_event_raw_rcu_invoke_kfree_bulk_callback { struct trace_entry ent; const char *rcuname; unsigned long nr_records; void **p; char __data[0]; }; struct trace_event_raw_rcu_batch_end { struct trace_entry ent; const char *rcuname; int callbacks_invoked; char cb; char nr; char iit; char risk; char __data[0]; }; struct trace_event_raw_rcu_torture_read { struct trace_entry ent; char rcutorturename[8]; struct callback_head *rhp; unsigned long secs; unsigned long c_old; unsigned long c; char __data[0]; }; struct trace_event_raw_rcu_barrier { struct trace_entry ent; const char *rcuname; const char *s; int cpu; int cnt; unsigned long done; char __data[0]; }; struct rcu_cblist { struct callback_head *head; struct callback_head **tail; long len; }; struct trc_stall_chk_rdr { int nesting; int ipi_to_cpu; u8 needqs; }; struct trace_event_data_offsets_rcu_utilization {}; struct trace_event_data_offsets_rcu_grace_period {}; struct trace_event_data_offsets_rcu_future_grace_period {}; struct trace_event_data_offsets_rcu_grace_period_init {}; struct trace_event_data_offsets_rcu_exp_grace_period {}; struct trace_event_data_offsets_rcu_exp_funnel_lock {}; struct trace_event_data_offsets_rcu_nocb_wake {}; struct trace_event_data_offsets_rcu_preempt_task {}; struct trace_event_data_offsets_rcu_unlock_preempted_task {}; struct trace_event_data_offsets_rcu_quiescent_state_report {}; struct trace_event_data_offsets_rcu_fqs {}; struct trace_event_data_offsets_rcu_stall_warning {}; struct trace_event_data_offsets_rcu_dyntick {}; struct trace_event_data_offsets_rcu_callback {}; struct trace_event_data_offsets_rcu_segcb_stats {}; struct trace_event_data_offsets_rcu_kvfree_callback {}; struct trace_event_data_offsets_rcu_batch_start {}; struct trace_event_data_offsets_rcu_invoke_callback {}; struct trace_event_data_offsets_rcu_invoke_kvfree_callback {}; struct trace_event_data_offsets_rcu_invoke_kfree_bulk_callback {}; struct trace_event_data_offsets_rcu_batch_end {}; struct trace_event_data_offsets_rcu_torture_read {}; struct trace_event_data_offsets_rcu_barrier {}; enum 
{ GP_IDLE = 0, GP_ENTER = 1, GP_PASSED = 2, GP_EXIT = 3, GP_REPLAY = 4, }; typedef unsigned long ulong; enum rcutorture_type { RCU_FLAVOR = 0, RCU_TASKS_FLAVOR = 1, RCU_TASKS_RUDE_FLAVOR = 2, RCU_TASKS_TRACING_FLAVOR = 3, RCU_TRIVIAL_FLAVOR = 4, SRCU_FLAVOR = 5, INVALID_RCU_FLAVOR = 6, }; struct rcu_exp_work { unsigned long rew_s; struct kthread_work rew_work; }; struct rcu_node { raw_spinlock_t lock; unsigned long gp_seq; unsigned long gp_seq_needed; unsigned long completedqs; unsigned long qsmask; unsigned long rcu_gp_init_mask; unsigned long qsmaskinit; unsigned long qsmaskinitnext; unsigned long expmask; unsigned long expmaskinit; unsigned long expmaskinitnext; unsigned long cbovldmask; unsigned long ffmask; unsigned long grpmask; int grplo; int grphi; u8 grpnum; u8 level; bool wait_blkd_tasks; struct rcu_node *parent; struct list_head blkd_tasks; struct list_head *gp_tasks; struct list_head *exp_tasks; struct list_head *boost_tasks; struct rt_mutex boost_mtx; unsigned long boost_time; struct mutex boost_kthread_mutex; struct task_struct *boost_kthread_task; unsigned int boost_kthread_status; unsigned long n_boosts; struct swait_queue_head nocb_gp_wq[2]; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; raw_spinlock_t fqslock; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; spinlock_t exp_lock; unsigned long exp_seq_rq; wait_queue_head_t exp_wq[4]; struct rcu_exp_work rew; bool exp_need_flush; raw_spinlock_t exp_poll_lock; unsigned long exp_seq_poll_rq; struct work_struct exp_poll_wq; long: 64; long: 64; long: 64; long: 64; }; union rcu_noqs { struct { u8 norm; u8 exp; } b; u16 s; }; struct rcu_snap_record { unsigned long gp_seq; u64 cputime_irq; u64 cputime_softirq; u64 cputime_system; unsigned long nr_hardirqs; unsigned int nr_softirqs; unsigned long long nr_csw; unsigned long jiffies; }; struct rcu_data { unsigned long gp_seq; unsigned long gp_seq_needed; union rcu_noqs cpu_no_qs; bool core_needs_qs; bool beenonline; bool gpwrap; bool cpu_started; struct rcu_node *mynode; unsigned long grpmask; unsigned long ticks_this_gp; struct irq_work defer_qs_iw; bool defer_qs_iw_pending; struct work_struct strict_work; struct rcu_segcblist cblist; long qlen_last_fqs_check; unsigned long n_cbs_invoked; unsigned long n_force_qs_snap; long blimit; int dynticks_snap; bool rcu_need_heavy_qs; bool rcu_urgent_qs; bool rcu_forced_tick; bool rcu_forced_tick_exp; unsigned long barrier_seq_snap; struct callback_head barrier_head; int exp_dynticks_snap; struct swait_queue_head nocb_cb_wq; struct swait_queue_head nocb_state_wq; struct task_struct *nocb_gp_kthread; raw_spinlock_t nocb_lock; atomic_t nocb_lock_contended; int nocb_defer_wakeup; struct timer_list nocb_timer; unsigned long nocb_gp_adv_time; struct mutex nocb_gp_kthread_mutex; raw_spinlock_t nocb_bypass_lock; struct rcu_cblist nocb_bypass; unsigned long nocb_bypass_first; unsigned long nocb_nobypass_last; int nocb_nobypass_count; long: 64; raw_spinlock_t nocb_gp_lock; u8 nocb_gp_sleep; u8 nocb_gp_bypass; u8 nocb_gp_gp; unsigned long nocb_gp_seq; unsigned long nocb_gp_loops; struct swait_queue_head nocb_gp_wq; bool nocb_cb_sleep; struct task_struct *nocb_cb_kthread; struct list_head nocb_head_rdp; struct list_head nocb_entry_rdp; struct rcu_data *nocb_toggling_rdp; long: 64; long: 64; long: 64; struct rcu_data *nocb_gp_rdp; struct task_struct *rcu_cpu_kthread_task; unsigned int rcu_cpu_kthread_status; char rcu_cpu_has_work; unsigned long rcuc_activity; unsigned int softirq_snap; struct irq_work rcu_iw; bool 
rcu_iw_pending; unsigned long rcu_iw_gp_seq; unsigned long rcu_ofl_gp_seq; short rcu_ofl_gp_flags; unsigned long rcu_onl_gp_seq; short rcu_onl_gp_flags; unsigned long last_fqs_resched; unsigned long last_sched_clock; struct rcu_snap_record snap_record; long lazy_len; int cpu; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct rcu_state { struct rcu_node node[3]; struct rcu_node *level[3]; int ncpus; int n_online_cpus; long: 64; long: 64; long: 64; long: 64; unsigned long gp_seq; unsigned long gp_max; struct task_struct *gp_kthread; struct swait_queue_head gp_wq; short gp_flags; short gp_state; unsigned long gp_wake_time; unsigned long gp_wake_seq; unsigned long gp_seq_polled; unsigned long gp_seq_polled_snap; unsigned long gp_seq_polled_exp_snap; struct mutex barrier_mutex; atomic_t barrier_cpu_count; struct completion barrier_completion; unsigned long barrier_sequence; raw_spinlock_t barrier_lock; struct mutex exp_mutex; struct mutex exp_wake_mutex; unsigned long expedited_sequence; atomic_t expedited_need_qs; struct swait_queue_head expedited_wq; int ncpus_snap; u8 cbovld; u8 cbovldnext; unsigned long jiffies_force_qs; unsigned long jiffies_kick_kthreads; unsigned long n_force_qs; unsigned long gp_start; unsigned long gp_end; unsigned long gp_activity; unsigned long gp_req_activity; unsigned long jiffies_stall; int nr_fqs_jiffies_stall; unsigned long jiffies_resched; unsigned long n_force_qs_gpstart; const char *name; char abbr; long: 0; arch_spinlock_t ofl_lock; int nocb_is_setup; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct rcu_gp_oldstate { unsigned long rgos_norm; unsigned long rgos_exp; }; struct kfree_rcu_cpu; struct kfree_rcu_cpu_work { struct rcu_work rcu_work; struct callback_head *head_free; struct rcu_gp_oldstate head_free_gp_snap; struct list_head bulk_head_free[2]; struct kfree_rcu_cpu *krcp; }; struct kfree_rcu_cpu { struct callback_head *head; unsigned long head_gp_snap; atomic_t head_count; struct list_head bulk_head[2]; atomic_t bulk_count[2]; struct kfree_rcu_cpu_work krw_arr[2]; raw_spinlock_t lock; struct delayed_work monitor_work; bool initialized; struct delayed_work page_cache_work; atomic_t backoff_page_cache_fill; atomic_t work_in_progress; struct hrtimer hrtimer; struct llist_head bkvcache; int nr_bkv_objs; }; enum tick_dep_bits { TICK_DEP_BIT_POSIX_TIMER = 0, TICK_DEP_BIT_PERF_EVENTS = 1, TICK_DEP_BIT_SCHED = 2, TICK_DEP_BIT_CLOCK_UNSTABLE = 3, TICK_DEP_BIT_RCU = 4, TICK_DEP_BIT_RCU_EXP = 5, }; struct context_tracking { atomic_t state; long dynticks_nesting; long dynticks_nmi_nesting; }; struct kvfree_rcu_bulk_data { struct list_head list; struct rcu_gp_oldstate gp_snap; unsigned long nr_records; void *records[0]; }; struct rcu_stall_chk_rdr { int nesting; union rcu_special rs; bool on_blkd_list; }; struct dma_sgt_handle { struct sg_table sgt; struct page **pages; }; struct dma_devres { size_t size; void *vaddr; dma_addr_t dma_handle; unsigned long attrs; }; enum pci_p2pdma_map_type { PCI_P2PDMA_MAP_UNKNOWN = 0, PCI_P2PDMA_MAP_NOT_SUPPORTED = 1, PCI_P2PDMA_MAP_BUS_ADDR = 2, PCI_P2PDMA_MAP_THRU_HOST_BRIDGE = 3, }; typedef struct pglist_data pg_data_t; struct pci_p2pdma_map_state { struct dev_pagemap *pgmap; int map; u64 bus_off; }; struct debugfs_u32_array { u32 *array; u32 n_elements; }; struct cma { unsigned long base_pfn; unsigned long count; unsigned long *bitmap; unsigned int order_per_bit; spinlock_t lock; struct hlist_head mem_head; spinlock_t mem_head_lock; struct debugfs_u32_array dfs_bitmap; char name[64]; 
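/*
 * struct cma (continuing below) describes one Contiguous Memory Allocator
 * area: base_pfn/count give its page-frame range, bitmap tracks allocations
 * at order_per_bit granularity, and name identifies the area in debugfs.
 */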
bool reserve_pages_on_error; }; struct dma_coherent_mem { void *virt_base; dma_addr_t device_base; unsigned long pfn_base; int size; unsigned long *bitmap; spinlock_t spinlock; bool use_dev_dma_pfn_offset; }; struct io_tlb_area { unsigned long used; unsigned int index; spinlock_t lock; }; struct io_tlb_slot { phys_addr_t orig_addr; size_t alloc_size; unsigned int list; }; typedef void (*btf_trace_swiotlb_bounced)(void *, struct device *, dma_addr_t, size_t); struct trace_event_raw_swiotlb_bounced { struct trace_entry ent; u32 __data_loc_dev_name; u64 dma_mask; dma_addr_t dev_addr; size_t size; bool force; char __data[0]; }; struct trace_event_data_offsets_swiotlb_bounced { u32 dev_name; }; struct gen_pool; typedef unsigned long (*genpool_algo_t)(unsigned long *, unsigned long, unsigned long, unsigned int, void *, struct gen_pool *, unsigned long); struct gen_pool { spinlock_t lock; struct list_head chunks; int min_alloc_order; genpool_algo_t algo; void *data; const char *name; }; typedef void (*btf_trace_module_load)(void *, struct module *); typedef void (*btf_trace_module_free)(void *, struct module *); typedef void (*btf_trace_module_get)(void *, struct module *, unsigned long); typedef void (*btf_trace_module_put)(void *, struct module *, unsigned long); typedef void (*btf_trace_module_request)(void *, char *, bool, unsigned long); struct latch_tree_root { seqcount_latch_t seq; struct rb_root tree[2]; }; struct mod_tree_root { struct latch_tree_root root; unsigned long addr_min; unsigned long addr_max; }; enum mod_license { NOT_GPL_ONLY = 0, GPL_ONLY = 1, }; struct symsearch { const struct kernel_symbol *start; const struct kernel_symbol *stop; const s32 *crcs; enum mod_license license; }; enum kernel_load_data_id { LOADING_UNKNOWN = 0, LOADING_FIRMWARE = 1, LOADING_MODULE = 2, LOADING_KEXEC_IMAGE = 3, LOADING_KEXEC_INITRAMFS = 4, LOADING_POLICY = 5, LOADING_X509_CERTIFICATE = 6, LOADING_MAX_ID = 7, }; enum fail_dup_mod_reason { FAIL_DUP_MOD_BECOMING = 0, FAIL_DUP_MOD_LOAD = 1, }; enum kernel_read_file_id { READING_UNKNOWN = 0, READING_FIRMWARE = 1, READING_MODULE = 2, READING_KEXEC_IMAGE = 3, READING_KEXEC_INITRAMFS = 4, READING_POLICY = 5, READING_X509_CERTIFICATE = 6, READING_MAX_ID = 7, }; struct trace_event_raw_module_load { struct trace_entry ent; unsigned int taints; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_module_free { struct trace_entry ent; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_module_refcnt { struct trace_entry ent; unsigned long ip; int refcnt; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_module_request { struct trace_entry ent; unsigned long ip; bool wait; u32 __data_loc_name; char __data[0]; }; struct module_use { struct list_head source_list; struct list_head target_list; struct module *source; struct module *target; }; struct mod_initfree { struct llist_node node; void *init_text; void *init_data; void *init_rodata; }; struct idempotent { const void *cookie; struct hlist_node entry; struct completion complete; int ret; }; struct trace_event_data_offsets_module_load { u32 name; }; struct trace_event_data_offsets_module_free { u32 name; }; struct trace_event_data_offsets_module_refcnt { u32 name; }; struct trace_event_data_offsets_module_request { u32 name; }; struct find_symbol_arg { const char *name; bool gplok; bool warn; struct module *owner; const s32 *crc; const struct kernel_symbol *sym; enum mod_license license; }; struct load_info { const char *name; struct module *mod; Elf64_Ehdr *hdr; unsigned 
long len; Elf64_Shdr *sechdrs; char *secstrings; char *strtab; unsigned long symoffs; unsigned long stroffs; unsigned long init_typeoffs; unsigned long core_typeoffs; bool sig_ok; unsigned long mod_kallsyms_init_off; struct { unsigned int sym; unsigned int str; unsigned int mod; unsigned int vers; unsigned int info; unsigned int pcpu; } index; }; enum key_being_used_for { VERIFYING_MODULE_SIGNATURE = 0, VERIFYING_FIRMWARE_SIGNATURE = 1, VERIFYING_KEXEC_PE_SIGNATURE = 2, VERIFYING_KEY_SIGNATURE = 3, VERIFYING_KEY_SELF_SIGNATURE = 4, VERIFYING_UNSPECIFIED_SIGNATURE = 5, NR__KEY_BEING_USED_FOR = 6, }; struct module_signature { u8 algo; u8 hash; u8 id_type; u8 signer_len; u8 key_id_len; u8 __pad[3]; __be32 sig_len; }; struct latch_tree_ops { bool (*less)(struct latch_tree_node *, struct latch_tree_node *); int (*comp)(void *, struct latch_tree_node *); }; struct module_sect_attr { struct bin_attribute battr; unsigned long address; }; struct module_sect_attrs { struct attribute_group grp; unsigned int nsections; struct module_sect_attr attrs[0]; }; struct module_notes_attrs { struct kobject *dir; unsigned int notes; struct bin_attribute attrs[0]; }; struct modversion_info { unsigned long crc; char name[56]; }; enum kcmp_type { KCMP_FILE = 0, KCMP_VM = 1, KCMP_FILES = 2, KCMP_FS = 3, KCMP_SIGHAND = 4, KCMP_IO = 5, KCMP_SYSVSEM = 6, KCMP_EPOLL_TFD = 7, KCMP_TYPES = 8, }; struct kcmp_epoll_slot { __u32 efd; __u32 tfd; __u32 toff; }; struct profile_hit { u32 pc; u32 hits; }; enum profile_type { PROFILE_TASK_EXIT = 0, PROFILE_MUNMAP = 1, }; struct stacktrace_cookie { unsigned long *store; unsigned int size; unsigned int skip; unsigned int len; }; struct timezone { int tz_minuteswest; int tz_dsttime; }; typedef __kernel_long_t __kernel_suseconds_t; typedef __kernel_suseconds_t suseconds_t; typedef __u64 timeu64_t; struct __kernel_timex_timeval { __kernel_time64_t tv_sec; long long tv_usec; }; struct __kernel_timex { unsigned int modes; long long offset; long long freq; long long maxerror; long long esterror; int status; long long constant; long long precision; long long tolerance; struct __kernel_timex_timeval time; long long tick; long long ppsfreq; long long jitter; int shift; long long stabil; long long jitcnt; long long calcnt; long long errcnt; long long stbcnt; int tai; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct old_timex32 { u32 modes; s32 offset; s32 freq; s32 maxerror; s32 esterror; s32 status; s32 constant; s32 precision; s32 tolerance; struct old_timeval32 time; s32 tick; s32 ppsfreq; s32 jitter; s32 shift; s32 stabil; s32 jitcnt; s32 calcnt; s32 errcnt; s32 stbcnt; s32 tai; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct itimerspec64 { struct timespec64 it_interval; struct timespec64 it_value; }; struct __kernel_itimerspec { struct __kernel_timespec it_interval; struct __kernel_timespec it_value; }; struct old_itimerspec32 { struct old_timespec32 it_interval; struct old_timespec32 it_value; }; typedef void (*btf_trace_timer_init)(void *, struct timer_list *); typedef void (*btf_trace_timer_start)(void *, struct timer_list *, unsigned long, unsigned int); typedef void (*btf_trace_timer_expire_entry)(void *, struct timer_list *, unsigned long); typedef void (*btf_trace_timer_expire_exit)(void *, struct timer_list *); typedef void (*btf_trace_timer_cancel)(void *, struct timer_list *); typedef void (*btf_trace_hrtimer_init)(void *, struct hrtimer *, clockid_t, enum hrtimer_mode); typedef void (*btf_trace_hrtimer_start)(void *, struct hrtimer *, enum 
hrtimer_mode); typedef void (*btf_trace_hrtimer_expire_entry)(void *, struct hrtimer *, ktime_t *); typedef void (*btf_trace_hrtimer_expire_exit)(void *, struct hrtimer *); typedef void (*btf_trace_hrtimer_cancel)(void *, struct hrtimer *); typedef void (*btf_trace_itimer_state)(void *, int, const struct itimerspec64 * const, unsigned long long); typedef void (*btf_trace_itimer_expire)(void *, int, struct pid *, unsigned long long); typedef void (*btf_trace_tick_stop)(void *, int, int); struct timer_base { raw_spinlock_t lock; struct timer_list *running_timer; unsigned long clk; unsigned long next_expiry; unsigned int cpu; bool next_expiry_recalc; bool is_idle; bool timers_pending; unsigned long pending_map[9]; struct hlist_head vectors[576]; long: 64; long: 64; }; struct trace_event_raw_timer_class { struct trace_entry ent; void *timer; char __data[0]; }; struct trace_event_raw_timer_start { struct trace_entry ent; void *timer; void *function; unsigned long expires; unsigned long now; unsigned int flags; char __data[0]; }; struct trace_event_raw_timer_expire_entry { struct trace_entry ent; void *timer; unsigned long now; void *function; unsigned long baseclk; char __data[0]; }; struct trace_event_raw_hrtimer_init { struct trace_entry ent; void *hrtimer; clockid_t clockid; enum hrtimer_mode mode; char __data[0]; }; struct trace_event_raw_hrtimer_start { struct trace_entry ent; void *hrtimer; void *function; s64 expires; s64 softexpires; enum hrtimer_mode mode; char __data[0]; }; struct trace_event_raw_hrtimer_expire_entry { struct trace_entry ent; void *hrtimer; s64 now; void *function; char __data[0]; }; struct trace_event_raw_hrtimer_class { struct trace_entry ent; void *hrtimer; char __data[0]; }; struct trace_event_raw_itimer_state { struct trace_entry ent; int which; unsigned long long expires; long value_sec; long value_nsec; long interval_sec; long interval_nsec; char __data[0]; }; struct trace_event_raw_itimer_expire { struct trace_entry ent; int which; pid_t pid; unsigned long long now; char __data[0]; }; struct trace_event_raw_tick_stop { struct trace_entry ent; int success; int dependency; char __data[0]; }; struct process_timer { struct timer_list timer; struct task_struct *task; }; struct trace_event_data_offsets_timer_class {}; struct trace_event_data_offsets_timer_start {}; struct trace_event_data_offsets_timer_expire_entry {}; struct trace_event_data_offsets_hrtimer_init {}; struct trace_event_data_offsets_hrtimer_start {}; struct trace_event_data_offsets_hrtimer_expire_entry {}; struct trace_event_data_offsets_hrtimer_class {}; struct trace_event_data_offsets_itimer_state {}; struct trace_event_data_offsets_itimer_expire {}; struct trace_event_data_offsets_tick_stop {}; enum hrtimer_base_type { HRTIMER_BASE_MONOTONIC = 0, HRTIMER_BASE_REALTIME = 1, HRTIMER_BASE_BOOTTIME = 2, HRTIMER_BASE_TAI = 3, HRTIMER_BASE_MONOTONIC_SOFT = 4, HRTIMER_BASE_REALTIME_SOFT = 5, HRTIMER_BASE_BOOTTIME_SOFT = 6, HRTIMER_BASE_TAI_SOFT = 7, HRTIMER_MAX_CLOCK_BASES = 8, }; enum clock_event_state { CLOCK_EVT_STATE_DETACHED = 0, CLOCK_EVT_STATE_SHUTDOWN = 1, CLOCK_EVT_STATE_PERIODIC = 2, CLOCK_EVT_STATE_ONESHOT = 3, CLOCK_EVT_STATE_ONESHOT_STOPPED = 4, }; struct clock_event_device { void (*event_handler)(struct clock_event_device *); int (*set_next_event)(unsigned long, struct clock_event_device *); int (*set_next_ktime)(ktime_t, struct clock_event_device *); ktime_t next_event; u64 max_delta_ns; u64 min_delta_ns; u32 mult; u32 shift; enum clock_event_state state_use_accessors; unsigned int 
features; unsigned long retries; int (*set_state_periodic)(struct clock_event_device *); int (*set_state_oneshot)(struct clock_event_device *); int (*set_state_oneshot_stopped)(struct clock_event_device *); int (*set_state_shutdown)(struct clock_event_device *); int (*tick_resume)(struct clock_event_device *); void (*broadcast)(const struct cpumask *); void (*suspend)(struct clock_event_device *); void (*resume)(struct clock_event_device *); unsigned long min_delta_ticks; unsigned long max_delta_ticks; const char *name; int rating; int irq; int bound_on; const struct cpumask *cpumask; struct list_head list; struct module *owner; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct clocksource; struct tk_read_base { struct clocksource *clock; u64 mask; u64 cycle_last; u32 mult; u32 shift; u64 xtime_nsec; ktime_t base; u64 base_real; }; struct tk_fast { seqcount_latch_t seq; struct tk_read_base base[2]; }; enum vdso_clock_mode { VDSO_CLOCKMODE_NONE = 0, VDSO_CLOCKMODE_ARCHTIMER = 1, VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT = 2, VDSO_CLOCKMODE_MAX = 3, VDSO_CLOCKMODE_TIMENS = 2147483647, }; struct clocksource { u64 (*read)(struct clocksource *); u64 mask; u32 mult; u32 shift; u64 max_idle_ns; u32 maxadj; u32 uncertainty_margin; u64 max_cycles; const char *name; struct list_head list; int rating; enum clocksource_ids id; enum vdso_clock_mode vdso_clock_mode; unsigned long flags; int (*enable)(struct clocksource *); void (*disable)(struct clocksource *); void (*suspend)(struct clocksource *); void (*resume)(struct clocksource *); void (*mark_unstable)(struct clocksource *); void (*tick_stable)(struct clocksource *); struct module *owner; }; struct timekeeper { struct tk_read_base tkr_mono; struct tk_read_base tkr_raw; u64 xtime_sec; unsigned long ktime_sec; struct timespec64 wall_to_monotonic; ktime_t offs_real; ktime_t offs_boot; ktime_t offs_tai; s32 tai_offset; unsigned int clock_was_set_seq; u8 cs_was_changed_seq; ktime_t next_leap_ktime; u64 raw_sec; struct timespec64 monotonic_to_boot; u64 cycle_interval; u64 xtime_interval; s64 xtime_remainder; u64 raw_interval; u64 ntp_tick; s64 ntp_error; u32 ntp_error_shift; u32 ntp_err_mult; u32 skip_second_overflow; }; enum timekeeping_adv_mode { TK_ADV_TICK = 0, TK_ADV_FREQ = 1, }; struct system_device_crosststamp { ktime_t device; ktime_t sys_realtime; ktime_t sys_monoraw; }; struct audit_ntp_val { long long oldval; long long newval; }; struct audit_ntp_data { struct audit_ntp_val vals[6]; }; struct ktime_timestamps { u64 mono; u64 boot; u64 real; }; struct system_counterval_t { u64 cycles; struct clocksource *cs; }; enum audit_ntp_type { AUDIT_NTP_OFFSET = 0, AUDIT_NTP_FREQ = 1, AUDIT_NTP_STATUS = 2, AUDIT_NTP_TAI = 3, AUDIT_NTP_TICK = 4, AUDIT_NTP_ADJUST = 5, AUDIT_NTP_NVALS = 6, }; struct rtc_device; struct rtc_timer { struct timerqueue_node node; ktime_t period; void (*func)(struct rtc_device *); struct rtc_device *rtc; int enabled; }; struct rtc_class_ops; struct rtc_device { struct device dev; struct module *owner; int id; const struct rtc_class_ops *ops; struct mutex ops_lock; struct cdev char_dev; unsigned long flags; unsigned long irq_data; spinlock_t irq_lock; wait_queue_head_t irq_queue; struct fasync_struct *async_queue; int irq_freq; int max_user_freq; struct timerqueue_head timerqueue; struct rtc_timer aie_timer; struct rtc_timer uie_rtctimer; struct hrtimer pie_timer; int pie_enabled; struct work_struct irqwork; unsigned long set_offset_nsec; unsigned long features[1]; time64_t range_min; timeu64_t range_max; timeu64_t 
alarm_offset_max; time64_t start_secs; time64_t offset_secs; bool set_start_time; u64 android_kabi_reserved1; }; struct rtc_time; struct rtc_wkalrm; struct rtc_param; struct rtc_class_ops { int (*ioctl)(struct device *, unsigned int, unsigned long); int (*read_time)(struct device *, struct rtc_time *); int (*set_time)(struct device *, struct rtc_time *); int (*read_alarm)(struct device *, struct rtc_wkalrm *); int (*set_alarm)(struct device *, struct rtc_wkalrm *); int (*proc)(struct device *, struct seq_file *); int (*alarm_irq_enable)(struct device *, unsigned int); int (*read_offset)(struct device *, long *); int (*set_offset)(struct device *, long); int (*param_get)(struct device *, struct rtc_param *); int (*param_set)(struct device *, struct rtc_param *); u64 android_kabi_reserved1; }; struct rtc_time { int tm_sec; int tm_min; int tm_hour; int tm_mday; int tm_mon; int tm_year; int tm_wday; int tm_yday; int tm_isdst; }; struct rtc_wkalrm { unsigned char enabled; unsigned char pending; struct rtc_time time; }; struct rtc_param { __u64 param; union { __u64 uvalue; __s64 svalue; __u64 ptr; }; __u32 index; __u32 __pad; }; enum tick_device_mode { TICKDEV_MODE_PERIODIC = 0, TICKDEV_MODE_ONESHOT = 1, }; enum tick_nohz_mode { NOHZ_MODE_INACTIVE = 0, NOHZ_MODE_LOWRES = 1, NOHZ_MODE_HIGHRES = 2, }; struct tick_device { struct clock_event_device *evtdev; enum tick_device_mode mode; }; struct tick_sched { unsigned int inidle: 1; unsigned int tick_stopped: 1; unsigned int idle_active: 1; unsigned int do_timer_last: 1; unsigned int got_idle_tick: 1; unsigned int stalled_jiffies; unsigned long last_tick_jiffies; struct hrtimer sched_timer; ktime_t last_tick; ktime_t next_tick; unsigned long idle_jiffies; ktime_t idle_waketime; seqcount_t idle_sleeptime_seq; ktime_t idle_entrytime; enum tick_nohz_mode nohz_mode; unsigned long last_jiffies; u64 timer_expires_base; u64 timer_expires; u64 next_timer; ktime_t idle_expires; unsigned long idle_calls; unsigned long idle_sleeps; ktime_t idle_exittime; ktime_t idle_sleeptime; ktime_t iowait_sleeptime; atomic_t tick_dep_mask; unsigned long check_clocks; }; struct timer_list_iter { int cpu; bool second_pass; u64 now; }; struct tm { int tm_sec; int tm_min; int tm_hour; int tm_mday; int tm_mon; long tm_year; int tm_wday; int tm_yday; }; typedef void (*btf_trace_alarmtimer_suspend)(void *, ktime_t, int); struct alarm; typedef void (*btf_trace_alarmtimer_fired)(void *, struct alarm *, ktime_t); enum alarmtimer_restart { ALARMTIMER_NORESTART = 0, ALARMTIMER_RESTART = 1, }; enum alarmtimer_type { ALARM_REALTIME = 0, ALARM_BOOTTIME = 1, ALARM_NUMTYPE = 2, ALARM_REALTIME_FREEZER = 3, ALARM_BOOTTIME_FREEZER = 4, }; struct alarm { struct timerqueue_node node; struct hrtimer timer; enum alarmtimer_restart (*function)(struct alarm *, ktime_t); enum alarmtimer_type type; int state; void *data; }; typedef void (*btf_trace_alarmtimer_start)(void *, struct alarm *, ktime_t); typedef void (*btf_trace_alarmtimer_cancel)(void *, struct alarm *, ktime_t); struct k_itimer; struct k_clock { int (*clock_getres)(const clockid_t, struct timespec64 *); int (*clock_set)(const clockid_t, const struct timespec64 *); int (*clock_get_timespec)(const clockid_t, struct timespec64 *); ktime_t (*clock_get_ktime)(const clockid_t); int (*clock_adj)(const clockid_t, struct __kernel_timex *); int (*timer_create)(struct k_itimer *); int (*nsleep)(const clockid_t, int, const struct timespec64 *); int (*timer_set)(struct k_itimer *, int, struct itimerspec64 *, struct itimerspec64 *); int 
(*timer_del)(struct k_itimer *); void (*timer_get)(struct k_itimer *, struct itimerspec64 *); void (*timer_rearm)(struct k_itimer *); s64 (*timer_forward)(struct k_itimer *, ktime_t); ktime_t (*timer_remaining)(struct k_itimer *, ktime_t); int (*timer_try_to_cancel)(struct k_itimer *); void (*timer_arm)(struct k_itimer *, ktime_t, bool, bool); void (*timer_wait_running)(struct k_itimer *); }; struct cpu_timer { struct timerqueue_node node; struct timerqueue_head *head; struct pid *pid; struct list_head elist; int firing; struct task_struct __attribute__((btf_type_tag("rcu"))) *handling; }; typedef __kernel_timer_t timer_t; struct k_itimer { struct list_head list; struct hlist_node t_hash; spinlock_t it_lock; const struct k_clock *kclock; clockid_t it_clock; timer_t it_id; int it_active; s64 it_overrun; s64 it_overrun_last; int it_requeue_pending; int it_sigev_notify; ktime_t it_interval; struct signal_struct *it_signal; union { struct pid *it_pid; struct task_struct *it_process; }; struct sigqueue *sigq; union { struct { struct hrtimer timer; } real; struct cpu_timer cpu; struct { struct alarm alarmtimer; } alarm; } it; struct callback_head rcu; }; struct alarm_base { spinlock_t lock; struct timerqueue_head timerqueue; ktime_t (*get_ktime)(); void (*get_timespec)(struct timespec64 *); clockid_t base_clockid; }; struct class_interface { struct list_head node; const struct class *class; int (*add_dev)(struct device *); void (*remove_dev)(struct device *); }; struct platform_driver { int (*probe)(struct platform_device *); int (*remove)(struct platform_device *); void (*remove_new)(struct platform_device *); void (*shutdown)(struct platform_device *); int (*suspend)(struct platform_device *, pm_message_t); int (*resume)(struct platform_device *); struct device_driver driver; const struct platform_device_id *id_table; bool prevent_deferred_probe; bool driver_managed_dma; u64 android_kabi_reserved1; }; struct trace_event_raw_alarmtimer_suspend { struct trace_entry ent; s64 expires; unsigned char alarm_type; char __data[0]; }; struct trace_event_raw_alarm_class { struct trace_entry ent; void *alarm; unsigned char alarm_type; s64 expires; s64 now; char __data[0]; }; struct property_entry; struct platform_device_info { struct device *parent; struct fwnode_handle *fwnode; bool of_node_reused; const char *name; int id; const struct resource *res; unsigned int num_res; const void *data; size_t size_data; u64 dma_mask; const struct property_entry *properties; u64 android_kabi_reserved1; }; enum dev_prop_type { DEV_PROP_U8 = 0, DEV_PROP_U16 = 1, DEV_PROP_U32 = 2, DEV_PROP_U64 = 3, DEV_PROP_STRING = 4, DEV_PROP_REF = 5, }; struct property_entry { const char *name; size_t length; bool is_inline; enum dev_prop_type type; union { const void *pointer; union { u8 u8_data[8]; u16 u16_data[4]; u32 u32_data[2]; u64 u64_data[1]; const char *str[1]; } value; }; }; struct trace_event_data_offsets_alarmtimer_suspend {}; struct trace_event_data_offsets_alarm_class {}; struct sigevent { sigval_t sigev_value; int sigev_signo; int sigev_notify; union { int _pad[12]; int _tid; struct { void (*_function)(sigval_t); void *_attribute; } _sigev_thread; } _sigev_un; }; struct compat_sigevent { compat_sigval_t sigev_value; compat_int_t sigev_signo; compat_int_t sigev_notify; union { compat_int_t _pad[13]; compat_int_t _tid; struct { compat_uptr_t _function; compat_uptr_t _attribute; } _sigev_thread; } _sigev_un; }; typedef struct sigevent sigevent_t; struct posix_clock; struct posix_clock_operations { struct module *owner; 
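/*
 * posix_clock_operations (continuing below) are the callbacks a dynamic POSIX
 * clock character device provides, e.g. a PTP hardware clock exposed as
 * /dev/ptpN; struct posix_clock pairs these ops with its cdev and a zombie
 * flag that is set once the backing device disappears.
 */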
int (*clock_adjtime)(struct posix_clock *, struct __kernel_timex *); int (*clock_gettime)(struct posix_clock *, struct timespec64 *); int (*clock_getres)(struct posix_clock *, struct timespec64 *); int (*clock_settime)(struct posix_clock *, const struct timespec64 *); long (*ioctl)(struct posix_clock *, unsigned int, unsigned long); int (*open)(struct posix_clock *, fmode_t); __poll_t (*poll)(struct posix_clock *, struct file *, poll_table *); int (*release)(struct posix_clock *); ssize_t (*read)(struct posix_clock *, uint, char __attribute__((btf_type_tag("user"))) *, size_t); }; struct posix_clock { struct posix_clock_operations ops; struct cdev cdev; struct device *dev; struct rw_semaphore rwsem; bool zombie; }; struct posix_clock_desc { struct file *fp; struct posix_clock *clk; }; struct __kernel_old_itimerval { struct __kernel_old_timeval it_interval; struct __kernel_old_timeval it_value; }; struct old_itimerval32 { struct old_timeval32 it_interval; struct old_timeval32 it_value; }; typedef s64 int64_t; struct ce_unbind { struct clock_event_device *ce; int res; }; enum tick_broadcast_state { TICK_BROADCAST_EXIT = 0, TICK_BROADCAST_ENTER = 1, }; enum tick_broadcast_mode { TICK_BROADCAST_OFF = 0, TICK_BROADCAST_ON = 1, TICK_BROADCAST_FORCE = 2, }; struct clock_read_data { u64 epoch_ns; u64 epoch_cyc; u64 sched_clock_mask; u64 (*read_sched_clock)(); u32 mult; u32 shift; }; struct clock_data { seqcount_latch_t seq; struct clock_read_data read_data[2]; ktime_t wrap_kt; unsigned long rate; u64 (*actual_read_sched_clock)(); }; struct proc_timens_offset { int clockid; struct timespec64 val; }; struct futex_hash_bucket { atomic_t waiters; spinlock_t lock; struct plist_head chain; long: 64; long: 64; long: 64; long: 64; long: 64; }; enum futex_access { FUTEX_READ = 0, FUTEX_WRITE = 1, }; union futex_key { struct { u64 i_seq; unsigned long pgoff; unsigned int offset; } shared; struct { union { struct mm_struct *mm; u64 __tmp; }; unsigned long address; unsigned int offset; } private; struct { u64 ptr; unsigned long word; unsigned int offset; } both; }; struct futex_pi_state { struct list_head list; struct rt_mutex_base pi_mutex; struct task_struct *owner; refcount_t refcount; union futex_key key; }; struct futex_q { struct plist_node list; struct task_struct *task; spinlock_t *lock_ptr; union futex_key key; struct futex_pi_state *pi_state; struct rt_mutex_waiter *rt_waiter; union futex_key *requeue_pi_key; u32 bitset; atomic_t requeue_state; }; struct futex_waitv { __u64 val; __u64 uaddr; __u32 flags; __u32 __reserved; }; struct futex_vector { struct futex_waitv w; struct futex_q q; }; enum { Q_REQUEUE_PI_NONE = 0, Q_REQUEUE_PI_IGNORE = 1, Q_REQUEUE_PI_IN_PROGRESS = 2, Q_REQUEUE_PI_WAIT = 3, Q_REQUEUE_PI_DONE = 4, Q_REQUEUE_PI_LOCKED = 5, }; typedef void (*btf_trace_csd_queue_cpu)(void *, const unsigned int, unsigned long, smp_call_func_t, struct __call_single_data *); typedef void (*btf_trace_csd_function_entry)(void *, smp_call_func_t, struct __call_single_data *); typedef void (*btf_trace_csd_function_exit)(void *, smp_call_func_t, struct __call_single_data *); struct call_function_data { call_single_data_t __attribute__((btf_type_tag("percpu"))) *csd; cpumask_var_t cpumask; cpumask_var_t cpumask_ipi; }; struct trace_event_raw_csd_queue_cpu { struct trace_entry ent; unsigned int cpu; void *callsite; void *func; void *csd; char __data[0]; }; struct trace_event_raw_csd_function { struct trace_entry ent; void *func; void *csd; char __data[0]; }; struct smp_call_on_cpu_struct { struct work_struct 
work; struct completion done; int (*func)(void *); void *data; int ret; int cpu; }; struct trace_event_data_offsets_csd_queue_cpu {}; struct trace_event_data_offsets_csd_function {}; typedef unsigned short __kernel_old_uid_t; typedef __kernel_old_uid_t old_uid_t; typedef unsigned short __kernel_old_gid_t; typedef __kernel_old_gid_t old_gid_t; enum pkey_id_type { PKEY_ID_PGP = 0, PKEY_ID_X509 = 1, PKEY_ID_PKCS7 = 2, }; union bpf_iter_link_info; typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *, union bpf_iter_link_info *, struct bpf_iter_aux_info *); typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *); typedef void (*bpf_iter_show_fdinfo_t)(const struct bpf_iter_aux_info *, struct seq_file *); typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *, struct bpf_link_info *); enum bpf_func_id { BPF_FUNC_unspec = 0, BPF_FUNC_map_lookup_elem = 1, BPF_FUNC_map_update_elem = 2, BPF_FUNC_map_delete_elem = 3, BPF_FUNC_probe_read = 4, BPF_FUNC_ktime_get_ns = 5, BPF_FUNC_trace_printk = 6, BPF_FUNC_get_prandom_u32 = 7, BPF_FUNC_get_smp_processor_id = 8, BPF_FUNC_skb_store_bytes = 9, BPF_FUNC_l3_csum_replace = 10, BPF_FUNC_l4_csum_replace = 11, BPF_FUNC_tail_call = 12, BPF_FUNC_clone_redirect = 13, BPF_FUNC_get_current_pid_tgid = 14, BPF_FUNC_get_current_uid_gid = 15, BPF_FUNC_get_current_comm = 16, BPF_FUNC_get_cgroup_classid = 17, BPF_FUNC_skb_vlan_push = 18, BPF_FUNC_skb_vlan_pop = 19, BPF_FUNC_skb_get_tunnel_key = 20, BPF_FUNC_skb_set_tunnel_key = 21, BPF_FUNC_perf_event_read = 22, BPF_FUNC_redirect = 23, BPF_FUNC_get_route_realm = 24, BPF_FUNC_perf_event_output = 25, BPF_FUNC_skb_load_bytes = 26, BPF_FUNC_get_stackid = 27, BPF_FUNC_csum_diff = 28, BPF_FUNC_skb_get_tunnel_opt = 29, BPF_FUNC_skb_set_tunnel_opt = 30, BPF_FUNC_skb_change_proto = 31, BPF_FUNC_skb_change_type = 32, BPF_FUNC_skb_under_cgroup = 33, BPF_FUNC_get_hash_recalc = 34, BPF_FUNC_get_current_task = 35, BPF_FUNC_probe_write_user = 36, BPF_FUNC_current_task_under_cgroup = 37, BPF_FUNC_skb_change_tail = 38, BPF_FUNC_skb_pull_data = 39, BPF_FUNC_csum_update = 40, BPF_FUNC_set_hash_invalid = 41, BPF_FUNC_get_numa_node_id = 42, BPF_FUNC_skb_change_head = 43, BPF_FUNC_xdp_adjust_head = 44, BPF_FUNC_probe_read_str = 45, BPF_FUNC_get_socket_cookie = 46, BPF_FUNC_get_socket_uid = 47, BPF_FUNC_set_hash = 48, BPF_FUNC_setsockopt = 49, BPF_FUNC_skb_adjust_room = 50, BPF_FUNC_redirect_map = 51, BPF_FUNC_sk_redirect_map = 52, BPF_FUNC_sock_map_update = 53, BPF_FUNC_xdp_adjust_meta = 54, BPF_FUNC_perf_event_read_value = 55, BPF_FUNC_perf_prog_read_value = 56, BPF_FUNC_getsockopt = 57, BPF_FUNC_override_return = 58, BPF_FUNC_sock_ops_cb_flags_set = 59, BPF_FUNC_msg_redirect_map = 60, BPF_FUNC_msg_apply_bytes = 61, BPF_FUNC_msg_cork_bytes = 62, BPF_FUNC_msg_pull_data = 63, BPF_FUNC_bind = 64, BPF_FUNC_xdp_adjust_tail = 65, BPF_FUNC_skb_get_xfrm_state = 66, BPF_FUNC_get_stack = 67, BPF_FUNC_skb_load_bytes_relative = 68, BPF_FUNC_fib_lookup = 69, BPF_FUNC_sock_hash_update = 70, BPF_FUNC_msg_redirect_hash = 71, BPF_FUNC_sk_redirect_hash = 72, BPF_FUNC_lwt_push_encap = 73, BPF_FUNC_lwt_seg6_store_bytes = 74, BPF_FUNC_lwt_seg6_adjust_srh = 75, BPF_FUNC_lwt_seg6_action = 76, BPF_FUNC_rc_repeat = 77, BPF_FUNC_rc_keydown = 78, BPF_FUNC_skb_cgroup_id = 79, BPF_FUNC_get_current_cgroup_id = 80, BPF_FUNC_get_local_storage = 81, BPF_FUNC_sk_select_reuseport = 82, BPF_FUNC_skb_ancestor_cgroup_id = 83, BPF_FUNC_sk_lookup_tcp = 84, BPF_FUNC_sk_lookup_udp = 85, BPF_FUNC_sk_release = 86, BPF_FUNC_map_push_elem = 87, 
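/*
 * bpf_func_id (continuing below) enumerates the stable BPF helper numbers;
 * a helper call is a BPF_CALL instruction whose imm field holds this value.
 * Programs normally use the typed wrappers from libbpf's bpf_helper_defs.h,
 * e.g. (sketch, assuming libbpf headers are included):
 *     u64 ts = bpf_ktime_get_ns();   // emits a call to BPF_FUNC_ktime_get_ns
 */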
BPF_FUNC_map_pop_elem = 88, BPF_FUNC_map_peek_elem = 89, BPF_FUNC_msg_push_data = 90, BPF_FUNC_msg_pop_data = 91, BPF_FUNC_rc_pointer_rel = 92, BPF_FUNC_spin_lock = 93, BPF_FUNC_spin_unlock = 94, BPF_FUNC_sk_fullsock = 95, BPF_FUNC_tcp_sock = 96, BPF_FUNC_skb_ecn_set_ce = 97, BPF_FUNC_get_listener_sock = 98, BPF_FUNC_skc_lookup_tcp = 99, BPF_FUNC_tcp_check_syncookie = 100, BPF_FUNC_sysctl_get_name = 101, BPF_FUNC_sysctl_get_current_value = 102, BPF_FUNC_sysctl_get_new_value = 103, BPF_FUNC_sysctl_set_new_value = 104, BPF_FUNC_strtol = 105, BPF_FUNC_strtoul = 106, BPF_FUNC_sk_storage_get = 107, BPF_FUNC_sk_storage_delete = 108, BPF_FUNC_send_signal = 109, BPF_FUNC_tcp_gen_syncookie = 110, BPF_FUNC_skb_output = 111, BPF_FUNC_probe_read_user = 112, BPF_FUNC_probe_read_kernel = 113, BPF_FUNC_probe_read_user_str = 114, BPF_FUNC_probe_read_kernel_str = 115, BPF_FUNC_tcp_send_ack = 116, BPF_FUNC_send_signal_thread = 117, BPF_FUNC_jiffies64 = 118, BPF_FUNC_read_branch_records = 119, BPF_FUNC_get_ns_current_pid_tgid = 120, BPF_FUNC_xdp_output = 121, BPF_FUNC_get_netns_cookie = 122, BPF_FUNC_get_current_ancestor_cgroup_id = 123, BPF_FUNC_sk_assign = 124, BPF_FUNC_ktime_get_boot_ns = 125, BPF_FUNC_seq_printf = 126, BPF_FUNC_seq_write = 127, BPF_FUNC_sk_cgroup_id = 128, BPF_FUNC_sk_ancestor_cgroup_id = 129, BPF_FUNC_ringbuf_output = 130, BPF_FUNC_ringbuf_reserve = 131, BPF_FUNC_ringbuf_submit = 132, BPF_FUNC_ringbuf_discard = 133, BPF_FUNC_ringbuf_query = 134, BPF_FUNC_csum_level = 135, BPF_FUNC_skc_to_tcp6_sock = 136, BPF_FUNC_skc_to_tcp_sock = 137, BPF_FUNC_skc_to_tcp_timewait_sock = 138, BPF_FUNC_skc_to_tcp_request_sock = 139, BPF_FUNC_skc_to_udp6_sock = 140, BPF_FUNC_get_task_stack = 141, BPF_FUNC_load_hdr_opt = 142, BPF_FUNC_store_hdr_opt = 143, BPF_FUNC_reserve_hdr_opt = 144, BPF_FUNC_inode_storage_get = 145, BPF_FUNC_inode_storage_delete = 146, BPF_FUNC_d_path = 147, BPF_FUNC_copy_from_user = 148, BPF_FUNC_snprintf_btf = 149, BPF_FUNC_seq_printf_btf = 150, BPF_FUNC_skb_cgroup_classid = 151, BPF_FUNC_redirect_neigh = 152, BPF_FUNC_per_cpu_ptr = 153, BPF_FUNC_this_cpu_ptr = 154, BPF_FUNC_redirect_peer = 155, BPF_FUNC_task_storage_get = 156, BPF_FUNC_task_storage_delete = 157, BPF_FUNC_get_current_task_btf = 158, BPF_FUNC_bprm_opts_set = 159, BPF_FUNC_ktime_get_coarse_ns = 160, BPF_FUNC_ima_inode_hash = 161, BPF_FUNC_sock_from_file = 162, BPF_FUNC_check_mtu = 163, BPF_FUNC_for_each_map_elem = 164, BPF_FUNC_snprintf = 165, BPF_FUNC_sys_bpf = 166, BPF_FUNC_btf_find_by_name_kind = 167, BPF_FUNC_sys_close = 168, BPF_FUNC_timer_init = 169, BPF_FUNC_timer_set_callback = 170, BPF_FUNC_timer_start = 171, BPF_FUNC_timer_cancel = 172, BPF_FUNC_get_func_ip = 173, BPF_FUNC_get_attach_cookie = 174, BPF_FUNC_task_pt_regs = 175, BPF_FUNC_get_branch_snapshot = 176, BPF_FUNC_trace_vprintk = 177, BPF_FUNC_skc_to_unix_sock = 178, BPF_FUNC_kallsyms_lookup_name = 179, BPF_FUNC_find_vma = 180, BPF_FUNC_loop = 181, BPF_FUNC_strncmp = 182, BPF_FUNC_get_func_arg = 183, BPF_FUNC_get_func_ret = 184, BPF_FUNC_get_func_arg_cnt = 185, BPF_FUNC_get_retval = 186, BPF_FUNC_set_retval = 187, BPF_FUNC_xdp_get_buff_len = 188, BPF_FUNC_xdp_load_bytes = 189, BPF_FUNC_xdp_store_bytes = 190, BPF_FUNC_copy_from_user_task = 191, BPF_FUNC_skb_set_tstamp = 192, BPF_FUNC_ima_file_hash = 193, BPF_FUNC_kptr_xchg = 194, BPF_FUNC_map_lookup_percpu_elem = 195, BPF_FUNC_skc_to_mptcp_sock = 196, BPF_FUNC_dynptr_from_mem = 197, BPF_FUNC_ringbuf_reserve_dynptr = 198, BPF_FUNC_ringbuf_submit_dynptr = 199, BPF_FUNC_ringbuf_discard_dynptr = 200, 
BPF_FUNC_dynptr_read = 201, BPF_FUNC_dynptr_write = 202, BPF_FUNC_dynptr_data = 203, BPF_FUNC_tcp_raw_gen_syncookie_ipv4 = 204, BPF_FUNC_tcp_raw_gen_syncookie_ipv6 = 205, BPF_FUNC_tcp_raw_check_syncookie_ipv4 = 206, BPF_FUNC_tcp_raw_check_syncookie_ipv6 = 207, BPF_FUNC_ktime_get_tai_ns = 208, BPF_FUNC_user_ringbuf_drain = 209, BPF_FUNC_cgrp_storage_get = 210, BPF_FUNC_cgrp_storage_delete = 211, __BPF_FUNC_MAX_ID = 212, }; struct bpf_func_proto; typedef const struct bpf_func_proto * (*bpf_iter_get_func_proto_t)(enum bpf_func_id, const struct bpf_prog *); struct bpf_iter_reg { const char *target; bpf_iter_attach_target_t attach_target; bpf_iter_detach_target_t detach_target; bpf_iter_show_fdinfo_t show_fdinfo; bpf_iter_fill_link_info_t fill_link_info; bpf_iter_get_func_proto_t get_func_proto; u32 ctx_arg_info_size; u32 feature; struct bpf_ctx_arg_aux ctx_arg_info[2]; const struct bpf_iter_seq_info *seq_info; }; union bpf_iter_link_info { struct { __u32 map_fd; } map; struct { enum bpf_cgroup_iter_order order; __u32 cgroup_fd; __u64 cgroup_id; } cgroup; struct { __u32 tid; __u32 pid; __u32 pid_fd; } task; }; enum bpf_return_type { RET_INTEGER = 0, RET_VOID = 1, RET_PTR_TO_MAP_VALUE = 2, RET_PTR_TO_SOCKET = 3, RET_PTR_TO_TCP_SOCK = 4, RET_PTR_TO_SOCK_COMMON = 5, RET_PTR_TO_MEM = 6, RET_PTR_TO_MEM_OR_BTF_ID = 7, RET_PTR_TO_BTF_ID = 8, __BPF_RET_TYPE_MAX = 9, RET_PTR_TO_MAP_VALUE_OR_NULL = 258, RET_PTR_TO_SOCKET_OR_NULL = 259, RET_PTR_TO_TCP_SOCK_OR_NULL = 260, RET_PTR_TO_SOCK_COMMON_OR_NULL = 261, RET_PTR_TO_RINGBUF_MEM_OR_NULL = 1286, RET_PTR_TO_DYNPTR_MEM_OR_NULL = 262, RET_PTR_TO_BTF_ID_OR_NULL = 264, RET_PTR_TO_BTF_ID_TRUSTED = 1048584, __BPF_RET_TYPE_LIMIT = 33554431, }; enum bpf_arg_type { ARG_DONTCARE = 0, ARG_CONST_MAP_PTR = 1, ARG_PTR_TO_MAP_KEY = 2, ARG_PTR_TO_MAP_VALUE = 3, ARG_PTR_TO_MEM = 4, ARG_CONST_SIZE = 5, ARG_CONST_SIZE_OR_ZERO = 6, ARG_PTR_TO_CTX = 7, ARG_ANYTHING = 8, ARG_PTR_TO_SPIN_LOCK = 9, ARG_PTR_TO_SOCK_COMMON = 10, ARG_PTR_TO_INT = 11, ARG_PTR_TO_LONG = 12, ARG_PTR_TO_SOCKET = 13, ARG_PTR_TO_BTF_ID = 14, ARG_PTR_TO_RINGBUF_MEM = 15, ARG_CONST_ALLOC_SIZE_OR_ZERO = 16, ARG_PTR_TO_BTF_ID_SOCK_COMMON = 17, ARG_PTR_TO_PERCPU_BTF_ID = 18, ARG_PTR_TO_FUNC = 19, ARG_PTR_TO_STACK = 20, ARG_PTR_TO_CONST_STR = 21, ARG_PTR_TO_TIMER = 22, ARG_PTR_TO_KPTR = 23, ARG_PTR_TO_DYNPTR = 24, __BPF_ARG_TYPE_MAX = 25, ARG_PTR_TO_MAP_VALUE_OR_NULL = 259, ARG_PTR_TO_MEM_OR_NULL = 260, ARG_PTR_TO_CTX_OR_NULL = 263, ARG_PTR_TO_SOCKET_OR_NULL = 269, ARG_PTR_TO_STACK_OR_NULL = 276, ARG_PTR_TO_BTF_ID_OR_NULL = 270, ARG_PTR_TO_UNINIT_MEM = 32772, ARG_PTR_TO_FIXED_SIZE_MEM = 262148, __BPF_ARG_TYPE_LIMIT = 33554431, }; struct bpf_func_proto { u64 (*func)(u64, u64, u64, u64, u64); bool gpl_only; bool pkt_access; bool might_sleep; enum bpf_return_type ret_type; union { struct { enum bpf_arg_type arg1_type; enum bpf_arg_type arg2_type; enum bpf_arg_type arg3_type; enum bpf_arg_type arg4_type; enum bpf_arg_type arg5_type; }; enum bpf_arg_type arg_type[5]; }; union { struct { u32 *arg1_btf_id; u32 *arg2_btf_id; u32 *arg3_btf_id; u32 *arg4_btf_id; u32 *arg5_btf_id; }; u32 *arg_btf_id[5]; struct { size_t arg1_size; size_t arg2_size; size_t arg3_size; size_t arg4_size; size_t arg5_size; }; size_t arg_size[5]; }; int *ret_btf_id; bool (*allowed)(const struct bpf_prog *); }; struct kallsym_iter { loff_t pos; loff_t pos_mod_end; loff_t pos_ftrace_mod_end; loff_t pos_bpf_end; unsigned long value; unsigned int nameoff; char type; char name[512]; char module_name[56]; int exported; int show_value; }; struct 
bpf_iter_meta; struct bpf_iter__ksym { union { struct bpf_iter_meta *meta; }; union { struct kallsym_iter *ksym; }; }; struct bpf_iter_meta { union { struct seq_file *seq; }; u64 session_id; u64 seq_num; }; struct cgroup_taskset { struct list_head src_csets; struct list_head dst_csets; int nr_tasks; int ssid; struct list_head *csets; struct css_set *cur_cset; struct task_struct *cur_task; }; struct bpf_cgroup_storage_key { __u64 cgroup_inode_id; __u32 attach_type; }; struct bpf_storage_buffer; struct bpf_cgroup_storage_map; struct bpf_cgroup_storage { union { struct bpf_storage_buffer *buf; void __attribute__((btf_type_tag("percpu"))) *percpu_buf; }; struct bpf_cgroup_storage_map *map; struct bpf_cgroup_storage_key key; struct list_head list_map; struct list_head list_cg; struct rb_node node; struct callback_head rcu; }; struct bpf_storage_buffer { struct callback_head rcu; char data[0]; }; typedef void (*btf_trace_cgroup_setup_root)(void *, struct cgroup_root *); typedef void (*btf_trace_cgroup_destroy_root)(void *, struct cgroup_root *); typedef void (*btf_trace_cgroup_remount)(void *, struct cgroup_root *); typedef void (*btf_trace_cgroup_mkdir)(void *, struct cgroup *, const char *); typedef void (*btf_trace_cgroup_rmdir)(void *, struct cgroup *, const char *); typedef void (*btf_trace_cgroup_release)(void *, struct cgroup *, const char *); typedef void (*btf_trace_cgroup_rename)(void *, struct cgroup *, const char *); typedef void (*btf_trace_cgroup_freeze)(void *, struct cgroup *, const char *); typedef void (*btf_trace_cgroup_unfreeze)(void *, struct cgroup *, const char *); typedef void (*btf_trace_cgroup_attach_task)(void *, struct cgroup *, const char *, struct task_struct *, bool); typedef void (*btf_trace_cgroup_transfer_tasks)(void *, struct cgroup *, const char *, struct task_struct *, bool); typedef void (*btf_trace_cgroup_notify_populated)(void *, struct cgroup *, const char *, int); typedef void (*btf_trace_cgroup_notify_frozen)(void *, struct cgroup *, const char *, int); struct kernfs_fs_context { struct kernfs_root *root; void *ns_tag; unsigned long magic; bool new_sb_created; }; struct cgroup_fs_context { struct kernfs_fs_context kfc; struct cgroup_root *root; struct cgroup_namespace *ns; unsigned int flags; bool cpuset_clone_children; bool none; bool all_ss; u16 subsys_mask; char *name; char *release_agent; }; struct kernfs_syscall_ops { int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; enum { CGRP_ROOT_NOPREFIX = 2, CGRP_ROOT_XATTR = 4, CGRP_ROOT_NS_DELEGATE = 8, CGRP_ROOT_FAVOR_DYNMODS = 16, CGRP_ROOT_CPUSET_V2_MODE = 65536, CGRP_ROOT_MEMORY_LOCAL_EVENTS = 131072, CGRP_ROOT_MEMORY_RECURSIVE_PROT = 262144, }; enum kernfs_node_type { KERNFS_DIR = 1, KERNFS_FILE = 2, KERNFS_LINK = 4, }; enum { CGRP_NOTIFY_ON_RELEASE = 0, CGRP_CPUSET_CLONE_CHILDREN = 1, CGRP_FREEZE = 2, CGRP_FROZEN = 3, CGRP_KILL = 4, }; enum kernfs_root_flag { KERNFS_ROOT_CREATE_DEACTIVATED = 1, KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 2, KERNFS_ROOT_SUPPORT_EXPORTOP = 4, KERNFS_ROOT_SUPPORT_USER_XATTR = 8, }; enum cgroup_opt_features { OPT_FEATURE_PRESSURE = 0, OPT_FEATURE_COUNT = 1, }; enum { CFTYPE_ONLY_ON_ROOT = 1, CFTYPE_NOT_ON_ROOT 
= 2, CFTYPE_NS_DELEGATABLE = 4, CFTYPE_NO_PREFIX = 8, CFTYPE_WORLD_WRITABLE = 16, CFTYPE_DEBUG = 32, __CFTYPE_ONLY_ON_DFL = 65536, __CFTYPE_NOT_ON_DFL = 131072, __CFTYPE_ADDED = 262144, }; enum cgroup2_param { Opt_nsdelegate = 0, Opt_favordynmods = 1, Opt_memory_localevents = 2, Opt_memory_recursiveprot = 3, nr__cgroup2_params = 4, }; struct cgrp_cset_link { struct cgroup *cgrp; struct css_set *cset; struct list_head cset_link; struct list_head cgrp_link; }; struct trace_event_raw_cgroup_root { struct trace_entry ent; int root; u16 ss_mask; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_cgroup { struct trace_entry ent; int root; int level; u64 id; u32 __data_loc_path; char __data[0]; }; struct trace_event_raw_cgroup_migrate { struct trace_entry ent; int dst_root; int dst_level; u64 dst_id; int pid; u32 __data_loc_dst_path; u32 __data_loc_comm; char __data[0]; }; struct trace_event_raw_cgroup_event { struct trace_entry ent; int root; int level; u64 id; u32 __data_loc_path; int val; char __data[0]; }; struct trace_event_data_offsets_cgroup_root { u32 name; }; struct trace_event_data_offsets_cgroup { u32 path; }; struct trace_event_data_offsets_cgroup_event { u32 path; }; struct cgroup_mgctx { struct list_head preloaded_src_csets; struct list_head preloaded_dst_csets; struct cgroup_taskset tset; u16 ss_mask; }; struct cgroup_pidlist; struct cgroup_file_ctx { struct cgroup_namespace *ns; struct { void *trigger; } psi; struct { bool started; struct css_task_iter iter; } procs; struct { struct cgroup_pidlist *pidlist; } procs1; }; struct trace_event_data_offsets_cgroup_migrate { u32 dst_path; u32 comm; }; typedef int (*btf_kfunc_filter_t)(const struct bpf_prog *, u32); struct btf_id_set8; struct btf_kfunc_id_set { struct module *owner; struct btf_id_set8 *set; btf_kfunc_filter_t filter; }; struct btf_id_set8 { u32 cnt; u32 flags; struct { u32 id; u32 flags; } pairs[0]; }; enum cgroup_filetype { CGROUP_FILE_PROCS = 0, CGROUP_FILE_TASKS = 1, }; enum cgroup1_param { Opt_all = 0, Opt_clone_children = 1, Opt_cpuset_v2_mode = 2, Opt_name = 3, Opt_none = 4, Opt_noprefix = 5, Opt_release_agent = 6, Opt_xattr = 7, Opt_favordynmods___2 = 8, Opt_nofavordynmods = 9, }; struct cgroup_pidlist { struct { enum cgroup_filetype type; struct pid_namespace *ns; } key; pid_t *list; int length; struct list_head links; struct cgroup *owner; struct delayed_work destroy_dwork; }; struct cgroupstats { __u64 nr_sleeping; __u64 nr_running; __u64 nr_stopped; __u64 nr_uninterruptible; __u64 nr_io_wait; }; enum freezer_state_flags { CGROUP_FREEZER_ONLINE = 1, CGROUP_FREEZING_SELF = 2, CGROUP_FREEZING_PARENT = 4, CGROUP_FROZEN = 8, CGROUP_FREEZING = 6, }; struct freezer { struct cgroup_subsys_state css; unsigned int state; }; struct fmeter { int cnt; int val; time64_t time; spinlock_t lock; }; enum prs_errcode { PERR_NONE = 0, PERR_INVCPUS = 1, PERR_INVPARENT = 2, PERR_NOTPART = 3, PERR_NOTEXCL = 4, PERR_NOCPUS = 5, PERR_HOTPLUG = 6, PERR_CPUSEMPTY = 7, }; struct cpuset { struct cgroup_subsys_state css; unsigned long flags; cpumask_var_t cpus_allowed; cpumask_var_t cpus_requested; nodemask_t mems_allowed; cpumask_var_t effective_cpus; nodemask_t effective_mems; cpumask_var_t subparts_cpus; nodemask_t old_mems_allowed; struct fmeter fmeter; int attach_in_progress; int pn; int relax_domain_level; int nr_subparts_cpus; int partition_root_state; int use_parent_ecpus; int child_ecpus_count; int nr_deadline_tasks; int nr_migrate_dl_tasks; u64 sum_migrate_dl_bw; enum prs_errcode prs_err; struct cgroup_file 
partition_file; }; enum subparts_cmd { partcmd_enable = 0, partcmd_disable = 1, partcmd_update = 2, partcmd_invalidate = 3, }; enum { ZONELIST_FALLBACK = 0, MAX_ZONELISTS = 1, }; struct cpuset_migrate_mm_work { struct work_struct work; struct mm_struct *mm; nodemask_t from; nodemask_t to; }; struct tmpmasks { cpumask_var_t addmask; cpumask_var_t delmask; cpumask_var_t new_cpus; }; typedef enum { CS_ONLINE = 0, CS_CPU_EXCLUSIVE = 1, CS_MEM_EXCLUSIVE = 2, CS_MEM_HARDWALL = 3, CS_MEMORY_MIGRATE = 4, CS_SCHED_LOAD_BALANCE = 5, CS_SPREAD_PAGE = 6, CS_SPREAD_SLAB = 7, } cpuset_flagbits_t; typedef enum { FILE_MEMORY_MIGRATE = 0, FILE_CPULIST = 1, FILE_MEMLIST = 2, FILE_EFFECTIVE_CPULIST = 3, FILE_EFFECTIVE_MEMLIST = 4, FILE_SUBPARTS_CPULIST = 5, FILE_CPU_EXCLUSIVE = 6, FILE_MEM_EXCLUSIVE = 7, FILE_MEM_HARDWALL = 8, FILE_SCHED_LOAD_BALANCE = 9, FILE_PARTITION_ROOT = 10, FILE_SCHED_RELAX_DOMAIN_LEVEL = 11, FILE_MEMORY_PRESSURE_ENABLED = 12, FILE_MEMORY_PRESSURE = 13, FILE_SPREAD_PAGE = 14, FILE_SPREAD_SLAB = 15, } cpuset_filetype_t; struct cpu_stopper { struct task_struct *thread; raw_spinlock_t lock; bool enabled; struct list_head works; struct cpu_stop_work stop_work; unsigned long caller; cpu_stop_fn_t fn; }; struct cpu_stop_done { atomic_t nr_todo; int ret; struct completion completion; }; enum multi_stop_state { MULTI_STOP_NONE = 0, MULTI_STOP_PREPARE = 1, MULTI_STOP_DISABLE_IRQ = 2, MULTI_STOP_RUN = 3, MULTI_STOP_EXIT = 4, }; struct multi_stop_data { cpu_stop_fn_t fn; void *data; unsigned int num_threads; const struct cpumask *active_cpus; enum multi_stop_state state; atomic_t thread_ack; }; struct auditd_connection { struct pid *pid; u32 portid; struct net *net; struct callback_head rcu; }; typedef int __kernel_mqd_t; typedef __kernel_mqd_t mqd_t; struct mq_attr { __kernel_long_t mq_flags; __kernel_long_t mq_maxmsg; __kernel_long_t mq_msgsize; __kernel_long_t mq_curmsgs; __kernel_long_t __reserved[4]; }; struct audit_cap_data { kernel_cap_t permitted; kernel_cap_t inheritable; union { unsigned int fE; kernel_cap_t effective; }; kernel_cap_t ambient; kuid_t rootid; }; struct open_how { __u64 flags; __u64 mode; __u64 resolve; }; enum audit_state { AUDIT_STATE_DISABLED = 0, AUDIT_STATE_BUILD = 1, AUDIT_STATE_RECORD = 2, }; struct audit_names { struct list_head list; struct filename *name; int name_len; bool hidden; unsigned long ino; dev_t dev; umode_t mode; kuid_t uid; kgid_t gid; dev_t rdev; u32 osid; struct audit_cap_data fcap; unsigned int fcap_ver; unsigned char type; bool should_free; }; struct audit_proctitle { int len; char *value; }; struct audit_aux_data; struct __kernel_sockaddr_storage; struct audit_tree_refs; struct audit_context { int dummy; enum { AUDIT_CTX_UNUSED = 0, AUDIT_CTX_SYSCALL = 1, AUDIT_CTX_URING = 2, } context; enum audit_state state; enum audit_state current_state; unsigned int serial; int major; int uring_op; struct timespec64 ctime; unsigned long argv[4]; long return_code; u64 prio; int return_valid; struct audit_names preallocated_names[5]; int name_count; struct list_head names_list; char *filterkey; struct path pwd; struct audit_aux_data *aux; struct audit_aux_data *aux_pids; struct __kernel_sockaddr_storage *sockaddr; size_t sockaddr_len; pid_t ppid; kuid_t uid; kuid_t euid; kuid_t suid; kuid_t fsuid; kgid_t gid; kgid_t egid; kgid_t sgid; kgid_t fsgid; unsigned long personality; int arch; pid_t target_pid; kuid_t target_auid; kuid_t target_uid; unsigned int target_sessionid; u32 target_sid; char target_comm[16]; struct audit_tree_refs *trees; struct 
audit_tree_refs *first_trees; struct list_head killed_trees; int tree_count; int type; union { struct { int nargs; long args[6]; } socketcall; struct { kuid_t uid; kgid_t gid; umode_t mode; u32 osid; int has_perm; uid_t perm_uid; gid_t perm_gid; umode_t perm_mode; unsigned long qbytes; } ipc; struct { mqd_t mqdes; struct mq_attr mqstat; } mq_getsetattr; struct { mqd_t mqdes; int sigev_signo; } mq_notify; struct { mqd_t mqdes; size_t msg_len; unsigned int msg_prio; struct timespec64 abs_timeout; } mq_sendrecv; struct { int oflag; umode_t mode; struct mq_attr attr; } mq_open; struct { pid_t pid; struct audit_cap_data cap; } capset; struct { int fd; int flags; } mmap; struct open_how openat2; struct { int argc; } execve; struct { char *name; } module; struct { struct audit_ntp_data ntp_data; struct timespec64 tk_injoffset; } time; }; int fds[2]; struct audit_proctitle proctitle; }; struct __kernel_sockaddr_storage { union { struct { __kernel_sa_family_t ss_family; char __data[126]; }; void *__align; }; }; struct audit_ctl_mutex { struct mutex lock; void *owner; }; struct pernet_operations { struct list_head list; int (*init)(struct net *); void (*pre_exit)(struct net *); void (*exit)(struct net *); void (*exit_batch)(struct list_head *); unsigned int *id; size_t size; }; struct audit_features { __u32 vers; __u32 mask; __u32 features; __u32 lock; }; enum skb_drop_reason { SKB_NOT_DROPPED_YET = 0, SKB_CONSUMED = 1, SKB_DROP_REASON_NOT_SPECIFIED = 2, SKB_DROP_REASON_NO_SOCKET = 3, SKB_DROP_REASON_PKT_TOO_SMALL = 4, SKB_DROP_REASON_TCP_CSUM = 5, SKB_DROP_REASON_SOCKET_FILTER = 6, SKB_DROP_REASON_UDP_CSUM = 7, SKB_DROP_REASON_NETFILTER_DROP = 8, SKB_DROP_REASON_OTHERHOST = 9, SKB_DROP_REASON_IP_CSUM = 10, SKB_DROP_REASON_IP_INHDR = 11, SKB_DROP_REASON_IP_RPFILTER = 12, SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST = 13, SKB_DROP_REASON_XFRM_POLICY = 14, SKB_DROP_REASON_IP_NOPROTO = 15, SKB_DROP_REASON_SOCKET_RCVBUFF = 16, SKB_DROP_REASON_PROTO_MEM = 17, SKB_DROP_REASON_TCP_MD5NOTFOUND = 18, SKB_DROP_REASON_TCP_MD5UNEXPECTED = 19, SKB_DROP_REASON_TCP_MD5FAILURE = 20, SKB_DROP_REASON_SOCKET_BACKLOG = 21, SKB_DROP_REASON_TCP_FLAGS = 22, SKB_DROP_REASON_TCP_ZEROWINDOW = 23, SKB_DROP_REASON_TCP_OLD_DATA = 24, SKB_DROP_REASON_TCP_OVERWINDOW = 25, SKB_DROP_REASON_TCP_OFOMERGE = 26, SKB_DROP_REASON_TCP_RFC7323_PAWS = 27, SKB_DROP_REASON_TCP_OLD_SEQUENCE = 28, SKB_DROP_REASON_TCP_INVALID_SEQUENCE = 29, SKB_DROP_REASON_TCP_RESET = 30, SKB_DROP_REASON_TCP_INVALID_SYN = 31, SKB_DROP_REASON_TCP_CLOSE = 32, SKB_DROP_REASON_TCP_FASTOPEN = 33, SKB_DROP_REASON_TCP_OLD_ACK = 34, SKB_DROP_REASON_TCP_TOO_OLD_ACK = 35, SKB_DROP_REASON_TCP_ACK_UNSENT_DATA = 36, SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE = 37, SKB_DROP_REASON_TCP_OFO_DROP = 38, SKB_DROP_REASON_IP_OUTNOROUTES = 39, SKB_DROP_REASON_BPF_CGROUP_EGRESS = 40, SKB_DROP_REASON_IPV6DISABLED = 41, SKB_DROP_REASON_NEIGH_CREATEFAIL = 42, SKB_DROP_REASON_NEIGH_FAILED = 43, SKB_DROP_REASON_NEIGH_QUEUEFULL = 44, SKB_DROP_REASON_NEIGH_DEAD = 45, SKB_DROP_REASON_TC_EGRESS = 46, SKB_DROP_REASON_QDISC_DROP = 47, SKB_DROP_REASON_CPU_BACKLOG = 48, SKB_DROP_REASON_XDP = 49, SKB_DROP_REASON_TC_INGRESS = 50, SKB_DROP_REASON_UNHANDLED_PROTO = 51, SKB_DROP_REASON_SKB_CSUM = 52, SKB_DROP_REASON_SKB_GSO_SEG = 53, SKB_DROP_REASON_SKB_UCOPY_FAULT = 54, SKB_DROP_REASON_DEV_HDR = 55, SKB_DROP_REASON_DEV_READY = 56, SKB_DROP_REASON_FULL_RING = 57, SKB_DROP_REASON_NOMEM = 58, SKB_DROP_REASON_HDR_TRUNC = 59, SKB_DROP_REASON_TAP_FILTER = 60, SKB_DROP_REASON_TAP_TXFILTER = 61, SKB_DROP_REASON_ICMP_CSUM 
= 62, SKB_DROP_REASON_INVALID_PROTO = 63, SKB_DROP_REASON_IP_INADDRERRORS = 64, SKB_DROP_REASON_IP_INNOROUTES = 65, SKB_DROP_REASON_PKT_TOO_BIG = 66, SKB_DROP_REASON_DUP_FRAG = 67, SKB_DROP_REASON_FRAG_REASM_TIMEOUT = 68, SKB_DROP_REASON_FRAG_TOO_FAR = 69, SKB_DROP_REASON_TCP_MINTTL = 70, SKB_DROP_REASON_IPV6_BAD_EXTHDR = 71, SKB_DROP_REASON_IPV6_NDISC_FRAG = 72, SKB_DROP_REASON_IPV6_NDISC_HOP_LIMIT = 73, SKB_DROP_REASON_IPV6_NDISC_BAD_CODE = 74, SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS = 75, SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST = 76, SKB_DROP_REASON_QUEUE_PURGE = 77, SKB_DROP_REASON_MAX = 78, SKB_DROP_REASON_SUBSYS_MASK = 4294901760, }; enum audit_nlgrps { AUDIT_NLGRP_NONE = 0, AUDIT_NLGRP_READLOG = 1, __AUDIT_NLGRP_MAX = 2, }; struct scm_creds { u32 pid; kuid_t uid; kgid_t gid; }; struct netlink_skb_parms { struct scm_creds creds; __u32 portid; __u32 dst_group; __u32 flags; struct sock *sk; bool nsid_is_set; int nsid; }; struct audit_reply { __u32 portid; struct net *net; struct sk_buff *skb; }; struct audit_net { struct sock *sk; }; struct audit_buffer { struct sk_buff *skb; struct audit_context *ctx; gfp_t gfp_mask; }; struct netlink_kernel_cfg { unsigned int groups; unsigned int flags; void (*input)(struct sk_buff *); struct mutex *cb_mutex; int (*bind)(struct net *, int); void (*unbind)(struct net *, int); void (*release)(struct sock *, unsigned long *); }; struct audit_sig_info { uid_t uid; pid_t pid; char ctx[0]; }; struct audit_tty_status { __u32 enabled; __u32 log_passwd; }; struct audit_status { __u32 mask; __u32 enabled; __u32 failure; __u32 pid; __u32 rate_limit; __u32 backlog_limit; __u32 lost; __u32 backlog; union { __u32 version; __u32 feature_bitmap; }; __u32 backlog_wait_time; __u32 backlog_wait_time_actual; }; struct audit_netlink_list { __u32 portid; struct net *net; struct sk_buff_head q; }; enum { Audit_equal = 0, Audit_not_equal = 1, Audit_bitmask = 2, Audit_bittest = 3, Audit_lt = 4, Audit_gt = 5, Audit_le = 6, Audit_ge = 7, Audit_bad = 8, }; struct audit_field; struct audit_watch; struct audit_tree; struct audit_fsnotify_mark; struct audit_krule { u32 pflags; u32 flags; u32 listnr; u32 action; u32 mask[64]; u32 buflen; u32 field_count; char *filterkey; struct audit_field *fields; struct audit_field *arch_f; struct audit_field *inode_f; struct audit_watch *watch; struct audit_tree *tree; struct audit_fsnotify_mark *exe; struct list_head rlist; struct list_head list; u64 prio; }; struct audit_entry { struct list_head list; struct callback_head rcu; struct audit_krule rule; }; struct audit_field { u32 type; union { u32 val; kuid_t uid; kgid_t gid; struct { char *lsm_str; void *lsm_rule; }; }; u32 op; }; struct audit_rule_data { __u32 flags; __u32 action; __u32 field_count; __u32 mask[64]; __u32 fields[64]; __u32 values[64]; __u32 fieldflags[64]; __u32 buflen; char buf[0]; }; enum audit_nfcfgop { AUDIT_XT_OP_REGISTER = 0, AUDIT_XT_OP_REPLACE = 1, AUDIT_XT_OP_UNREGISTER = 2, AUDIT_NFT_OP_TABLE_REGISTER = 3, AUDIT_NFT_OP_TABLE_UNREGISTER = 4, AUDIT_NFT_OP_CHAIN_REGISTER = 5, AUDIT_NFT_OP_CHAIN_UNREGISTER = 6, AUDIT_NFT_OP_RULE_REGISTER = 7, AUDIT_NFT_OP_RULE_UNREGISTER = 8, AUDIT_NFT_OP_SET_REGISTER = 9, AUDIT_NFT_OP_SET_UNREGISTER = 10, AUDIT_NFT_OP_SETELEM_REGISTER = 11, AUDIT_NFT_OP_SETELEM_UNREGISTER = 12, AUDIT_NFT_OP_GEN_REGISTER = 13, AUDIT_NFT_OP_OBJ_REGISTER = 14, AUDIT_NFT_OP_OBJ_UNREGISTER = 15, AUDIT_NFT_OP_OBJ_RESET = 16, AUDIT_NFT_OP_FLOWTABLE_REGISTER = 17, AUDIT_NFT_OP_FLOWTABLE_UNREGISTER = 18, AUDIT_NFT_OP_SETELEM_RESET = 19, AUDIT_NFT_OP_RULE_RESET = 
20, AUDIT_NFT_OP_INVALID = 21, }; struct audit_nfcfgop_tab { enum audit_nfcfgop op; const char *s; }; struct audit_aux_data { struct audit_aux_data *next; int type; }; struct audit_chunk; struct audit_tree_refs { struct audit_tree_refs *next; struct audit_chunk *c[31]; }; enum auditsc_class_t { AUDITSC_NATIVE = 0, AUDITSC_COMPAT = 1, AUDITSC_OPEN = 2, AUDITSC_OPENAT = 3, AUDITSC_SOCKETCALL = 4, AUDITSC_EXECVE = 5, AUDITSC_OPENAT2 = 6, AUDITSC_NVALS = 7, }; struct cpu_vfs_cap_data { __u32 magic_etc; kuid_t rootid; kernel_cap_t permitted; kernel_cap_t inheritable; }; typedef int __kernel_key_t; typedef __kernel_key_t key_t; struct kern_ipc_perm { spinlock_t lock; bool deleted; int id; key_t key; kuid_t uid; kgid_t gid; kuid_t cuid; kgid_t cgid; umode_t mode; unsigned long seq; void *security; struct rhash_head khtnode; struct callback_head rcu; refcount_t refcount; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct audit_aux_data_bprm_fcaps { struct audit_aux_data d; struct audit_cap_data fcap; unsigned int fcap_ver; struct audit_cap_data old_pcap; struct audit_cap_data new_pcap; }; struct audit_aux_data_pids { struct audit_aux_data d; pid_t target_pid[16]; kuid_t target_auid[16]; kuid_t target_uid[16]; unsigned int target_sessionid[16]; u32 target_sid[16]; char target_comm[256]; int pid_count; }; struct fanotify_response_info_header { __u8 type; __u8 pad; __u16 len; }; struct fanotify_response_info_audit_rule { struct fanotify_response_info_header hdr; __u32 rule_number; __u32 subj_trust; __u32 obj_trust; }; struct inotify_group_private_data { spinlock_t idr_lock; struct idr idr; struct ucounts *ucounts; }; struct fsnotify_ops; struct fsnotify_event; struct fsnotify_group { const struct fsnotify_ops *ops; refcount_t refcnt; spinlock_t notification_lock; struct list_head notification_list; wait_queue_head_t notification_waitq; unsigned int q_len; unsigned int max_events; unsigned int priority; bool shutdown; int flags; unsigned int owner_flags; struct mutex mark_mutex; atomic_t user_waits; struct list_head marks_list; struct fasync_struct *fsn_fa; struct fsnotify_event *overflow_event; struct mem_cgroup *memcg; union { void *private; struct inotify_group_private_data inotify_data; }; }; struct fsnotify_iter_info; struct fsnotify_mark; struct fsnotify_ops { int (*handle_event)(struct fsnotify_group *, u32, const void *, int, struct inode *, const struct qstr *, u32, struct fsnotify_iter_info *); int (*handle_inode_event)(struct fsnotify_mark *, u32, struct inode *, struct inode *, const struct qstr *, u32); void (*free_group_priv)(struct fsnotify_group *); void (*freeing_mark)(struct fsnotify_mark *, struct fsnotify_group *); void (*free_event)(struct fsnotify_group *, struct fsnotify_event *); void (*free_mark)(struct fsnotify_mark *); }; struct fsnotify_iter_info { struct fsnotify_mark *marks[5]; struct fsnotify_group *current_group; unsigned int report_mask; int srcu_idx; }; struct fsnotify_mark { __u32 mask; refcount_t refcnt; struct fsnotify_group *group; struct list_head g_list; spinlock_t lock; struct hlist_node obj_list; struct fsnotify_mark_connector *connector; __u32 ignore_mask; unsigned int flags; }; struct fsnotify_event { struct list_head list; }; enum fsnotify_obj_type { FSNOTIFY_OBJ_TYPE_ANY = -1, FSNOTIFY_OBJ_TYPE_INODE = 0, FSNOTIFY_OBJ_TYPE_VFSMOUNT = 1, FSNOTIFY_OBJ_TYPE_SB = 2, FSNOTIFY_OBJ_TYPE_COUNT = 3, FSNOTIFY_OBJ_TYPE_DETACHED = 3, }; struct audit_parent { struct list_head watches; struct fsnotify_mark mark; }; struct audit_watch { refcount_t count; dev_t 
dev; char *path; unsigned long ino; struct audit_parent *parent; struct list_head wlist; struct list_head rules; }; struct audit_fsnotify_mark { dev_t dev; unsigned long ino; char *path; struct fsnotify_mark mark; struct audit_krule *rule; }; enum { HASH_SIZE = 128, }; struct audit_node { struct list_head list; struct audit_tree *owner; unsigned int index; }; struct audit_chunk { struct list_head hash; unsigned long key; struct fsnotify_mark *mark; struct list_head trees; int count; atomic_long_t refs; struct callback_head head; struct audit_node owners[0]; }; struct audit_tree { refcount_t count; int goner; struct audit_chunk *root; struct list_head chunks; struct list_head rules; struct list_head list; struct list_head same_root; struct callback_head head; char pathname[0]; }; struct audit_tree_mark { struct fsnotify_mark mark; struct audit_chunk *chunk; }; enum kprobe_slot_state { SLOT_CLEAN = 0, SLOT_DIRTY = 1, SLOT_USED = 2, }; enum perf_record_ksymbol_type { PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0, PERF_RECORD_KSYMBOL_TYPE_BPF = 1, PERF_RECORD_KSYMBOL_TYPE_OOL = 2, PERF_RECORD_KSYMBOL_TYPE_MAX = 3, }; struct kprobe_insn_page { struct list_head list; kprobe_opcode_t *insns; struct kprobe_insn_cache *cache; int nused; int ngarbage; char slot_used[0]; }; struct kprobe_blacklist_entry { struct list_head list; unsigned long start_addr; unsigned long end_addr; }; enum stats_per_group { STATS_SYSTEM = 0, STATS_SOFTIRQ = 1, STATS_HARDIRQ = 2, STATS_IDLE = 3, NUM_STATS_PER_GROUP = 4, }; struct action_cache { unsigned long allow_native[8]; unsigned long allow_compat[8]; }; struct notification; struct seccomp_filter { refcount_t refs; refcount_t users; bool log; bool wait_killable_recv; struct action_cache cache; struct seccomp_filter *prev; struct bpf_prog *prog; struct notification *notif; struct mutex notify_lock; wait_queue_head_t wqh; }; struct notification { atomic_t requests; u32 flags; u64 next_id; struct list_head notifications; }; struct seccomp_log_name { u32 log; const char *name; }; enum notify_state { SECCOMP_NOTIFY_INIT = 0, SECCOMP_NOTIFY_SENT = 1, SECCOMP_NOTIFY_REPLIED = 2, }; struct seccomp_kaddfd { struct file *file; int fd; unsigned int flags; __u32 ioctl_flags; union { bool setfd; int ret; }; struct completion completion; struct list_head list; }; struct seccomp_knotif { struct task_struct *task; u64 id; const struct seccomp_data *data; enum notify_state state; int error; long val; u32 flags; struct completion ready; struct list_head list; struct list_head addfd; }; typedef unsigned int (*bpf_dispatcher_fn)(const void *, const struct bpf_insn *, unsigned int (*)(const void *, const struct bpf_insn *)); typedef unsigned int (*bpf_func_t)(const void *, const struct bpf_insn *); struct seccomp_notif_sizes { __u16 seccomp_notif; __u16 seccomp_notif_resp; __u16 seccomp_data; }; struct sock_fprog { unsigned short len; struct sock_filter __attribute__((btf_type_tag("user"))) *filter; }; struct compat_sock_fprog { u16 len; compat_uptr_t filter; }; typedef int (*bpf_aux_classic_check_t)(struct sock_filter *, unsigned int); struct seccomp_notif { __u64 id; __u32 pid; __u32 flags; struct seccomp_data data; }; struct seccomp_notif_resp { __u64 id; __s64 val; __s32 error; __u32 flags; }; struct seccomp_notif_addfd { __u64 id; __u32 flags; __u32 srcfd; __u32 newfd; __u32 newfd_flags; }; struct listener_list { struct rw_semaphore sem; struct list_head list; }; struct genl_split_ops; struct genl_info; struct genl_ops; struct genl_small_ops; struct genl_multicast_group; struct genl_family { 
unsigned int hdrsize; char name[16]; unsigned int version; unsigned int maxattr; u8 netnsok: 1; u8 parallel_ops: 1; u8 n_ops; u8 n_small_ops; u8 n_split_ops; u8 n_mcgrps; u8 resv_start_op; const struct nla_policy *policy; int (*pre_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); void (*post_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); const struct genl_ops *ops; const struct genl_small_ops *small_ops; const struct genl_split_ops *split_ops; const struct genl_multicast_group *mcgrps; struct module *module; int id; unsigned int mcgrp_offset; u64 android_kabi_reserved1; }; struct genl_split_ops { union { struct { int (*pre_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); int (*doit)(struct sk_buff *, struct genl_info *); void (*post_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); }; struct { int (*start)(struct netlink_callback *); int (*dumpit)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); }; }; const struct nla_policy *policy; unsigned int maxattr; u8 cmd; u8 internal_flags; u8 flags; u8 validate; }; struct genlmsghdr; struct genl_info { u32 snd_seq; u32 snd_portid; const struct genl_family *family; const struct nlmsghdr *nlhdr; struct genlmsghdr *genlhdr; struct nlattr **attrs; possible_net_t _net; void *user_ptr[2]; struct netlink_ext_ack *extack; }; struct genlmsghdr { __u8 cmd; __u8 version; __u16 reserved; }; struct genl_ops { int (*doit)(struct sk_buff *, struct genl_info *); int (*start)(struct netlink_callback *); int (*dumpit)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); const struct nla_policy *policy; unsigned int maxattr; u8 cmd; u8 internal_flags; u8 flags; u8 validate; u64 android_kabi_reserved1; }; struct genl_small_ops { int (*doit)(struct sk_buff *, struct genl_info *); int (*dumpit)(struct sk_buff *, struct netlink_callback *); u8 cmd; u8 internal_flags; u8 flags; u8 validate; }; struct genl_multicast_group { char name[16]; u8 flags; u8 cap_sys_admin: 1; }; enum { TASKSTATS_CMD_UNSPEC = 0, TASKSTATS_CMD_GET = 1, TASKSTATS_CMD_NEW = 2, __TASKSTATS_CMD_MAX = 3, }; enum { TASKSTATS_TYPE_UNSPEC = 0, TASKSTATS_TYPE_PID = 1, TASKSTATS_TYPE_TGID = 2, TASKSTATS_TYPE_STATS = 3, TASKSTATS_TYPE_AGGR_PID = 4, TASKSTATS_TYPE_AGGR_TGID = 5, TASKSTATS_TYPE_NULL = 6, __TASKSTATS_TYPE_MAX = 7, }; enum { TASKSTATS_CMD_ATTR_UNSPEC = 0, TASKSTATS_CMD_ATTR_PID = 1, TASKSTATS_CMD_ATTR_TGID = 2, TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 3, TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 4, __TASKSTATS_CMD_ATTR_MAX = 5, }; enum actions { REGISTER = 0, DEREGISTER = 1, CPU_DONT_CARE = 2, }; enum { CGROUPSTATS_CMD_ATTR_UNSPEC = 0, CGROUPSTATS_CMD_ATTR_FD = 1, __CGROUPSTATS_CMD_ATTR_MAX = 2, }; enum { CGROUPSTATS_CMD_UNSPEC = 3, CGROUPSTATS_CMD_GET = 4, CGROUPSTATS_CMD_NEW = 5, __CGROUPSTATS_CMD_MAX = 6, }; enum { CGROUPSTATS_TYPE_UNSPEC = 0, CGROUPSTATS_TYPE_CGROUP_STATS = 1, __CGROUPSTATS_TYPE_MAX = 2, }; struct listener { struct list_head list; pid_t pid; char valid; }; struct tp_transition_snapshot { unsigned long rcu; unsigned long srcu; bool ongoing; }; enum tp_func_state { TP_FUNC_0 = 0, TP_FUNC_1 = 1, TP_FUNC_2 = 2, TP_FUNC_N = 3, }; enum tp_transition_sync { TP_TRANSITION_SYNC_1_0_1 = 0, TP_TRANSITION_SYNC_N_2_1 = 1, _NR_TP_TRANSITION_SYNC = 2, }; struct tp_module { struct list_head list; struct module *mod; }; struct tp_probes { struct callback_head rcu; struct tracepoint_func probes[0]; }; enum { 
RB_LEN_TIME_EXTEND = 8, RB_LEN_TIME_STAMP = 8, }; enum { RB_CTX_TRANSITION = 0, RB_CTX_NMI = 1, RB_CTX_IRQ = 2, RB_CTX_SOFTIRQ = 3, RB_CTX_NORMAL = 4, RB_CTX_MAX = 5, }; enum { RB_ADD_STAMP_NONE = 0, RB_ADD_STAMP_EXTEND = 2, RB_ADD_STAMP_ABSOLUTE = 4, RB_ADD_STAMP_FORCE = 8, }; struct buffer_page { struct list_head list; local_t write; unsigned int read; local_t entries; unsigned long real_end; struct buffer_data_page *page; u32 id; }; struct ring_buffer_per_cpu; struct ring_buffer_iter { struct ring_buffer_per_cpu *cpu_buffer; unsigned long head; unsigned long next_event; struct buffer_page *head_page; struct buffer_page *cache_reader_page; unsigned long cache_read; unsigned long cache_pages_removed; u64 read_stamp; u64 page_stamp; struct ring_buffer_event *event; int missed_events; }; struct rb_time_struct { local64_t time; }; typedef struct rb_time_struct rb_time_t; struct rb_irq_work { struct irq_work work; wait_queue_head_t waiters; wait_queue_head_t full_waiters; bool waiters_pending; bool full_waiters_pending; bool wakeup_full; }; struct ring_buffer_per_cpu { int cpu; atomic_t record_disabled; atomic_t resize_disabled; struct trace_buffer *buffer; raw_spinlock_t reader_lock; arch_spinlock_t lock; struct lock_class_key lock_key; struct buffer_data_page *free_page; unsigned long nr_pages; unsigned int current_context; struct list_head *pages; struct buffer_page *head_page; struct buffer_page *tail_page; struct buffer_page *commit_page; struct buffer_page *reader_page; unsigned long lost_events; unsigned long last_overrun; unsigned long nest; local_t entries_bytes; local_t entries; local_t overrun; local_t commit_overrun; local_t dropped_events; local_t committing; local_t commits; local_t pages_touched; local_t pages_lost; local_t pages_read; long last_pages_touch; size_t shortest_full; unsigned long read; unsigned long read_bytes; rb_time_t write_stamp; rb_time_t before_stamp; u64 event_stamp[5]; u64 read_stamp; unsigned long pages_removed; int mapped; struct mutex mapping_lock; unsigned long *page_ids; struct ring_buffer_meta *meta_page; struct ring_buffer_writer *writer; long nr_pages_to_update; struct list_head new_pages; struct work_struct update_pages_work; struct completion update_done; struct rb_irq_work irq_work; }; struct trace_buffer { unsigned int flags; int cpus; atomic_t record_disabled; atomic_t resizing; cpumask_var_t cpumask; struct lock_class_key *reader_lock_key; struct mutex mutex; struct ring_buffer_per_cpu **buffers; struct ring_buffer_writer *writer; struct hlist_node node; u64 (*clock)(); struct rb_irq_work irq_work; bool time_stamp_abs; }; struct rb_event_info { u64 ts; u64 delta; u64 before; u64 after; unsigned long length; struct buffer_page *tail_page; int add_timestamp; }; typedef bool (*ring_buffer_cond_fn)(void *); struct trace_array_cpu; struct array_buffer { struct trace_array *tr; struct trace_buffer *buffer; struct trace_array_cpu __attribute__((btf_type_tag("percpu"))) *data; u64 time_start; int cpu; }; struct trace_pid_list; struct trace_options; struct trace_func_repeats; struct trace_array { struct list_head list; char *name; struct array_buffer array_buffer; struct trace_pid_list __attribute__((btf_type_tag("rcu"))) *filtered_pids; struct trace_pid_list __attribute__((btf_type_tag("rcu"))) *filtered_no_pids; arch_spinlock_t max_lock; int buffer_disabled; int stop_count; int clock_id; int nr_topts; bool clear_trace; int buffer_percent; unsigned int n_err_log_entries; struct tracer *current_trace; unsigned int trace_flags; unsigned char 
trace_flags_index[32]; unsigned int flags; raw_spinlock_t start_lock; struct list_head err_log; struct dentry *dir; struct dentry *options; struct dentry *percpu_dir; struct eventfs_inode *event_dir; struct trace_options *topts; struct list_head systems; struct list_head events; struct trace_event_file *trace_marker_file; cpumask_var_t tracing_cpumask; cpumask_var_t pipe_cpumask; int ref; int trace_ref; int no_filter_buffering_ref; struct list_head hist_vars; struct trace_func_repeats __attribute__((btf_type_tag("percpu"))) *last_func_repeats; }; struct trace_array_cpu { atomic_t disabled; void *buffer_page; unsigned long entries; unsigned long saved_latency; unsigned long critical_start; unsigned long critical_end; unsigned long critical_sequence; unsigned long nice; unsigned long policy; unsigned long rt_priority; unsigned long skipped_entries; u64 preempt_timestamp; pid_t pid; kuid_t uid; char comm[16]; bool ignore_pid; }; union upper_chunk; union lower_chunk; struct trace_pid_list { raw_spinlock_t lock; struct irq_work refill_irqwork; union upper_chunk *upper[256]; union upper_chunk *upper_list; union lower_chunk *lower_list; int free_upper_chunks; int free_lower_chunks; }; union upper_chunk { union upper_chunk *next; union lower_chunk *data[256]; }; union lower_chunk { union lower_chunk *next; unsigned long data[256]; }; struct tracer_flags; struct tracer { const char *name; int (*init)(struct trace_array *); void (*reset)(struct trace_array *); void (*start)(struct trace_array *); void (*stop)(struct trace_array *); int (*update_thresh)(struct trace_array *); void (*open)(struct trace_iterator *); void (*pipe_open)(struct trace_iterator *); void (*close)(struct trace_iterator *); void (*pipe_close)(struct trace_iterator *); ssize_t (*read)(struct trace_iterator *, struct file *, char __attribute__((btf_type_tag("user"))) *, size_t, loff_t *); ssize_t (*splice_read)(struct trace_iterator *, struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); void (*print_header)(struct seq_file *); enum print_line_t (*print_line)(struct trace_iterator *); int (*set_flag)(struct trace_array *, u32, u32, int); int (*flag_changed)(struct trace_array *, u32, int); struct tracer *next; struct tracer_flags *flags; int enabled; bool print_max; bool allow_instances; bool noboot; }; struct tracer_opt; struct tracer_flags { u32 val; struct tracer_opt *opts; struct tracer *trace; }; struct tracer_opt { const char *name; u32 bit; }; struct trace_option_dentry; struct trace_options { struct tracer *tracer; struct trace_option_dentry *topts; }; struct trace_option_dentry { struct tracer_opt *opt; struct tracer_flags *flags; struct trace_array *tr; struct dentry *entry; }; struct filter_pred; struct prog_entry { int target; int when_to_branch; struct filter_pred *pred; }; struct event_subsystem; struct trace_subsystem_dir { struct list_head list; struct event_subsystem *subsystem; struct trace_array *tr; struct eventfs_inode *ei; int ref_count; int nr_events; }; struct event_subsystem { struct list_head list; const char *name; struct event_filter *filter; int ref_count; }; struct trace_func_repeats { unsigned long ip; unsigned long parent_ip; unsigned long count; u64 ts_last_call; }; struct trace_export { struct trace_export __attribute__((btf_type_tag("rcu"))) *next; void (*write)(struct trace_export *, const void *, unsigned int); int flags; }; struct saved_cmdlines_buffer { unsigned int map_pid_to_cmdline[32769]; unsigned int *map_cmdline_to_pid; unsigned int cmdline_num; int cmdline_idx; 
char saved_cmdlines[0]; }; struct ftrace_stack { unsigned long calls[1024]; }; struct ftrace_stacks { struct ftrace_stack stacks[4]; }; struct trace_buffer_struct { int nesting; char buffer[4096]; }; enum trace_iterator_flags { TRACE_ITER_PRINT_PARENT = 1, TRACE_ITER_SYM_OFFSET = 2, TRACE_ITER_SYM_ADDR = 4, TRACE_ITER_VERBOSE = 8, TRACE_ITER_RAW = 16, TRACE_ITER_HEX = 32, TRACE_ITER_BIN = 64, TRACE_ITER_BLOCK = 128, TRACE_ITER_FIELDS = 256, TRACE_ITER_PRINTK = 512, TRACE_ITER_ANNOTATE = 1024, TRACE_ITER_USERSTACKTRACE = 2048, TRACE_ITER_SYM_USEROBJ = 4096, TRACE_ITER_PRINTK_MSGONLY = 8192, TRACE_ITER_CONTEXT_INFO = 16384, TRACE_ITER_LATENCY_FMT = 32768, TRACE_ITER_RECORD_CMD = 65536, TRACE_ITER_RECORD_TGID = 131072, TRACE_ITER_OVERWRITE = 262144, TRACE_ITER_STOP_ON_FREE = 524288, TRACE_ITER_IRQ_INFO = 1048576, TRACE_ITER_MARKERS = 2097152, TRACE_ITER_EVENT_FORK = 4194304, TRACE_ITER_PAUSE_ON_TRACE = 8388608, TRACE_ITER_HASH_PTR = 16777216, TRACE_ITER_STACKTRACE = 33554432, }; enum trace_type { __TRACE_FIRST_TYPE = 0, TRACE_FN = 1, TRACE_CTX = 2, TRACE_WAKE = 3, TRACE_STACK = 4, TRACE_PRINT = 5, TRACE_BPRINT = 6, TRACE_MMIO_RW = 7, TRACE_MMIO_MAP = 8, TRACE_BRANCH = 9, TRACE_GRAPH_RET = 10, TRACE_GRAPH_ENT = 11, TRACE_USER_STACK = 12, TRACE_BLK = 13, TRACE_BPUTS = 14, TRACE_HWLAT = 15, TRACE_OSNOISE = 16, TRACE_TIMERLAT = 17, TRACE_RAW_DATA = 18, TRACE_FUNC_REPEATS = 19, __TRACE_LAST_TYPE = 20, }; enum trace_flag_type { TRACE_FLAG_IRQS_OFF = 1, TRACE_FLAG_IRQS_NOSUPPORT = 2, TRACE_FLAG_NEED_RESCHED = 4, TRACE_FLAG_HARDIRQ = 8, TRACE_FLAG_SOFTIRQ = 16, TRACE_FLAG_PREEMPT_RESCHED = 32, TRACE_FLAG_NMI = 64, TRACE_FLAG_BH_OFF = 128, }; enum event_trigger_type { ETT_NONE = 0, ETT_TRACE_ONOFF = 1, ETT_SNAPSHOT = 2, ETT_STACKTRACE = 4, ETT_EVENT_ENABLE = 8, ETT_EVENT_HIST = 16, ETT_HIST_ENABLE = 32, ETT_EVENT_EPROBE = 64, }; enum trace_iter_flags { TRACE_FILE_LAT_FMT = 1, TRACE_FILE_ANNOTATE = 2, TRACE_FILE_TIME_IN_NS = 4, }; enum { EVENT_FILE_FL_ENABLED_BIT = 0, EVENT_FILE_FL_RECORDED_CMD_BIT = 1, EVENT_FILE_FL_RECORDED_TGID_BIT = 2, EVENT_FILE_FL_FILTERED_BIT = 3, EVENT_FILE_FL_NO_SET_FILTER_BIT = 4, EVENT_FILE_FL_SOFT_MODE_BIT = 5, EVENT_FILE_FL_SOFT_DISABLED_BIT = 6, EVENT_FILE_FL_TRIGGER_MODE_BIT = 7, EVENT_FILE_FL_TRIGGER_COND_BIT = 8, EVENT_FILE_FL_PID_FILTER_BIT = 9, EVENT_FILE_FL_WAS_ENABLED_BIT = 10, EVENT_FILE_FL_FREED_BIT = 11, }; enum { TRACE_ARRAY_FL_GLOBAL = 1, }; struct err_info { const char **errs; u8 type; u16 pos; u64 ts; }; struct tracing_log_err { struct list_head list; struct err_info info; char loc[128]; char *cmd; }; struct buffer_ref { struct trace_buffer *buffer; void *page; int cpu; refcount_t refcount; }; struct trace_parser { bool cont; char *buffer; unsigned int idx; unsigned int size; }; struct userstack_entry { struct trace_entry ent; unsigned int tgid; unsigned long caller[8]; }; struct func_repeats_entry { struct trace_entry ent; unsigned long ip; unsigned long parent_ip; u16 count; u16 top_delta_ts; u32 bottom_delta_ts; }; typedef struct vfsmount * (*debugfs_automount_t)(struct dentry *, void *); struct partial_page; struct splice_pipe_desc { struct page **pages; struct partial_page *partial; int nr_pages; unsigned int nr_pages_max; const struct pipe_buf_operations *ops; void (*spd_release)(struct splice_pipe_desc *, unsigned int); }; struct partial_page { unsigned int offset; unsigned int len; unsigned long private; }; struct print_entry { struct trace_entry ent; unsigned long ip; char buf[0]; }; struct bputs_entry { struct trace_entry ent; unsigned long ip; 
const char *str; }; typedef bool (*cond_update_fn_t)(struct trace_array *, void *); struct ftrace_entry { struct trace_entry ent; unsigned long ip; unsigned long parent_ip; }; struct stack_entry { struct trace_entry ent; int size; unsigned long caller[0]; }; struct bprint_entry { struct trace_entry ent; unsigned long ip; const char *fmt; u32 buf[0]; }; struct trace_min_max_param { struct mutex *lock; u64 *val; u64 *min; u64 *max; }; struct raw_data_entry { struct trace_entry ent; unsigned int id; char buf[0]; }; struct ftrace_buffer_info { struct trace_iterator iter; void *spare; unsigned int spare_cpu; unsigned int read; }; struct trace_mark { unsigned long long val; char sym; }; enum { FILTER_OTHER = 0, FILTER_STATIC_STRING = 1, FILTER_DYN_STRING = 2, FILTER_RDYN_STRING = 3, FILTER_PTR_STRING = 4, FILTER_TRACE_FN = 5, FILTER_CPUMASK = 6, FILTER_COMM = 7, FILTER_CPU = 8, FILTER_STACKTRACE = 9, }; struct ftrace_event_field { struct list_head link; const char *name; const char *type; int filter_type; int offset; int size; int is_signed; int len; }; struct ctx_switch_entry { struct trace_entry ent; unsigned int prev_pid; unsigned int next_pid; unsigned int next_cpu; unsigned char prev_prio; unsigned char prev_state; unsigned char next_prio; unsigned char next_state; }; struct hwlat_entry { struct trace_entry ent; u64 duration; u64 outer_duration; u64 nmi_total_ts; struct timespec64 timestamp; unsigned int nmi_count; unsigned int seqnum; unsigned int count; }; struct osnoise_entry { struct trace_entry ent; u64 noise; u64 runtime; u64 max_sample; unsigned int hw_count; unsigned int nmi_count; unsigned int irq_count; unsigned int softirq_count; unsigned int thread_count; }; struct timerlat_entry { struct trace_entry ent; unsigned int seqnum; int context; u64 timer_latency; }; struct tracer_stat; struct stat_session { struct list_head session_list; struct tracer_stat *ts; struct rb_root stat_root; struct mutex stat_mutex; struct dentry *file; }; struct tracer_stat { const char *name; void * (*stat_start)(struct tracer_stat *); void * (*stat_next)(void *, int); cmp_func_t stat_cmp; int (*stat_show)(struct seq_file *, void *); void (*stat_release)(void *); int (*stat_headers)(struct seq_file *); }; struct stat_node { struct rb_node node; void *stat; }; struct trace_bprintk_fmt { struct list_head list; const char *fmt; }; struct tracing_map; struct tracing_map_field; struct tracing_map_elt { struct tracing_map *map; struct tracing_map_field *fields; atomic64_t *vars; bool *var_set; void *key; void *private_data; }; typedef int (*tracing_map_cmp_fn_t)(void *, void *); struct tracing_map_field { tracing_map_cmp_fn_t cmp_fn; union { atomic64_t sum; unsigned int offset; }; }; struct tracing_map_sort_key { unsigned int field_idx; bool descending; }; struct tracing_map_array; struct tracing_map_ops; struct tracing_map { unsigned int key_size; unsigned int map_bits; unsigned int map_size; unsigned int max_elts; atomic_t next_elt; struct tracing_map_array *elts; struct tracing_map_array *map; const struct tracing_map_ops *ops; void *private_data; struct tracing_map_field fields[6]; unsigned int n_fields; int key_idx[3]; unsigned int n_keys; struct tracing_map_sort_key sort_key; unsigned int n_vars; atomic64_t hits; atomic64_t drops; }; struct tracing_map_array { unsigned int entries_per_page; unsigned int entry_size_shift; unsigned int entry_shift; unsigned int entry_mask; unsigned int n_pages; void **pages; }; struct tracing_map_ops { int (*elt_alloc)(struct tracing_map_elt *); void (*elt_free)(struct 
tracing_map_elt *); void (*elt_clear)(struct tracing_map_elt *); void (*elt_init)(struct tracing_map_elt *); }; struct tracing_map_entry { u32 key; struct tracing_map_elt *val; }; struct tracing_map_sort_entry { void *key; struct tracing_map_elt *elt; bool elt_copied; bool dup; }; enum { TRACE_NOP_OPT_ACCEPT = 1, TRACE_NOP_OPT_REFUSE = 2, }; enum req_flag_bits { __REQ_FAILFAST_DEV = 8, __REQ_FAILFAST_TRANSPORT = 9, __REQ_FAILFAST_DRIVER = 10, __REQ_SYNC = 11, __REQ_META = 12, __REQ_PRIO = 13, __REQ_NOMERGE = 14, __REQ_IDLE = 15, __REQ_INTEGRITY = 16, __REQ_FUA = 17, __REQ_PREFLUSH = 18, __REQ_RAHEAD = 19, __REQ_BACKGROUND = 20, __REQ_NOWAIT = 21, __REQ_POLLED = 22, __REQ_ALLOC_CACHE = 23, __REQ_SWAP = 24, __REQ_DRV = 25, __REQ_FS_PRIVATE = 26, __REQ_NOUNMAP = 27, __REQ_NR_BITS = 28, }; enum req_op { REQ_OP_READ = 0, REQ_OP_WRITE = 1, REQ_OP_FLUSH = 2, REQ_OP_DISCARD = 3, REQ_OP_SECURE_ERASE = 5, REQ_OP_WRITE_ZEROES = 9, REQ_OP_ZONE_OPEN = 10, REQ_OP_ZONE_CLOSE = 11, REQ_OP_ZONE_FINISH = 12, REQ_OP_ZONE_APPEND = 13, REQ_OP_ZONE_RESET = 15, REQ_OP_ZONE_RESET_ALL = 17, REQ_OP_DRV_IN = 34, REQ_OP_DRV_OUT = 35, REQ_OP_LAST = 36, }; enum blk_crypto_mode_num { BLK_ENCRYPTION_MODE_INVALID = 0, BLK_ENCRYPTION_MODE_AES_256_XTS = 1, BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV = 2, BLK_ENCRYPTION_MODE_ADIANTUM = 3, BLK_ENCRYPTION_MODE_SM4_XTS = 4, BLK_ENCRYPTION_MODE_MAX = 5, }; enum blk_crypto_key_type { BLK_CRYPTO_KEY_TYPE_STANDARD = 1, BLK_CRYPTO_KEY_TYPE_HW_WRAPPED = 2, }; enum mq_rq_state { MQ_RQ_IDLE = 0, MQ_RQ_IN_FLIGHT = 1, MQ_RQ_COMPLETE = 2, }; enum rq_end_io_ret { RQ_END_IO_NONE = 0, RQ_END_IO_FREE = 1, }; struct blk_crypto_key; struct bio_crypt_ctx { const struct blk_crypto_key *bc_key; u64 bc_dun[4]; }; struct blk_crypto_config { enum blk_crypto_mode_num crypto_mode; unsigned int data_unit_size; unsigned int dun_bytes; enum blk_crypto_key_type key_type; }; struct blk_crypto_key { struct blk_crypto_config crypto_cfg; unsigned int data_unit_size_bits; unsigned int size; u8 raw[128]; }; typedef enum rq_end_io_ret rq_end_io_fn(struct request *, blk_status_t); typedef __u32 req_flags_t; struct blk_crypto_keyslot; struct request { struct request_queue *q; struct blk_mq_ctx *mq_ctx; struct blk_mq_hw_ctx *mq_hctx; blk_opf_t cmd_flags; req_flags_t rq_flags; int tag; int internal_tag; unsigned int timeout; unsigned int __data_len; sector_t __sector; struct bio *bio; struct bio *biotail; union { struct list_head queuelist; struct request *rq_next; }; struct block_device *part; u64 alloc_time_ns; u64 start_time_ns; u64 io_start_time_ns; unsigned short wbt_flags; unsigned short stats_sectors; unsigned short nr_phys_segments; struct bio_crypt_ctx *crypt_ctx; struct blk_crypto_keyslot *crypt_keyslot; enum rw_hint write_hint; unsigned short ioprio; enum mq_rq_state state; atomic_t ref; unsigned long deadline; union { struct hlist_node hash; struct llist_node ipi_list; }; union { struct rb_node rb_node; struct bio_vec special_vec; }; struct { struct io_cq *icq; void *priv[2]; } elv; struct { unsigned int seq; rq_end_io_fn *saved_end_io; } flush; u64 fifo_time; rq_end_io_fn *end_io; void *end_io_data; u64 android_oem_data1; u64 android_kabi_reserved1; }; struct sbitmap_word; struct sbitmap { unsigned int depth; unsigned int shift; unsigned int map_nr; bool round_robin; struct sbitmap_word *map; unsigned int __attribute__((btf_type_tag("percpu"))) *alloc_hint; }; struct blk_mq_hw_ctx { struct { spinlock_t lock; struct list_head dispatch; unsigned long state; long: 64; long: 64; long: 64; long: 64; }; struct 
delayed_work run_work; cpumask_var_t cpumask; int next_cpu; int next_cpu_batch; unsigned long flags; void *sched_data; struct request_queue *queue; struct blk_flush_queue *fq; void *driver_data; struct sbitmap ctx_map; struct blk_mq_ctx *dispatch_from; unsigned int dispatch_busy; unsigned short type; unsigned short nr_ctx; struct blk_mq_ctx **ctxs; spinlock_t dispatch_wait_lock; wait_queue_entry_t dispatch_wait; atomic_t wait_index; struct blk_mq_tags *tags; struct blk_mq_tags *sched_tags; unsigned long run; unsigned int numa_node; unsigned int queue_num; atomic_t nr_active; struct hlist_node cpuhp_online; struct hlist_node cpuhp_dead; struct kobject kobj; struct dentry *debugfs_dir; struct dentry *sched_debugfs_dir; struct list_head hctx_list; u64 android_kabi_reserved1; }; struct blk_flush_queue { spinlock_t mq_flush_lock; unsigned int flush_pending_idx: 1; unsigned int flush_running_idx: 1; blk_status_t rq_status; unsigned long flush_pending_since; struct list_head flush_queue[2]; unsigned long flush_data_in_flight; struct request *flush_rq; }; struct sbitmap_word { unsigned long word; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; unsigned long cleared; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct sbq_wait_state; struct sbitmap_queue { struct sbitmap sb; unsigned int wake_batch; atomic_t wake_index; struct sbq_wait_state *ws; atomic_t ws_active; unsigned int min_shallow_depth; atomic_t completion_cnt; atomic_t wakeup_cnt; }; struct blk_mq_tags { unsigned int nr_tags; unsigned int nr_reserved_tags; unsigned int active_queues; struct sbitmap_queue bitmap_tags; struct sbitmap_queue breserved_tags; struct request **rqs; struct request **static_rqs; struct list_head page_list; spinlock_t lock; u64 android_oem_data1; }; struct sbq_wait_state { wait_queue_head_t wait; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct blk_mq_queue_data { struct request *rq; bool last; }; struct blk_mq_queue_map { unsigned int *mq_map; unsigned int nr_queues; unsigned int queue_offset; }; struct blk_mq_tag_set { const struct blk_mq_ops *ops; struct blk_mq_queue_map map[3]; unsigned int nr_maps; unsigned int nr_hw_queues; unsigned int queue_depth; unsigned int reserved_tags; unsigned int cmd_size; int numa_node; unsigned int timeout; unsigned int flags; void *driver_data; struct blk_mq_tags **tags; struct blk_mq_tags *shared_tags; struct mutex tag_list_lock; struct list_head tag_list; struct srcu_struct *srcu; u64 android_kabi_reserved1; }; struct boot_triggers { const char *event; char *trigger; }; typedef int (*eventfs_callback)(const char *, umode_t *, void **, const struct file_operations **); struct eventfs_entry { const char *name; eventfs_callback callback; }; enum { TRACE_PIDS = 1, TRACE_NO_PIDS = 2, }; enum { FORMAT_HEADER = 1, FORMAT_FIELD_SEPERATOR = 2, FORMAT_PRINTFMT = 3, }; struct module_string { struct list_head next; struct module *module; char *str; }; enum perf_event_sample_format { PERF_SAMPLE_IP = 1, PERF_SAMPLE_TID = 2, PERF_SAMPLE_TIME = 4, PERF_SAMPLE_ADDR = 8, PERF_SAMPLE_READ = 16, PERF_SAMPLE_CALLCHAIN = 32, PERF_SAMPLE_ID = 64, PERF_SAMPLE_CPU = 128, PERF_SAMPLE_PERIOD = 256, PERF_SAMPLE_STREAM_ID = 512, PERF_SAMPLE_RAW = 1024, PERF_SAMPLE_BRANCH_STACK = 2048, PERF_SAMPLE_REGS_USER = 4096, PERF_SAMPLE_STACK_USER = 8192, PERF_SAMPLE_WEIGHT = 16384, PERF_SAMPLE_DATA_SRC = 32768, PERF_SAMPLE_IDENTIFIER = 65536, PERF_SAMPLE_TRANSACTION = 131072, PERF_SAMPLE_REGS_INTR = 262144, PERF_SAMPLE_PHYS_ADDR = 524288, 
PERF_SAMPLE_AUX = 1048576, PERF_SAMPLE_CGROUP = 2097152, PERF_SAMPLE_DATA_PAGE_SIZE = 4194304, PERF_SAMPLE_CODE_PAGE_SIZE = 8388608, PERF_SAMPLE_WEIGHT_STRUCT = 16777216, PERF_SAMPLE_MAX = 33554432, }; typedef unsigned long perf_trace_t[1024]; struct ustring_buffer { char buffer[1024]; }; enum regex_type { MATCH_FULL = 0, MATCH_FRONT_ONLY = 1, MATCH_MIDDLE_ONLY = 2, MATCH_END_ONLY = 3, MATCH_GLOB = 4, MATCH_INDEX = 5, }; enum filter_pred_fn { FILTER_PRED_FN_NOP = 0, FILTER_PRED_FN_64 = 1, FILTER_PRED_FN_64_CPUMASK = 2, FILTER_PRED_FN_S64 = 3, FILTER_PRED_FN_U64 = 4, FILTER_PRED_FN_32 = 5, FILTER_PRED_FN_32_CPUMASK = 6, FILTER_PRED_FN_S32 = 7, FILTER_PRED_FN_U32 = 8, FILTER_PRED_FN_16 = 9, FILTER_PRED_FN_16_CPUMASK = 10, FILTER_PRED_FN_S16 = 11, FILTER_PRED_FN_U16 = 12, FILTER_PRED_FN_8 = 13, FILTER_PRED_FN_8_CPUMASK = 14, FILTER_PRED_FN_S8 = 15, FILTER_PRED_FN_U8 = 16, FILTER_PRED_FN_COMM = 17, FILTER_PRED_FN_STRING = 18, FILTER_PRED_FN_STRLOC = 19, FILTER_PRED_FN_STRRELLOC = 20, FILTER_PRED_FN_PCHAR_USER = 21, FILTER_PRED_FN_PCHAR = 22, FILTER_PRED_FN_CPU = 23, FILTER_PRED_FN_CPU_CPUMASK = 24, FILTER_PRED_FN_CPUMASK = 25, FILTER_PRED_FN_CPUMASK_CPU = 26, FILTER_PRED_FN_FUNCTION = 27, FILTER_PRED_FN_ = 28, FILTER_PRED_TEST_VISITED = 29, }; enum filter_op_ids { OP_GLOB = 0, OP_NE = 1, OP_EQ = 2, OP_LE = 3, OP_LT = 4, OP_GE = 5, OP_GT = 6, OP_BAND = 7, OP_MAX = 8, }; enum { TOO_MANY_CLOSE = -1, TOO_MANY_OPEN = -2, MISSING_QUOTE = -3, }; enum { FILT_ERR_NONE = 0, FILT_ERR_INVALID_OP = 1, FILT_ERR_TOO_MANY_OPEN = 2, FILT_ERR_TOO_MANY_CLOSE = 3, FILT_ERR_MISSING_QUOTE = 4, FILT_ERR_MISSING_BRACE_OPEN = 5, FILT_ERR_MISSING_BRACE_CLOSE = 6, FILT_ERR_OPERAND_TOO_LONG = 7, FILT_ERR_EXPECT_STRING = 8, FILT_ERR_EXPECT_DIGIT = 9, FILT_ERR_ILLEGAL_FIELD_OP = 10, FILT_ERR_FIELD_NOT_FOUND = 11, FILT_ERR_ILLEGAL_INTVAL = 12, FILT_ERR_BAD_SUBSYS_FILTER = 13, FILT_ERR_TOO_MANY_PREDS = 14, FILT_ERR_INVALID_FILTER = 15, FILT_ERR_INVALID_CPULIST = 16, FILT_ERR_IP_FIELD_ONLY = 17, FILT_ERR_INVALID_VALUE = 18, FILT_ERR_NO_FUNCTION = 19, FILT_ERR_ERRNO = 20, FILT_ERR_NO_FILTER = 21, }; enum { INVERT = 1, PROCESS_AND = 2, PROCESS_OR = 4, }; struct regex; struct filter_pred { struct regex *regex; struct cpumask *mask; unsigned short *ops; struct ftrace_event_field *field; u64 val; u64 val2; enum filter_pred_fn fn_num; int offset; int not; int op; }; typedef int (*regex_match_func)(char *, struct regex *, int); struct regex { char pattern[256]; int len; int field_len; regex_match_func match; }; struct filter_list { struct list_head list; struct event_filter *filter; }; struct filter_parse_error { int lasterr; int lasterr_pos; }; typedef int (*parse_pred_fn)(const char *, void *, int, struct filter_parse_error *, struct filter_pred **); struct event_trigger_data; struct event_trigger_ops; struct event_command { struct list_head list; char *name; enum event_trigger_type trigger_type; int flags; int (*parse)(struct event_command *, struct trace_event_file *, char *, char *, char *); int (*reg)(char *, struct event_trigger_data *, struct trace_event_file *); void (*unreg)(char *, struct event_trigger_data *, struct trace_event_file *); void (*unreg_all)(struct trace_event_file *); int (*set_filter)(char *, struct event_trigger_data *, struct trace_event_file *); struct event_trigger_ops * (*get_trigger_ops)(char *, char *); }; struct event_trigger_data { unsigned long count; int ref; int flags; struct event_trigger_ops *ops; struct event_command *cmd_ops; struct event_filter __attribute__((btf_type_tag("rcu"))) *filter; 
char *filter_str; void *private_data; bool paused; bool paused_tmp; struct list_head list; char *name; struct list_head named_list; struct event_trigger_data *named_data; }; struct event_trigger_ops { void (*trigger)(struct event_trigger_data *, struct trace_buffer *, void *, struct ring_buffer_event *); int (*init)(struct event_trigger_data *); void (*free)(struct event_trigger_data *); int (*print)(struct seq_file *, struct event_trigger_data *); }; enum event_command_flags { EVENT_CMD_FL_POST_TRIGGER = 1, EVENT_CMD_FL_NEEDS_REC = 2, }; enum { EVENT_TRIGGER_FL_PROBE = 1, }; struct enable_trigger_data { struct trace_event_file *file; bool enable; bool hist; }; struct dyn_event; struct dyn_event_operations { struct list_head list; int (*create)(const char *); int (*show)(struct seq_file *, struct dyn_event *); bool (*is_busy)(struct dyn_event *); int (*free)(struct dyn_event *); bool (*match)(const char *, const char *, int, const char **, struct dyn_event *); }; struct dyn_event { struct list_head list; struct dyn_event_operations *ops; }; enum fetch_op { FETCH_OP_NOP = 0, FETCH_OP_REG = 1, FETCH_OP_STACK = 2, FETCH_OP_STACKP = 3, FETCH_OP_RETVAL = 4, FETCH_OP_IMM = 5, FETCH_OP_COMM = 6, FETCH_OP_ARG = 7, FETCH_OP_FOFFS = 8, FETCH_OP_DATA = 9, FETCH_OP_DEREF = 10, FETCH_OP_UDEREF = 11, FETCH_OP_ST_RAW = 12, FETCH_OP_ST_MEM = 13, FETCH_OP_ST_UMEM = 14, FETCH_OP_ST_STRING = 15, FETCH_OP_ST_USTRING = 16, FETCH_OP_ST_SYMSTR = 17, FETCH_OP_MOD_BF = 18, FETCH_OP_LP_ARRAY = 19, FETCH_OP_TP_ARG = 20, FETCH_OP_END = 21, FETCH_NOP_SYMBOL = 22, }; enum { TP_ERR_FILE_NOT_FOUND = 0, TP_ERR_NO_REGULAR_FILE = 1, TP_ERR_BAD_REFCNT = 2, TP_ERR_REFCNT_OPEN_BRACE = 3, TP_ERR_BAD_REFCNT_SUFFIX = 4, TP_ERR_BAD_UPROBE_OFFS = 5, TP_ERR_BAD_MAXACT_TYPE = 6, TP_ERR_BAD_MAXACT = 7, TP_ERR_MAXACT_TOO_BIG = 8, TP_ERR_BAD_PROBE_ADDR = 9, TP_ERR_NON_UNIQ_SYMBOL = 10, TP_ERR_BAD_RETPROBE = 11, TP_ERR_NO_TRACEPOINT = 12, TP_ERR_BAD_ADDR_SUFFIX = 13, TP_ERR_NO_GROUP_NAME = 14, TP_ERR_GROUP_TOO_LONG = 15, TP_ERR_BAD_GROUP_NAME = 16, TP_ERR_NO_EVENT_NAME = 17, TP_ERR_EVENT_TOO_LONG = 18, TP_ERR_BAD_EVENT_NAME = 19, TP_ERR_EVENT_EXIST = 20, TP_ERR_RETVAL_ON_PROBE = 21, TP_ERR_NO_RETVAL = 22, TP_ERR_BAD_STACK_NUM = 23, TP_ERR_BAD_ARG_NUM = 24, TP_ERR_BAD_VAR = 25, TP_ERR_BAD_REG_NAME = 26, TP_ERR_BAD_MEM_ADDR = 27, TP_ERR_BAD_IMM = 28, TP_ERR_IMMSTR_NO_CLOSE = 29, TP_ERR_FILE_ON_KPROBE = 30, TP_ERR_BAD_FILE_OFFS = 31, TP_ERR_SYM_ON_UPROBE = 32, TP_ERR_TOO_MANY_OPS = 33, TP_ERR_DEREF_NEED_BRACE = 34, TP_ERR_BAD_DEREF_OFFS = 35, TP_ERR_DEREF_OPEN_BRACE = 36, TP_ERR_COMM_CANT_DEREF = 37, TP_ERR_BAD_FETCH_ARG = 38, TP_ERR_ARRAY_NO_CLOSE = 39, TP_ERR_BAD_ARRAY_SUFFIX = 40, TP_ERR_BAD_ARRAY_NUM = 41, TP_ERR_ARRAY_TOO_BIG = 42, TP_ERR_BAD_TYPE = 43, TP_ERR_BAD_STRING = 44, TP_ERR_BAD_SYMSTRING = 45, TP_ERR_BAD_BITFIELD = 46, TP_ERR_ARG_NAME_TOO_LONG = 47, TP_ERR_NO_ARG_NAME = 48, TP_ERR_BAD_ARG_NAME = 49, TP_ERR_USED_ARG_NAME = 50, TP_ERR_ARG_TOO_LONG = 51, TP_ERR_NO_ARG_BODY = 52, TP_ERR_BAD_INSN_BNDRY = 53, TP_ERR_FAIL_REG_PROBE = 54, TP_ERR_DIFF_PROBE_TYPE = 55, TP_ERR_DIFF_ARG_TYPE = 56, TP_ERR_SAME_PROBE = 57, TP_ERR_NO_EVENT_INFO = 58, TP_ERR_BAD_ATTACH_EVENT = 59, TP_ERR_BAD_ATTACH_ARG = 60, TP_ERR_NO_EP_FILTER = 61, TP_ERR_NOSUP_BTFARG = 62, TP_ERR_NO_BTFARG = 63, TP_ERR_NO_BTF_ENTRY = 64, TP_ERR_BAD_VAR_ARGS = 65, TP_ERR_NOFENTRY_ARGS = 66, TP_ERR_DOUBLE_ARGS = 67, TP_ERR_ARGS_2LONG = 68, TP_ERR_ARGIDX_2BIG = 69, TP_ERR_NO_PTR_STRCT = 70, TP_ERR_NOSUP_DAT_ARG = 71, TP_ERR_BAD_HYPHEN = 72, TP_ERR_NO_BTF_FIELD = 73, 
TP_ERR_BAD_BTF_TID = 74, TP_ERR_BAD_TYPE4STR = 75, TP_ERR_NEED_STRING_TYPE = 76, }; enum probe_print_type { PROBE_PRINT_NORMAL = 0, PROBE_PRINT_RETURN = 1, PROBE_PRINT_EVENT = 2, }; struct eprobe_trace_entry_head { struct trace_entry ent; }; struct fetch_insn; struct fetch_type; struct probe_arg { struct fetch_insn *code; bool dynamic; unsigned int offset; unsigned int count; const char *name; const char *comm; char *fmt; const struct fetch_type *type; }; struct trace_probe_event; struct trace_probe { struct list_head list; struct trace_probe_event *event; ssize_t size; unsigned int nr_args; struct probe_arg args[0]; }; struct trace_eprobe { const char *event_system; const char *event_name; char *filter_str; struct trace_event_call *event; struct dyn_event devent; struct trace_probe tp; }; struct trace_uprobe_filter { rwlock_t rwlock; int nr_systemwide; struct list_head perf_events; }; struct trace_probe_event { unsigned int flags; struct trace_event_class class; struct trace_event_call call; struct list_head files; struct list_head probes; struct trace_uprobe_filter filter[0]; }; struct fetch_insn { enum fetch_op op; union { unsigned int param; struct { unsigned int size; int offset; }; struct { unsigned char basesize; unsigned char lshift; unsigned char rshift; }; unsigned long immediate; void *data; }; }; typedef int (*print_type_func_t)(struct trace_seq *, void *, void *); struct fetch_type { const char *name; size_t size; bool is_signed; bool is_string; print_type_func_t print; const char *fmt; const char *fmttype; }; struct btf_param; struct traceprobe_parse_context { struct trace_event_call *event; const char *funcname; const struct btf_type *proto; const struct btf_param *params; s32 nr_params; struct btf *btf; const struct btf_type *last_type; u32 last_bitoffs; u32 last_bitsize; unsigned int flags; int offset; }; struct btf_param { __u32 name_off; __u32 type; }; struct eprobe_data { struct trace_event_file *file; struct trace_eprobe *ep; }; struct event_file_link { struct trace_event_file *file; struct list_head list; }; enum dynevent_type { DYNEVENT_TYPE_SYNTH = 1, DYNEVENT_TYPE_KPROBE = 2, DYNEVENT_TYPE_NONE = 3, }; enum { SYNTH_ERR_BAD_NAME = 0, SYNTH_ERR_INVALID_CMD = 1, SYNTH_ERR_INVALID_DYN_CMD = 2, SYNTH_ERR_EVENT_EXISTS = 3, SYNTH_ERR_TOO_MANY_FIELDS = 4, SYNTH_ERR_INCOMPLETE_TYPE = 5, SYNTH_ERR_INVALID_TYPE = 6, SYNTH_ERR_INVALID_FIELD = 7, SYNTH_ERR_INVALID_ARRAY_SPEC = 8, }; struct trace_dynamic_info { u16 offset; u16 len; }; union trace_synth_field { u8 as_u8; u16 as_u16; u32 as_u32; u64 as_u64; struct trace_dynamic_info as_dynamic; }; struct synth_trace_event { struct trace_entry ent; union trace_synth_field fields[0]; }; struct synth_field; struct synth_event { struct dyn_event devent; int ref; char *name; struct synth_field **fields; unsigned int n_fields; struct synth_field **dynamic_fields; unsigned int n_dynamic_fields; unsigned int n_u64; struct trace_event_class class; struct trace_event_call call; struct tracepoint *tp; struct module *mod; }; struct synth_field { char *type; char *name; size_t size; unsigned int offset; unsigned int field_pos; bool is_signed; bool is_string; bool is_dynamic; bool is_stack; }; struct dynevent_arg_pair { const char *lhs; const char *rhs; char operator; char separator; }; struct dynevent_cmd; typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *); struct dynevent_cmd { struct seq_buf seq; const char *event_name; unsigned int n_fields; enum dynevent_type type; dynevent_create_fn_t run_command; void *private_data; }; typedef 
int (*dynevent_check_arg_fn_t)(void *); struct dynevent_arg { const char *str; char separator; }; struct synth_event_trace_state { struct trace_event_buffer fbuffer; struct synth_trace_event *entry; struct trace_buffer *buffer; struct synth_event *event; unsigned int cur_field; unsigned int n_u64; bool disabled; bool add_next; bool add_name; }; struct synth_field_desc { const char *type; const char *name; }; enum hist_field_fn { HIST_FIELD_FN_NOP = 0, HIST_FIELD_FN_VAR_REF = 1, HIST_FIELD_FN_COUNTER = 2, HIST_FIELD_FN_CONST = 3, HIST_FIELD_FN_LOG2 = 4, HIST_FIELD_FN_BUCKET = 5, HIST_FIELD_FN_TIMESTAMP = 6, HIST_FIELD_FN_CPU = 7, HIST_FIELD_FN_STRING = 8, HIST_FIELD_FN_DYNSTRING = 9, HIST_FIELD_FN_RELDYNSTRING = 10, HIST_FIELD_FN_PSTRING = 11, HIST_FIELD_FN_S64 = 12, HIST_FIELD_FN_U64 = 13, HIST_FIELD_FN_S32 = 14, HIST_FIELD_FN_U32 = 15, HIST_FIELD_FN_S16 = 16, HIST_FIELD_FN_U16 = 17, HIST_FIELD_FN_S8 = 18, HIST_FIELD_FN_U8 = 19, HIST_FIELD_FN_UMINUS = 20, HIST_FIELD_FN_MINUS = 21, HIST_FIELD_FN_PLUS = 22, HIST_FIELD_FN_DIV = 23, HIST_FIELD_FN_MULT = 24, HIST_FIELD_FN_DIV_POWER2 = 25, HIST_FIELD_FN_DIV_NOT_POWER2 = 26, HIST_FIELD_FN_DIV_MULT_SHIFT = 27, HIST_FIELD_FN_EXECNAME = 28, HIST_FIELD_FN_STACK = 29, }; enum field_op_id { FIELD_OP_NONE = 0, FIELD_OP_PLUS = 1, FIELD_OP_MINUS = 2, FIELD_OP_UNARY_MINUS = 3, FIELD_OP_DIV = 4, FIELD_OP_MULT = 5, }; enum handler_id { HANDLER_ONMATCH = 1, HANDLER_ONMAX = 2, HANDLER_ONCHANGE = 3, }; enum action_id { ACTION_SAVE = 1, ACTION_TRACE = 2, ACTION_SNAPSHOT = 3, }; enum hist_field_flags { HIST_FIELD_FL_HITCOUNT = 1, HIST_FIELD_FL_KEY = 2, HIST_FIELD_FL_STRING = 4, HIST_FIELD_FL_HEX = 8, HIST_FIELD_FL_SYM = 16, HIST_FIELD_FL_SYM_OFFSET = 32, HIST_FIELD_FL_EXECNAME = 64, HIST_FIELD_FL_SYSCALL = 128, HIST_FIELD_FL_STACKTRACE = 256, HIST_FIELD_FL_LOG2 = 512, HIST_FIELD_FL_TIMESTAMP = 1024, HIST_FIELD_FL_TIMESTAMP_USECS = 2048, HIST_FIELD_FL_VAR = 4096, HIST_FIELD_FL_EXPR = 8192, HIST_FIELD_FL_VAR_REF = 16384, HIST_FIELD_FL_CPU = 32768, HIST_FIELD_FL_ALIAS = 65536, HIST_FIELD_FL_BUCKET = 131072, HIST_FIELD_FL_CONST = 262144, HIST_FIELD_FL_PERCENT = 524288, HIST_FIELD_FL_GRAPH = 1048576, }; enum { HIST_ERR_NONE = 0, HIST_ERR_DUPLICATE_VAR = 1, HIST_ERR_VAR_NOT_UNIQUE = 2, HIST_ERR_TOO_MANY_VARS = 3, HIST_ERR_MALFORMED_ASSIGNMENT = 4, HIST_ERR_NAMED_MISMATCH = 5, HIST_ERR_TRIGGER_EEXIST = 6, HIST_ERR_TRIGGER_ENOENT_CLEAR = 7, HIST_ERR_SET_CLOCK_FAIL = 8, HIST_ERR_BAD_FIELD_MODIFIER = 9, HIST_ERR_TOO_MANY_SUBEXPR = 10, HIST_ERR_TIMESTAMP_MISMATCH = 11, HIST_ERR_TOO_MANY_FIELD_VARS = 12, HIST_ERR_EVENT_FILE_NOT_FOUND = 13, HIST_ERR_HIST_NOT_FOUND = 14, HIST_ERR_HIST_CREATE_FAIL = 15, HIST_ERR_SYNTH_VAR_NOT_FOUND = 16, HIST_ERR_SYNTH_EVENT_NOT_FOUND = 17, HIST_ERR_SYNTH_TYPE_MISMATCH = 18, HIST_ERR_SYNTH_COUNT_MISMATCH = 19, HIST_ERR_FIELD_VAR_PARSE_FAIL = 20, HIST_ERR_VAR_CREATE_FIND_FAIL = 21, HIST_ERR_ONX_NOT_VAR = 22, HIST_ERR_ONX_VAR_NOT_FOUND = 23, HIST_ERR_ONX_VAR_CREATE_FAIL = 24, HIST_ERR_FIELD_VAR_CREATE_FAIL = 25, HIST_ERR_TOO_MANY_PARAMS = 26, HIST_ERR_PARAM_NOT_FOUND = 27, HIST_ERR_INVALID_PARAM = 28, HIST_ERR_ACTION_NOT_FOUND = 29, HIST_ERR_NO_SAVE_PARAMS = 30, HIST_ERR_TOO_MANY_SAVE_ACTIONS = 31, HIST_ERR_ACTION_MISMATCH = 32, HIST_ERR_NO_CLOSING_PAREN = 33, HIST_ERR_SUBSYS_NOT_FOUND = 34, HIST_ERR_INVALID_SUBSYS_EVENT = 35, HIST_ERR_INVALID_REF_KEY = 36, HIST_ERR_VAR_NOT_FOUND = 37, HIST_ERR_FIELD_NOT_FOUND = 38, HIST_ERR_EMPTY_ASSIGNMENT = 39, HIST_ERR_INVALID_SORT_MODIFIER = 40, HIST_ERR_EMPTY_SORT_FIELD = 41, HIST_ERR_TOO_MANY_SORT_FIELDS = 
42, HIST_ERR_INVALID_SORT_FIELD = 43, HIST_ERR_INVALID_STR_OPERAND = 44, HIST_ERR_EXPECT_NUMBER = 45, HIST_ERR_UNARY_MINUS_SUBEXPR = 46, HIST_ERR_DIVISION_BY_ZERO = 47, HIST_ERR_NEED_NOHC_VAL = 48, }; struct hist_trigger_data; struct hist_var_data { struct list_head list; struct hist_trigger_data *hist_data; }; struct hist_field; struct hist_trigger_attrs; struct action_data; struct field_var; struct field_var_hist; struct hist_trigger_data { struct hist_field *fields[22]; unsigned int n_vals; unsigned int n_keys; unsigned int n_fields; unsigned int n_vars; unsigned int n_var_str; unsigned int key_size; struct tracing_map_sort_key sort_keys[2]; unsigned int n_sort_keys; struct trace_event_file *event_file; struct hist_trigger_attrs *attrs; struct tracing_map *map; bool enable_timestamps; bool remove; struct hist_field *var_refs[16]; unsigned int n_var_refs; struct action_data *actions[8]; unsigned int n_actions; struct field_var *field_vars[64]; unsigned int n_field_vars; unsigned int n_field_var_str; struct field_var_hist *field_var_hists[64]; unsigned int n_field_var_hists; struct field_var *save_vars[64]; unsigned int n_save_vars; unsigned int n_save_var_str; }; struct hist_var { char *name; struct hist_trigger_data *hist_data; unsigned int idx; }; struct hist_field { struct ftrace_event_field *field; unsigned long flags; unsigned long buckets; const char *type; struct hist_field *operands[2]; struct hist_trigger_data *hist_data; enum hist_field_fn fn_num; unsigned int ref; unsigned int size; unsigned int offset; unsigned int is_signed; struct hist_var var; enum field_op_id operator; char *system; char *event_name; char *name; unsigned int var_ref_idx; bool read_once; unsigned int var_str_idx; u64 constant; u64 div_multiplier; }; struct var_defs { unsigned int n_vars; char *name[16]; char *expr[16]; }; struct hist_trigger_attrs { char *keys_str; char *vals_str; char *sort_key_str; char *name; char *clock; bool pause; bool cont; bool clear; bool ts_in_usecs; bool no_hitcount; unsigned int map_bits; char *assignment_str[16]; unsigned int n_assignments; char *action_str[8]; unsigned int n_actions; struct var_defs var_defs; }; typedef bool (*check_track_val_fn_t)(u64, u64); typedef void (*action_fn_t)(struct hist_trigger_data *, struct tracing_map_elt *, struct trace_buffer *, void *, struct ring_buffer_event *, void *, struct action_data *, u64 *); struct action_data { enum handler_id handler; enum action_id action; char *action_name; action_fn_t fn; unsigned int n_params; char *params[64]; unsigned int var_ref_idx[64]; struct synth_event *synth_event; bool use_trace_keyword; char *synth_event_name; union { struct { char *event; char *event_system; } match_data; struct { char *var_str; struct hist_field *var_ref; struct hist_field *track_var; check_track_val_fn_t check_val; action_fn_t save_data; } track_data; }; }; struct field_var { struct hist_field *var; struct hist_field *val; }; struct field_var_hist { struct hist_trigger_data *hist_data; char *cmd; }; struct hist_val_stat { u64 max; u64 total; }; struct hist_elt_data { char *comm; u64 *var_ref_vals; char **field_var_str; int n_field_var_str; }; typedef void (*synth_probe_func_t)(void *, u64 *, unsigned int *); struct track_data { u64 track_val; bool updated; unsigned int key_len; void *key; struct tracing_map_elt elt; struct action_data *action_data; struct hist_trigger_data *hist_data; }; struct bpf_mem_caches; struct bpf_mem_cache; struct bpf_mem_alloc { struct bpf_mem_caches __attribute__((btf_type_tag("percpu"))) *caches; struct 
bpf_mem_cache __attribute__((btf_type_tag("percpu"))) *cache; bool percpu; struct work_struct work; }; struct bpf_local_storage_map_bucket; struct bpf_local_storage_map { struct bpf_map map; struct bpf_local_storage_map_bucket *buckets; u32 bucket_log; u16 elem_size; u16 cache_idx; struct bpf_mem_alloc selem_ma; struct bpf_mem_alloc storage_ma; bool bpf_ma; long: 64; long: 64; long: 64; }; struct bpf_local_storage_map_bucket { struct hlist_head list; raw_spinlock_t lock; }; struct bpf_mem_cache { struct llist_head free_llist; local_t active; struct llist_head free_llist_extra; struct irq_work refill_work; struct obj_cgroup *objcg; int unit_size; int free_cnt; int low_watermark; int high_watermark; int batch; int percpu_size; bool draining; struct bpf_mem_cache *tgt; struct llist_head free_by_rcu; struct llist_node *free_by_rcu_tail; struct llist_head waiting_for_gp; struct llist_node *waiting_for_gp_tail; struct callback_head rcu; atomic_t call_rcu_in_progress; struct llist_head free_llist_extra_rcu; struct llist_head free_by_rcu_ttrace; struct llist_head waiting_for_gp_ttrace; struct callback_head rcu_ttrace; atomic_t call_rcu_ttrace_in_progress; }; struct bpf_mem_caches { struct bpf_mem_cache cache[11]; }; struct bpf_local_storage_data { struct bpf_local_storage_map __attribute__((btf_type_tag("rcu"))) *smap; u8 data[0]; }; struct bpf_id_pair { u32 old; u32 cur; }; struct bpf_idmap { u32 tmp_id_gen; struct bpf_id_pair map[600]; }; struct bpf_idset { u32 count; u32 ids[600]; }; struct bpf_verifier_log { u64 start_pos; u64 end_pos; char __attribute__((btf_type_tag("user"))) *ubuf; u32 level; u32 len_total; u32 len_max; char kbuf[1024]; }; struct bpf_subprog_info { u32 start; u32 linfo_idx; u16 stack_depth; bool has_tail_call; bool tail_call_reachable; bool has_ld_abs; bool is_async_cb; u64 android_kabi_reserved1; }; struct backtrack_state { struct bpf_verifier_env *env; u32 frame; u32 reg_masks[8]; u64 stack_masks[8]; }; typedef sockptr_t bpfptr_t; struct bpf_verifier_ops; struct bpf_verifier_stack_elem; struct bpf_verifier_state; struct bpf_verifier_state_list; struct bpf_insn_aux_data; struct bpf_verifier_env { u32 insn_idx; u32 prev_insn_idx; struct bpf_prog *prog; const struct bpf_verifier_ops *ops; struct bpf_verifier_stack_elem *head; int stack_size; bool strict_alignment; bool test_state_freq; struct bpf_verifier_state *cur_state; struct bpf_verifier_state_list **explored_states; struct bpf_verifier_state_list *free_list; struct bpf_map *used_maps[64]; struct btf_mod_pair used_btfs[64]; u32 used_map_cnt; u32 used_btf_cnt; u32 id_gen; bool explore_alu_limits; bool allow_ptr_leaks; bool allow_uninit_stack; bool bpf_capable; bool bypass_spec_v1; bool bypass_spec_v4; bool seen_direct_write; struct bpf_insn_aux_data *insn_aux_data; const struct bpf_line_info *prev_linfo; struct bpf_verifier_log log; struct bpf_subprog_info subprog_info[257]; union { struct bpf_idmap idmap_scratch; struct bpf_idset idset_scratch; }; struct { int *insn_state; int *insn_stack; int cur_stack; } cfg; struct backtrack_state bt; u32 pass_cnt; u32 subprog_cnt; u32 prev_insn_processed; u32 insn_processed; u32 prev_jmps_processed; u32 jmps_processed; u64 verification_time; u32 max_states_per_insn; u32 total_states; u32 peak_states; u32 longest_mark_read_walk; bpfptr_t fd_array; u32 scratched_regs; u64 scratched_stack_slots; u64 prev_log_pos; u64 prev_insn_print_pos; char tmp_str_buf[320]; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; enum bpf_access_type { BPF_READ = 1, BPF_WRITE = 2, }; struct 
bpf_insn_access_aux; struct bpf_reg_state; struct bpf_verifier_ops { const struct bpf_func_proto * (*get_func_proto)(enum bpf_func_id, const struct bpf_prog *); bool (*is_valid_access)(int, int, enum bpf_access_type, const struct bpf_prog *, struct bpf_insn_access_aux *); int (*gen_prologue)(struct bpf_insn *, bool, const struct bpf_prog *); int (*gen_ld_abs)(const struct bpf_insn *, struct bpf_insn *); u32 (*convert_ctx_access)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog *, u32 *); int (*btf_struct_access)(struct bpf_verifier_log *, const struct bpf_reg_state *, int, int); u64 android_kabi_reserved1; }; struct bpf_insn_access_aux { enum bpf_reg_type reg_type; union { int ctx_field_size; struct { struct btf *btf; u32 btf_id; }; }; struct bpf_verifier_log *log; }; enum bpf_dynptr_type { BPF_DYNPTR_TYPE_INVALID = 0, BPF_DYNPTR_TYPE_LOCAL = 1, BPF_DYNPTR_TYPE_RINGBUF = 2, BPF_DYNPTR_TYPE_SKB = 3, BPF_DYNPTR_TYPE_XDP = 4, }; enum bpf_iter_state { BPF_ITER_STATE_INVALID = 0, BPF_ITER_STATE_ACTIVE = 1, BPF_ITER_STATE_DRAINED = 2, }; struct tnum { u64 value; u64 mask; }; enum bpf_reg_liveness { REG_LIVE_NONE = 0, REG_LIVE_READ32 = 1, REG_LIVE_READ64 = 2, REG_LIVE_READ = 3, REG_LIVE_WRITTEN = 4, REG_LIVE_DONE = 8, }; struct bpf_reg_state { enum bpf_reg_type type; s32 off; union { int range; struct { struct bpf_map *map_ptr; u32 map_uid; }; struct { struct btf *btf; u32 btf_id; }; struct { u32 mem_size; u32 dynptr_id; }; struct { enum bpf_dynptr_type type; bool first_slot; } dynptr; struct { struct btf *btf; u32 btf_id; enum bpf_iter_state state: 2; int depth: 30; } iter; struct { unsigned long raw1; unsigned long raw2; } raw; u32 subprogno; }; struct tnum var_off; s64 smin_value; s64 smax_value; u64 umin_value; u64 umax_value; s32 s32_min_value; s32 s32_max_value; u32 u32_min_value; u32 u32_max_value; u32 id; u32 ref_obj_id; struct bpf_reg_state *parent; u32 frameno; s32 subreg_def; enum bpf_reg_liveness live; bool precise; }; struct bpf_active_lock { void *ptr; u32 id; }; struct bpf_idx_pair; struct bpf_verifier_state { struct bpf_func_state *frame[8]; struct bpf_verifier_state *parent; u32 branches; u32 insn_idx; u32 curframe; struct bpf_active_lock active_lock; bool speculative; bool active_rcu_lock; bool used_as_loop_entry; u32 first_insn_idx; u32 last_insn_idx; struct bpf_verifier_state *loop_entry; struct bpf_idx_pair *jmp_history; u32 jmp_history_cnt; u32 dfs_depth; u32 callback_unroll_depth; }; struct bpf_reference_state; struct bpf_stack_state; struct bpf_func_state { struct bpf_reg_state regs[11]; int callsite; u32 frameno; u32 subprogno; u32 async_entry_cnt; bool in_callback_fn; struct tnum callback_ret_range; bool in_async_callback_fn; u32 callback_depth; int acquired_refs; struct bpf_reference_state *refs; int allocated_stack; struct bpf_stack_state *stack; }; struct bpf_reference_state { int id; int insn_idx; int callback_ref; }; struct bpf_stack_state { struct bpf_reg_state spilled_ptr; u8 slot_type[8]; }; struct bpf_idx_pair { u32 prev_idx; u32 idx; }; struct bpf_verifier_state_list { struct bpf_verifier_state state; struct bpf_verifier_state_list *next; int miss_cnt; int hit_cnt; }; struct bpf_loop_inline_state { unsigned int initialized: 1; unsigned int fit_for_inline: 1; u32 callback_subprogno; }; struct btf_struct_meta; struct bpf_insn_aux_data { union { enum bpf_reg_type ptr_type; unsigned long map_ptr_state; s32 call_imm; u32 alu_limit; struct { u32 map_index; u32 map_off; }; struct { enum bpf_reg_type reg_type; union { struct { struct 
btf *btf; u32 btf_id; }; u32 mem_size; }; } btf_var; struct bpf_loop_inline_state loop_inline_state; }; union { u64 obj_new_size; u64 insert_off; }; struct btf_struct_meta *kptr_struct_meta; u64 map_key_state; int ctx_field_size; u32 seen; bool sanitize_stack_spill; bool zext_dst; bool storage_get_func_atomic; bool is_iter_next; u8 alu_state; unsigned int orig_idx; bool jmp_point; bool prune_point; bool force_checkpoint; bool calls_callback; }; struct btf_struct_meta { u32 btf_id; struct btf_record *record; }; typedef void (*btf_trace_bpf_trace_printk)(void *, const char *); struct bpf_nested_pt_regs { struct pt_regs regs[3]; }; struct bpf_trace_sample_data { struct perf_sample_data sds[3]; }; struct send_signal_irq_work { struct irq_work irq_work; struct task_struct *task; u32 sig; enum pid_type type; }; struct bpf_raw_tp_regs { struct pt_regs regs[3]; }; enum key_lookup_flag { KEY_LOOKUP_CREATE = 1, KEY_LOOKUP_PARTIAL = 2, KEY_LOOKUP_ALL = 3, }; enum key_need_perm { KEY_NEED_UNSPECIFIED = 0, KEY_NEED_VIEW = 1, KEY_NEED_READ = 2, KEY_NEED_WRITE = 3, KEY_NEED_SEARCH = 4, KEY_NEED_LINK = 5, KEY_NEED_SETATTR = 6, KEY_NEED_UNLINK = 7, KEY_SYSADMIN_OVERRIDE = 8, KEY_AUTHTOKEN_OVERRIDE = 9, KEY_DEFER_PERM_CHECK = 10, }; enum bpf_task_fd_type { BPF_FD_TYPE_RAW_TRACEPOINT = 0, BPF_FD_TYPE_TRACEPOINT = 1, BPF_FD_TYPE_KPROBE = 2, BPF_FD_TYPE_KRETPROBE = 3, BPF_FD_TYPE_UPROBE = 4, BPF_FD_TYPE_URETPROBE = 5, }; enum uprobe_filter_ctx { UPROBE_FILTER_REGISTER = 0, UPROBE_FILTER_UNREGISTER = 1, UPROBE_FILTER_MMAP = 2, }; enum { BPF_F_UPROBE_MULTI_RETURN = 1, }; enum { BTF_F_COMPACT = 1, BTF_F_NONAME = 2, BTF_F_PTR_RAW = 4, BTF_F_ZERO = 8, }; enum { BPF_F_INDEX_MASK = 4294967295ULL, BPF_F_CURRENT_CPU = 4294967295ULL, BPF_F_CTXLEN_MASK = 4503595332403200ULL, }; enum { BPF_F_GET_BRANCH_RECORDS_SIZE = 1, }; typedef u64 (*btf_bpf_probe_read_user)(void *, u32, const void __attribute__((btf_type_tag("user"))) *); typedef u64 (*btf_bpf_probe_read_user_str)(void *, u32, const void __attribute__((btf_type_tag("user"))) *); typedef u64 (*btf_bpf_probe_read_kernel)(void *, u32, const void *); typedef u64 (*btf_bpf_probe_read_kernel_str)(void *, u32, const void *); typedef u64 (*btf_bpf_probe_read_compat)(void *, u32, const void *); typedef u64 (*btf_bpf_probe_read_compat_str)(void *, u32, const void *); typedef u64 (*btf_bpf_probe_write_user)(void __attribute__((btf_type_tag("user"))) *, const void *, u32); typedef u64 (*btf_bpf_trace_printk)(char *, u32, u64, u64, u64); typedef u64 (*btf_bpf_trace_vprintk)(char *, u32, const void *, u32); typedef u64 (*btf_bpf_seq_printf)(struct seq_file *, char *, u32, const void *, u32); typedef u64 (*btf_bpf_seq_write)(struct seq_file *, const void *, u32); struct btf_ptr; typedef u64 (*btf_bpf_seq_printf_btf)(struct seq_file *, struct btf_ptr *, u32, u64); struct btf_ptr { void *ptr; __u32 type_id; __u32 flags; }; typedef u64 (*btf_bpf_perf_event_read)(struct bpf_map *, u64); struct bpf_perf_event_value; typedef u64 (*btf_bpf_perf_event_read_value)(struct bpf_map *, u64, struct bpf_perf_event_value *, u32); struct bpf_perf_event_value { __u64 counter; __u64 enabled; __u64 running; }; typedef u64 (*btf_bpf_perf_event_output)(struct pt_regs *, struct bpf_map *, u64, void *, u64); typedef u64 (*btf_bpf_get_current_task)(); typedef u64 (*btf_bpf_get_current_task_btf)(); typedef u64 (*btf_bpf_task_pt_regs)(struct task_struct *); typedef u64 (*btf_bpf_current_task_under_cgroup)(struct bpf_map *, u32); typedef u64 (*btf_bpf_send_signal)(u32); typedef u64 
(*btf_bpf_send_signal_thread)(u32); typedef u64 (*btf_bpf_d_path)(struct path *, char *, u32); typedef u64 (*btf_bpf_snprintf_btf)(char *, u32, struct btf_ptr *, u32, u64); typedef u64 (*btf_bpf_get_func_ip_tracing)(void *); typedef u64 (*btf_bpf_get_func_ip_kprobe)(struct pt_regs *); typedef u64 (*btf_bpf_get_func_ip_kprobe_multi)(struct pt_regs *); typedef u64 (*btf_bpf_get_attach_cookie_kprobe_multi)(struct pt_regs *); typedef u64 (*btf_bpf_get_func_ip_uprobe_multi)(struct pt_regs *); typedef u64 (*btf_bpf_get_attach_cookie_uprobe_multi)(struct pt_regs *); typedef u64 (*btf_bpf_get_attach_cookie_trace)(void *); struct bpf_perf_event_data_kern; typedef u64 (*btf_bpf_get_attach_cookie_pe)(struct bpf_perf_event_data_kern *); typedef struct user_pt_regs bpf_user_pt_regs_t; struct bpf_perf_event_data_kern { bpf_user_pt_regs_t *regs; struct perf_sample_data *data; struct perf_event *event; }; typedef u64 (*btf_bpf_get_attach_cookie_tracing)(void *); typedef u64 (*btf_bpf_get_branch_snapshot)(void *, u32, u64); typedef u64 (*btf_get_func_arg)(void *, u32, u64 *); typedef u64 (*btf_get_func_ret)(void *, u64 *); typedef u64 (*btf_get_func_arg_cnt)(void *); typedef u64 (*btf_bpf_perf_event_output_tp)(void *, struct bpf_map *, u64, void *, u64); typedef u64 (*btf_bpf_get_stackid_tp)(void *, struct bpf_map *, u64); typedef u64 (*btf_bpf_get_stack_tp)(void *, void *, u32, u64); typedef u64 (*btf_bpf_perf_prog_read_value)(struct bpf_perf_event_data_kern *, struct bpf_perf_event_value *, u32); typedef u64 (*btf_bpf_read_branch_records)(struct bpf_perf_event_data_kern *, void *, u32, u64); struct bpf_raw_tracepoint_args; typedef u64 (*btf_bpf_perf_event_output_raw_tp)(struct bpf_raw_tracepoint_args *, struct bpf_map *, u64, void *, u64); struct bpf_raw_tracepoint_args { __u64 args[0]; }; typedef u64 (*btf_bpf_get_stackid_raw_tp)(struct bpf_raw_tracepoint_args *, struct bpf_map *, u64); typedef u64 (*btf_bpf_get_stack_raw_tp)(struct bpf_raw_tracepoint_args *, void *, u32, u64); struct trace_event_raw_bpf_trace_printk { struct trace_entry ent; u32 __data_loc_bpf_string; char __data[0]; }; struct bpf_array_aux; struct bpf_array { struct bpf_map map; u32 elem_size; u32 index_mask; struct bpf_array_aux *aux; union { struct { struct {} __empty_value; char value[0]; }; struct { struct {} __empty_ptrs; void *ptrs[0]; }; struct { struct {} __empty_pptrs; void __attribute__((btf_type_tag("percpu"))) *pptrs[0]; }; }; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct bpf_array_aux { struct list_head poke_progs; struct bpf_map *map; struct mutex poke_mutex; struct work_struct work; }; struct bpf_trace_run_ctx { struct bpf_run_ctx run_ctx; u64 bpf_cookie; bool is_uprobe; }; struct trace_uprobe; struct uprobe_dispatch_data { struct trace_uprobe *tu; unsigned long bp_addr; }; struct bpf_uprobe; struct bpf_uprobe_multi_run_ctx { struct bpf_run_ctx run_ctx; unsigned long entry_ip; struct bpf_uprobe *uprobe; }; struct uprobe_consumer { int (*handler)(struct uprobe_consumer *, struct pt_regs *); int (*ret_handler)(struct uprobe_consumer *, unsigned long, struct pt_regs *); bool (*filter)(struct uprobe_consumer *, enum uprobe_filter_ctx, struct mm_struct *); struct uprobe_consumer *next; }; struct bpf_uprobe_multi_link; struct bpf_uprobe { struct bpf_uprobe_multi_link *link; loff_t offset; u64 cookie; struct uprobe_consumer consumer; }; struct bpf_uprobe_multi_link { struct path path; struct bpf_link link; u32 cnt; struct bpf_uprobe *uprobes; struct task_struct *task; }; struct bpf_trace_module { struct 
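/* BPF-side usage sketch for the tracing helpers whose BTF typedefs appear
 * above (btf_bpf_perf_event_output and friends), together with
 * BPF_F_CURRENT_CPU. Illustrative only; it assumes libbpf's bpf_helpers.h
 * and a hypothetical "events" BPF_MAP_TYPE_PERF_EVENT_ARRAY map:
 *
 *   SEC("kprobe/do_sys_openat2")
 *   int trace_open(struct pt_regs *ctx)
 *   {
 *           struct { __u32 pid; __u64 ts; } ev = {};
 *
 *           ev.pid = bpf_get_current_pid_tgid() >> 32;
 *           ev.ts  = bpf_ktime_get_ns();
 *           bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *                                 &ev, sizeof(ev));
 *           return 0;
 *   }
 */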
module *module; struct list_head list; }; struct trace_event_data_offsets_bpf_trace_printk { u32 bpf_string; }; typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *, const void *); struct bpf_bprintf_data { u32 *bin_args; char *buf; bool get_bin_args; bool get_buf; }; struct bpf_event_entry { struct perf_event *event; struct file *perf_file; struct file *map_file; struct callback_head rcu; }; struct __key_reference_with_attributes; typedef struct __key_reference_with_attributes *key_ref_t; struct bpf_dynptr_kern { void *data; u32 size; u32 offset; }; struct bpf_link_primer { struct bpf_link *link; struct file *file; int fd; u32 id; }; struct btf_id_set { u32 cnt; u32 ids[0]; }; struct bpf_key { struct key *key; bool has_ref; }; typedef unsigned long (*bpf_ctx_copy_t)(void *, const void *, unsigned long, unsigned long); struct perf_event_query_bpf { __u32 ids_len; __u32 prog_cnt; __u32 ids[0]; }; struct trace_kprobe { struct dyn_event devent; struct kretprobe rp; unsigned long __attribute__((btf_type_tag("percpu"))) *nhit; const char *symbol; struct trace_probe tp; }; struct kretprobe_trace_entry_head { struct trace_entry ent; unsigned long func; unsigned long ret_ip; }; struct kprobe_trace_entry_head { struct trace_entry ent; unsigned long ip; }; struct sym_count_ctx { unsigned int count; const char *name; }; typedef void (*btf_trace_error_report_end)(void *, enum error_detector, unsigned long); struct trace_event_raw_error_report_template { struct trace_entry ent; enum error_detector error_detector; unsigned long id; char __data[0]; }; struct trace_event_data_offsets_error_report_template {}; typedef void (*btf_trace_cpu_idle)(void *, unsigned int, unsigned int); typedef void (*btf_trace_cpu_idle_miss)(void *, unsigned int, unsigned int, bool); typedef void (*btf_trace_powernv_throttle)(void *, int, const char *, int); typedef void (*btf_trace_pstate_sample)(void *, u32, u32, u32, u32, u64, u64, u64, u32, u32); typedef void (*btf_trace_cpu_frequency)(void *, unsigned int, unsigned int); typedef void (*btf_trace_cpu_frequency_limits)(void *, struct cpufreq_policy *); typedef void (*btf_trace_device_pm_callback_start)(void *, struct device *, const char *, int); typedef void (*btf_trace_device_pm_callback_end)(void *, struct device *, int); typedef void (*btf_trace_suspend_resume)(void *, const char *, int, bool); typedef void (*btf_trace_wakeup_source_activate)(void *, const char *, unsigned int); typedef void (*btf_trace_wakeup_source_deactivate)(void *, const char *, unsigned int); typedef void (*btf_trace_clock_enable)(void *, const char *, unsigned int, unsigned int); typedef void (*btf_trace_clock_disable)(void *, const char *, unsigned int, unsigned int); typedef void (*btf_trace_clock_set_rate)(void *, const char *, unsigned int, unsigned int); typedef void (*btf_trace_power_domain_target)(void *, const char *, unsigned int, unsigned int); typedef void (*btf_trace_pm_qos_add_request)(void *, s32); typedef void (*btf_trace_pm_qos_update_request)(void *, s32); typedef void (*btf_trace_pm_qos_remove_request)(void *, s32); typedef void (*btf_trace_pm_qos_update_target)(void *, enum pm_qos_req_action, int, int); typedef void (*btf_trace_pm_qos_update_flags)(void *, enum pm_qos_req_action, int, int); typedef void (*btf_trace_dev_pm_qos_add_request)(void *, const char *, enum dev_pm_qos_req_type, s32); typedef void (*btf_trace_dev_pm_qos_update_request)(void *, const char *, enum dev_pm_qos_req_type, s32); typedef void (*btf_trace_dev_pm_qos_remove_request)(void *, const char *, enum 
dev_pm_qos_req_type, s32); typedef void (*btf_trace_guest_halt_poll_ns)(void *, bool, unsigned int, unsigned int); struct trace_event_raw_cpu { struct trace_entry ent; u32 state; u32 cpu_id; char __data[0]; }; struct trace_event_raw_cpu_idle_miss { struct trace_entry ent; u32 cpu_id; u32 state; bool below; char __data[0]; }; struct trace_event_raw_powernv_throttle { struct trace_entry ent; int chip_id; u32 __data_loc_reason; int pmax; char __data[0]; }; struct trace_event_raw_pstate_sample { struct trace_entry ent; u32 core_busy; u32 scaled_busy; u32 from; u32 to; u64 mperf; u64 aperf; u64 tsc; u32 freq; u32 io_boost; char __data[0]; }; struct trace_event_raw_cpu_frequency_limits { struct trace_entry ent; u32 min_freq; u32 max_freq; u32 cpu_id; char __data[0]; }; struct trace_event_raw_device_pm_callback_start { struct trace_entry ent; u32 __data_loc_device; u32 __data_loc_driver; u32 __data_loc_parent; u32 __data_loc_pm_ops; int event; char __data[0]; }; struct trace_event_raw_device_pm_callback_end { struct trace_entry ent; u32 __data_loc_device; u32 __data_loc_driver; int error; char __data[0]; }; struct trace_event_raw_suspend_resume { struct trace_entry ent; const char *action; int val; bool start; char __data[0]; }; struct trace_event_raw_wakeup_source { struct trace_entry ent; u32 __data_loc_name; u64 state; char __data[0]; }; struct trace_event_raw_clock { struct trace_entry ent; u32 __data_loc_name; u64 state; u64 cpu_id; char __data[0]; }; struct trace_event_raw_power_domain { struct trace_entry ent; u32 __data_loc_name; u64 state; u64 cpu_id; char __data[0]; }; struct trace_event_raw_cpu_latency_qos_request { struct trace_entry ent; s32 value; char __data[0]; }; struct trace_event_raw_pm_qos_update { struct trace_entry ent; enum pm_qos_req_action action; int prev_value; int curr_value; char __data[0]; }; struct trace_event_raw_dev_pm_qos_request { struct trace_entry ent; u32 __data_loc_name; enum dev_pm_qos_req_type type; s32 new_value; char __data[0]; }; struct trace_event_raw_guest_halt_poll_ns { struct trace_entry ent; bool grow; unsigned int new; unsigned int old; char __data[0]; }; struct trace_event_data_offsets_powernv_throttle { u32 reason; }; struct trace_event_data_offsets_wakeup_source { u32 name; }; struct trace_event_data_offsets_clock { u32 name; }; struct trace_event_data_offsets_power_domain { u32 name; }; struct trace_event_data_offsets_dev_pm_qos_request { u32 name; }; struct trace_event_data_offsets_cpu {}; struct trace_event_data_offsets_cpu_idle_miss {}; struct trace_event_data_offsets_pstate_sample {}; struct trace_event_data_offsets_cpu_frequency_limits {}; struct trace_event_data_offsets_device_pm_callback_start { u32 device; u32 driver; u32 parent; u32 pm_ops; }; struct trace_event_data_offsets_device_pm_callback_end { u32 device; u32 driver; }; struct trace_event_data_offsets_suspend_resume {}; struct trace_event_data_offsets_cpu_latency_qos_request {}; struct trace_event_data_offsets_pm_qos_update {}; struct trace_event_data_offsets_guest_halt_poll_ns {}; typedef void (*btf_trace_rpm_suspend)(void *, struct device *, int); typedef void (*btf_trace_rpm_resume)(void *, struct device *, int); typedef void (*btf_trace_rpm_idle)(void *, struct device *, int); typedef void (*btf_trace_rpm_usage)(void *, struct device *, int); typedef void (*btf_trace_rpm_return_int)(void *, struct device *, unsigned long, int); typedef void (*btf_trace_rpm_status)(void *, struct device *, enum rpm_status); struct trace_event_raw_rpm_internal { struct trace_entry ent; u32 
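/* Note on the __data_loc_* fields used by the trace_event_raw_* records
 * above: each is a packed (length << 16 | offset) descriptor for a dynamic
 * array stored in the record's __data[] area, with the offset measured from
 * the start of the record. An illustrative accessor, assuming a raw record
 * pointer `rec` (macro names are hypothetical):
 *
 *   #define TE_DATA_LOC_PTR(rec, loc)  ((void *)(rec) + ((loc) & 0xffff))
 *   #define TE_DATA_LOC_LEN(loc)       ((loc) >> 16)
 *
 * e.g. the powernv throttle reason string would be read as
 *   (char *)TE_DATA_LOC_PTR(rec, rec->__data_loc_reason)
 */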
__data_loc_name; int flags; int usage_count; int disable_depth; int runtime_auto; int request_pending; int irq_safe; int child_count; char __data[0]; }; struct trace_event_raw_rpm_return_int { struct trace_entry ent; u32 __data_loc_name; unsigned long ip; int ret; char __data[0]; }; struct trace_event_raw_rpm_status { struct trace_entry ent; u32 __data_loc_name; int status; char __data[0]; }; struct trace_event_data_offsets_rpm_internal { u32 name; }; struct trace_event_data_offsets_rpm_return_int { u32 name; }; struct trace_event_data_offsets_rpm_status { u32 name; }; struct trace_probe_log { const char *subsystem; const char **argv; int argc; int index; }; enum { BTF_KIND_UNKN = 0, BTF_KIND_INT = 1, BTF_KIND_PTR = 2, BTF_KIND_ARRAY = 3, BTF_KIND_STRUCT = 4, BTF_KIND_UNION = 5, BTF_KIND_ENUM = 6, BTF_KIND_FWD = 7, BTF_KIND_TYPEDEF = 8, BTF_KIND_VOLATILE = 9, BTF_KIND_CONST = 10, BTF_KIND_RESTRICT = 11, BTF_KIND_FUNC = 12, BTF_KIND_FUNC_PROTO = 13, BTF_KIND_VAR = 14, BTF_KIND_DATASEC = 15, BTF_KIND_FLOAT = 16, BTF_KIND_DECL_TAG = 17, BTF_KIND_TYPE_TAG = 18, BTF_KIND_ENUM64 = 19, NR_BTF_KINDS = 20, BTF_KIND_MAX = 19, }; struct btf_array { __u32 type; __u32 index_type; __u32 nelems; }; struct btf_member { __u32 name_off; __u32 type; __u32 offset; }; struct btf_anon_stack { u32 tid; u32 offset; }; struct uprobe_cpu_buffer { struct mutex mutex; void *buf; }; struct trace_uprobe { struct dyn_event devent; struct uprobe_consumer consumer; struct path path; struct inode *inode; char *filename; unsigned long offset; unsigned long ref_ctr_offset; unsigned long nhit; struct trace_probe tp; }; struct uprobe_trace_entry_head { struct trace_entry ent; unsigned long vaddr[0]; }; typedef bool (*filter_func_t)(struct uprobe_consumer *, enum uprobe_filter_ctx, struct mm_struct *); struct bpf_empty_prog_array { struct bpf_prog_array hdr; struct bpf_prog *null_prog; }; struct xdp_mem_info { u32 type; u32 id; }; struct xdp_frame { void *data; u16 len; u16 headroom; u32 metasize; struct xdp_mem_info mem; struct net_device *dev_rx; u32 frame_sz; u32 flags; }; struct xdp_rxq_info; struct xdp_txq_info; struct xdp_buff { void *data; void *data_end; void *data_meta; void *data_hard_start; struct xdp_rxq_info *rxq; struct xdp_txq_info *txq; u32 frame_sz; u32 flags; }; struct xdp_rxq_info { struct net_device *dev; u32 queue_index; u32 reg_state; struct xdp_mem_info mem; unsigned int napi_id; u32 frag_size; long: 64; long: 64; long: 64; long: 64; }; struct xdp_txq_info { struct net_device *dev; }; struct xdp_md { __u32 data; __u32 data_end; __u32 data_meta; __u32 ingress_ifindex; __u32 rx_queue_index; __u32 egress_ifindex; }; struct rhash_lock_head {}; typedef void (*btf_trace_xdp_exception)(void *, const struct net_device *, const struct bpf_prog *, u32); typedef void (*btf_trace_xdp_bulk_tx)(void *, const struct net_device *, int, int, int); typedef void (*btf_trace_xdp_redirect)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); typedef void (*btf_trace_xdp_redirect_err)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); typedef void (*btf_trace_xdp_redirect_map)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); typedef void (*btf_trace_xdp_redirect_map_err)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); struct xdp_cpumap_stats; typedef void (*btf_trace_xdp_cpumap_kthread)(void *, 
int, unsigned int, unsigned int, int, struct xdp_cpumap_stats *); struct xdp_cpumap_stats { unsigned int redirect; unsigned int pass; unsigned int drop; }; typedef void (*btf_trace_xdp_cpumap_enqueue)(void *, int, unsigned int, unsigned int, int); typedef void (*btf_trace_xdp_devmap_xmit)(void *, const struct net_device *, const struct net_device *, int, int, int); struct xdp_mem_allocator; typedef void (*btf_trace_mem_disconnect)(void *, const struct xdp_mem_allocator *); struct xdp_mem_allocator { struct xdp_mem_info mem; union { void *allocator; struct page_pool *page_pool; }; struct rhash_head node; struct callback_head rcu; }; typedef void (*btf_trace_mem_connect)(void *, const struct xdp_mem_allocator *, const struct xdp_rxq_info *); typedef void (*btf_trace_mem_return_failed)(void *, const struct xdp_mem_info *, const struct page *); typedef void (*btf_trace_bpf_xdp_link_attach_failed)(void *, const char *); struct rnd_state { __u32 s1; __u32 s2; __u32 s3; __u32 s4; }; struct bpf_prog_dummy { struct bpf_prog prog; }; enum cgroup_bpf_attach_type { CGROUP_BPF_ATTACH_TYPE_INVALID = -1, CGROUP_INET_INGRESS = 0, CGROUP_INET_EGRESS = 1, CGROUP_INET_SOCK_CREATE = 2, CGROUP_SOCK_OPS = 3, CGROUP_DEVICE = 4, CGROUP_INET4_BIND = 5, CGROUP_INET6_BIND = 6, CGROUP_INET4_CONNECT = 7, CGROUP_INET6_CONNECT = 8, CGROUP_INET4_POST_BIND = 9, CGROUP_INET6_POST_BIND = 10, CGROUP_UDP4_SENDMSG = 11, CGROUP_UDP6_SENDMSG = 12, CGROUP_SYSCTL = 13, CGROUP_UDP4_RECVMSG = 14, CGROUP_UDP6_RECVMSG = 15, CGROUP_GETSOCKOPT = 16, CGROUP_SETSOCKOPT = 17, CGROUP_INET4_GETPEERNAME = 18, CGROUP_INET6_GETPEERNAME = 19, CGROUP_INET4_GETSOCKNAME = 20, CGROUP_INET6_GETSOCKNAME = 21, CGROUP_INET_SOCK_RELEASE = 22, CGROUP_LSM_START = 23, CGROUP_LSM_END = 32, MAX_CGROUP_BPF_ATTACH_TYPE = 33, }; enum bpf_jit_poke_reason { BPF_POKE_REASON_TAIL_CALL = 0, }; enum xdp_action { XDP_ABORTED = 0, XDP_DROP = 1, XDP_PASS = 2, XDP_TX = 3, XDP_REDIRECT = 4, }; struct bpf_prog_pack { struct list_head list; void *ptr; unsigned long bitmap[0]; }; typedef u64 (*btf_bpf_user_rnd_u32)(); typedef u64 (*btf_bpf_get_raw_cpu_id)(); struct trace_event_raw_xdp_exception { struct trace_entry ent; int prog_id; u32 act; int ifindex; char __data[0]; }; struct trace_event_raw_xdp_bulk_tx { struct trace_entry ent; int ifindex; u32 act; int drops; int sent; int err; char __data[0]; }; struct _bpf_dtab_netdev { struct net_device *dev; }; struct trace_event_raw_xdp_redirect_template { struct trace_entry ent; int prog_id; u32 act; int ifindex; int err; int to_ifindex; u32 map_id; int map_index; char __data[0]; }; struct trace_event_raw_xdp_cpumap_kthread { struct trace_entry ent; int map_id; u32 act; int cpu; unsigned int drops; unsigned int processed; int sched; unsigned int xdp_pass; unsigned int xdp_drop; unsigned int xdp_redirect; char __data[0]; }; struct trace_event_raw_xdp_cpumap_enqueue { struct trace_entry ent; int map_id; u32 act; int cpu; unsigned int drops; unsigned int processed; int to_cpu; char __data[0]; }; struct trace_event_raw_xdp_devmap_xmit { struct trace_entry ent; int from_ifindex; u32 act; int to_ifindex; int drops; int sent; int err; char __data[0]; }; struct trace_event_raw_mem_disconnect { struct trace_entry ent; const struct xdp_mem_allocator *xa; u32 mem_id; u32 mem_type; const void *allocator; char __data[0]; }; struct trace_event_raw_mem_connect { struct trace_entry ent; const struct xdp_mem_allocator *xa; u32 mem_id; u32 mem_type; const void *allocator; const struct xdp_rxq_info *rxq; int ifindex; char __data[0]; }; struct 
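/* Usage sketch for struct xdp_md and enum xdp_action defined above: a
 * minimal pass-through XDP program. Illustrative only; assumes libbpf's
 * bpf_helpers.h for SEC() and linux/if_ether.h for struct ethhdr:
 *
 *   SEC("xdp")
 *   int xdp_pass_all(struct xdp_md *ctx)
 *   {
 *           void *data     = (void *)(long)ctx->data;
 *           void *data_end = (void *)(long)ctx->data_end;
 *
 *           if (data + sizeof(struct ethhdr) > data_end)
 *                   return XDP_DROP;   // bounds check required by the verifier
 *           return XDP_PASS;
 *   }
 */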
trace_event_raw_mem_return_failed { struct trace_entry ent; const struct page *page; u32 mem_id; u32 mem_type; char __data[0]; }; struct trace_event_raw_bpf_xdp_link_attach_failed { struct trace_entry ent; u32 __data_loc_msg; char __data[0]; }; struct trace_event_data_offsets_bpf_xdp_link_attach_failed { u32 msg; }; struct trace_event_data_offsets_xdp_exception {}; struct trace_event_data_offsets_xdp_bulk_tx {}; struct trace_event_data_offsets_xdp_redirect_template {}; struct trace_event_data_offsets_xdp_cpumap_kthread {}; struct trace_event_data_offsets_xdp_cpumap_enqueue {}; struct trace_event_data_offsets_xdp_devmap_xmit {}; struct trace_event_data_offsets_mem_disconnect {}; struct trace_event_data_offsets_mem_connect {}; struct trace_event_data_offsets_mem_return_failed {}; struct bpf_mprog_cp { struct bpf_link *link; }; struct bpf_mprog_bundle { struct bpf_mprog_entry a; struct bpf_mprog_entry b; struct bpf_mprog_cp cp_items[64]; struct bpf_prog *ref; atomic64_t revision; u32 count; }; enum { BPF_F_NO_PREALLOC = 1, BPF_F_NO_COMMON_LRU = 2, BPF_F_NUMA_NODE = 4, BPF_F_RDONLY = 8, BPF_F_WRONLY = 16, BPF_F_STACK_BUILD_ID = 32, BPF_F_ZERO_SEED = 64, BPF_F_RDONLY_PROG = 128, BPF_F_WRONLY_PROG = 256, BPF_F_CLONE = 512, BPF_F_MMAPABLE = 1024, BPF_F_PRESERVE_ELEMS = 2048, BPF_F_INNER_MAP = 4096, BPF_F_LINK = 8192, BPF_F_PATH_FD = 16384, }; enum { BPF_ANY = 0, BPF_NOEXIST = 1, BPF_EXIST = 2, BPF_F_LOCK = 4, }; enum bpf_cmd { BPF_MAP_CREATE = 0, BPF_MAP_LOOKUP_ELEM = 1, BPF_MAP_UPDATE_ELEM = 2, BPF_MAP_DELETE_ELEM = 3, BPF_MAP_GET_NEXT_KEY = 4, BPF_PROG_LOAD = 5, BPF_OBJ_PIN = 6, BPF_OBJ_GET = 7, BPF_PROG_ATTACH = 8, BPF_PROG_DETACH = 9, BPF_PROG_TEST_RUN = 10, BPF_PROG_RUN = 10, BPF_PROG_GET_NEXT_ID = 11, BPF_MAP_GET_NEXT_ID = 12, BPF_PROG_GET_FD_BY_ID = 13, BPF_MAP_GET_FD_BY_ID = 14, BPF_OBJ_GET_INFO_BY_FD = 15, BPF_PROG_QUERY = 16, BPF_RAW_TRACEPOINT_OPEN = 17, BPF_BTF_LOAD = 18, BPF_BTF_GET_FD_BY_ID = 19, BPF_TASK_FD_QUERY = 20, BPF_MAP_LOOKUP_AND_DELETE_ELEM = 21, BPF_MAP_FREEZE = 22, BPF_BTF_GET_NEXT_ID = 23, BPF_MAP_LOOKUP_BATCH = 24, BPF_MAP_LOOKUP_AND_DELETE_BATCH = 25, BPF_MAP_UPDATE_BATCH = 26, BPF_MAP_DELETE_BATCH = 27, BPF_LINK_CREATE = 28, BPF_LINK_UPDATE = 29, BPF_LINK_GET_FD_BY_ID = 30, BPF_LINK_GET_NEXT_ID = 31, BPF_ENABLE_STATS = 32, BPF_ITER_CREATE = 33, BPF_LINK_DETACH = 34, BPF_PROG_BIND_MAP = 35, }; enum perf_bpf_event_type { PERF_BPF_EVENT_UNKNOWN = 0, PERF_BPF_EVENT_PROG_LOAD = 1, PERF_BPF_EVENT_PROG_UNLOAD = 2, PERF_BPF_EVENT_MAX = 3, }; enum bpf_audit { BPF_AUDIT_LOAD = 0, BPF_AUDIT_UNLOAD = 1, BPF_AUDIT_MAX = 2, }; enum bpf_perf_event_type { BPF_PERF_EVENT_UNSPEC = 0, BPF_PERF_EVENT_UPROBE = 1, BPF_PERF_EVENT_URETPROBE = 2, BPF_PERF_EVENT_KPROBE = 3, BPF_PERF_EVENT_KRETPROBE = 4, BPF_PERF_EVENT_TRACEPOINT = 5, BPF_PERF_EVENT_EVENT = 6, }; enum bpf_stats_type { BPF_STATS_RUN_TIME = 0, }; typedef u64 (*btf_bpf_sys_bpf)(int, union bpf_attr *, u32); typedef u64 (*btf_bpf_sys_close)(u32); typedef u64 (*btf_bpf_kallsyms_lookup_name)(const char *, int, int, u64 *); struct bpf_tracing_link { struct bpf_tramp_link link; enum bpf_attach_type attach_type; struct bpf_trampoline *trampoline; struct bpf_prog *tgt_prog; }; struct bpf_raw_tp_link { struct bpf_link link; struct bpf_raw_event_map *btp; }; struct bpf_perf_link { struct bpf_link link; struct file *perf_file; }; struct bpf_spin_lock { __u32 val; }; struct bpf_prog_kstats { u64 nsecs; u64 cnt; u64 misses; }; struct bpf_prog_info { __u32 type; __u32 id; __u8 tag[8]; __u32 jited_prog_len; __u32 xlated_prog_len; __u64 
jited_prog_insns; __u64 xlated_prog_insns; __u64 load_time; __u32 created_by_uid; __u32 nr_map_ids; __u64 map_ids; char name[16]; __u32 ifindex; __u32 gpl_compatible: 1; __u64 netns_dev; __u64 netns_ino; __u32 nr_jited_ksyms; __u32 nr_jited_func_lens; __u64 jited_ksyms; __u64 jited_func_lens; __u32 btf_id; __u32 func_info_rec_size; __u64 func_info; __u32 nr_func_info; __u32 nr_line_info; __u64 line_info; __u64 jited_line_info; __u32 nr_jited_line_info; __u32 line_info_rec_size; __u32 jited_line_info_rec_size; __u32 nr_prog_tags; __u64 prog_tags; __u64 run_time_ns; __u64 run_cnt; __u64 recursion_misses; __u32 verified_insns; __u32 attach_btf_obj_id; __u32 attach_btf_id; }; struct bpf_map_info { __u32 type; __u32 id; __u32 key_size; __u32 value_size; __u32 max_entries; __u32 map_flags; char name[16]; __u32 ifindex; __u32 btf_vmlinux_value_type_id; __u64 netns_dev; __u64 netns_ino; __u32 btf_id; __u32 btf_key_type_id; __u32 btf_value_type_id; __u64 map_extra; }; struct bpf_btf_info { __u64 btf; __u32 btf_size; __u32 id; __u64 name; __u32 name_len; __u32 kernel_btf; }; struct bpf_attach_target_info { struct btf_func_model fmodel; long tgt_addr; struct module *tgt_mod; const char *tgt_name; const struct btf_type *tgt_type; }; struct bpf_verifier_stack_elem { struct bpf_verifier_state st; int insn_idx; int prev_insn_idx; struct bpf_verifier_stack_elem *next; u32 log_pos; }; struct bpf_kfunc_desc { struct btf_func_model func_model; u32 func_id; s32 imm; u16 offset; unsigned long addr; }; struct bpf_kfunc_desc_tab { struct bpf_kfunc_desc descs[256]; u32 nr_descs; }; struct bpf_kfunc_btf { struct btf *btf; struct module *module; u16 offset; }; struct bpf_kfunc_btf_tab { struct bpf_kfunc_btf descs[256]; u32 nr_descs; }; struct bpf_reg_types { const enum bpf_reg_type types[10]; u32 *btf_id; }; enum btf_func_linkage { BTF_FUNC_STATIC = 0, BTF_FUNC_GLOBAL = 1, BTF_FUNC_EXTERN = 2, }; enum special_kfunc_type { KF_bpf_obj_new_impl = 0, KF_bpf_obj_drop_impl = 1, KF_bpf_refcount_acquire_impl = 2, KF_bpf_list_push_front_impl = 3, KF_bpf_list_push_back_impl = 4, KF_bpf_list_pop_front = 5, KF_bpf_list_pop_back = 6, KF_bpf_cast_to_kern_ctx = 7, KF_bpf_rdonly_cast = 8, KF_bpf_rcu_read_lock = 9, KF_bpf_rcu_read_unlock = 10, KF_bpf_rbtree_remove = 11, KF_bpf_rbtree_add_impl = 12, KF_bpf_rbtree_first = 13, KF_bpf_dynptr_from_skb = 14, KF_bpf_dynptr_from_xdp = 15, KF_bpf_dynptr_slice = 16, KF_bpf_dynptr_slice_rdwr = 17, KF_bpf_dynptr_clone = 18, }; enum bpf_stack_slot_type { STACK_INVALID = 0, STACK_SPILL = 1, STACK_MISC = 2, STACK_ZERO = 3, STACK_DYNPTR = 4, STACK_ITER = 5, }; enum bpf_type_flag { PTR_MAYBE_NULL = 256, MEM_RDONLY = 512, MEM_RINGBUF = 1024, MEM_USER = 2048, MEM_PERCPU = 4096, OBJ_RELEASE = 8192, PTR_UNTRUSTED = 16384, MEM_UNINIT = 32768, DYNPTR_TYPE_LOCAL = 65536, DYNPTR_TYPE_RINGBUF = 131072, MEM_FIXED_SIZE = 262144, MEM_ALLOC = 524288, PTR_TRUSTED = 1048576, MEM_RCU = 2097152, NON_OWN_REF = 4194304, DYNPTR_TYPE_SKB = 8388608, DYNPTR_TYPE_XDP = 16777216, __BPF_TYPE_FLAG_MAX = 16777217, __BPF_TYPE_LAST_FLAG = 16777216, }; enum bpf_access_src { ACCESS_DIRECT = 1, ACCESS_HELPER = 2, }; enum bpf_core_relo_kind { BPF_CORE_FIELD_BYTE_OFFSET = 0, BPF_CORE_FIELD_BYTE_SIZE = 1, BPF_CORE_FIELD_EXISTS = 2, BPF_CORE_FIELD_SIGNED = 3, BPF_CORE_FIELD_LSHIFT_U64 = 4, BPF_CORE_FIELD_RSHIFT_U64 = 5, BPF_CORE_TYPE_ID_LOCAL = 6, BPF_CORE_TYPE_ID_TARGET = 7, BPF_CORE_TYPE_EXISTS = 8, BPF_CORE_TYPE_SIZE = 9, BPF_CORE_ENUMVAL_EXISTS = 10, BPF_CORE_ENUMVAL_VALUE = 11, BPF_CORE_TYPE_MATCHES = 12, }; enum { DISCOVERED = 
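/* Userspace-side sketch for struct bpf_prog_info above, retrieved with the
 * BPF_OBJ_GET_INFO_BY_FD command from enum bpf_cmd. Illustrative; assumes
 * libbpf's bpf_obj_get_info_by_fd() and an already-obtained prog_fd:
 *
 *   struct bpf_prog_info info = {};
 *   __u32 len = sizeof(info);
 *
 *   if (!bpf_obj_get_info_by_fd(prog_fd, &info, &len))
 *           printf("prog %u: %s, %u xlated bytes\n",
 *                  info.id, info.name, info.xlated_prog_len);
 */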
16, EXPLORED = 32, FALLTHROUGH = 1, BRANCH = 2, }; enum { DONE_EXPLORING = 0, KEEP_EXPLORING = 1, }; enum reg_arg_type { SRC_OP = 0, DST_OP = 1, DST_OP_NO_MARK = 2, }; enum { REASON_BOUNDS = -1, REASON_TYPE = -2, REASON_PATHS = -3, REASON_LIMIT = -4, REASON_STACK = -5, }; enum kfunc_ptr_arg_type { KF_ARG_PTR_TO_CTX = 0, KF_ARG_PTR_TO_ALLOC_BTF_ID = 1, KF_ARG_PTR_TO_REFCOUNTED_KPTR = 2, KF_ARG_PTR_TO_DYNPTR = 3, KF_ARG_PTR_TO_ITER = 4, KF_ARG_PTR_TO_LIST_HEAD = 5, KF_ARG_PTR_TO_LIST_NODE = 6, KF_ARG_PTR_TO_BTF_ID = 7, KF_ARG_PTR_TO_MEM = 8, KF_ARG_PTR_TO_MEM_SIZE = 9, KF_ARG_PTR_TO_CALLBACK = 10, KF_ARG_PTR_TO_RB_ROOT = 11, KF_ARG_PTR_TO_RB_NODE = 12, }; enum { KF_ARG_DYNPTR_ID = 0, KF_ARG_LIST_HEAD_ID = 1, KF_ARG_LIST_NODE_ID = 2, KF_ARG_RB_ROOT_ID = 3, KF_ARG_RB_NODE_ID = 4, }; enum { BTF_TRACING_TYPE_TASK = 0, BTF_TRACING_TYPE_FILE = 1, BTF_TRACING_TYPE_VMA = 2, MAX_BTF_TRACING_TYPE = 3, }; enum sk_action { SK_DROP = 0, SK_PASS = 1, }; enum { AT_PKT_END = -1, BEYOND_PKT_END = -2, }; enum { BPF_MAX_LOOPS = 8388608, }; struct bpf_iter_meta__safe_trusted { struct seq_file *seq; }; struct bpf_iter__task__safe_trusted { struct bpf_iter_meta *meta; struct task_struct *task; }; struct linux_binprm__safe_trusted { struct file *file; }; struct file__safe_trusted { struct inode *f_inode; }; struct dentry__safe_trusted { struct inode *d_inode; }; struct socket__safe_trusted { struct sock *sk; }; struct task_struct__safe_rcu { const cpumask_t *cpus_ptr; struct css_set __attribute__((btf_type_tag("rcu"))) *cgroups; struct task_struct __attribute__((btf_type_tag("rcu"))) *real_parent; struct task_struct *group_leader; }; struct cgroup__safe_rcu { struct kernfs_node *kn; }; struct css_set__safe_rcu { struct cgroup *dfl_cgrp; }; struct mm_struct__safe_rcu_or_null { struct file __attribute__((btf_type_tag("rcu"))) *exe_file; }; struct sk_buff__safe_rcu_or_null { struct sock *sk; }; struct request_sock__safe_rcu_or_null { struct sock *sk; }; struct btf_var_secinfo { __u32 type; __u32 offset; __u32 size; }; struct bpf_iter; typedef void (*bpf_insn_print_t)(void *, const char *, ...); typedef const char * (*bpf_insn_revmap_call_t)(void *, const struct bpf_insn *); typedef const char * (*bpf_insn_print_imm_t)(void *, const struct bpf_insn *, __u64); struct bpf_insn_cbs { bpf_insn_print_t cb_print; bpf_insn_revmap_call_t cb_call; bpf_insn_print_imm_t cb_imm; void *private_data; }; typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog *, u32 *); struct bpf_core_relo { __u32 insn_off; __u32 type_id; __u32 access_str_off; enum bpf_core_relo_kind kind; }; struct bpf_core_ctx { struct bpf_verifier_log *log; const struct btf *btf; }; struct bpf_struct_ops { const struct bpf_verifier_ops *verifier_ops; int (*init)(struct btf *); int (*check_member)(const struct btf_type *, const struct btf_member *, const struct bpf_prog *); int (*init_member)(const struct btf_type *, const struct btf_member *, void *, const void *); int (*reg)(void *); void (*unreg)(void *); int (*update)(void *, void *); int (*validate)(void *); const struct btf_type *type; const struct btf_type *value_type; const char *name; struct btf_func_model func_models[64]; u32 type_id; u32 value_id; u64 android_kabi_reserved1; }; struct bpf_kfunc_call_arg_meta { struct btf *btf; u32 func_id; u32 kfunc_flags; const struct btf_type *func_proto; const char *func_name; u32 ref_obj_id; u8 release_regno; bool r0_rdonly; u32 ret_btf_id; u64 r0_size; u32 subprogno; struct { u64 value; bool found; 
} arg_constant; struct btf *arg_btf; u32 arg_btf_id; bool arg_owning_ref; struct { struct btf_field *field; } arg_list_head; struct { struct btf_field *field; } arg_rbtree_root; struct { enum bpf_dynptr_type type; u32 id; u32 ref_obj_id; } initialized_dynptr; struct { u8 spi; u8 frameno; } iter; u64 mem_size; }; struct bpf_call_arg_meta { struct bpf_map *map_ptr; bool raw_mode; bool pkt_access; u8 release_regno; int regno; int access_size; int mem_size; u64 msize_max_value; int ref_obj_id; int dynptr_id; int map_uid; int func_id; struct btf *btf; u32 btf_id; struct btf *ret_btf; u32 ret_btf_id; u32 subprogno; struct btf_field *kptr_field; }; struct bpf_sanitize_info { struct bpf_insn_aux_data aux; bool mask_to_left; }; typedef int (*set_callee_state_fn)(struct bpf_verifier_env *, struct bpf_func_state *, struct bpf_func_state *, int); struct bpf_preload_info; struct bpf_preload_ops { int (*preload)(struct bpf_preload_info *); struct module *owner; }; struct bpf_preload_info { char link_name[16]; struct bpf_link *link; }; struct tree_descr { const char *name; const struct file_operations *ops; int mode; }; enum bpf_type { BPF_TYPE_UNSPEC = 0, BPF_TYPE_PROG = 1, BPF_TYPE_MAP = 2, BPF_TYPE_LINK = 3, }; enum { OPT_MODE = 0, }; struct map_iter { void *key; bool done; }; struct bpf_mount_opts { umode_t mode; }; struct bpf_hrtimer { struct hrtimer timer; struct bpf_map *map; struct bpf_prog *prog; void __attribute__((btf_type_tag("rcu"))) *callback_fn; void *value; struct callback_head rcu; }; struct bpf_bprintf_buffers { char bin_args[512]; char buf[1024]; }; enum { BPF_F_TIMER_ABS = 1, }; typedef u64 (*btf_bpf_map_lookup_elem)(struct bpf_map *, void *); typedef u64 (*btf_bpf_map_update_elem)(struct bpf_map *, void *, void *, u64); typedef u64 (*btf_bpf_map_delete_elem)(struct bpf_map *, void *); typedef u64 (*btf_bpf_map_push_elem)(struct bpf_map *, void *, u64); typedef u64 (*btf_bpf_map_pop_elem)(struct bpf_map *, void *); typedef u64 (*btf_bpf_map_peek_elem)(struct bpf_map *, void *); typedef u64 (*btf_bpf_map_lookup_percpu_elem)(struct bpf_map *, void *, u32); typedef u64 (*btf_bpf_get_smp_processor_id)(); typedef u64 (*btf_bpf_get_numa_node_id)(); typedef u64 (*btf_bpf_ktime_get_ns)(); typedef u64 (*btf_bpf_ktime_get_boot_ns)(); typedef u64 (*btf_bpf_ktime_get_coarse_ns)(); typedef u64 (*btf_bpf_ktime_get_tai_ns)(); typedef u64 (*btf_bpf_get_current_pid_tgid)(); typedef u64 (*btf_bpf_get_current_uid_gid)(); typedef u64 (*btf_bpf_get_current_comm)(char *, u32); typedef u64 (*btf_bpf_spin_lock)(struct bpf_spin_lock *); typedef u64 (*btf_bpf_spin_unlock)(struct bpf_spin_lock *); typedef u64 (*btf_bpf_jiffies64)(); typedef u64 (*btf_bpf_get_current_cgroup_id)(); typedef u64 (*btf_bpf_get_current_ancestor_cgroup_id)(int); typedef u64 (*btf_bpf_strtol)(const char *, size_t, u64, long *); typedef u64 (*btf_bpf_strtoul)(const char *, size_t, u64, unsigned long *); typedef u64 (*btf_bpf_strncmp)(const char *, u32, const char *); struct bpf_pidns_info; typedef u64 (*btf_bpf_get_ns_current_pid_tgid)(u64, u64, struct bpf_pidns_info *, u32); struct bpf_pidns_info { __u32 pid; __u32 tgid; }; typedef u64 (*btf_bpf_event_output_data)(void *, struct bpf_map *, u64, void *, u64); typedef u64 (*btf_bpf_copy_from_user)(void *, u32, const void __attribute__((btf_type_tag("user"))) *); typedef u64 (*btf_bpf_copy_from_user_task)(void *, u32, const void __attribute__((btf_type_tag("user"))) *, struct task_struct *, u64); typedef u64 (*btf_bpf_per_cpu_ptr)(const void *, u32); typedef u64 
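/* BPF-side usage sketch for the map helpers whose BTF typedefs appear above
 * (btf_bpf_map_lookup_elem / btf_bpf_map_update_elem). Illustrative; assumes
 * libbpf's map-definition macros and a hypothetical single-slot array map:
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_ARRAY);
 *           __uint(max_entries, 1);
 *           __type(key, __u32);
 *           __type(value, __u64);
 *   } counts SEC(".maps");
 *
 *   static __always_inline void bump(void)
 *   {
 *           __u32 key = 0;
 *           __u64 *val = bpf_map_lookup_elem(&counts, &key);
 *
 *           if (val)
 *                   __sync_fetch_and_add(val, 1);
 *   }
 */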
(*btf_bpf_this_cpu_ptr)(const void *); typedef u64 (*btf_bpf_snprintf)(char *, u32, char *, const void *, u32); struct bpf_timer_kern; typedef u64 (*btf_bpf_timer_init)(struct bpf_timer_kern *, struct bpf_map *, u64); struct bpf_timer_kern { struct bpf_hrtimer *timer; struct bpf_spin_lock lock; }; typedef u64 (*btf_bpf_timer_set_callback)(struct bpf_timer_kern *, void *, struct bpf_prog_aux *); typedef u64 (*btf_bpf_timer_start)(struct bpf_timer_kern *, u64, u64); typedef u64 (*btf_bpf_timer_cancel)(struct bpf_timer_kern *); typedef u64 (*btf_bpf_kptr_xchg)(void *, void *); typedef u64 (*btf_bpf_dynptr_from_mem)(void *, u32, u64, struct bpf_dynptr_kern *); typedef u64 (*btf_bpf_dynptr_read)(void *, u32, const struct bpf_dynptr_kern *, u32, u64); typedef u64 (*btf_bpf_dynptr_write)(const struct bpf_dynptr_kern *, u32, void *, u32, u64); typedef u64 (*btf_bpf_dynptr_data)(const struct bpf_dynptr_kern *, u32, u32); struct bpf_refcount { int: 32; }; struct bpf_rb_node_kern { struct rb_node rb_node; void *owner; }; struct bpf_rb_node { long: 64; long: 64; long: 64; long: 64; }; struct bpf_timer { long: 64; long: 64; }; struct bpf_dynptr { long: 64; long: 64; }; struct bpf_list_node_kern { struct list_head list_head; void *owner; }; struct bpf_list_node { long: 64; long: 64; long: 64; }; struct bpf_list_head { long: 64; long: 64; }; struct bpf_rb_root { long: 64; long: 64; }; struct btf_id_dtor_kfunc { u32 btf_id; u32 kfunc_btf_id; }; enum bpf_iter_feature { BPF_ITER_RESCHED = 1, }; struct bpf_iter_target_info { struct list_head list; const struct bpf_iter_reg *reg_info; u32 btf_id; }; struct bpf_iter_link { struct bpf_link link; struct bpf_iter_aux_info aux; struct bpf_iter_target_info *tinfo; }; struct bpf_iter_priv_data { struct bpf_iter_target_info *tinfo; const struct bpf_iter_seq_info *seq_info; struct bpf_prog *prog; u64 session_id; u64 seq_num; bool done_stop; long: 0; u8 target_private[0]; }; typedef u64 (*btf_bpf_for_each_map_elem)(struct bpf_map *, void *, void *, u64); typedef u64 (*btf_bpf_loop)(u32, void *, void *, u64); struct btf_iter_num; struct bpf_iter_num { __u64 __opaque[1]; }; struct bpf_iter_num_kern { int cur; int end; }; struct bpf_iter__bpf_map { union { struct bpf_iter_meta *meta; }; union { struct bpf_map *map; }; }; struct bpf_iter_seq_map_info { u32 map_id; }; struct mmap_unlock_irq_work { struct irq_work irq_work; struct mm_struct *mm; }; enum bpf_task_vma_iter_find_op { task_vma_iter_first_vma = 0, task_vma_iter_next_vma = 1, task_vma_iter_find_vma = 2, }; typedef u64 (*btf_bpf_find_vma)(struct task_struct *, u64, bpf_callback_t, void *, u64); struct bpf_iter__task { union { struct bpf_iter_meta *meta; }; union { struct task_struct *task; }; }; struct bpf_iter_seq_task_common { struct pid_namespace *ns; enum bpf_iter_task_type type; u32 pid; u32 pid_visiting; }; struct bpf_iter__task_file { union { struct bpf_iter_meta *meta; }; union { struct task_struct *task; }; u32 fd; union { struct file *file; }; }; struct bpf_iter_seq_task_file_info { struct bpf_iter_seq_task_common common; struct task_struct *task; u32 tid; u32 fd; }; struct bpf_iter__task_vma { union { struct bpf_iter_meta *meta; }; union { struct task_struct *task; }; union { struct vm_area_struct *vma; }; }; struct bpf_iter_seq_task_vma_info { struct bpf_iter_seq_task_common common; struct task_struct *task; struct mm_struct *mm; struct vm_area_struct *vma; u32 tid; unsigned long prev_vm_start; unsigned long prev_vm_end; }; struct bpf_iter_seq_task_info { struct bpf_iter_seq_task_common common; u32 tid; 
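/* Usage sketch for struct bpf_timer and the bpf_timer_* helpers typedef'd
 * above. Illustrative; assumes a map value type that embeds the timer, a
 * hypothetical "timer_map", and a callback with the signature the helpers
 * expect:
 *
 *   struct elem { struct bpf_timer t; };
 *
 *   static int timer_cb(void *map, int *key, struct elem *val)
 *   {
 *           return 0;                        // one-shot: do not re-arm
 *   }
 *
 *   // with `e` pointing at a looked-up map value of type struct elem:
 *   //   bpf_timer_init(&e->t, &timer_map, CLOCK_MONOTONIC);
 *   //   bpf_timer_set_callback(&e->t, timer_cb);
 *   //   bpf_timer_start(&e->t, 1000000000ULL, 0);   // fire in 1 s
 */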
}; struct bpf_iter__bpf_prog { union { struct bpf_iter_meta *meta; }; union { struct bpf_prog *prog; }; }; struct bpf_iter_seq_prog_info { u32 prog_id; }; struct bpf_iter__bpf_link { union { struct bpf_iter_meta *meta; }; union { struct bpf_link *link; }; }; struct bpf_iter_seq_link_info { u32 link_id; }; struct pcpu_freelist_node; struct pcpu_freelist_head { struct pcpu_freelist_node *first; raw_spinlock_t lock; }; struct pcpu_freelist { struct pcpu_freelist_head __attribute__((btf_type_tag("percpu"))) *freelist; struct pcpu_freelist_head extralist; }; struct bpf_lru_list { struct list_head lists[3]; unsigned int counts[2]; struct list_head *next_inactive_rotation; raw_spinlock_t lock; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct bpf_lru_locallist; struct bpf_common_lru { struct bpf_lru_list lru_list; struct bpf_lru_locallist __attribute__((btf_type_tag("percpu"))) *local_list; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct bpf_lru_node; typedef bool (*del_from_htab_func)(void *, struct bpf_lru_node *); struct bpf_lru { union { struct bpf_common_lru common_lru; struct bpf_lru_list __attribute__((btf_type_tag("percpu"))) *percpu_lru; }; del_from_htab_func del_from_htab; void *del_arg; unsigned int hash_offset; unsigned int nr_scans; bool percpu; long: 64; long: 64; long: 64; long: 64; }; struct bucket; struct htab_elem; struct bpf_htab { struct bpf_map map; struct bpf_mem_alloc ma; struct bpf_mem_alloc pcpu_ma; struct bucket *buckets; void *elems; long: 64; long: 64; long: 64; long: 64; union { struct pcpu_freelist freelist; struct bpf_lru lru; }; struct htab_elem * __attribute__((btf_type_tag("percpu"))) *extra_elems; struct percpu_counter pcount; atomic_t count; bool use_percpu_counter; u32 n_buckets; u32 elem_size; u32 hashrnd; struct lock_class_key lockdep_key; int __attribute__((btf_type_tag("percpu"))) *map_locked[8]; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct bucket { struct hlist_nulls_head head; raw_spinlock_t raw_lock; }; struct pcpu_freelist_node { struct pcpu_freelist_node *next; }; struct bpf_lru_locallist { struct list_head lists[2]; u16 next_steal; raw_spinlock_t lock; }; struct bpf_lru_node { struct list_head list; u16 cpu; u8 type; u8 ref; }; struct htab_elem { union { struct hlist_nulls_node hash_node; struct { void *padding; union { struct pcpu_freelist_node fnode; struct htab_elem *batch_flink; }; }; }; union { void *ptr_to_pptr; struct bpf_lru_node lru_node; }; u32 hash; long: 0; char key[0]; }; struct bpf_iter_seq_hash_map_info { struct bpf_map *map; struct bpf_htab *htab; void *percpu_value_buf; u32 bucket_id; u32 skip_elems; }; struct bpf_iter__bpf_map_elem { union { struct bpf_iter_meta *meta; }; union { struct bpf_map *map; }; union { void *key; }; union { void *value; }; }; struct prog_poke_elem { struct list_head list; struct bpf_prog_aux *aux; }; struct bpf_iter_seq_array_map_info { struct bpf_map *map; void *percpu_value_buf; u32 index; }; enum bpf_lru_list_type { BPF_LRU_LIST_T_ACTIVE = 0, BPF_LRU_LIST_T_INACTIVE = 1, BPF_LRU_LIST_T_FREE = 2, BPF_LRU_LOCAL_LIST_T_FREE = 3, BPF_LRU_LOCAL_LIST_T_PENDING = 4, }; struct lpm_trie_node; struct lpm_trie { struct bpf_map map; struct lpm_trie_node __attribute__((btf_type_tag("rcu"))) *root; size_t n_entries; size_t max_prefixlen; size_t data_size; spinlock_t lock; long: 64; long: 64; long: 64; }; struct lpm_trie_node { struct callback_head rcu; struct lpm_trie_node __attribute__((btf_type_tag("rcu"))) *child[2]; 
u32 prefixlen; u32 flags; u8 data[0]; }; struct bpf_lpm_trie_key { __u32 prefixlen; __u8 data[0]; }; struct bpf_bloom_filter { struct bpf_map map; u32 bitset_mask; u32 hash_seed; u32 nr_hash_funcs; unsigned long bitset[0]; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct bpf_cgroup_storage_map { struct bpf_map map; spinlock_t lock; struct rb_root root; struct list_head list; long: 64; long: 64; long: 64; long: 64; }; enum bpf_cgroup_storage_type { BPF_CGROUP_STORAGE_SHARED = 0, BPF_CGROUP_STORAGE_PERCPU = 1, __BPF_CGROUP_STORAGE_MAX = 2, }; struct bpf_queue_stack { struct bpf_map map; raw_spinlock_t lock; u32 head; u32 tail; u32 size; char elements[0]; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; enum { BPF_RINGBUF_BUSY_BIT = 2147483648, BPF_RINGBUF_DISCARD_BIT = 1073741824, BPF_RINGBUF_HDR_SZ = 8, }; enum { BPF_RB_NO_WAKEUP = 1, BPF_RB_FORCE_WAKEUP = 2, }; enum { BPF_RB_AVAIL_DATA = 0, BPF_RB_RING_SIZE = 1, BPF_RB_CONS_POS = 2, BPF_RB_PROD_POS = 3, }; typedef u64 (*btf_bpf_ringbuf_reserve)(struct bpf_map *, u64, u64); typedef u64 (*btf_bpf_ringbuf_submit)(void *, u64); typedef u64 (*btf_bpf_ringbuf_discard)(void *, u64); typedef u64 (*btf_bpf_ringbuf_output)(struct bpf_map *, void *, u64, u64); typedef u64 (*btf_bpf_ringbuf_query)(struct bpf_map *, u64); typedef u64 (*btf_bpf_ringbuf_reserve_dynptr)(struct bpf_map *, u32, u64, struct bpf_dynptr_kern *); typedef u64 (*btf_bpf_ringbuf_submit_dynptr)(struct bpf_dynptr_kern *, u64); typedef u64 (*btf_bpf_ringbuf_discard_dynptr)(struct bpf_dynptr_kern *, u64); typedef u64 (*btf_bpf_user_ringbuf_drain)(struct bpf_map *, void *, void *, u64); struct bpf_ringbuf { wait_queue_head_t waitq; struct irq_work work; u64 mask; struct page **pages; int nr_pages; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; spinlock_t spinlock; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; atomic_t busy; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; 
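/* BPF-side usage sketch for the ring buffer helpers typedef'd above
 * (btf_bpf_ringbuf_reserve / btf_bpf_ringbuf_submit). Illustrative; assumes
 * a hypothetical BPF_MAP_TYPE_RINGBUF map named "rb":
 *
 *   struct event { __u32 pid; char comm[16]; };
 *
 *   struct event *e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
 *   if (!e)
 *           return 0;                        // buffer full, nothing reserved
 *   e->pid = bpf_get_current_pid_tgid() >> 32;
 *   bpf_get_current_comm(e->comm, sizeof(e->comm));
 *   bpf_ringbuf_submit(e, 0);                // or bpf_ringbuf_discard(e, 0)
 */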
long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; unsigned long consumer_pos; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 
64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; 
long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; unsigned long producer_pos; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 
64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; char data[0]; }; struct bpf_ringbuf_map { struct bpf_map map; struct bpf_ringbuf *rb; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct bpf_ringbuf_hdr { u32 len; u32 pg_off; }; struct bpf_local_storage_elem { struct hlist_node map_node; struct hlist_node snode; struct bpf_local_storage __attribute__((btf_type_tag("rcu"))) *local_storage; struct callback_head rcu; long: 64; struct bpf_local_storage_data sdata; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct bpf_local_storage_cache { spinlock_t idx_lock; u64 idx_usage_counts[16]; }; enum { BPF_LOCAL_STORAGE_GET_F_CREATE = 1, BPF_SK_STORAGE_GET_F_CREATE = 1, }; typedef u64 
(*btf_bpf_task_storage_get_recur)(struct bpf_map *, struct task_struct *, void *, u64, gfp_t); typedef u64 (*btf_bpf_task_storage_get)(struct bpf_map *, struct task_struct *, void *, u64, gfp_t); typedef u64 (*btf_bpf_task_storage_delete_recur)(struct bpf_map *, struct task_struct *); typedef u64 (*btf_bpf_task_storage_delete)(struct bpf_map *, struct task_struct *); typedef u64 (*btf_bpf_inode_storage_get)(struct bpf_map *, struct inode *, void *, u64, gfp_t); typedef u64 (*btf_bpf_inode_storage_delete)(struct bpf_map *, struct inode *); struct bpf_storage_blob { struct bpf_local_storage __attribute__((btf_type_tag("rcu"))) *storage; }; struct bpf_tuple { struct bpf_prog *prog; struct bpf_link *link; }; enum { BPF_MAX_TRAMP_LINKS = 38, }; struct bpf_shim_tramp_link { struct bpf_tramp_link link; struct bpf_trampoline *trampoline; }; struct btf_kfunc_hook_filter { btf_kfunc_filter_t filters[16]; u32 nr_filters; }; struct btf_kfunc_set_tab { struct btf_id_set8 *sets[13]; struct btf_kfunc_hook_filter hook_filters[13]; }; struct btf_id_dtor_kfunc_tab { u32 cnt; struct btf_id_dtor_kfunc dtors[0]; }; struct btf_struct_metas { u32 cnt; struct btf_struct_meta types[0]; }; struct nd_opt_hdr { __u8 nd_opt_type; __u8 nd_opt_len; }; struct ndisc_options { struct nd_opt_hdr *nd_opt_array[15]; struct nd_opt_hdr *nd_opts_ri; struct nd_opt_hdr *nd_opts_ri_end; struct nd_opt_hdr *nd_useropts; struct nd_opt_hdr *nd_useropts_end; struct nd_opt_hdr *nd_802154_opt_array[3]; }; struct prefix_info { __u8 type; __u8 length; __u8 prefix_len; union { __u8 flags; struct { __u8 reserved: 6; __u8 autoconf: 1; __u8 onlink: 1; }; }; __be32 valid; __be32 prefered; __be32 reserved2; struct in6_addr prefix; }; struct inet_ehash_bucket; struct inet_bind_hashbucket; struct inet_listen_hashbucket; struct inet_hashinfo { struct inet_ehash_bucket *ehash; spinlock_t *ehash_locks; unsigned int ehash_mask; unsigned int ehash_locks_mask; struct kmem_cache *bind_bucket_cachep; struct inet_bind_hashbucket *bhash; struct kmem_cache *bind2_bucket_cachep; struct inet_bind_hashbucket *bhash2; unsigned int bhash_size; unsigned int lhash2_mask; struct inet_listen_hashbucket *lhash2; bool pernet; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct inet_ehash_bucket { struct hlist_nulls_head chain; }; struct inet_bind_hashbucket { spinlock_t lock; struct hlist_head chain; }; struct inet_listen_hashbucket { spinlock_t lock; struct hlist_nulls_head nulls_head; }; struct inet_peer_base { struct rb_root rb_root; seqlock_t lock; int total; }; struct ack_sample { u32 pkts_acked; s32 rtt_us; u32 in_flight; }; struct rate_sample { u64 prior_mstamp; u32 prior_delivered; u32 prior_delivered_ce; s32 delivered; s32 delivered_ce; long interval_us; u32 snd_interval_us; u32 rcv_interval_us; long rtt_us; int losses; u32 acked_sacked; u32 prior_in_flight; u32 last_end_seq; bool is_app_limited; bool is_retrans; bool is_ack_delayed; }; struct lwtunnel_state { __u16 type; __u16 flags; __u16 headroom; atomic_t refcnt; int (*orig_output)(struct net *, struct sock *, struct sk_buff *); int (*orig_input)(struct sk_buff *); struct callback_head rcu; __u8 data[0]; }; struct sk_psock_progs { struct bpf_prog *msg_parser; struct bpf_prog *stream_parser; struct bpf_prog *stream_verdict; struct bpf_prog *skb_verdict; }; struct sk_psock_work_state { u32 len; u32 off; }; struct sk_msg; struct sk_psock { struct sock *sk; struct sock *sk_redir; u32 apply_bytes; u32 cork_bytes; u32 eval; bool redir_ingress; struct sk_msg *cork; struct sk_psock_progs progs; 
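/* BPF-side usage sketch for the task-storage helpers typedef'd above
 * (btf_bpf_task_storage_get / btf_bpf_task_storage_delete) together with the
 * BPF_LOCAL_STORAGE_GET_F_CREATE flag. Illustrative; assumes a hypothetical
 * BPF_MAP_TYPE_TASK_STORAGE map named "task_data" with a __u64 value:
 *
 *   struct task_struct *task = bpf_get_current_task_btf();
 *   __u64 *val;
 *
 *   val = bpf_task_storage_get(&task_data, task, 0,
 *                              BPF_LOCAL_STORAGE_GET_F_CREATE);
 *   if (val)
 *           *val += 1;
 */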
struct sk_buff_head ingress_skb; struct list_head ingress_msg; spinlock_t ingress_lock; unsigned long state; struct list_head link; spinlock_t link_lock; refcount_t refcnt; void (*saved_unhash)(struct sock *); void (*saved_destroy)(struct sock *); void (*saved_close)(struct sock *, long); void (*saved_write_space)(struct sock *); void (*saved_data_ready)(struct sock *); int (*psock_update_sk_prot)(struct sock *, struct sk_psock *, bool); struct proto *sk_proto; struct mutex work_mutex; struct sk_psock_work_state work_state; struct delayed_work work; struct sock *sk_pair; struct rcu_work rwork; }; struct sk_msg_sg { u32 start; u32 curr; u32 end; u32 size; u32 copybreak; unsigned long copy[1]; struct scatterlist data[19]; }; struct sk_msg { struct sk_msg_sg sg; void *data; void *data_end; u32 apply_bytes; u32 cork_bytes; u32 flags; struct sk_buff *skb; struct sock *sk_redir; struct sock *sk; struct list_head list; }; struct bpf_flow_keys; struct bpf_sock; struct __sk_buff { __u32 len; __u32 pkt_type; __u32 mark; __u32 queue_mapping; __u32 protocol; __u32 vlan_present; __u32 vlan_tci; __u32 vlan_proto; __u32 priority; __u32 ingress_ifindex; __u32 ifindex; __u32 tc_index; __u32 cb[5]; __u32 hash; __u32 tc_classid; __u32 data; __u32 data_end; __u32 napi_id; __u32 family; __u32 remote_ip4; __u32 local_ip4; __u32 remote_ip6[4]; __u32 local_ip6[4]; __u32 remote_port; __u32 local_port; __u32 data_meta; union { struct bpf_flow_keys *flow_keys; }; __u64 tstamp; __u32 wire_len; __u32 gso_segs; union { struct bpf_sock *sk; }; __u32 gso_size; __u8 tstamp_type; __u64 hwtstamp; }; struct bpf_sock { __u32 bound_dev_if; __u32 family; __u32 type; __u32 protocol; __u32 mark; __u32 priority; __u32 src_ip4; __u32 src_ip6[4]; __u32 src_port; __be16 dst_port; __u32 dst_ip4; __u32 dst_ip6[4]; __u32 state; __s32 rx_queue_mapping; }; struct bpf_sock_addr { __u32 user_family; __u32 user_ip4; __u32 user_ip6[4]; __u32 user_port; __u32 family; __u32 type; __u32 protocol; __u32 msg_src_ip4; __u32 msg_src_ip6[4]; union { struct bpf_sock *sk; }; }; struct bpf_sock_addr_kern { struct sock *sk; struct sockaddr *uaddr; u64 tmp_reg; void *t_ctx; u32 uaddrlen; }; struct bpf_sock_ops { __u32 op; union { __u32 args[4]; __u32 reply; __u32 replylong[4]; }; __u32 family; __u32 remote_ip4; __u32 local_ip4; __u32 remote_ip6[4]; __u32 local_ip6[4]; __u32 remote_port; __u32 local_port; __u32 is_fullsock; __u32 snd_cwnd; __u32 srtt_us; __u32 bpf_sock_ops_cb_flags; __u32 state; __u32 rtt_min; __u32 snd_ssthresh; __u32 rcv_nxt; __u32 snd_nxt; __u32 snd_una; __u32 mss_cache; __u32 ecn_flags; __u32 rate_delivered; __u32 rate_interval_us; __u32 packets_out; __u32 retrans_out; __u32 total_retrans; __u32 segs_in; __u32 data_segs_in; __u32 segs_out; __u32 data_segs_out; __u32 lost_out; __u32 sacked_out; __u32 sk_txhash; __u64 bytes_received; __u64 bytes_acked; union { struct bpf_sock *sk; }; union { void *skb_data; }; union { void *skb_data_end; }; __u32 skb_len; __u32 skb_tcp_flags; __u64 skb_hwtstamp; }; struct bpf_sock_ops_kern { struct sock *sk; union { u32 args[4]; u32 reply; u32 replylong[4]; }; struct sk_buff *syn_skb; struct sk_buff *skb; void *skb_data_end; u8 op; u8 is_fullsock; u8 remaining_opt_len; u64 temp; }; struct sk_msg_md { union { void *data; }; union { void *data_end; }; __u32 family; __u32 remote_ip4; __u32 local_ip4; __u32 remote_ip6[4]; __u32 local_ip6[4]; __u32 remote_port; __u32 local_port; __u32 size; union { struct bpf_sock *sk; }; }; struct bpf_flow_dissector { struct bpf_flow_keys *flow_keys; const struct sk_buff 
*skb; const void *data; const void *data_end; }; struct bpf_perf_event_data { bpf_user_pt_regs_t regs; __u64 sample_period; __u64 addr; }; struct bpf_cgroup_dev_ctx { __u32 access_type; __u32 major; __u32 minor; }; struct bpf_sysctl { __u32 write; __u32 file_pos; }; struct bpf_sysctl_kern { struct ctl_table_header *head; struct ctl_table *table; void *cur_val; size_t cur_len; void *new_val; size_t new_len; int new_updated; int write; loff_t *ppos; u64 tmp_reg; }; struct bpf_sockopt { union { struct bpf_sock *sk; }; union { void *optval; }; union { void *optval_end; }; __s32 level; __s32 optname; __s32 optlen; __s32 retval; }; struct bpf_sockopt_kern { struct sock *sk; u8 *optval; u8 *optval_end; s32 level; s32 optname; s32 optlen; struct task_struct *current_task; u64 tmp_reg; }; struct sk_reuseport_md { union { void *data; }; union { void *data_end; }; __u32 len; __u32 eth_protocol; __u32 ip_protocol; __u32 bind_inany; __u32 hash; union { struct bpf_sock *sk; }; union { struct bpf_sock *migrating_sk; }; }; struct sk_reuseport_kern { struct sk_buff *skb; struct sock *sk; struct sock *selected_sk; struct sock *migrating_sk; void *data_end; u32 hash; u32 reuseport_id; bool bind_inany; }; struct bpf_sk_lookup { union { union { struct bpf_sock *sk; }; __u64 cookie; }; __u32 family; __u32 protocol; __u32 remote_ip4; __u32 remote_ip6[4]; __be16 remote_port; __u32 local_ip4; __u32 local_ip6[4]; __u32 local_port; __u32 ingress_ifindex; }; struct bpf_sk_lookup_kern { u16 family; u16 protocol; __be16 sport; u16 dport; struct { __be32 saddr; __be32 daddr; } v4; struct { const struct in6_addr *saddr; const struct in6_addr *daddr; } v6; struct sock *selected_sk; u32 ingress_ifindex; bool no_reuseport; }; struct bpf_nf_ctx { const struct nf_hook_state *state; struct sk_buff *skb; }; struct fuse_bpf_in_arg { uint32_t size; uint32_t padding; union { const void *value; uint64_t padding2; }; union { const void *end_offset; uint64_t padding3; }; }; struct fuse_bpf_arg { uint32_t size; uint32_t padding; union { void *value; uint64_t padding2; }; union { void *end_offset; uint64_t padding3; }; }; struct fuse_bpf_args { uint64_t nodeid; uint32_t opcode; uint32_t error_in; uint32_t in_numargs; uint32_t out_numargs; uint32_t flags; uint32_t padding; struct fuse_bpf_in_arg in_args[5]; struct fuse_bpf_arg out_args[3]; }; struct bpf_ctx_convert { struct __sk_buff BPF_PROG_TYPE_SOCKET_FILTER_prog; struct sk_buff BPF_PROG_TYPE_SOCKET_FILTER_kern; struct __sk_buff BPF_PROG_TYPE_SCHED_CLS_prog; struct sk_buff BPF_PROG_TYPE_SCHED_CLS_kern; struct __sk_buff BPF_PROG_TYPE_SCHED_ACT_prog; struct sk_buff BPF_PROG_TYPE_SCHED_ACT_kern; struct xdp_md BPF_PROG_TYPE_XDP_prog; struct xdp_buff BPF_PROG_TYPE_XDP_kern; struct __sk_buff BPF_PROG_TYPE_CGROUP_SKB_prog; struct sk_buff BPF_PROG_TYPE_CGROUP_SKB_kern; struct bpf_sock BPF_PROG_TYPE_CGROUP_SOCK_prog; struct sock BPF_PROG_TYPE_CGROUP_SOCK_kern; struct bpf_sock_addr BPF_PROG_TYPE_CGROUP_SOCK_ADDR_prog; struct bpf_sock_addr_kern BPF_PROG_TYPE_CGROUP_SOCK_ADDR_kern; struct __sk_buff BPF_PROG_TYPE_LWT_IN_prog; struct sk_buff BPF_PROG_TYPE_LWT_IN_kern; struct __sk_buff BPF_PROG_TYPE_LWT_OUT_prog; struct sk_buff BPF_PROG_TYPE_LWT_OUT_kern; struct __sk_buff BPF_PROG_TYPE_LWT_XMIT_prog; struct sk_buff BPF_PROG_TYPE_LWT_XMIT_kern; struct __sk_buff BPF_PROG_TYPE_LWT_SEG6LOCAL_prog; struct sk_buff BPF_PROG_TYPE_LWT_SEG6LOCAL_kern; struct bpf_sock_ops BPF_PROG_TYPE_SOCK_OPS_prog; struct bpf_sock_ops_kern BPF_PROG_TYPE_SOCK_OPS_kern; struct __sk_buff BPF_PROG_TYPE_SK_SKB_prog; struct 
sk_buff BPF_PROG_TYPE_SK_SKB_kern; struct sk_msg_md BPF_PROG_TYPE_SK_MSG_prog; struct sk_msg BPF_PROG_TYPE_SK_MSG_kern; struct __sk_buff BPF_PROG_TYPE_FLOW_DISSECTOR_prog; struct bpf_flow_dissector BPF_PROG_TYPE_FLOW_DISSECTOR_kern; bpf_user_pt_regs_t BPF_PROG_TYPE_KPROBE_prog; struct pt_regs BPF_PROG_TYPE_KPROBE_kern; __u64 BPF_PROG_TYPE_TRACEPOINT_prog; u64 BPF_PROG_TYPE_TRACEPOINT_kern; struct bpf_perf_event_data BPF_PROG_TYPE_PERF_EVENT_prog; struct bpf_perf_event_data_kern BPF_PROG_TYPE_PERF_EVENT_kern; struct bpf_raw_tracepoint_args BPF_PROG_TYPE_RAW_TRACEPOINT_prog; u64 BPF_PROG_TYPE_RAW_TRACEPOINT_kern; struct bpf_raw_tracepoint_args BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE_prog; u64 BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE_kern; void *BPF_PROG_TYPE_TRACING_prog; void *BPF_PROG_TYPE_TRACING_kern; struct bpf_cgroup_dev_ctx BPF_PROG_TYPE_CGROUP_DEVICE_prog; struct bpf_cgroup_dev_ctx BPF_PROG_TYPE_CGROUP_DEVICE_kern; struct bpf_sysctl BPF_PROG_TYPE_CGROUP_SYSCTL_prog; struct bpf_sysctl_kern BPF_PROG_TYPE_CGROUP_SYSCTL_kern; struct bpf_sockopt BPF_PROG_TYPE_CGROUP_SOCKOPT_prog; struct bpf_sockopt_kern BPF_PROG_TYPE_CGROUP_SOCKOPT_kern; __u32 BPF_PROG_TYPE_LIRC_MODE2_prog; u32 BPF_PROG_TYPE_LIRC_MODE2_kern; struct sk_reuseport_md BPF_PROG_TYPE_SK_REUSEPORT_prog; struct sk_reuseport_kern BPF_PROG_TYPE_SK_REUSEPORT_kern; struct bpf_sk_lookup BPF_PROG_TYPE_SK_LOOKUP_prog; struct bpf_sk_lookup_kern BPF_PROG_TYPE_SK_LOOKUP_kern; void *BPF_PROG_TYPE_STRUCT_OPS_prog; void *BPF_PROG_TYPE_STRUCT_OPS_kern; void *BPF_PROG_TYPE_EXT_prog; void *BPF_PROG_TYPE_EXT_kern; void *BPF_PROG_TYPE_LSM_prog; void *BPF_PROG_TYPE_LSM_kern; void *BPF_PROG_TYPE_SYSCALL_prog; void *BPF_PROG_TYPE_SYSCALL_kern; struct bpf_nf_ctx BPF_PROG_TYPE_NETFILTER_prog; struct bpf_nf_ctx BPF_PROG_TYPE_NETFILTER_kern; struct fuse_bpf_args BPF_PROG_TYPE_FUSE_prog; struct fuse_bpf_args BPF_PROG_TYPE_FUSE_kern; }; struct bpf_flow_keys { __u16 nhoff; __u16 thoff; __u16 addr_proto; __u8 is_frag; __u8 is_first_frag; __u8 is_encap; __u8 ip_proto; __be16 n_proto; __be16 sport; __be16 dport; union { struct { __be32 ipv4_src; __be32 ipv4_dst; }; struct { __u32 ipv6_src[4]; __u32 ipv6_dst[4]; }; }; __u32 flags; __be32 flow_label; }; struct nf_hook_state { u8 hook; u8 pf; struct net_device *in; struct net_device *out; struct sock *sk; struct net *net; int (*okfn)(struct net *, struct sock *, struct sk_buff *); }; struct btf_verifier_env; struct resolve_vertex; struct btf_show; struct btf_kind_operations { s32 (*check_meta)(struct btf_verifier_env *, const struct btf_type *, u32); int (*resolve)(struct btf_verifier_env *, const struct resolve_vertex *); int (*check_member)(struct btf_verifier_env *, const struct btf_type *, const struct btf_member *, const struct btf_type *); int (*check_kflag_member)(struct btf_verifier_env *, const struct btf_type *, const struct btf_member *, const struct btf_type *); void (*log_details)(struct btf_verifier_env *, const struct btf_type *); void (*show)(const struct btf *, const struct btf_type *, u32, void *, u8, struct btf_show *); }; struct resolve_vertex { const struct btf_type *t; u32 type_id; u16 next_member; }; enum verifier_phase { CHECK_META = 0, CHECK_TYPE = 1, }; enum resolve_mode { RESOLVE_TBD = 0, RESOLVE_PTR = 1, RESOLVE_STRUCT_OR_ARRAY = 2, }; struct btf_verifier_env { struct btf *btf; u8 *visit_states; struct resolve_vertex stack[32]; struct bpf_verifier_log log; u32 log_type_id; u32 top_stack; enum verifier_phase phase; enum resolve_mode resolve_mode; }; struct btf_show { u64 flags; void *target; 
void (*showfn)(struct btf_show *, const char *, va_list); const struct btf *btf; struct { u8 depth; u8 depth_to_show; u8 depth_check; u8 array_member: 1; u8 array_terminated: 1; u16 array_encoding; u32 type_id; int status; const struct btf_type *type; const struct btf_member *member; char name[80]; } state; struct { u32 size; void *head; void *data; u8 safe[32]; } obj; }; struct bpf_cand_cache { const char *name; u32 name_len; u16 kind; u16 cnt; struct { const struct btf *btf; u32 id; } cands[0]; }; enum bpf_struct_walk_result { WALK_SCALAR = 0, WALK_PTR = 1, WALK_STRUCT = 2, }; enum { BTF_MODULE_F_LIVE = 1, }; enum btf_kfunc_hook { BTF_KFUNC_HOOK_COMMON = 0, BTF_KFUNC_HOOK_XDP = 1, BTF_KFUNC_HOOK_TC = 2, BTF_KFUNC_HOOK_STRUCT_OPS = 3, BTF_KFUNC_HOOK_TRACING = 4, BTF_KFUNC_HOOK_SYSCALL = 5, BTF_KFUNC_HOOK_FMODRET = 6, BTF_KFUNC_HOOK_CGROUP_SKB = 7, BTF_KFUNC_HOOK_SCHED_ACT = 8, BTF_KFUNC_HOOK_SK_SKB = 9, BTF_KFUNC_HOOK_SOCKET_FILTER = 10, BTF_KFUNC_HOOK_LWT = 11, BTF_KFUNC_HOOK_NETFILTER = 12, BTF_KFUNC_HOOK_MAX = 13, }; enum { BTF_KFUNC_SET_MAX_CNT = 256, BTF_DTOR_KFUNC_MAX_CNT = 256, BTF_KFUNC_FILTER_MAX_CNT = 16, }; enum { BTF_FIELD_IGNORE = 0, BTF_FIELD_FOUND = 1, }; enum visit_state { NOT_VISITED = 0, VISITED = 1, RESOLVED = 2, }; enum { BTF_VAR_STATIC = 0, BTF_VAR_GLOBAL_ALLOCATED = 1, BTF_VAR_GLOBAL_EXTERN = 2, }; struct btf_module { struct list_head list; struct module *module; struct btf *btf; struct bin_attribute *sysfs_attr; int flags; }; typedef u64 (*btf_bpf_btf_find_by_name_kind)(char *, int, u32, int); struct btf_decl_tag { __s32 component_idx; }; struct btf_sec_info { u32 off; u32 len; }; struct btf_enum { __u32 name_off; __s32 val; }; struct btf_var { __u32 linkage; }; struct btf_enum64 { __u32 name_off; __u32 val_lo32; __u32 val_hi32; }; struct btf_show_snprintf { struct btf_show show; int len_left; int len; }; struct btf_field_info { enum btf_field_type type; u32 off; union { struct { u32 type_id; } kptr; struct { const char *node_name; u32 value_btf_id; } graph_root; }; }; typedef int (*cmp_r_func_t)(const void *, const void *, const void *); typedef void (*swap_r_func_t)(void *, void *, int, const void *); struct bpf_core_cand; struct bpf_core_cand_list { struct bpf_core_cand *cands; int len; }; struct bpf_core_cand { const struct btf *btf; __u32 id; }; struct bpf_core_accessor { __u32 type_id; __u32 idx; const char *name; }; struct bpf_core_spec { const struct btf *btf; struct bpf_core_accessor spec[64]; __u32 root_type_id; enum bpf_core_relo_kind relo_kind; int len; int raw_spec[64]; int raw_len; __u32 bit_offset; }; struct bpf_core_relo_res { __u64 orig_val; __u64 new_val; bool poison; bool validate; bool fail_memsz_adjust; __u32 orig_sz; __u32 orig_type_id; __u32 new_sz; __u32 new_type_id; }; struct bpf_dispatcher_prog { struct bpf_prog *prog; refcount_t users; }; struct bpf_dispatcher { struct mutex mutex; void *func; struct bpf_dispatcher_prog progs[48]; int num_progs; void *image; void *rw_image; u32 image_off; struct bpf_ksym ksym; u64 android_kabi_reserved1; }; enum net_device_flags { IFF_UP = 1, IFF_BROADCAST = 2, IFF_DEBUG = 4, IFF_LOOPBACK = 8, IFF_POINTOPOINT = 16, IFF_NOTRAILERS = 32, IFF_RUNNING = 64, IFF_NOARP = 128, IFF_PROMISC = 256, IFF_ALLMULTI = 512, IFF_MASTER = 1024, IFF_SLAVE = 2048, IFF_MULTICAST = 4096, IFF_PORTSEL = 8192, IFF_AUTOMEDIA = 16384, IFF_DYNAMIC = 32768, IFF_LOWER_UP = 65536, IFF_DORMANT = 131072, IFF_ECHO = 262144, }; enum netdev_priv_flags { IFF_802_1Q_VLAN = 1ULL, IFF_EBRIDGE = 2ULL, IFF_BONDING = 4ULL, IFF_ISATAP = 8ULL, 
IFF_WAN_HDLC = 16ULL, IFF_XMIT_DST_RELEASE = 32ULL, IFF_DONT_BRIDGE = 64ULL, IFF_DISABLE_NETPOLL = 128ULL, IFF_MACVLAN_PORT = 256ULL, IFF_BRIDGE_PORT = 512ULL, IFF_OVS_DATAPATH = 1024ULL, IFF_TX_SKB_SHARING = 2048ULL, IFF_UNICAST_FLT = 4096ULL, IFF_TEAM_PORT = 8192ULL, IFF_SUPP_NOFCS = 16384ULL, IFF_LIVE_ADDR_CHANGE = 32768ULL, IFF_MACVLAN = 65536ULL, IFF_XMIT_DST_RELEASE_PERM = 131072ULL, IFF_L3MDEV_MASTER = 262144ULL, IFF_NO_QUEUE = 524288ULL, IFF_OPENVSWITCH = 1048576ULL, IFF_L3MDEV_SLAVE = 2097152ULL, IFF_TEAM = 4194304ULL, IFF_RXFH_CONFIGURED = 8388608ULL, IFF_PHONY_HEADROOM = 16777216ULL, IFF_MACSEC = 33554432ULL, IFF_NO_RX_HANDLER = 67108864ULL, IFF_FAILOVER = 134217728ULL, IFF_FAILOVER_SLAVE = 268435456ULL, IFF_L3MDEV_RX_HANDLER = 536870912ULL, IFF_NO_ADDRCONF = 1073741824ULL, IFF_TX_SKB_NO_LINEAR = 2147483648ULL, IFF_CHANGE_PROTO_DOWN = 4294967296ULL, IFF_SEE_ALL_HWTSTAMP_REQUESTS = 8589934592ULL, }; enum netdev_xdp_act { NETDEV_XDP_ACT_BASIC = 1, NETDEV_XDP_ACT_REDIRECT = 2, NETDEV_XDP_ACT_NDO_XMIT = 4, NETDEV_XDP_ACT_XSK_ZEROCOPY = 8, NETDEV_XDP_ACT_HW_OFFLOAD = 16, NETDEV_XDP_ACT_RX_SG = 32, NETDEV_XDP_ACT_NDO_XMIT_SG = 64, NETDEV_XDP_ACT_MASK = 127, }; enum xdp_buff_flags { XDP_FLAGS_HAS_FRAGS = 1, XDP_FLAGS_FRAGS_PF_MEMALLOC = 2, }; enum { BPF_F_BROADCAST = 8, BPF_F_EXCLUDE_INGRESS = 16, }; enum netdev_cmd { NETDEV_UP = 1, NETDEV_DOWN = 2, NETDEV_REBOOT = 3, NETDEV_CHANGE = 4, NETDEV_REGISTER = 5, NETDEV_UNREGISTER = 6, NETDEV_CHANGEMTU = 7, NETDEV_CHANGEADDR = 8, NETDEV_PRE_CHANGEADDR = 9, NETDEV_GOING_DOWN = 10, NETDEV_CHANGENAME = 11, NETDEV_FEAT_CHANGE = 12, NETDEV_BONDING_FAILOVER = 13, NETDEV_PRE_UP = 14, NETDEV_PRE_TYPE_CHANGE = 15, NETDEV_POST_TYPE_CHANGE = 16, NETDEV_POST_INIT = 17, NETDEV_PRE_UNINIT = 18, NETDEV_RELEASE = 19, NETDEV_NOTIFY_PEERS = 20, NETDEV_JOIN = 21, NETDEV_CHANGEUPPER = 22, NETDEV_RESEND_IGMP = 23, NETDEV_PRECHANGEMTU = 24, NETDEV_CHANGEINFODATA = 25, NETDEV_BONDING_INFO = 26, NETDEV_PRECHANGEUPPER = 27, NETDEV_CHANGELOWERSTATE = 28, NETDEV_UDP_TUNNEL_PUSH_INFO = 29, NETDEV_UDP_TUNNEL_DROP_INFO = 30, NETDEV_CHANGE_TX_QUEUE_LEN = 31, NETDEV_CVLAN_FILTER_PUSH_INFO = 32, NETDEV_CVLAN_FILTER_DROP_INFO = 33, NETDEV_SVLAN_FILTER_PUSH_INFO = 34, NETDEV_SVLAN_FILTER_DROP_INFO = 35, NETDEV_OFFLOAD_XSTATS_ENABLE = 36, NETDEV_OFFLOAD_XSTATS_DISABLE = 37, NETDEV_OFFLOAD_XSTATS_REPORT_USED = 38, NETDEV_OFFLOAD_XSTATS_REPORT_DELTA = 39, NETDEV_XDP_FEAT_CHANGE = 40, }; struct bpf_dtab_netdev; struct bpf_dtab { struct bpf_map map; struct bpf_dtab_netdev __attribute__((btf_type_tag("rcu"))) **netdev_map; struct list_head list; struct hlist_head *dev_index_head; spinlock_t index_lock; unsigned int items; u32 n_buckets; long: 64; long: 64; }; struct bpf_devmap_val { __u32 ifindex; union { int fd; __u32 id; } bpf_prog; }; struct bpf_dtab_netdev { struct net_device *dev; struct hlist_node index_hlist; struct bpf_prog *xdp_prog; struct callback_head rcu; unsigned int idx; struct bpf_devmap_val val; }; typedef struct bio_vec skb_frag_t; struct skb_shared_info { __u8 flags; __u8 meta_len; __u8 nr_frags; __u8 tx_flags; unsigned short gso_size; unsigned short gso_segs; struct sk_buff *frag_list; struct skb_shared_hwtstamps hwtstamps; unsigned int gso_type; u32 tskey; atomic_t dataref; unsigned int xdp_frags_size; void *destructor_arg; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_oem_data1[3]; skb_frag_t frags[17]; }; struct bpf_nh_params { u32 nh_family; union { u32 ipv4_nh; struct in6_addr ipv6_nh; }; }; struct bpf_redirect_info { u64 tgt_index; 
void *tgt_value; struct bpf_map *map; u32 flags; u32 kern_flags; u32 map_id; enum bpf_map_type map_type; struct bpf_nh_params nh; }; struct netdev_notifier_info { struct net_device *dev; struct netlink_ext_ack *extack; }; struct bpf_cpu_map_entry; struct xdp_bulk_queue { void *q[8]; struct list_head flush_node; struct bpf_cpu_map_entry *obj; unsigned int count; }; struct bpf_cpumap_val { __u32 qsize; union { int fd; __u32 id; } bpf_prog; }; struct ptr_ring; struct bpf_cpu_map_entry { u32 cpu; int map_id; struct xdp_bulk_queue __attribute__((btf_type_tag("percpu"))) *bulkq; struct ptr_ring *queue; struct task_struct *kthread; struct bpf_cpumap_val value; struct bpf_prog *prog; struct completion kthread_running; struct rcu_work free_work; }; struct ptr_ring { int producer; spinlock_t producer_lock; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; int consumer_head; int consumer_tail; spinlock_t consumer_lock; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; int size; int batch; void **queue; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct bpf_cpu_map { struct bpf_map map; struct bpf_cpu_map_entry __attribute__((btf_type_tag("rcu"))) **cpu_map; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct bpf_prog_offload_ops; struct bpf_offload_dev { const struct bpf_prog_offload_ops *ops; struct list_head netdevs; void *priv; }; struct bpf_prog_offload_ops { int (*insn_hook)(struct bpf_verifier_env *, int, int); int (*finalize)(struct bpf_verifier_env *); int (*replace_insn)(struct bpf_verifier_env *, u32, struct bpf_insn *); int (*remove_insns)(struct bpf_verifier_env *, u32, u32); int (*prepare)(struct bpf_prog *); int (*translate)(struct bpf_prog *); void (*destroy)(struct bpf_prog *); u64 android_kabi_reserved1; }; enum { XDP_METADATA_KFUNC_RX_TIMESTAMP = 0, XDP_METADATA_KFUNC_RX_HASH = 1, MAX_XDP_METADATA_KFUNC = 2, }; struct bpf_offload_netdev { struct rhash_head l; struct net_device *netdev; struct bpf_offload_dev *offdev; struct list_head progs; struct list_head maps; struct list_head offdev_netdevs; }; typedef struct ns_common *ns_get_path_helper_t(void *); struct ns_get_path_bpf_prog_args { struct bpf_prog *prog; struct bpf_prog_info *info; }; struct ns_get_path_bpf_map_args { struct bpf_offloaded_map *offmap; struct bpf_map_info *info; }; enum netns_bpf_attach_type { NETNS_BPF_INVALID = -1, NETNS_BPF_FLOW_DISSECTOR = 0, NETNS_BPF_SK_LOOKUP = 1, MAX_NETNS_BPF_ATTACH_TYPE = 2, }; struct bpf_netns_link { struct bpf_link link; enum bpf_attach_type type; enum netns_bpf_attach_type netns_type; struct net *net; struct list_head node; }; struct mini_Qdisc; struct tcx_entry { struct mini_Qdisc __attribute__((btf_type_tag("rcu"))) *miniq; struct bpf_mprog_bundle bundle; bool miniq_active; struct callback_head rcu; }; struct mini_Qdisc { struct tcf_proto *filter_list; struct tcf_block *block; struct gnet_stats_basic_sync __attribute__((btf_type_tag("percpu"))) *cpu_bstats; struct gnet_stats_queue __attribute__((btf_type_tag("percpu"))) *cpu_qstats; unsigned long rcu_state; }; struct tcx_link { struct bpf_link link; struct net_device *dev; u32 location; }; enum { BPF_F_SKIP_FIELD_MASK = 255, BPF_F_USER_STACK = 256, BPF_F_FAST_STACK_CMP = 512, BPF_F_REUSE_STACKID = 1024, BPF_F_USER_BUILD_ID = 2048, }; enum bpf_stack_build_id_status { BPF_STACK_BUILD_ID_EMPTY = 0, BPF_STACK_BUILD_ID_VALID = 1, BPF_STACK_BUILD_ID_IP = 2, }; enum perf_callchain_context { PERF_CONTEXT_HV = 18446744073709551584ULL, PERF_CONTEXT_KERNEL = 
18446744073709551488ULL, PERF_CONTEXT_USER = 18446744073709551104ULL, PERF_CONTEXT_GUEST = 18446744073709549568ULL, PERF_CONTEXT_GUEST_KERNEL = 18446744073709549440ULL, PERF_CONTEXT_GUEST_USER = 18446744073709549056ULL, PERF_CONTEXT_MAX = 18446744073709547521ULL, }; typedef u64 (*btf_bpf_get_stackid)(struct pt_regs *, struct bpf_map *, u64); typedef u64 (*btf_bpf_get_stackid_pe)(struct bpf_perf_event_data_kern *, struct bpf_map *, u64); typedef u64 (*btf_bpf_get_stack)(struct pt_regs *, void *, u32, u64); typedef u64 (*btf_bpf_get_task_stack)(struct task_struct *, void *, u32, u64); typedef u64 (*btf_bpf_get_stack_pe)(struct bpf_perf_event_data_kern *, void *, u32, u64); struct stack_map_bucket; struct bpf_stack_map { struct bpf_map map; void *elems; struct pcpu_freelist freelist; u32 n_buckets; struct stack_map_bucket *buckets[0]; long: 64; long: 64; long: 64; }; struct stack_map_bucket { struct pcpu_freelist_node fnode; u32 hash; u32 nr; u64 data[0]; }; struct bpf_stack_build_id { __s32 status; unsigned char build_id[20]; union { __u64 offset; __u64 ip; }; }; struct cgroup_iter_priv { struct cgroup_subsys_state *start_css; bool visited_all; bool terminate; int order; }; struct bpf_iter__cgroup { union { struct bpf_iter_meta *meta; }; union { struct cgroup *cgroup; }; }; typedef u64 (*btf_bpf_cgrp_storage_get)(struct bpf_map *, struct cgroup *, void *, u64, gfp_t); typedef u64 (*btf_bpf_cgrp_storage_delete)(struct bpf_map *, struct cgroup *); struct cgroup_lsm_atype { u32 attach_btf_id; int refcnt; }; enum { TCPF_ESTABLISHED = 2, TCPF_SYN_SENT = 4, TCPF_SYN_RECV = 8, TCPF_FIN_WAIT1 = 16, TCPF_FIN_WAIT2 = 32, TCPF_TIME_WAIT = 64, TCPF_CLOSE = 128, TCPF_CLOSE_WAIT = 256, TCPF_LAST_ACK = 512, TCPF_LISTEN = 1024, TCPF_CLOSING = 2048, TCPF_NEW_SYN_RECV = 4096, }; enum { BPF_F_SYSCTL_BASE_NAME = 1, }; typedef u64 (*btf_bpf_get_local_storage)(struct bpf_map *, u64); typedef u64 (*btf_bpf_get_retval)(); typedef u64 (*btf_bpf_set_retval)(int); typedef u64 (*btf_bpf_sysctl_get_name)(struct bpf_sysctl_kern *, char *, size_t, u64); typedef u64 (*btf_bpf_sysctl_get_current_value)(struct bpf_sysctl_kern *, char *, size_t); typedef u64 (*btf_bpf_sysctl_get_new_value)(struct bpf_sysctl_kern *, char *, size_t); typedef u64 (*btf_bpf_sysctl_set_new_value)(struct bpf_sysctl_kern *, const char *, size_t); typedef u64 (*btf_bpf_get_netns_cookie_sockopt)(struct bpf_sockopt_kern *); struct bpf_cgroup_link; struct bpf_prog_list { struct hlist_node node; struct bpf_prog *prog; struct bpf_cgroup_link *link; struct bpf_cgroup_storage *storage[2]; }; struct bpf_cgroup_link { struct bpf_link link; struct cgroup *cgroup; enum bpf_attach_type type; }; struct qdisc_skb_cb { struct { unsigned int pkt_len; u16 slave_dev_queue_mapping; u16 tc_classid; }; unsigned char data[20]; }; struct bpf_skb_data_end { struct qdisc_skb_cb qdisc_cb; void *data_meta; void *data_end; }; struct bpf_cg_run_ctx { struct bpf_run_ctx run_ctx; const struct bpf_prog_array_item *prog_item; int retval; }; struct bpf_sockopt_buf { u8 data[32]; }; enum { IPPROTO_IP = 0, IPPROTO_ICMP = 1, IPPROTO_IGMP = 2, IPPROTO_IPIP = 4, IPPROTO_TCP = 6, IPPROTO_EGP = 8, IPPROTO_PUP = 12, IPPROTO_UDP = 17, IPPROTO_IDP = 22, IPPROTO_TP = 29, IPPROTO_DCCP = 33, IPPROTO_IPV6 = 41, IPPROTO_RSVP = 46, IPPROTO_GRE = 47, IPPROTO_ESP = 50, IPPROTO_AH = 51, IPPROTO_MTP = 92, IPPROTO_BEETPH = 94, IPPROTO_ENCAP = 98, IPPROTO_PIM = 103, IPPROTO_COMP = 108, IPPROTO_L2TP = 115, IPPROTO_SCTP = 132, IPPROTO_UDPLITE = 136, IPPROTO_MPLS = 137, IPPROTO_ETHERNET = 143, IPPROTO_RAW = 
255, IPPROTO_MPTCP = 262, IPPROTO_MAX = 263, }; enum sock_type { SOCK_STREAM = 1, SOCK_DGRAM = 2, SOCK_RAW = 3, SOCK_RDM = 4, SOCK_SEQPACKET = 5, SOCK_DCCP = 6, SOCK_PACKET = 10, }; enum sock_flags { SOCK_DEAD = 0, SOCK_DONE = 1, SOCK_URGINLINE = 2, SOCK_KEEPOPEN = 3, SOCK_LINGER = 4, SOCK_DESTROY = 5, SOCK_BROADCAST = 6, SOCK_TIMESTAMP = 7, SOCK_ZAPPED = 8, SOCK_USE_WRITE_QUEUE = 9, SOCK_DBG = 10, SOCK_RCVTSTAMP = 11, SOCK_RCVTSTAMPNS = 12, SOCK_LOCALROUTE = 13, SOCK_MEMALLOC = 14, SOCK_TIMESTAMPING_RX_SOFTWARE = 15, SOCK_FASYNC = 16, SOCK_RXQ_OVFL = 17, SOCK_ZEROCOPY = 18, SOCK_WIFI_STATUS = 19, SOCK_NOFCS = 20, SOCK_FILTER_LOCKED = 21, SOCK_SELECT_ERR_QUEUE = 22, SOCK_RCU_FREE = 23, SOCK_TXTIME = 24, SOCK_XDP = 25, SOCK_TSTAMP_NEW = 26, SOCK_RCVMARK = 27, }; struct reuseport_array { struct bpf_map map; struct sock __attribute__((btf_type_tag("rcu"))) *ptrs[0]; }; enum bpf_struct_ops_state { BPF_STRUCT_OPS_STATE_INIT = 0, BPF_STRUCT_OPS_STATE_INUSE = 1, BPF_STRUCT_OPS_STATE_TOBEFREE = 2, BPF_STRUCT_OPS_STATE_READY = 3, }; struct bpf_dummy_ops_state; struct bpf_dummy_ops { int (*test_1)(struct bpf_dummy_ops_state *); int (*test_2)(struct bpf_dummy_ops_state *, int, unsigned short, char, unsigned long); int (*test_sleepable)(struct bpf_dummy_ops_state *); }; struct bpf_struct_ops_bpf_dummy_ops { refcount_t refcnt; enum bpf_struct_ops_state state; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct bpf_dummy_ops data; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct bpf_dummy_ops_state { int val; }; struct bpf_struct_ops_tcp_congestion_ops { refcount_t refcnt; enum bpf_struct_ops_state state; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct tcp_congestion_ops data; }; struct bpf_struct_ops_value { refcount_t refcnt; enum bpf_struct_ops_state state; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; char data[0]; }; struct bpf_struct_ops_map { struct bpf_map map; struct callback_head rcu; const struct bpf_struct_ops *st_ops; struct mutex lock; struct bpf_link **links; void *image; struct bpf_struct_ops_value *uvalue; long: 64; long: 64; long: 64; long: 64; struct bpf_struct_ops_value kvalue; }; struct bpf_struct_ops_link { struct bpf_link link; struct bpf_map __attribute__((btf_type_tag("rcu"))) *map; }; struct bpf_cpumask { cpumask_t cpumask; refcount_t usage; }; enum { BTF_SOCK_TYPE_INET = 0, BTF_SOCK_TYPE_INET_CONN = 1, BTF_SOCK_TYPE_INET_REQ = 2, BTF_SOCK_TYPE_INET_TW = 3, BTF_SOCK_TYPE_REQ = 4, BTF_SOCK_TYPE_SOCK = 5, BTF_SOCK_TYPE_SOCK_COMMON = 6, BTF_SOCK_TYPE_TCP = 7, BTF_SOCK_TYPE_TCP_REQ = 8, BTF_SOCK_TYPE_TCP_TW = 9, BTF_SOCK_TYPE_TCP6 = 10, BTF_SOCK_TYPE_UDP = 11, BTF_SOCK_TYPE_UDP6 = 12, BTF_SOCK_TYPE_UNIX = 13, BTF_SOCK_TYPE_MPTCP = 14, BTF_SOCK_TYPE_SOCKET = 15, MAX_BTF_SOCK_TYPE = 16, }; enum { BPF_F_BPRM_SECUREEXEC = 1, }; typedef u64 (*btf_bpf_bprm_opts_set)(struct linux_binprm *, u64); typedef u64 (*btf_bpf_ima_inode_hash)(struct inode *, void *, u32); typedef u64 (*btf_bpf_ima_file_hash)(struct file *, void *, u32); typedef u64 (*btf_bpf_get_attach_cookie)(void *); struct xattr { const char *name; void *value; size_t value_len; }; struct sembuf { unsigned short sem_num; short sem_op; short sem_flg; }; typedef unsigned int kasan_vmalloc_flags_t; struct perf_event_mmap_page; struct perf_buffer { refcount_t refcount; struct callback_head callback_head; int nr_pages; int overwrite; int paused; atomic_t poll; local_t head; unsigned int nest; local_t events; local_t wakeup; local_t lost; long 
watermark; long aux_watermark; spinlock_t event_lock; struct list_head event_list; atomic_t mmap_count; unsigned long mmap_locked; struct user_struct *mmap_user; long aux_head; unsigned int aux_nest; long aux_wakeup; unsigned long aux_pgoff; int aux_nr_pages; int aux_overwrite; atomic_t aux_mmap_count; unsigned long aux_mmap_locked; void (*free_aux)(void *); refcount_t aux_refcount; int aux_in_sampling; void **aux_pages; void *aux_priv; struct perf_event_mmap_page *user_page; void *data_pages[0]; }; struct perf_event_mmap_page { __u32 version; __u32 compat_version; __u32 lock; __u32 index; __s64 offset; __u64 time_enabled; __u64 time_running; union { __u64 capabilities; struct { __u64 cap_bit0: 1; __u64 cap_bit0_is_deprecated: 1; __u64 cap_user_rdpmc: 1; __u64 cap_user_time: 1; __u64 cap_user_time_zero: 1; __u64 cap_user_time_short: 1; __u64 cap_____res: 58; }; }; __u16 pmc_width; __u16 time_shift; __u32 time_mult; __u64 time_offset; __u64 time_zero; __u32 size; __u32 __reserved_1; __u64 time_cycles; __u64 time_mask; __u8 __reserved[928]; __u64 data_head; __u64 data_tail; __u64 data_offset; __u64 data_size; __u64 aux_head; __u64 aux_tail; __u64 aux_offset; __u64 aux_size; }; struct perf_cpu_context { struct perf_event_context ctx; struct perf_event_context *task_ctx; int online; int heap_size; struct perf_event **heap; struct perf_event *heap_default[2]; }; struct swevent_hlist; struct swevent_htable { struct swevent_hlist *swevent_hlist; struct mutex hlist_mutex; int hlist_refcount; int recursion[4]; }; struct swevent_hlist { struct hlist_head heads[256]; struct callback_head callback_head; }; struct min_heap_callbacks { int elem_size; bool (*less)(const void *, const void *); void (*swp)(void *, void *); }; struct pmu_event_list { raw_spinlock_t lock; struct list_head list; }; enum perf_addr_filter_action_t { PERF_ADDR_FILTER_ACTION_STOP = 0, PERF_ADDR_FILTER_ACTION_START = 1, PERF_ADDR_FILTER_ACTION_FILTER = 2, }; struct match_token { int token; const char *pattern; }; enum event_type_t { EVENT_FLEXIBLE = 1, EVENT_PINNED = 2, EVENT_TIME = 4, EVENT_CPU = 8, EVENT_CGROUP = 16, EVENT_ALL = 3, }; enum perf_event_type { PERF_RECORD_MMAP = 1, PERF_RECORD_LOST = 2, PERF_RECORD_COMM = 3, PERF_RECORD_EXIT = 4, PERF_RECORD_THROTTLE = 5, PERF_RECORD_UNTHROTTLE = 6, PERF_RECORD_FORK = 7, PERF_RECORD_READ = 8, PERF_RECORD_SAMPLE = 9, PERF_RECORD_MMAP2 = 10, PERF_RECORD_AUX = 11, PERF_RECORD_ITRACE_START = 12, PERF_RECORD_LOST_SAMPLES = 13, PERF_RECORD_SWITCH = 14, PERF_RECORD_SWITCH_CPU_WIDE = 15, PERF_RECORD_NAMESPACES = 16, PERF_RECORD_KSYMBOL = 17, PERF_RECORD_BPF_EVENT = 18, PERF_RECORD_CGROUP = 19, PERF_RECORD_TEXT_POKE = 20, PERF_RECORD_AUX_OUTPUT_HW_ID = 21, PERF_RECORD_MAX = 22, }; enum { NET_NS_INDEX = 0, UTS_NS_INDEX = 1, IPC_NS_INDEX = 2, PID_NS_INDEX = 3, USER_NS_INDEX = 4, MNT_NS_INDEX = 5, CGROUP_NS_INDEX = 6, NR_NAMESPACES = 7, }; enum perf_event_task_context { perf_invalid_context = -1, perf_hw_context = 0, perf_sw_context = 1, perf_nr_task_contexts = 2, }; enum perf_event_read_format { PERF_FORMAT_TOTAL_TIME_ENABLED = 1, PERF_FORMAT_TOTAL_TIME_RUNNING = 2, PERF_FORMAT_ID = 4, PERF_FORMAT_GROUP = 8, PERF_FORMAT_LOST = 16, PERF_FORMAT_MAX = 32, }; enum perf_branch_sample_type { PERF_SAMPLE_BRANCH_USER = 1, PERF_SAMPLE_BRANCH_KERNEL = 2, PERF_SAMPLE_BRANCH_HV = 4, PERF_SAMPLE_BRANCH_ANY = 8, PERF_SAMPLE_BRANCH_ANY_CALL = 16, PERF_SAMPLE_BRANCH_ANY_RETURN = 32, PERF_SAMPLE_BRANCH_IND_CALL = 64, PERF_SAMPLE_BRANCH_ABORT_TX = 128, PERF_SAMPLE_BRANCH_IN_TX = 256, PERF_SAMPLE_BRANCH_NO_TX = 
512, PERF_SAMPLE_BRANCH_COND = 1024, PERF_SAMPLE_BRANCH_CALL_STACK = 2048, PERF_SAMPLE_BRANCH_IND_JUMP = 4096, PERF_SAMPLE_BRANCH_CALL = 8192, PERF_SAMPLE_BRANCH_NO_FLAGS = 16384, PERF_SAMPLE_BRANCH_NO_CYCLES = 32768, PERF_SAMPLE_BRANCH_TYPE_SAVE = 65536, PERF_SAMPLE_BRANCH_HW_INDEX = 131072, PERF_SAMPLE_BRANCH_PRIV_SAVE = 262144, PERF_SAMPLE_BRANCH_MAX = 524288, }; enum perf_probe_config { PERF_PROBE_CONFIG_IS_RETPROBE = 1, PERF_UPROBE_REF_CTR_OFFSET_BITS = 32, PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 32, }; enum perf_event_ioc_flags { PERF_IOC_FLAG_GROUP = 1, }; enum { IF_STATE_ACTION = 0, IF_STATE_SOURCE = 1, IF_STATE_END = 2, }; enum { IF_ACT_NONE = -1, IF_ACT_FILTER = 0, IF_ACT_START = 1, IF_ACT_STOP = 2, IF_SRC_FILE = 3, IF_SRC_KERNEL = 4, IF_SRC_FILEADDR = 5, IF_SRC_KERNELADDR = 6, }; struct perf_pmu_events_attr { struct device_attribute attr; u64 id; const char *event_str; }; struct perf_addr_filter { struct list_head entry; struct path path; unsigned long offset; unsigned long size; enum perf_addr_filter_action_t action; }; typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *, struct perf_event_context *, void *); struct perf_event_header { __u32 type; __u16 misc; __u16 size; }; struct perf_switch_event { struct task_struct *task; struct task_struct *next_prev; struct { struct perf_event_header header; u32 next_prev_pid; u32 next_prev_tid; } event_id; }; typedef void perf_iterate_f(struct perf_event *, void *); struct stop_event_data { struct perf_event *event; unsigned int restart; }; typedef int (*remote_function_f)(void *); struct remote_function_call { struct task_struct *p; remote_function_f func; void *info; int ret; }; struct perf_task_event { struct task_struct *task; struct perf_event_context *task_ctx; struct { struct perf_event_header header; u32 pid; u32 ppid; u32 tid; u32 ptid; u64 time; } event_id; }; struct perf_ns_link_info { __u64 dev; __u64 ino; }; struct perf_comm_event { struct task_struct *task; char *comm; int comm_size; struct { struct perf_event_header header; u32 pid; u32 tid; } event_id; }; struct perf_mmap_event { struct vm_area_struct *vma; const char *file_name; int file_size; int maj; int min; u64 ino; u64 ino_generation; u32 prot; u32 flags; u8 build_id[20]; u32 build_id_size; struct { struct perf_event_header header; u32 pid; u32 tid; u64 start; u64 len; u64 pgoff; } event_id; }; struct perf_aux_event { struct perf_event_header header; u64 offset; u64 size; u64 flags; }; struct perf_aux_event___2 { struct perf_event_header header; u64 hw_id; }; struct __group_key { int cpu; struct pmu *pmu; struct cgroup *cgroup; }; struct min_heap { void *data; int nr; int size; }; struct perf_aux_event___3 { struct perf_event_header header; u32 pid; u32 tid; }; struct perf_read_event { struct perf_event_header header; u32 pid; u32 tid; }; typedef struct { char *from; char *to; } substring_t; struct remote_output { struct perf_buffer *rb; int err; }; struct perf_namespaces_event { struct task_struct *task; struct { struct perf_event_header header; u32 pid; u32 tid; u64 nr_namespaces; struct perf_ns_link_info link_info[7]; } event_id; }; struct perf_ksymbol_event { const char *name; int name_len; struct { struct perf_event_header header; u64 addr; u32 len; u16 ksym_type; u16 flags; } event_id; }; struct perf_bpf_event { struct bpf_prog *prog; struct { struct perf_event_header header; u16 type; u16 flags; u32 id; u8 tag[8]; } event_id; }; struct perf_text_poke_event { const void *old_bytes; const void *new_bytes; size_t pad; u16 old_len; u16 new_len; 
struct { struct perf_event_header header; u64 addr; } event_id; }; struct event_function_struct { struct perf_event *event; event_f func; void *data; }; struct perf_read_data { struct perf_event *event; bool group; int ret; }; struct callchain_cpus_entries { struct callback_head callback_head; struct perf_callchain_entry *cpu_entries[0]; }; struct bp_slots_histogram { atomic_t *count; }; struct rhltable { struct rhashtable ht; }; struct bp_cpuinfo { unsigned int cpu_pinned; struct bp_slots_histogram tsk_pinned; }; struct xol_area { wait_queue_head_t wq; atomic_t slot_count; unsigned long *bitmap; struct vm_special_mapping xol_mapping; struct page *pages[2]; unsigned long vaddr; }; struct uprobe { struct rb_node rb_node; refcount_t ref; struct rw_semaphore register_rwsem; struct rw_semaphore consumer_rwsem; struct list_head pending_list; struct uprobe_consumer *consumers; struct inode *inode; loff_t offset; loff_t ref_ctr_offset; unsigned long flags; struct arch_uprobe arch; }; typedef __le32 uprobe_opcode_t; struct delayed_uprobe { struct list_head list; struct uprobe *uprobe; struct mm_struct *mm; }; struct page_vma_mapped_walk { unsigned long pfn; unsigned long nr_pages; unsigned long pgoff; struct vm_area_struct *vma; unsigned long address; pmd_t *pmd; pte_t *pte; spinlock_t *ptl; unsigned int flags; }; typedef unsigned int fgf_t; struct __uprobe_key { struct inode *inode; loff_t offset; }; struct map_info { struct map_info *next; struct mm_struct *mm; unsigned long vaddr; }; typedef int filler_t(struct file *, struct folio *); struct static_key_deferred { struct static_key key; unsigned long timeout; struct delayed_work work; }; struct static_key_mod { struct static_key_mod *next; struct jump_entry *entries; struct module *mod; }; struct key_preparsed_payload { const char *orig_description; char *description; union key_payload payload; const void *data; size_t datalen; size_t quotalen; time64_t expiry; }; struct key_match_data { bool (*cmp)(const struct key *, const struct key_match_data *); const void *raw_data; void *preparsed; unsigned int lookup_type; }; enum kernel_pkey_operation { kernel_pkey_encrypt = 0, kernel_pkey_decrypt = 1, kernel_pkey_sign = 2, kernel_pkey_verify = 3, }; struct kernel_pkey_params { struct key *key; const char *encoding; const char *hash_algo; char *info; __u32 in_len; union { __u32 out_len; __u32 in2_len; }; enum kernel_pkey_operation op: 8; }; struct kernel_pkey_query { __u32 supported_ops; __u32 key_size; __u16 max_data_size; __u16 max_sig_size; __u16 max_enc_size; __u16 max_dec_size; }; enum OID { OID_id_dsa_with_sha1 = 0, OID_id_dsa = 1, OID_id_ecPublicKey = 2, OID_id_prime192v1 = 3, OID_id_prime256v1 = 4, OID_id_ecdsa_with_sha1 = 5, OID_id_ecdsa_with_sha224 = 6, OID_id_ecdsa_with_sha256 = 7, OID_id_ecdsa_with_sha384 = 8, OID_id_ecdsa_with_sha512 = 9, OID_rsaEncryption = 10, OID_md2WithRSAEncryption = 11, OID_md3WithRSAEncryption = 12, OID_md4WithRSAEncryption = 13, OID_sha1WithRSAEncryption = 14, OID_sha256WithRSAEncryption = 15, OID_sha384WithRSAEncryption = 16, OID_sha512WithRSAEncryption = 17, OID_sha224WithRSAEncryption = 18, OID_data = 19, OID_signed_data = 20, OID_email_address = 21, OID_contentType = 22, OID_messageDigest = 23, OID_signingTime = 24, OID_smimeCapabilites = 25, OID_smimeAuthenticatedAttrs = 26, OID_md2 = 27, OID_md4 = 28, OID_md5 = 29, OID_mskrb5 = 30, OID_krb5 = 31, OID_krb5u2u = 32, OID_msIndirectData = 33, OID_msStatementType = 34, OID_msSpOpusInfo = 35, OID_msPeImageDataObjId = 36, OID_msIndividualSPKeyPurpose = 37, 
OID_msOutlookExpress = 38, OID_ntlmssp = 39, OID_negoex = 40, OID_spnego = 41, OID_IAKerb = 42, OID_PKU2U = 43, OID_Scram = 44, OID_certAuthInfoAccess = 45, OID_sha1 = 46, OID_id_ansip384r1 = 47, OID_sha256 = 48, OID_sha384 = 49, OID_sha512 = 50, OID_sha224 = 51, OID_commonName = 52, OID_surname = 53, OID_countryName = 54, OID_locality = 55, OID_stateOrProvinceName = 56, OID_organizationName = 57, OID_organizationUnitName = 58, OID_title = 59, OID_description = 60, OID_name = 61, OID_givenName = 62, OID_initials = 63, OID_generationalQualifier = 64, OID_subjectKeyIdentifier = 65, OID_keyUsage = 66, OID_subjectAltName = 67, OID_issuerAltName = 68, OID_basicConstraints = 69, OID_crlDistributionPoints = 70, OID_certPolicies = 71, OID_authorityKeyIdentifier = 72, OID_extKeyUsage = 73, OID_NetlogonMechanism = 74, OID_appleLocalKdcSupported = 75, OID_gostCPSignA = 76, OID_gostCPSignB = 77, OID_gostCPSignC = 78, OID_gost2012PKey256 = 79, OID_gost2012PKey512 = 80, OID_gost2012Digest256 = 81, OID_gost2012Digest512 = 82, OID_gost2012Signature256 = 83, OID_gost2012Signature512 = 84, OID_gostTC26Sign256A = 85, OID_gostTC26Sign256B = 86, OID_gostTC26Sign256C = 87, OID_gostTC26Sign256D = 88, OID_gostTC26Sign512A = 89, OID_gostTC26Sign512B = 90, OID_gostTC26Sign512C = 91, OID_sm2 = 92, OID_sm3 = 93, OID_SM2_with_SM3 = 94, OID_sm3WithRSAEncryption = 95, OID_TPMLoadableKey = 96, OID_TPMImportableKey = 97, OID_TPMSealedData = 98, OID__NR = 99, }; struct x509_certificate; struct pkcs7_signed_info; struct pkcs7_message { struct x509_certificate *certs; struct x509_certificate *crl; struct pkcs7_signed_info *signed_infos; u8 version; bool have_authattrs; enum OID data_type; size_t data_len; size_t data_hdrlen; const void *data; }; struct compact_control; struct capture_control { struct compact_control *cc; struct page *page; }; struct compact_control { struct list_head freepages; struct list_head migratepages; unsigned int nr_freepages; unsigned int nr_migratepages; unsigned long free_pfn; unsigned long migrate_pfn; unsigned long fast_start_pfn; struct zone *zone; unsigned long total_migrate_scanned; unsigned long total_free_scanned; unsigned short fast_search_fail; short search_order; const gfp_t gfp_mask; int order; int migratetype; const unsigned int alloc_flags; const int highest_zoneidx; enum migrate_mode mode; bool ignore_skip_hint; bool no_set_skip_hint; bool ignore_block_suitable; bool direct_compaction; bool proactive_compaction; bool whole_zone; bool contended; bool finish_pageblock; bool alloc_contig; }; struct fid { union { struct { u32 ino; u32 gen; u32 parent_ino; u32 parent_gen; } i32; struct { u32 block; u16 partref; u16 parent_partref; u32 generation; u32 parent_block; u32 parent_generation; } udf; struct { struct {} __empty_raw; __u32 raw[0]; }; }; }; typedef void (*btf_trace_mm_filemap_delete_from_page_cache)(void *, struct folio *); typedef void (*btf_trace_mm_filemap_add_to_page_cache)(void *, struct folio *); typedef void (*btf_trace_filemap_set_wb_err)(void *, struct address_space *, errseq_t); typedef void (*btf_trace_file_check_and_advance_wb_err)(void *, struct file *, errseq_t); enum mapping_flags { AS_EIO = 0, AS_ENOSPC = 1, AS_MM_ALL_LOCKS = 2, AS_UNEVICTABLE = 3, AS_EXITING = 4, AS_NO_WRITEBACK_TAGS = 5, AS_LARGE_FOLIO_SUPPORT = 6, AS_RELEASE_ALWAYS = 7, AS_STABLE_WRITES = 8, }; enum behavior { EXCLUSIVE = 0, SHARED = 1, DROP = 2, }; enum vm_event_item { PGPGIN = 0, PGPGOUT = 1, PSWPIN = 2, PSWPOUT = 3, PGALLOC_DMA32 = 4, PGALLOC_NORMAL = 5, PGALLOC_MOVABLE = 6, PGALLOC_NOSPLIT = 
7, PGALLOC_NOMERGE = 8, ALLOCSTALL_DMA32 = 9, ALLOCSTALL_NORMAL = 10, ALLOCSTALL_MOVABLE = 11, ALLOCSTALL_NOSPLIT = 12, ALLOCSTALL_NOMERGE = 13, PGSCAN_SKIP_DMA32 = 14, PGSCAN_SKIP_NORMAL = 15, PGSCAN_SKIP_MOVABLE = 16, PGSCAN_SKIP_NOSPLIT = 17, PGSCAN_SKIP_NOMERGE = 18, PGFREE = 19, PGACTIVATE = 20, PGDEACTIVATE = 21, PGLAZYFREE = 22, PGFAULT = 23, PGMAJFAULT = 24, PGLAZYFREED = 25, PGREFILL = 26, PGREUSE = 27, PGSTEAL_KSWAPD = 28, PGSTEAL_DIRECT = 29, PGSTEAL_KHUGEPAGED = 30, PGDEMOTE_KSWAPD = 31, PGDEMOTE_DIRECT = 32, PGDEMOTE_KHUGEPAGED = 33, PGSCAN_KSWAPD = 34, PGSCAN_DIRECT = 35, PGSCAN_KHUGEPAGED = 36, PGSCAN_DIRECT_THROTTLE = 37, PGSCAN_ANON = 38, PGSCAN_FILE = 39, PGSTEAL_ANON = 40, PGSTEAL_FILE = 41, PGINODESTEAL = 42, SLABS_SCANNED = 43, KSWAPD_INODESTEAL = 44, KSWAPD_LOW_WMARK_HIT_QUICKLY = 45, KSWAPD_HIGH_WMARK_HIT_QUICKLY = 46, PAGEOUTRUN = 47, PGROTATED = 48, DROP_PAGECACHE = 49, DROP_SLAB = 50, OOM_KILL = 51, PGMIGRATE_SUCCESS = 52, PGMIGRATE_FAIL = 53, THP_MIGRATION_SUCCESS = 54, THP_MIGRATION_FAIL = 55, THP_MIGRATION_SPLIT = 56, COMPACTMIGRATE_SCANNED = 57, COMPACTFREE_SCANNED = 58, COMPACTISOLATED = 59, COMPACTSTALL = 60, COMPACTFAIL = 61, COMPACTSUCCESS = 62, KCOMPACTD_WAKE = 63, KCOMPACTD_MIGRATE_SCANNED = 64, KCOMPACTD_FREE_SCANNED = 65, CMA_ALLOC_SUCCESS = 66, CMA_ALLOC_FAIL = 67, UNEVICTABLE_PGCULLED = 68, UNEVICTABLE_PGSCANNED = 69, UNEVICTABLE_PGRESCUED = 70, UNEVICTABLE_PGMLOCKED = 71, UNEVICTABLE_PGMUNLOCKED = 72, UNEVICTABLE_PGCLEARED = 73, UNEVICTABLE_PGSTRANDED = 74, THP_FAULT_ALLOC = 75, THP_FAULT_FALLBACK = 76, THP_FAULT_FALLBACK_CHARGE = 77, THP_COLLAPSE_ALLOC = 78, THP_COLLAPSE_ALLOC_FAILED = 79, THP_FILE_ALLOC = 80, THP_FILE_FALLBACK = 81, THP_FILE_FALLBACK_CHARGE = 82, THP_FILE_MAPPED = 83, THP_SPLIT_PAGE = 84, THP_SPLIT_PAGE_FAILED = 85, THP_DEFERRED_SPLIT_PAGE = 86, THP_SPLIT_PMD = 87, THP_SHATTER_PAGE = 88, THP_SHATTER_PAGE_FAILED = 89, THP_SHATTER_PAGE_DISCARDED = 90, THP_SCAN_EXCEED_NONE_PTE = 91, THP_SCAN_EXCEED_SWAP_PTE = 92, THP_SCAN_EXCEED_SHARED_PTE = 93, THP_ZERO_PAGE_ALLOC = 94, THP_ZERO_PAGE_ALLOC_FAILED = 95, THP_SWPOUT = 96, THP_SWPOUT_FALLBACK = 97, BALLOON_INFLATE = 98, BALLOON_DEFLATE = 99, BALLOON_MIGRATE = 100, SWAP_RA = 101, SWAP_RA_HIT = 102, NR_VM_EVENT_ITEMS = 103, }; enum positive_aop_returns { AOP_WRITEPAGE_ACTIVATE = 524288, AOP_TRUNCATED_PAGE = 524289, }; enum { SB_UNFROZEN = 0, SB_FREEZE_WRITE = 1, SB_FREEZE_PAGEFAULT = 2, SB_FREEZE_FS = 3, SB_FREEZE_COMPLETE = 4, }; struct trace_event_raw_mm_filemap_op_page_cache { struct trace_entry ent; unsigned long pfn; unsigned long i_ino; unsigned long index; dev_t s_dev; unsigned char order; char __data[0]; }; struct trace_event_raw_filemap_set_wb_err { struct trace_entry ent; unsigned long i_ino; dev_t s_dev; errseq_t errseq; char __data[0]; }; struct trace_event_raw_file_check_and_advance_wb_err { struct trace_entry ent; struct file *file; unsigned long i_ino; dev_t s_dev; errseq_t old; errseq_t new; char __data[0]; }; struct cachestat_range { __u64 off; __u64 len; }; struct cachestat { __u64 nr_cache; __u64 nr_dirty; __u64 nr_writeback; __u64 nr_evicted; __u64 nr_recently_evicted; }; struct folio_batch { unsigned char nr; bool percpu_pvec_drained; struct folio *folios[15]; }; struct wait_page_key { struct folio *folio; int bit_nr; int page_match; }; struct trace_event_data_offsets_mm_filemap_op_page_cache {}; struct trace_event_data_offsets_filemap_set_wb_err {}; struct trace_event_data_offsets_file_check_and_advance_wb_err {}; struct reciprocal_value { u32 m; u8 sh1; u8 sh2; }; 
struct kmem_cache_order_objects { unsigned int x; }; struct kmem_cache_cpu; struct kmem_cache_node; struct kmem_cache { struct kmem_cache_cpu __attribute__((btf_type_tag("percpu"))) *cpu_slab; slab_flags_t flags; unsigned long min_partial; unsigned int size; unsigned int object_size; struct reciprocal_value reciprocal_size; unsigned int offset; unsigned int cpu_partial; unsigned int cpu_partial_slabs; struct kmem_cache_order_objects oo; struct kmem_cache_order_objects min; gfp_t allocflags; int refcount; void (*ctor)(void *); unsigned int inuse; unsigned int align; unsigned int red_left_pad; const char *name; struct list_head list; struct kobject kobj; unsigned long random; unsigned int *random_seq; unsigned int useroffset; unsigned int usersize; struct kmem_cache_node *node[1]; }; typedef unsigned __int128 __u128; typedef __u128 u128; typedef u128 freelist_full_t; typedef union { struct { void *freelist; unsigned long counter; }; freelist_full_t full; } freelist_aba_t; struct slab; struct kmem_cache_cpu { union { struct { void **freelist; unsigned long tid; }; freelist_aba_t freelist_tid; }; struct slab *slab; struct slab *partial; local_lock_t lock; }; struct slab { unsigned long __page_flags; struct kmem_cache *slab_cache; union { struct { union { struct list_head slab_list; struct { struct slab *next; int slabs; }; }; union { struct { void *freelist; union { unsigned long counters; struct { unsigned int inuse: 16; unsigned int objects: 15; unsigned int frozen: 1; }; }; }; freelist_aba_t freelist_counter; }; }; struct callback_head callback_head; }; unsigned int __unused; atomic_t __page_refcount; unsigned long memcg_data; }; struct kmem_cache_node { spinlock_t list_lock; unsigned long nr_partial; struct list_head partial; atomic_long_t nr_slabs; atomic_long_t total_objects; struct list_head full; }; typedef void (*btf_trace_oom_score_adj_update)(void *, struct task_struct *); typedef void (*btf_trace_reclaim_retry_zone)(void *, struct zoneref *, int, unsigned long, unsigned long, unsigned long, int, bool); typedef void (*btf_trace_mark_victim)(void *, struct task_struct *, uid_t); typedef void (*btf_trace_wake_reaper)(void *, int); typedef void (*btf_trace_start_task_reaping)(void *, int); typedef void (*btf_trace_finish_task_reaping)(void *, int); typedef void (*btf_trace_skip_task_reaping)(void *, int); enum compact_priority { COMPACT_PRIO_SYNC_FULL = 0, MIN_COMPACT_PRIORITY = 0, COMPACT_PRIO_SYNC_LIGHT = 1, MIN_COMPACT_COSTLY_PRIORITY = 1, DEF_COMPACT_PRIORITY = 1, COMPACT_PRIO_ASYNC = 2, INIT_COMPACT_PRIORITY = 2, }; enum compact_result { COMPACT_NOT_SUITABLE_ZONE = 0, COMPACT_SKIPPED = 1, COMPACT_DEFERRED = 2, COMPACT_NO_SUITABLE_PAGE = 3, COMPACT_CONTINUE = 4, COMPACT_COMPLETE = 5, COMPACT_PARTIAL_SKIPPED = 6, COMPACT_CONTENDED = 7, COMPACT_SUCCESS = 8, }; typedef void (*btf_trace_compact_retry)(void *, int, enum compact_priority, enum compact_result, int, int, bool); enum oom_constraint { CONSTRAINT_NONE = 0, CONSTRAINT_CPUSET = 1, CONSTRAINT_MEMORY_POLICY = 2, CONSTRAINT_MEMCG = 3, }; enum memcg_memory_event { MEMCG_LOW = 0, MEMCG_HIGH = 1, MEMCG_MAX = 2, MEMCG_OOM = 3, MEMCG_OOM_KILL = 4, MEMCG_OOM_GROUP_KILL = 5, MEMCG_SWAP_HIGH = 6, MEMCG_SWAP_MAX = 7, MEMCG_SWAP_FAIL = 8, MEMCG_NR_MEMORY_EVENTS = 9, }; struct trace_event_raw_oom_score_adj_update { struct trace_entry ent; pid_t pid; char comm[16]; short oom_score_adj; char __data[0]; }; struct trace_event_raw_reclaim_retry_zone { struct trace_entry ent; int node; int zone_idx; int order; unsigned long reclaimable; unsigned 
long available; unsigned long min_wmark; int no_progress_loops; bool wmark_check; char __data[0]; }; struct trace_event_raw_mark_victim { struct trace_entry ent; int pid; u32 __data_loc_comm; unsigned long total_vm; unsigned long anon_rss; unsigned long file_rss; unsigned long shmem_rss; uid_t uid; unsigned long pgtables; short oom_score_adj; char __data[0]; }; struct trace_event_raw_wake_reaper { struct trace_entry ent; int pid; char __data[0]; }; struct trace_event_raw_start_task_reaping { struct trace_entry ent; int pid; char __data[0]; }; struct trace_event_raw_finish_task_reaping { struct trace_entry ent; int pid; char __data[0]; }; struct trace_event_raw_skip_task_reaping { struct trace_entry ent; int pid; char __data[0]; }; struct trace_event_raw_compact_retry { struct trace_entry ent; int order; int priority; int result; int retries; int max_retries; bool ret; char __data[0]; }; struct trace_event_data_offsets_mark_victim { u32 comm; }; struct oom_control { struct zonelist *zonelist; nodemask_t *nodemask; struct mem_cgroup *memcg; const gfp_t gfp_mask; const int order; unsigned long totalpages; struct task_struct *chosen; long chosen_points; enum oom_constraint constraint; }; struct encoded_page; struct mmu_gather_batch { struct mmu_gather_batch *next; unsigned int nr; unsigned int max; struct encoded_page *encoded_pages[0]; }; struct mmu_table_batch; struct mmu_gather { struct mm_struct *mm; struct mmu_table_batch *batch; unsigned long start; unsigned long end; unsigned int fullmm: 1; unsigned int need_flush_all: 1; unsigned int freed_tables: 1; unsigned int delayed_rmap: 1; unsigned int cleared_ptes: 1; unsigned int cleared_pmds: 1; unsigned int cleared_puds: 1; unsigned int cleared_p4ds: 1; unsigned int vma_exec: 1; unsigned int vma_huge: 1; unsigned int vma_pfn: 1; unsigned int batch_count; struct mmu_gather_batch *active; struct mmu_gather_batch local; struct page *__pages[8]; }; struct mmu_table_batch { struct callback_head rcu; unsigned int nr; void *tables[0]; }; struct trace_event_data_offsets_oom_score_adj_update {}; struct trace_event_data_offsets_reclaim_retry_zone {}; struct trace_event_data_offsets_wake_reaper {}; struct trace_event_data_offsets_start_task_reaping {}; struct trace_event_data_offsets_finish_task_reaping {}; struct trace_event_data_offsets_skip_task_reaping {}; struct trace_event_data_offsets_compact_retry {}; enum wb_stat_item { WB_RECLAIMABLE = 0, WB_WRITEBACK = 1, WB_DIRTIED = 2, WB_WRITTEN = 3, NR_WB_STAT_ITEMS = 4, }; enum { XA_CHECK_SCHED = 4096, }; enum vmscan_throttle_state { VMSCAN_THROTTLE_WRITEBACK = 0, VMSCAN_THROTTLE_ISOLATED = 1, VMSCAN_THROTTLE_NOPROGRESS = 2, VMSCAN_THROTTLE_CONGESTED = 3, NR_VMSCAN_THROTTLE = 4, }; enum zone_stat_item { NR_FREE_PAGES = 0, NR_ZONE_LRU_BASE = 1, NR_ZONE_INACTIVE_ANON = 1, NR_ZONE_ACTIVE_ANON = 2, NR_ZONE_INACTIVE_FILE = 3, NR_ZONE_ACTIVE_FILE = 4, NR_ZONE_UNEVICTABLE = 5, NR_ZONE_WRITE_PENDING = 6, NR_MLOCK = 7, NR_BOUNCE = 8, NR_ZSPAGES = 9, NR_FREE_CMA_PAGES = 10, NR_VM_ZONE_STAT_ITEMS = 11, }; enum wb_state { WB_registered = 0, WB_writeback_running = 1, WB_has_dirty_io = 2, WB_start_all = 3, }; enum page_memcg_data_flags { MEMCG_DATA_OBJCGS = 1, MEMCG_DATA_KMEM = 2, __NR_MEMCG_DATA_FLAGS = 4, }; struct dirty_throttle_control { struct wb_domain *dom; struct dirty_throttle_control *gdtc; struct bdi_writeback *wb; struct fprop_local_percpu *wb_completions; unsigned long avail; unsigned long dirty; unsigned long thresh; unsigned long bg_thresh; unsigned long wb_dirty; unsigned long wb_thresh; unsigned long 
wb_bg_thresh; unsigned long pos_ratio; }; struct wb_lock_cookie { bool locked; unsigned long flags; }; typedef int (*writepage_t)(struct folio *, struct writeback_control *, void *); typedef void (*btf_trace_mm_lru_insertion)(void *, struct folio *); typedef void (*btf_trace_mm_lru_activate)(void *, struct folio *); struct lru_rotate { local_lock_t lock; struct folio_batch fbatch; }; struct cpu_fbatches { local_lock_t lock; struct folio_batch lru_add; struct folio_batch lru_deactivate_file; struct folio_batch lru_deactivate; struct folio_batch lru_lazyfree; struct folio_batch activate; }; enum lru_list { LRU_INACTIVE_ANON = 0, LRU_ACTIVE_ANON = 1, LRU_INACTIVE_FILE = 2, LRU_ACTIVE_FILE = 3, LRU_UNEVICTABLE = 4, NR_LRU_LISTS = 5, }; enum { LRU_GEN_ANON = 0, LRU_GEN_FILE = 1, }; enum { LRU_GEN_CORE = 0, LRU_GEN_MM_WALK = 1, LRU_GEN_NONLEAF_YOUNG = 2, NR_LRU_GEN_CAPS = 3, }; struct trace_event_raw_mm_lru_insertion { struct trace_entry ent; struct folio *folio; unsigned long pfn; enum lru_list lru; unsigned long flags; char __data[0]; }; struct trace_event_raw_mm_lru_activate { struct trace_entry ent; struct folio *folio; unsigned long pfn; char __data[0]; }; typedef void (*move_fn_t)(struct lruvec *, struct folio *); struct trace_event_data_offsets_mm_lru_insertion {}; struct trace_event_data_offsets_mm_lru_activate {}; typedef union { struct page **pages; struct folio **folios; struct encoded_page **encoded_pages; } release_pages_arg; typedef void (*btf_trace_mm_vmscan_kswapd_sleep)(void *, int); typedef void (*btf_trace_mm_vmscan_kswapd_wake)(void *, int, int, int); typedef void (*btf_trace_mm_vmscan_wakeup_kswapd)(void *, int, int, int, gfp_t); typedef void (*btf_trace_mm_vmscan_direct_reclaim_begin)(void *, int, gfp_t); typedef void (*btf_trace_mm_vmscan_memcg_reclaim_begin)(void *, int, gfp_t); typedef void (*btf_trace_mm_vmscan_memcg_softlimit_reclaim_begin)(void *, int, gfp_t); typedef void (*btf_trace_mm_vmscan_direct_reclaim_end)(void *, unsigned long); typedef void (*btf_trace_mm_vmscan_memcg_reclaim_end)(void *, unsigned long); typedef void (*btf_trace_mm_vmscan_memcg_softlimit_reclaim_end)(void *, unsigned long); typedef void (*btf_trace_mm_shrink_slab_start)(void *, struct shrinker *, struct shrink_control *, long, unsigned long, unsigned long long, unsigned long, int); typedef void (*btf_trace_mm_shrink_slab_end)(void *, struct shrinker *, int, int, long, long, long); typedef unsigned int isolate_mode_t; typedef void (*btf_trace_mm_vmscan_lru_isolate)(void *, int, int, unsigned long, unsigned long, unsigned long, unsigned long, isolate_mode_t, int); typedef void (*btf_trace_mm_vmscan_write_folio)(void *, struct folio *); struct reclaim_stat; typedef void (*btf_trace_mm_vmscan_lru_shrink_inactive)(void *, int, unsigned long, unsigned long, struct reclaim_stat *, int, int); struct reclaim_stat { unsigned int nr_dirty; unsigned int nr_unqueued_dirty; unsigned int nr_congested; unsigned int nr_writeback; unsigned int nr_immediate; unsigned int nr_pageout; unsigned int nr_activate[2]; unsigned int nr_ref_keep; unsigned int nr_unmap_fail; unsigned int nr_lazyfree_fail; }; typedef void (*btf_trace_mm_vmscan_lru_shrink_active)(void *, int, unsigned long, unsigned long, unsigned long, unsigned long, int, int); typedef void (*btf_trace_mm_vmscan_node_reclaim_begin)(void *, int, int, gfp_t); typedef void (*btf_trace_mm_vmscan_node_reclaim_end)(void *, unsigned long); typedef void (*btf_trace_mm_vmscan_throttled)(void *, int, int, int, int); enum page_walk_lock { PGWALK_RDLOCK = 0, 
PGWALK_WRLOCK = 1, PGWALK_WRLOCK_VERIFY = 2, }; struct mm_walk; struct mm_walk_ops { int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *); int (*p4d_entry)(p4d_t *, unsigned long, unsigned long, struct mm_walk *); int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *); int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *); int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *); int (*pte_hole)(unsigned long, unsigned long, int, struct mm_walk *); int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long, unsigned long, struct mm_walk *); int (*test_walk)(unsigned long, unsigned long, struct mm_walk *); int (*pre_vma)(unsigned long, unsigned long, struct mm_walk *); void (*post_vma)(struct mm_walk *); enum page_walk_lock walk_lock; }; enum page_walk_action { ACTION_SUBTREE = 0, ACTION_CONTINUE = 1, ACTION_AGAIN = 2, }; struct mm_walk { const struct mm_walk_ops *ops; struct mm_struct *mm; pgd_t *pgd; struct vm_area_struct *vma; enum page_walk_action action; bool no_vma; void *private; }; enum { MEMCG_LRU_NOP = 0, MEMCG_LRU_HEAD = 1, MEMCG_LRU_TAIL = 2, MEMCG_LRU_OLD = 3, MEMCG_LRU_YOUNG = 4, }; enum folio_references { FOLIOREF_RECLAIM = 0, FOLIOREF_RECLAIM_CLEAN = 1, FOLIOREF_KEEP = 2, FOLIOREF_ACTIVATE = 3, }; enum pgdat_flags { PGDAT_DIRTY = 0, PGDAT_WRITEBACK = 1, PGDAT_RECLAIM_LOCKED = 2, }; enum mthp_stat_item { MTHP_STAT_ANON_FAULT_ALLOC = 0, MTHP_STAT_ANON_FAULT_FALLBACK = 1, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE = 2, MTHP_STAT_SWPOUT = 3, MTHP_STAT_SWPOUT_FALLBACK = 4, __MTHP_STAT_COUNT = 5, }; enum ttu_flags { TTU_SPLIT_HUGE_PMD = 4, TTU_IGNORE_MLOCK = 8, TTU_SYNC = 16, TTU_HWPOISON = 32, TTU_BATCH_FLUSH = 64, TTU_RMAP_LOCKED = 128, }; enum { SWP_USED = 1, SWP_WRITEOK = 2, SWP_DISCARDABLE = 4, SWP_DISCARDING = 8, SWP_SOLIDSTATE = 16, SWP_CONTINUED = 32, SWP_BLKDEV = 64, SWP_ACTIVATED = 128, SWP_FS_OPS = 256, SWP_AREA_DISCARD = 512, SWP_PAGE_DISCARD = 1024, SWP_STABLE_WRITES = 2048, SWP_SYNCHRONOUS_IO = 4096, SWP_SCANNING = 16384, }; enum migrate_reason { MR_COMPACTION = 0, MR_MEMORY_FAILURE = 1, MR_MEMORY_HOTPLUG = 2, MR_SYSCALL = 3, MR_MEMPOLICY_MBIND = 4, MR_NUMA_MISPLACED = 5, MR_CONTIG_RANGE = 6, MR_LONGTERM_PIN = 7, MR_DEMOTION = 8, MR_TYPES = 9, }; enum { MM_LEAF_TOTAL = 0, MM_LEAF_OLD = 1, MM_LEAF_YOUNG = 2, MM_NONLEAF_TOTAL = 3, MM_NONLEAF_FOUND = 4, MM_NONLEAF_ADDED = 5, NR_MM_STATS = 6, }; enum migratetype { MIGRATE_UNMOVABLE = 0, MIGRATE_MOVABLE = 1, MIGRATE_RECLAIMABLE = 2, MIGRATE_FALLBACKS = 3, MIGRATE_CMA = 3, MIGRATE_PCPTYPES = 4, MIGRATE_HIGHATOMIC = 4, MIGRATE_ISOLATE = 5, MIGRATE_TYPES = 6, }; enum zone_watermarks { WMARK_MIN = 0, WMARK_LOW = 1, WMARK_HIGH = 2, WMARK_PROMO = 3, NR_WMARK = 4, }; enum lruvec_flags { LRUVEC_CGROUP_CONGESTED = 0, LRUVEC_NODE_CONGESTED = 1, }; enum scan_balance { SCAN_EQUAL = 0, SCAN_FRACT = 1, SCAN_ANON = 2, SCAN_FILE = 3, }; enum zone_flags { ZONE_BOOSTED_WATERMARK = 0, ZONE_RECLAIM_ACTIVE = 1, }; struct trace_event_raw_mm_vmscan_kswapd_sleep { struct trace_entry ent; int nid; char __data[0]; }; struct trace_event_raw_mm_vmscan_kswapd_wake { struct trace_entry ent; int nid; int zid; int order; char __data[0]; }; struct trace_event_raw_mm_vmscan_wakeup_kswapd { struct trace_entry ent; int nid; int zid; int order; unsigned long gfp_flags; char __data[0]; }; struct trace_event_raw_mm_vmscan_direct_reclaim_begin_template { struct trace_entry ent; int order; unsigned long gfp_flags; char __data[0]; }; struct trace_event_raw_mm_vmscan_direct_reclaim_end_template 
{ struct trace_entry ent; unsigned long nr_reclaimed; char __data[0]; }; struct trace_event_raw_mm_shrink_slab_start { struct trace_entry ent; struct shrinker *shr; void *shrink; int nid; long nr_objects_to_shrink; unsigned long gfp_flags; unsigned long cache_items; unsigned long long delta; unsigned long total_scan; int priority; char __data[0]; }; struct trace_event_raw_mm_shrink_slab_end { struct trace_entry ent; struct shrinker *shr; int nid; void *shrink; long unused_scan; long new_scan; int retval; long total_scan; char __data[0]; }; struct trace_event_raw_mm_vmscan_lru_isolate { struct trace_entry ent; int highest_zoneidx; int order; unsigned long nr_requested; unsigned long nr_scanned; unsigned long nr_skipped; unsigned long nr_taken; unsigned int isolate_mode; int lru; char __data[0]; }; struct trace_event_raw_mm_vmscan_write_folio { struct trace_entry ent; unsigned long pfn; int reclaim_flags; char __data[0]; }; struct trace_event_raw_mm_vmscan_lru_shrink_inactive { struct trace_entry ent; int nid; unsigned long nr_scanned; unsigned long nr_reclaimed; unsigned long nr_dirty; unsigned long nr_writeback; unsigned long nr_congested; unsigned long nr_immediate; unsigned int nr_activate0; unsigned int nr_activate1; unsigned long nr_ref_keep; unsigned long nr_unmap_fail; int priority; int reclaim_flags; char __data[0]; }; struct trace_event_raw_mm_vmscan_lru_shrink_active { struct trace_entry ent; int nid; unsigned long nr_taken; unsigned long nr_active; unsigned long nr_deactivated; unsigned long nr_referenced; int priority; int reclaim_flags; char __data[0]; }; struct trace_event_raw_mm_vmscan_node_reclaim_begin { struct trace_entry ent; int nid; int order; unsigned long gfp_flags; char __data[0]; }; struct trace_event_raw_mm_vmscan_throttled { struct trace_entry ent; int nid; int usec_timeout; int usec_delayed; int reason; char __data[0]; }; struct migration_target_control { int nid; nodemask_t *nmask; gfp_t gfp_mask; }; struct scan_control { unsigned long nr_to_reclaim; nodemask_t *nodemask; struct mem_cgroup *target_mem_cgroup; unsigned long anon_cost; unsigned long file_cost; unsigned int may_deactivate: 2; unsigned int force_deactivate: 1; unsigned int skipped_deactivate: 1; unsigned int may_writepage: 1; unsigned int may_unmap: 1; unsigned int may_swap: 1; unsigned int proactive: 1; unsigned int memcg_low_reclaim: 1; unsigned int memcg_low_skipped: 1; unsigned int hibernation_mode: 1; unsigned int compaction_ready: 1; unsigned int cache_trim_mode: 1; unsigned int file_is_tiny: 1; unsigned int no_demotion: 1; s8 order; s8 priority; s8 reclaim_idx; gfp_t gfp_mask; unsigned long nr_scanned; unsigned long nr_reclaimed; struct { unsigned int dirty; unsigned int unqueued_dirty; unsigned int congested; unsigned int writeback; unsigned int immediate; unsigned int file_taken; unsigned int taken; } nr; struct reclaim_state reclaim_state; u64 android_vendor_data1; }; struct mem_cgroup_reclaim_cookie { pg_data_t *pgdat; unsigned int generation; }; struct ctrl_pos { unsigned long refaulted; unsigned long total; int gain; }; typedef enum { PAGE_KEEP = 0, PAGE_ACTIVATE = 1, PAGE_SUCCESS = 2, PAGE_CLEAN = 3, } pageout_t; struct trace_event_data_offsets_mm_vmscan_kswapd_sleep {}; struct trace_event_data_offsets_mm_vmscan_kswapd_wake {}; struct trace_event_data_offsets_mm_vmscan_wakeup_kswapd {}; struct trace_event_data_offsets_mm_vmscan_direct_reclaim_begin_template {}; struct trace_event_data_offsets_mm_vmscan_direct_reclaim_end_template {}; struct trace_event_data_offsets_mm_shrink_slab_start 
{}; struct trace_event_data_offsets_mm_shrink_slab_end {}; struct trace_event_data_offsets_mm_vmscan_lru_isolate {}; struct trace_event_data_offsets_mm_vmscan_write_folio {}; struct trace_event_data_offsets_mm_vmscan_lru_shrink_inactive {}; struct trace_event_data_offsets_mm_vmscan_lru_shrink_active {}; struct trace_event_data_offsets_mm_vmscan_node_reclaim_begin {}; struct trace_event_data_offsets_mm_vmscan_throttled {}; struct kstatfs { long f_type; long f_bsize; u64 f_blocks; u64 f_bfree; u64 f_bavail; u64 f_files; u64 f_ffree; __kernel_fsid_t f_fsid; long f_namelen; long f_frsize; long f_flags; long f_spare[4]; }; struct fileattr { u32 flags; u32 fsx_xflags; u32 fsx_extsize; u32 fsx_nextents; u32 fsx_projid; u32 fsx_cowextsize; bool flags_valid: 1; bool fsx_valid: 1; }; struct constant_table { const char *name; int value; }; enum sgp_type { SGP_READ = 0, SGP_NOALLOC = 1, SGP_CACHE = 2, SGP_WRITE = 3, SGP_FALLOC = 4, }; enum mfill_atomic_mode { MFILL_ATOMIC_COPY = 0, MFILL_ATOMIC_ZEROPAGE = 1, MFILL_ATOMIC_CONTINUE = 2, MFILL_ATOMIC_POISON = 3, NR_MFILL_ATOMIC_MODES = 4, }; enum shmem_param { Opt_gid = 0, Opt_huge = 1, Opt_mode = 2, Opt_mpol = 3, Opt_nr_blocks = 4, Opt_nr_inodes = 5, Opt_size = 6, Opt_uid = 7, Opt_inode32 = 8, Opt_inode64 = 9, Opt_noswap = 10, Opt_quota = 11, Opt_usrquota = 12, Opt_grpquota = 13, Opt_usrquota_block_hardlimit = 14, Opt_usrquota_inode_hardlimit = 15, Opt_grpquota_block_hardlimit = 16, Opt_grpquota_inode_hardlimit = 17, }; enum fid_type { FILEID_ROOT = 0, FILEID_INO32_GEN = 1, FILEID_INO32_GEN_PARENT = 2, FILEID_BTRFS_WITHOUT_PARENT = 77, FILEID_BTRFS_WITH_PARENT = 78, FILEID_BTRFS_WITH_PARENT_ROOT = 79, FILEID_UDF_WITHOUT_PARENT = 81, FILEID_UDF_WITH_PARENT = 82, FILEID_NILFS_WITHOUT_PARENT = 97, FILEID_NILFS_WITH_PARENT = 98, FILEID_FAT_WITHOUT_PARENT = 113, FILEID_FAT_WITH_PARENT = 114, FILEID_LUSTRE = 151, FILEID_KERNFS = 254, FILEID_INVALID = 255, }; enum { _DQUOT_USAGE_ENABLED = 0, _DQUOT_LIMITS_ENABLED = 1, _DQUOT_SUSPENDED = 2, _DQUOT_STATE_FLAGS = 3, }; struct shared_policy {}; struct simple_xattrs { struct rb_root rb_root; rwlock_t lock; }; struct shmem_inode_info { spinlock_t lock; unsigned int seals; unsigned long flags; unsigned long alloced; unsigned long swapped; unsigned long fallocend; struct list_head shrinklist; struct list_head swaplist; struct shared_policy policy; struct simple_xattrs xattrs; atomic_t stop_eviction; struct timespec64 i_crtime; unsigned int fsflags; struct offset_ctx dir_offsets; struct inode vfs_inode; u64 android_vendor_data1; }; typedef unsigned int uffd_flags_t; struct shmem_quota_limits { qsize_t usrquota_bhardlimit; qsize_t usrquota_ihardlimit; qsize_t grpquota_bhardlimit; qsize_t grpquota_ihardlimit; }; struct mempolicy; struct shmem_sb_info { unsigned long max_blocks; struct percpu_counter used_blocks; unsigned long max_inodes; unsigned long free_ispace; raw_spinlock_t stat_lock; umode_t mode; unsigned char huge; kuid_t uid; kgid_t gid; bool full_inums; bool noswap; ino_t next_ino; ino_t __attribute__((btf_type_tag("percpu"))) *ino_batch; struct mempolicy *mpol; spinlock_t shrinklist_lock; struct list_head shrinklist; unsigned long shrinklist_len; struct shmem_quota_limits qlimits; }; struct mempolicy {}; typedef int (*initxattrs)(struct inode *, const struct xattr *, void *); struct shmem_falloc { wait_queue_head_t *waitq; unsigned long start; unsigned long next; unsigned long nr_falloced; unsigned long nr_unswapped; }; struct shmem_options { unsigned long long blocks; unsigned long long inodes; struct 
mempolicy *mpol; kuid_t uid; kgid_t gid; umode_t mode; bool full_inums; int huge; int seen; bool noswap; unsigned short quota_types; struct shmem_quota_limits qlimits; }; struct vm_event_state { unsigned long event[103]; }; enum writeback_stat_item { NR_DIRTY_THRESHOLD = 0, NR_DIRTY_BG_THRESHOLD = 1, NR_VM_WRITEBACK_STAT_ITEMS = 2, }; struct contig_page_info { unsigned long free_pages; unsigned long free_blocks_total; unsigned long free_blocks_suitable; }; enum { RADIX_TREE_ITER_TAG_MASK = 15, RADIX_TREE_ITER_TAGGED = 16, RADIX_TREE_ITER_CONTIG = 32, }; struct radix_tree_iter { unsigned long index; unsigned long next_index; unsigned long tags; struct xa_node *node; }; enum mminit_level { MMINIT_WARNING = 0, MMINIT_VERIFY = 1, MMINIT_TRACE = 2, }; enum meminit_context { MEMINIT_EARLY = 0, MEMINIT_HOTPLUG = 1, }; typedef void (*btf_trace_percpu_alloc_percpu)(void *, unsigned long, bool, bool, size_t, size_t, void *, int, void __attribute__((btf_type_tag("percpu"))) *, size_t, gfp_t); typedef void (*btf_trace_percpu_free_percpu)(void *, void *, int, void __attribute__((btf_type_tag("percpu"))) *); typedef void (*btf_trace_percpu_alloc_percpu_fail)(void *, bool, bool, size_t, size_t); typedef void (*btf_trace_percpu_create_chunk)(void *, void *); typedef void (*btf_trace_percpu_destroy_chunk)(void *, void *); enum pcpu_fc { PCPU_FC_AUTO = 0, PCPU_FC_EMBED = 1, PCPU_FC_PAGE = 2, PCPU_FC_NR = 3, }; struct pcpu_block_md { int scan_hint; int scan_hint_start; int contig_hint; int contig_hint_start; int left_free; int right_free; int first_free; int nr_bits; }; struct pcpu_chunk { struct list_head list; int free_bytes; struct pcpu_block_md chunk_md; unsigned long *bound_map; void *base_addr; unsigned long *alloc_map; struct pcpu_block_md *md_blocks; void *data; bool immutable; bool isolated; int start_offset; int end_offset; struct obj_cgroup **obj_cgroups; int nr_pages; int nr_populated; int nr_empty_pop_pages; unsigned long populated[0]; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; enum memcg_stat_item { MEMCG_SWAP = 42, MEMCG_SOCK = 43, MEMCG_PERCPU_B = 44, MEMCG_VMALLOC = 45, MEMCG_KMEM = 46, MEMCG_ZSWAP_B = 47, MEMCG_ZSWAPPED = 48, MEMCG_NR_STAT = 49, }; struct trace_event_raw_percpu_alloc_percpu { struct trace_entry ent; unsigned long call_site; bool reserved; bool is_atomic; size_t size; size_t align; void *base_addr; int off; void __attribute__((btf_type_tag("percpu"))) *ptr; size_t bytes_alloc; unsigned long gfp_flags; char __data[0]; }; struct trace_event_raw_percpu_free_percpu { struct trace_entry ent; void *base_addr; int off; void __attribute__((btf_type_tag("percpu"))) *ptr; char __data[0]; }; struct trace_event_raw_percpu_alloc_percpu_fail { struct trace_entry ent; bool reserved; bool is_atomic; size_t size; size_t align; char __data[0]; }; struct trace_event_raw_percpu_create_chunk { struct trace_entry ent; void *base_addr; char __data[0]; }; struct trace_event_raw_percpu_destroy_chunk { struct trace_entry ent; void *base_addr; char __data[0]; }; typedef int pcpu_fc_cpu_to_node_fn_t(int); struct pcpu_group_info { int nr_units; unsigned long base_offset; unsigned int *cpu_map; }; struct pcpu_alloc_info { size_t static_size; size_t reserved_size; size_t dyn_size; size_t unit_size; size_t atom_size; size_t alloc_size; size_t __ai_size; int nr_groups; struct pcpu_group_info groups[0]; }; struct trace_event_data_offsets_percpu_alloc_percpu {}; struct trace_event_data_offsets_percpu_free_percpu {}; struct trace_event_data_offsets_percpu_alloc_percpu_fail {}; 
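/*
 * Usage sketch for the trace_event_raw_* layouts above: a tracepoint BPF
 * program receives its context in exactly this shape (struct trace_entry
 * followed by the event-specific fields, with variable-size data behind
 * __data_loc/__data). A minimal example, assuming libbpf's
 * <bpf/bpf_helpers.h> and that this header is included as "vmlinux.h";
 * the program name and the fields printed are illustrative only.
 *
 *   #include "vmlinux.h"
 *   #include <bpf/bpf_helpers.h>
 *
 *   SEC("tracepoint/percpu/percpu_alloc_percpu")
 *   int log_percpu_alloc(struct trace_event_raw_percpu_alloc_percpu *ctx)
 *   {
 *           // Fixed-width members can be read straight from the context;
 *           // only __data_loc/__data members need the offset/length decode.
 *           bpf_printk("pcpu alloc: size=%lu bytes_alloc=%lu",
 *                      (unsigned long)ctx->size,
 *                      (unsigned long)ctx->bytes_alloc);
 *           return 0;
 *   }
 *
 *   char LICENSE[] SEC("license") = "GPL";
 */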
struct trace_event_data_offsets_percpu_create_chunk {}; struct trace_event_data_offsets_percpu_destroy_chunk {}; typedef int pcpu_fc_cpu_distance_fn_t(unsigned int, unsigned int); typedef void (*btf_trace_kmem_cache_alloc)(void *, unsigned long, const void *, struct kmem_cache *, gfp_t, int); typedef void (*btf_trace_kmalloc)(void *, unsigned long, const void *, size_t, size_t, gfp_t, int); typedef void (*btf_trace_kfree)(void *, unsigned long, const void *); typedef void (*btf_trace_kmem_cache_free)(void *, unsigned long, const void *, const struct kmem_cache *); typedef void (*btf_trace_mm_page_free)(void *, struct page *, unsigned int); typedef void (*btf_trace_mm_page_free_batched)(void *, struct page *); typedef void (*btf_trace_mm_page_alloc)(void *, struct page *, unsigned int, gfp_t, int); typedef void (*btf_trace_mm_page_alloc_zone_locked)(void *, struct page *, unsigned int, int, int); typedef void (*btf_trace_mm_page_pcpu_drain)(void *, struct page *, unsigned int, int); typedef void (*btf_trace_mm_page_alloc_extfrag)(void *, struct page *, int, int, int, int); typedef void (*btf_trace_rss_stat)(void *, struct mm_struct *, int); struct kmalloc_info_struct { const char *name[3]; unsigned int size; }; enum slab_state { DOWN = 0, PARTIAL = 1, PARTIAL_NODE = 2, UP = 3, FULL = 4, }; struct trace_event_raw_kmem_cache_alloc { struct trace_entry ent; unsigned long call_site; const void *ptr; size_t bytes_req; size_t bytes_alloc; unsigned long gfp_flags; int node; bool accounted; char __data[0]; }; struct trace_event_raw_kmalloc { struct trace_entry ent; unsigned long call_site; const void *ptr; size_t bytes_req; size_t bytes_alloc; unsigned long gfp_flags; int node; char __data[0]; }; struct trace_event_raw_kfree { struct trace_entry ent; unsigned long call_site; const void *ptr; char __data[0]; }; struct trace_event_raw_kmem_cache_free { struct trace_entry ent; unsigned long call_site; const void *ptr; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_mm_page_free { struct trace_entry ent; unsigned long pfn; unsigned int order; char __data[0]; }; struct trace_event_raw_mm_page_free_batched { struct trace_entry ent; unsigned long pfn; char __data[0]; }; struct trace_event_raw_mm_page_alloc { struct trace_entry ent; unsigned long pfn; unsigned int order; unsigned long gfp_flags; int migratetype; char __data[0]; }; struct trace_event_raw_mm_page { struct trace_entry ent; unsigned long pfn; unsigned int order; int migratetype; int percpu_refill; char __data[0]; }; struct trace_event_raw_mm_page_pcpu_drain { struct trace_entry ent; unsigned long pfn; unsigned int order; int migratetype; char __data[0]; }; struct trace_event_raw_mm_page_alloc_extfrag { struct trace_entry ent; unsigned long pfn; int alloc_order; int fallback_order; int alloc_migratetype; int fallback_migratetype; int change_ownership; char __data[0]; }; struct trace_event_raw_rss_stat { struct trace_entry ent; unsigned int mm_id; unsigned int curr; int member; long size; char __data[0]; }; struct trace_event_data_offsets_kmem_cache_free { u32 name; }; struct kmem_obj_info { void *kp_ptr; struct slab *kp_slab; void *kp_objp; unsigned long kp_data_offset; struct kmem_cache *kp_slab_cache; void *kp_ret; void *kp_stack[16]; void *kp_free_stack[16]; }; struct slabinfo { unsigned long active_objs; unsigned long num_objs; unsigned long active_slabs; unsigned long num_slabs; unsigned long shared_avail; unsigned int limit; unsigned int batchcount; unsigned int shared; unsigned int objects_per_slab; unsigned int 
cache_order; }; struct trace_event_data_offsets_kmem_cache_alloc {}; struct trace_event_data_offsets_kmalloc {}; struct trace_event_data_offsets_kfree {}; struct trace_event_data_offsets_mm_page_free {}; struct trace_event_data_offsets_mm_page_free_batched {}; struct trace_event_data_offsets_mm_page_alloc {}; struct trace_event_data_offsets_mm_page {}; struct trace_event_data_offsets_mm_page_pcpu_drain {}; struct trace_event_data_offsets_mm_page_alloc_extfrag {}; struct trace_event_data_offsets_rss_stat {}; typedef void (*btf_trace_mm_compaction_isolate_migratepages)(void *, unsigned long, unsigned long, unsigned long, unsigned long); typedef void (*btf_trace_mm_compaction_isolate_freepages)(void *, unsigned long, unsigned long, unsigned long, unsigned long); typedef void (*btf_trace_mm_compaction_fast_isolate_freepages)(void *, unsigned long, unsigned long, unsigned long, unsigned long); typedef void (*btf_trace_mm_compaction_migratepages)(void *, struct compact_control *, unsigned int); typedef void (*btf_trace_mm_compaction_begin)(void *, struct compact_control *, unsigned long, unsigned long, bool); typedef void (*btf_trace_mm_compaction_end)(void *, struct compact_control *, unsigned long, unsigned long, bool, int); typedef void (*btf_trace_mm_compaction_try_to_compact_pages)(void *, int, gfp_t, int); typedef void (*btf_trace_mm_compaction_finished)(void *, struct zone *, int, int); typedef void (*btf_trace_mm_compaction_suitable)(void *, struct zone *, int, int); typedef void (*btf_trace_mm_compaction_deferred)(void *, struct zone *, int); typedef void (*btf_trace_mm_compaction_defer_compaction)(void *, struct zone *, int); typedef void (*btf_trace_mm_compaction_defer_reset)(void *, struct zone *, int); typedef void (*btf_trace_mm_compaction_kcompactd_sleep)(void *, int); typedef void (*btf_trace_mm_compaction_wakeup_kcompactd)(void *, int, int, enum zone_type); typedef void (*btf_trace_mm_compaction_kcompactd_wake)(void *, int, int, enum zone_type); enum pageblock_bits { PB_migrate = 0, PB_migrate_end = 2, PB_migrate_skip = 3, NR_PAGEBLOCK_BITS = 4, }; struct trace_event_raw_mm_compaction_isolate_template { struct trace_entry ent; unsigned long start_pfn; unsigned long end_pfn; unsigned long nr_scanned; unsigned long nr_taken; char __data[0]; }; struct trace_event_raw_mm_compaction_migratepages { struct trace_entry ent; unsigned long nr_migrated; unsigned long nr_failed; char __data[0]; }; struct trace_event_raw_mm_compaction_begin { struct trace_entry ent; unsigned long zone_start; unsigned long migrate_pfn; unsigned long free_pfn; unsigned long zone_end; bool sync; char __data[0]; }; struct trace_event_raw_mm_compaction_end { struct trace_entry ent; unsigned long zone_start; unsigned long migrate_pfn; unsigned long free_pfn; unsigned long zone_end; bool sync; int status; char __data[0]; }; struct trace_event_raw_mm_compaction_try_to_compact_pages { struct trace_entry ent; int order; unsigned long gfp_mask; int prio; char __data[0]; }; struct trace_event_raw_mm_compaction_suitable_template { struct trace_entry ent; int nid; enum zone_type idx; int order; int ret; char __data[0]; }; struct trace_event_raw_mm_compaction_defer_template { struct trace_entry ent; int nid; enum zone_type idx; int order; unsigned int considered; unsigned int defer_shift; int order_failed; char __data[0]; }; struct trace_event_raw_mm_compaction_kcompactd_sleep { struct trace_entry ent; int nid; char __data[0]; }; struct trace_event_raw_kcompactd_wake_template { struct trace_entry ent; int nid; int order; 
enum zone_type highest_zoneidx; char __data[0]; }; struct movable_operations { bool (*isolate_page)(struct page *, isolate_mode_t); int (*migrate_page)(struct page *, struct page *, enum migrate_mode); void (*putback_page)(struct page *); }; typedef enum { ISOLATE_ABORT = 0, ISOLATE_NONE = 1, ISOLATE_SUCCESS = 2, } isolate_migrate_t; typedef struct folio *new_folio_t(struct folio *, unsigned long); typedef void free_folio_t(struct folio *, unsigned long); struct trace_event_data_offsets_mm_compaction_isolate_template {}; struct trace_event_data_offsets_mm_compaction_migratepages {}; struct trace_event_data_offsets_mm_compaction_begin {}; struct trace_event_data_offsets_mm_compaction_end {}; struct trace_event_data_offsets_mm_compaction_try_to_compact_pages {}; struct trace_event_data_offsets_mm_compaction_suitable_template {}; struct trace_event_data_offsets_mm_compaction_defer_template {}; struct trace_event_data_offsets_mm_compaction_kcompactd_sleep {}; struct trace_event_data_offsets_kcompactd_wake_template {}; struct alloc_context { struct zonelist *zonelist; nodemask_t *nodemask; struct zoneref *preferred_zoneref; int migratetype; enum zone_type highest_zoneidx; bool spread_dirty_pages; }; struct anon_vma_chain { struct vm_area_struct *vma; struct anon_vma *anon_vma; struct list_head same_vma; struct rb_node rb; unsigned long rb_subtree_last; }; enum lru_status { LRU_REMOVED = 0, LRU_REMOVED_RETRY = 1, LRU_ROTATE = 2, LRU_SKIP = 3, LRU_RETRY = 4, }; struct list_lru_memcg { struct callback_head rcu; struct list_lru_one node[0]; }; typedef enum lru_status (*list_lru_walk_cb)(struct list_head *, struct list_lru_one *, spinlock_t *, void *); struct list_lru_memcg_table { struct list_lru_memcg *mlru; struct mem_cgroup *memcg; }; enum { FOLL_TOUCH = 65536, FOLL_TRIED = 131072, FOLL_REMOTE = 262144, FOLL_PIN = 524288, FOLL_FAST_ONLY = 1048576, FOLL_UNLOCKABLE = 2097152, FOLL_MADV_POPULATE = 4194304, }; struct follow_page_context { struct dev_pagemap *pgmap; unsigned int page_mask; }; typedef void (*btf_trace_mmap_lock_start_locking)(void *, struct mm_struct *, const char *, bool); typedef void (*btf_trace_mmap_lock_released)(void *, struct mm_struct *, const char *, bool); typedef void (*btf_trace_mmap_lock_acquire_returned)(void *, struct mm_struct *, const char *, bool, bool); struct memcg_path { local_lock_t lock; char __attribute__((btf_type_tag("rcu"))) *buf; local_t buf_idx; }; struct trace_event_raw_mmap_lock { struct trace_entry ent; struct mm_struct *mm; u32 __data_loc_memcg_path; bool write; char __data[0]; }; struct trace_event_raw_mmap_lock_acquire_returned { struct trace_entry ent; struct mm_struct *mm; u32 __data_loc_memcg_path; bool write; bool success; char __data[0]; }; struct trace_event_data_offsets_mmap_lock { u32 memcg_path; }; struct trace_event_data_offsets_mmap_lock_acquire_returned { u32 memcg_path; }; typedef int (*show_pad_smaps_fn)(struct seq_file *, void *); typedef void (*show_pad_maps_fn)(struct seq_file *, struct vm_area_struct *); enum rmap_level { RMAP_LEVEL_PTE = 0, RMAP_LEVEL_PMD = 1, }; enum transparent_hugepage_flag { TRANSPARENT_HUGEPAGE_UNSUPPORTED = 0, TRANSPARENT_HUGEPAGE_FLAG = 1, TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG = 2, TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG = 3, TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG = 4, TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG = 5, TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG = 6, TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG = 7, TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG = 8, }; typedef int rmap_t; typedef int fpb_t; typedef 
unsigned long pte_marker; typedef struct { u64 val; } pfn_t; typedef unsigned int pgtbl_mod_mask; struct copy_subpage_arg { struct page *dst; struct page *src; struct vm_area_struct *vma; }; struct mlock_fbatch { local_lock_t lock; struct folio_batch fbatch; }; struct vm_unmapped_area_info; typedef void (*btf_trace_vm_unmapped_area)(void *, unsigned long, struct vm_unmapped_area_info *); struct vm_unmapped_area_info { unsigned long flags; unsigned long length; unsigned long low_limit; unsigned long high_limit; unsigned long align_mask; unsigned long align_offset; }; typedef void (*btf_trace_vma_mas_szero)(void *, struct maple_tree *, unsigned long, unsigned long); typedef void (*btf_trace_vma_store)(void *, struct maple_tree *, struct vm_area_struct *); typedef void (*btf_trace_exit_mmap)(void *, struct mm_struct *); enum { HUGETLB_SHMFS_INODE = 1, HUGETLB_ANONHUGE_INODE = 2, }; struct trace_event_raw_vm_unmapped_area { struct trace_entry ent; unsigned long addr; unsigned long total_vm; unsigned long flags; unsigned long length; unsigned long low_limit; unsigned long high_limit; unsigned long align_mask; unsigned long align_offset; char __data[0]; }; struct trace_event_raw_vma_mas_szero { struct trace_entry ent; struct maple_tree *mt; unsigned long start; unsigned long end; char __data[0]; }; struct trace_event_raw_vma_store { struct trace_entry ent; struct maple_tree *mt; struct vm_area_struct *vma; unsigned long vm_start; unsigned long vm_end; char __data[0]; }; struct trace_event_raw_exit_mmap { struct trace_entry ent; struct mm_struct *mm; struct maple_tree *mt; char __data[0]; }; struct vma_prepare { struct vm_area_struct *vma; struct vm_area_struct *adj_next; struct file *file; struct address_space *mapping; struct anon_vma *anon_vma; struct vm_area_struct *insert; struct vm_area_struct *remove; struct vm_area_struct *remove2; }; struct trace_event_data_offsets_vm_unmapped_area {}; struct trace_event_data_offsets_vma_mas_szero {}; struct trace_event_data_offsets_vma_store {}; struct trace_event_data_offsets_exit_mmap {}; enum pgt_entry { NORMAL_PMD = 0, HPAGE_PMD = 1, NORMAL_PUD = 2, HPAGE_PUD = 3, }; struct hstate {}; typedef void (*btf_trace_tlb_flush)(void *, int, unsigned long); typedef void (*btf_trace_mm_migrate_pages)(void *, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, enum migrate_mode, int); typedef void (*btf_trace_mm_migrate_pages_start)(void *, enum migrate_mode, int); typedef void (*btf_trace_set_migration_pte)(void *, unsigned long, unsigned long, int); typedef void (*btf_trace_remove_migration_pte)(void *, unsigned long, unsigned long, int); struct trace_event_raw_tlb_flush { struct trace_entry ent; int reason; unsigned long pages; char __data[0]; }; struct trace_event_raw_mm_migrate_pages { struct trace_entry ent; unsigned long succeeded; unsigned long failed; unsigned long thp_succeeded; unsigned long thp_failed; unsigned long thp_split; enum migrate_mode mode; int reason; char __data[0]; }; struct trace_event_raw_mm_migrate_pages_start { struct trace_entry ent; enum migrate_mode mode; int reason; char __data[0]; }; struct trace_event_raw_migration_pte { struct trace_entry ent; unsigned long addr; unsigned long pte; int order; char __data[0]; }; struct rmap_walk_control { void *arg; bool try_lock; bool contended; bool (*rmap_one)(struct folio *, struct vm_area_struct *, unsigned long, void *); int (*done)(struct folio *); struct anon_vma * (*anon_lock)(struct folio *, struct rmap_walk_control *); bool (*invalid_vma)(struct 
vm_area_struct *, void *); }; struct trace_event_data_offsets_tlb_flush {}; struct trace_event_data_offsets_mm_migrate_pages {}; struct trace_event_data_offsets_mm_migrate_pages_start {}; struct trace_event_data_offsets_migration_pte {}; struct folio_referenced_arg { int mapcount; int referenced; unsigned long vm_flags; struct mem_cgroup *memcg; }; typedef void (*btf_trace_alloc_vmap_area)(void *, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, int); typedef void (*btf_trace_purge_vmap_area_lazy)(void *, unsigned long, unsigned long, unsigned int); typedef void (*btf_trace_free_vmap_area_noflush)(void *, unsigned long, unsigned long, unsigned long); struct vfree_deferred { struct llist_head list; struct work_struct wq; }; struct vmap_block_queue { spinlock_t lock; struct list_head free; struct xarray vmap_blocks; }; struct vmap_area { unsigned long va_start; unsigned long va_end; struct rb_node rb_node; struct list_head list; union { unsigned long subtree_max_size; struct vm_struct *vm; }; unsigned long flags; }; enum fit_type { NOTHING_FIT = 0, FL_FIT_TYPE = 1, LE_FIT_TYPE = 2, RE_FIT_TYPE = 3, NE_FIT_TYPE = 4, }; struct trace_event_raw_alloc_vmap_area { struct trace_entry ent; unsigned long addr; unsigned long size; unsigned long align; unsigned long vstart; unsigned long vend; int failed; char __data[0]; }; struct trace_event_raw_purge_vmap_area_lazy { struct trace_entry ent; unsigned long start; unsigned long end; unsigned int npurged; char __data[0]; }; struct trace_event_raw_free_vmap_area_noflush { struct trace_entry ent; unsigned long va_start; unsigned long nr_lazy; unsigned long nr_lazy_max; char __data[0]; }; struct vmap_block { spinlock_t lock; struct vmap_area *va; unsigned long free; unsigned long dirty; unsigned long used_map[16]; unsigned long dirty_min; unsigned long dirty_max; struct list_head free_list; struct callback_head callback_head; struct list_head purge; unsigned int cpu; }; struct trace_event_data_offsets_alloc_vmap_area {}; struct trace_event_data_offsets_purge_vmap_area_lazy {}; struct trace_event_data_offsets_free_vmap_area_noflush {}; typedef int fpi_t; struct va_format { const char *fmt; va_list *va; }; struct page_frag_cache { void *va; __u16 offset; __u16 size; unsigned int pagecnt_bias; bool pfmemalloc; }; struct memblock { bool bottom_up; phys_addr_t current_limit; struct memblock_type memory; struct memblock_type reserved; }; enum memblock_memsize_state { MEMBLOCK_MEMSIZE_NONE = 0, MEMBLOCK_MEMSIZE_DEBUGFS = 1, MEMBLOCK_MEMSIZE_PROCFS = 2, }; struct memsize_rgn_struct { phys_addr_t base; long size; bool nomap; bool reusable; char name[100]; }; typedef void (*online_page_callback_t)(struct page *, unsigned int); enum { MMOP_OFFLINE = 0, MMOP_ONLINE = 1, MMOP_ONLINE_KERNEL = 2, MMOP_ONLINE_MOVABLE = 3, }; enum { ONLINE_POLICY_CONTIG_ZONES = 0, ONLINE_POLICY_AUTO_MOVABLE = 1, }; enum { MEMMAP_ON_MEMORY_DISABLE = 0, MEMMAP_ON_MEMORY_ENABLE = 1, MEMMAP_ON_MEMORY_FORCE = 2, }; typedef int mhp_t; struct memory_group; struct memory_block { unsigned long start_section_nr; unsigned long state; int online_type; int nid; struct zone *zone; struct device dev; struct vmem_altmap *altmap; struct memory_group *group; struct list_head group_next; }; struct memory_group { int nid; struct list_head memory_blocks; unsigned long present_kernel_pages; unsigned long present_movable_pages; bool is_dynamic; union { struct { unsigned long max_pages; } s; struct { unsigned long unit_pages; } d; }; }; struct auto_movable_group_stats { unsigned long 
movable_pages; unsigned long req_kernel_early_pages; }; struct auto_movable_stats { unsigned long kernel_early_pages; unsigned long movable_pages; }; typedef int (*walk_memory_groups_func_t)(struct memory_group *, void *); typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *); enum iter_type { ITER_IOVEC = 0, ITER_KVEC = 1, ITER_BVEC = 2, ITER_XARRAY = 3, ITER_DISCARD = 4, ITER_UBUF = 5, }; struct madvise_walk_private { struct mmu_gather *tlb; bool pageout; }; enum { BIO_PAGE_PINNED = 0, BIO_CLONED = 1, BIO_BOUNCED = 2, BIO_QUIET = 3, BIO_CHAIN = 4, BIO_REFFED = 5, BIO_BPS_THROTTLED = 6, BIO_TRACE_COMPLETION = 7, BIO_CGROUP_ACCT = 8, BIO_QOS_THROTTLED = 9, BIO_QOS_MERGED = 10, BIO_REMAPPED = 11, BIO_ZONE_WRITE_LOCKED = 12, BIO_FLAG_LAST = 13, }; struct swap_iocb { struct kiocb iocb; struct bio_vec bvec[32]; int pages; int len; }; struct vma_swap_readahead { unsigned short win; unsigned short offset; unsigned short nr_pte; }; enum { PERCPU_REF_INIT_ATOMIC = 1, PERCPU_REF_INIT_DEAD = 2, PERCPU_REF_ALLOW_REINIT = 4, }; struct swap_extent { struct rb_node rb_node; unsigned long start_page; unsigned long nr_pages; sector_t start_block; }; union swap_header { struct { char reserved[4086]; char magic[10]; } magic; struct { char bootbits[1024]; __u32 version; __u32 last_page; __u32 nr_badpages; unsigned char sws_uuid[16]; unsigned char sws_volume[16]; __u32 padding[117]; __u32 badpages[1]; } info; }; struct swap_slots_cache { bool lock_initialized; struct mutex alloc_lock; swp_entry_t *slots; int nr; int cur; spinlock_t free_lock; swp_entry_t *slots_ret; int n_ret; }; struct dma_page { struct list_head page_list; void *vaddr; dma_addr_t dma; }; struct dma_block; struct dma_pool { struct list_head page_list; spinlock_t lock; struct dma_block *next_block; size_t nr_blocks; size_t nr_active; size_t nr_pages; struct device *dev; unsigned int size; unsigned int allocation; unsigned int boundary; char name[32]; struct list_head pools; }; struct dma_block { struct dma_block *next_block; dma_addr_t dma; }; struct mmu_notifier_subscriptions { struct hlist_head list; bool has_itree; spinlock_t lock; unsigned long invalidate_seq; unsigned long active_invalidate_ranges; struct rb_root_cached itree; wait_queue_head_t wq; struct hlist_head deferred_list; }; struct mmu_interval_notifier_ops; struct mmu_interval_notifier { struct interval_tree_node interval_tree; const struct mmu_interval_notifier_ops *ops; struct mm_struct *mm; struct hlist_node deferred_item; unsigned long invalidate_seq; }; struct mmu_interval_notifier_ops { bool (*invalidate)(struct mmu_interval_notifier *, const struct mmu_notifier_range *, unsigned long); }; struct slub_flush_work { struct work_struct work; struct kmem_cache *s; bool skip; }; struct slab_attribute { struct attribute attr; ssize_t (*show)(struct kmem_cache *, char *); ssize_t (*store)(struct kmem_cache *, const char *, size_t); }; struct saved_alias { struct kmem_cache *s; const char *name; struct saved_alias *next; }; enum track_item { TRACK_ALLOC = 0, TRACK_FREE = 1, }; enum stat_item { ALLOC_FASTPATH = 0, ALLOC_SLOWPATH = 1, FREE_FASTPATH = 2, FREE_SLOWPATH = 3, FREE_FROZEN = 4, FREE_ADD_PARTIAL = 5, FREE_REMOVE_PARTIAL = 6, ALLOC_FROM_PARTIAL = 7, ALLOC_SLAB = 8, ALLOC_REFILL = 9, ALLOC_NODE_MISMATCH = 10, FREE_SLAB = 11, CPUSLAB_FLUSH = 12, DEACTIVATE_FULL = 13, DEACTIVATE_EMPTY = 14, DEACTIVATE_TO_HEAD = 15, DEACTIVATE_TO_TAIL = 16, DEACTIVATE_REMOTE_FREES = 17, DEACTIVATE_BYPASS = 18, ORDER_FALLBACK = 19, CMPXCHG_DOUBLE_CPU_FAIL = 20, 
CMPXCHG_DOUBLE_FAIL = 21, CPU_PARTIAL_ALLOC = 22, CPU_PARTIAL_FREE = 23, CPU_PARTIAL_NODE = 24, CPU_PARTIAL_DRAIN = 25, NR_SLUB_STAT_ITEMS = 26, }; enum slab_modes { M_NONE = 0, M_PARTIAL = 1, M_FREE = 2, M_FULL_NOLIST = 3, }; enum slab_stat_type { SL_ALL = 0, SL_PARTIAL = 1, SL_CPU = 2, SL_OBJECTS = 3, SL_TOTAL = 4, }; typedef struct { unsigned long v; } freeptr_t; struct kunit_resource; typedef void (*kunit_resource_free_t)(struct kunit_resource *); struct kunit_resource { void *data; const char *name; kunit_resource_free_t free; struct kref refcount; struct list_head node; bool should_kfree; }; typedef u32 depot_stack_handle_t; struct location { depot_stack_handle_t handle; unsigned long count; unsigned long addr; unsigned long waste; long long sum_time; long min_time; long max_time; long min_pid; long max_pid; unsigned long cpus[1]; nodemask_t nodes; }; struct track { unsigned long addr; depot_stack_handle_t handle; int cpu; int pid; unsigned long when; }; typedef freelist_full_t pcp_op_T__; union __u128_halves { u128 full; struct { u64 low; u64 high; }; }; struct detached_freelist { struct slab *slab; void *tail; void *freelist; int cnt; struct kmem_cache *s; }; struct partial_context { struct slab **slab; gfp_t flags; unsigned int orig_size; }; typedef bool (*kunit_resource_match_t)(struct kunit *, struct kunit_resource *, void *); struct loc_track { unsigned long max; unsigned long count; struct location *loc; loff_t idx; }; enum kasan_report_type { KASAN_REPORT_ACCESS = 0, KASAN_REPORT_INVALID_FREE = 1, KASAN_REPORT_DOUBLE_FREE = 2, }; struct kasan_track { u32 pid; depot_stack_handle_t stack; }; enum kasan_arg_fault { KASAN_ARG_FAULT_DEFAULT = 0, KASAN_ARG_FAULT_REPORT = 1, KASAN_ARG_FAULT_PANIC = 2, KASAN_ARG_FAULT_PANIC_ON_WRITE = 3, }; struct kasan_report_info { enum kasan_report_type type; const void *access_addr; size_t access_size; bool is_write; unsigned long ip; const void *first_bad_addr; struct kmem_cache *cache; void *object; size_t alloc_size; const char *bug_type; struct kasan_track alloc_track; struct kasan_track free_track; }; enum kasan_arg { KASAN_ARG_DEFAULT = 0, KASAN_ARG_OFF = 1, KASAN_ARG_ON = 2, }; enum kasan_arg_mode { KASAN_ARG_MODE_DEFAULT = 0, KASAN_ARG_MODE_SYNC = 1, KASAN_ARG_MODE_ASYNC = 2, KASAN_ARG_MODE_ASYMM = 3, }; enum kasan_arg_vmalloc { KASAN_ARG_VMALLOC_DEFAULT = 0, KASAN_ARG_VMALLOC_OFF = 1, KASAN_ARG_VMALLOC_ON = 2, }; enum kasan_mode { KASAN_MODE_SYNC = 0, KASAN_MODE_ASYNC = 1, KASAN_MODE_ASYMM = 2, }; struct kasan_stack_ring_entry; struct kasan_stack_ring { rwlock_t lock; size_t size; atomic64_t pos; struct kasan_stack_ring_entry *entries; }; struct kasan_stack_ring_entry { void *ptr; size_t size; u32 pid; depot_stack_handle_t stack; bool is_free; }; enum kasan_arg_stacktrace { KASAN_ARG_STACKTRACE_DEFAULT = 0, KASAN_ARG_STACKTRACE_OFF = 1, KASAN_ARG_STACKTRACE_ON = 2, }; enum kfence_object_state { KFENCE_OBJECT_UNUSED = 0, KFENCE_OBJECT_ALLOCATED = 1, KFENCE_OBJECT_FREED = 2, }; struct kfence_track { pid_t pid; int cpu; u64 ts_nsec; int num_stack_entries; unsigned long stack_entries[64]; }; struct kfence_metadata { struct list_head list; struct callback_head callback_head; raw_spinlock_t lock; enum kfence_object_state state; unsigned long addr; size_t size; struct kmem_cache *cache; unsigned long unprotected_page; struct kfence_track alloc_track; struct kfence_track free_track; u32 alloc_stack_hash; struct obj_cgroup *objcg; }; enum kfence_counter_id { KFENCE_COUNTER_ALLOCATED = 0, KFENCE_COUNTER_ALLOCS = 1, KFENCE_COUNTER_FREES = 2, 
KFENCE_COUNTER_ZOMBIES = 3, KFENCE_COUNTER_BUGS = 4, KFENCE_COUNTER_SKIP_INCOMPAT = 5, KFENCE_COUNTER_SKIP_CAPACITY = 6, KFENCE_COUNTER_SKIP_COVERED = 7, KFENCE_COUNTER_COUNT = 8, }; enum kfence_error_type { KFENCE_ERROR_OOB = 0, KFENCE_ERROR_UAF = 1, KFENCE_ERROR_CORRUPTION = 2, KFENCE_ERROR_INVALID = 3, KFENCE_ERROR_INVALID_FREE = 4, }; struct kcsan_scoped_access {}; typedef __kernel_long_t __kernel_ptrdiff_t; typedef __kernel_ptrdiff_t ptrdiff_t; enum bh_state_bits { BH_Uptodate = 0, BH_Dirty = 1, BH_Lock = 2, BH_Req = 3, BH_Mapped = 4, BH_New = 5, BH_Async_Read = 6, BH_Async_Write = 7, BH_Delay = 8, BH_Boundary = 9, BH_Write_EIO = 10, BH_Unwritten = 11, BH_Quiet = 12, BH_Meta = 13, BH_Prio = 14, BH_Defer_Completion = 15, BH_PrivateStart = 16, }; enum { PAGE_WAS_MAPPED = 1, PAGE_WAS_MLOCKED = 2, PAGE_OLD_STATES = 3, }; struct buffer_head; typedef void bh_end_io_t(struct buffer_head *, int); struct buffer_head { unsigned long b_state; struct buffer_head *b_this_page; union { struct page *b_page; struct folio *b_folio; }; sector_t b_blocknr; size_t b_size; char *b_data; struct block_device *b_bdev; bh_end_io_t *b_end_io; void *b_private; struct list_head b_assoc_buffers; struct address_space *b_assoc_map; atomic_t b_count; spinlock_t b_uptodate_lock; }; struct migrate_pages_stats { int nr_succeeded; int nr_failed_pages; int nr_thp_succeeded; int nr_thp_failed; int nr_thp_split; }; typedef void (*btf_trace_hugepage_set_pmd)(void *, unsigned long, unsigned long); typedef void (*btf_trace_hugepage_set_pud)(void *, unsigned long, unsigned long); typedef void (*btf_trace_hugepage_update_pmd)(void *, unsigned long, unsigned long, unsigned long, unsigned long); typedef void (*btf_trace_hugepage_update_pud)(void *, unsigned long, unsigned long, unsigned long, unsigned long); typedef void (*btf_trace_set_migration_pmd)(void *, unsigned long, unsigned long); typedef void (*btf_trace_remove_migration_pmd)(void *, unsigned long, unsigned long); struct mthp_stat { unsigned long stats[50]; }; struct trace_event_raw_hugepage_set { struct trace_entry ent; unsigned long addr; unsigned long pte; char __data[0]; }; struct trace_event_raw_hugepage_update { struct trace_entry ent; unsigned long addr; unsigned long pte; unsigned long clr; unsigned long set; char __data[0]; }; struct trace_event_raw_migration_pmd { struct trace_entry ent; unsigned long addr; unsigned long pmd; char __data[0]; }; struct thpsize { struct kobject kobj; struct list_head node; int order; }; struct trace_event_data_offsets_hugepage_set {}; struct trace_event_data_offsets_hugepage_update {}; struct trace_event_data_offsets_migration_pmd {}; typedef void (*btf_trace_mm_khugepaged_scan_pmd)(void *, struct mm_struct *, struct page *, bool, int, int, int, int); typedef void (*btf_trace_mm_collapse_huge_page)(void *, struct mm_struct *, int, int); typedef void (*btf_trace_mm_collapse_huge_page_isolate)(void *, struct page *, int, int, bool, int); typedef void (*btf_trace_mm_collapse_huge_page_swapin)(void *, struct mm_struct *, int, int, int); typedef void (*btf_trace_mm_khugepaged_scan_file)(void *, struct mm_struct *, struct page *, struct file *, int, int, int); typedef void (*btf_trace_mm_khugepaged_collapse_file)(void *, struct mm_struct *, struct page *, unsigned long, bool, unsigned long, struct file *, int, int); struct collapse_control { bool is_khugepaged; u32 node_load[1]; nodemask_t alloc_nmask; }; struct khugepaged_mm_slot; struct khugepaged_scan { struct list_head mm_head; struct khugepaged_mm_slot *mm_slot; unsigned long 
address; }; struct mm_slot { struct hlist_node hash; struct list_head mm_node; struct mm_struct *mm; }; struct khugepaged_mm_slot { struct mm_slot slot; }; enum scan_result { SCAN_FAIL = 0, SCAN_SUCCEED = 1, SCAN_PMD_NULL = 2, SCAN_PMD_NONE = 3, SCAN_PMD_MAPPED = 4, SCAN_EXCEED_NONE_PTE = 5, SCAN_EXCEED_SWAP_PTE = 6, SCAN_EXCEED_SHARED_PTE = 7, SCAN_PTE_NON_PRESENT = 8, SCAN_PTE_UFFD_WP = 9, SCAN_PTE_MAPPED_HUGEPAGE = 10, SCAN_PAGE_RO = 11, SCAN_LACK_REFERENCED_PAGE = 12, SCAN_PAGE_NULL = 13, SCAN_SCAN_ABORT = 14, SCAN_PAGE_COUNT = 15, SCAN_PAGE_LRU = 16, SCAN_PAGE_LOCK = 17, SCAN_PAGE_ANON = 18, SCAN_PAGE_COMPOUND = 19, SCAN_ANY_PROCESS = 20, SCAN_VMA_NULL = 21, SCAN_VMA_CHECK = 22, SCAN_ADDRESS_RANGE = 23, SCAN_DEL_PAGE_LRU = 24, SCAN_ALLOC_HUGE_PAGE_FAIL = 25, SCAN_CGROUP_CHARGE_FAIL = 26, SCAN_TRUNCATED = 27, SCAN_PAGE_HAS_PRIVATE = 28, SCAN_STORE_FAILED = 29, SCAN_COPY_MC = 30, SCAN_PAGE_FILLED = 31, }; struct trace_event_raw_mm_khugepaged_scan_pmd { struct trace_entry ent; struct mm_struct *mm; unsigned long pfn; bool writable; int referenced; int none_or_zero; int status; int unmapped; char __data[0]; }; struct trace_event_raw_mm_collapse_huge_page { struct trace_entry ent; struct mm_struct *mm; int isolated; int status; char __data[0]; }; struct trace_event_raw_mm_collapse_huge_page_isolate { struct trace_entry ent; unsigned long pfn; int none_or_zero; int referenced; bool writable; int status; char __data[0]; }; struct trace_event_raw_mm_collapse_huge_page_swapin { struct trace_entry ent; struct mm_struct *mm; int swapped_in; int referenced; int ret; char __data[0]; }; struct trace_event_raw_mm_khugepaged_scan_file { struct trace_entry ent; struct mm_struct *mm; unsigned long pfn; u32 __data_loc_filename; int present; int swap; int result; char __data[0]; }; struct trace_event_raw_mm_khugepaged_collapse_file { struct trace_entry ent; struct mm_struct *mm; unsigned long hpfn; unsigned long index; unsigned long addr; bool is_shmem; u32 __data_loc_filename; int nr; int result; char __data[0]; }; struct trace_event_data_offsets_mm_khugepaged_scan_file { u32 filename; }; struct trace_event_data_offsets_mm_khugepaged_collapse_file { u32 filename; }; struct trace_event_data_offsets_mm_khugepaged_scan_pmd {}; struct trace_event_data_offsets_mm_collapse_huge_page {}; struct trace_event_data_offsets_mm_collapse_huge_page_isolate {}; struct trace_event_data_offsets_mm_collapse_huge_page_swapin {}; struct memcg_vmstats { long state[49]; unsigned long events[17]; long state_local[49]; unsigned long events_local[17]; long state_pending[49]; unsigned long events_pending[17]; atomic64_t stats_updates; }; struct mem_cgroup_tree_per_node; struct mem_cgroup_tree { struct mem_cgroup_tree_per_node *rb_tree_per_node[1]; }; struct mem_cgroup_tree_per_node { struct rb_root rb_root; struct rb_node *rb_rightmost; spinlock_t lock; }; struct memory_stat { const char *name; unsigned int idx; }; struct move_charge_struct { spinlock_t lock; struct mm_struct *mm; struct mem_cgroup *from; struct mem_cgroup *to; unsigned long flags; unsigned long precharge; unsigned long moved_charge; unsigned long moved_swap; struct task_struct *moving_task; wait_queue_head_t waitq; }; struct memcg_stock_pcp { local_lock_t stock_lock; struct mem_cgroup *cached; unsigned int nr_pages; struct obj_cgroup *cached_objcg; struct pglist_data *cached_pgdat; unsigned int nr_bytes; int nr_slab_reclaimable_b; int nr_slab_unreclaimable_b; struct work_struct work; unsigned long flags; }; enum mem_cgroup_events_target { 
MEM_CGROUP_TARGET_THRESH = 0, MEM_CGROUP_TARGET_SOFTLIMIT = 1, MEM_CGROUP_NTARGETS = 2, }; enum mc_target_type { MC_TARGET_NONE = 0, MC_TARGET_PAGE = 1, MC_TARGET_SWAP = 2, MC_TARGET_DEVICE = 3, }; enum res_type { _MEM = 0, _MEMSWAP = 1, _KMEM = 2, _TCP = 3, }; enum { RES_USAGE = 0, RES_LIMIT = 1, RES_MAX_USAGE = 2, RES_FAILCNT = 3, RES_SOFT_LIMIT = 4, }; struct oom_wait_info { struct mem_cgroup *memcg; wait_queue_entry_t wait; }; struct mem_cgroup_eventfd_list { struct list_head list; struct eventfd_ctx *eventfd; }; struct mem_cgroup_event { struct mem_cgroup *memcg; struct eventfd_ctx *eventfd; struct list_head list; int (*register_event)(struct mem_cgroup *, struct eventfd_ctx *, const char *); void (*unregister_event)(struct mem_cgroup *, struct eventfd_ctx *); poll_table pt; wait_queue_head_t *wqh; wait_queue_entry_t wait; struct work_struct remove; }; struct uncharge_gather { struct mem_cgroup *memcg; unsigned long nr_memory; unsigned long pgpgout; unsigned long nr_kmem; int nid; }; union mc_target { struct folio *folio; swp_entry_t ent; }; enum vmpressure_levels { VMPRESSURE_LOW = 0, VMPRESSURE_MEDIUM = 1, VMPRESSURE_CRITICAL = 2, VMPRESSURE_NUM_LEVELS = 3, }; enum vmpressure_modes { VMPRESSURE_NO_PASSTHROUGH = 0, VMPRESSURE_HIERARCHY = 1, VMPRESSURE_LOCAL = 2, VMPRESSURE_NUM_MODES = 3, }; struct vmpressure_event { struct eventfd_ctx *efd; enum vmpressure_levels level; enum vmpressure_modes mode; struct list_head node; }; struct swap_cgroup_ctrl { struct page **map; unsigned long length; spinlock_t lock; }; struct swap_cgroup { unsigned short id; }; struct page_ext_operations { size_t offset; size_t size; bool (*need)(); void (*init)(); bool need_shared_flags; }; enum page_ext_flags { PAGE_EXT_OWNER = 0, PAGE_EXT_OWNER_ALLOCATED = 1, PAGE_EXT_PINNER_MIGRATION_FAILED = 2, }; struct page_owner { unsigned short order; short last_migrate_reason; gfp_t gfp_mask; depot_stack_handle_t handle; depot_stack_handle_t free_handle; u64 ts_nsec; u64 free_ts_nsec; char comm[16]; pid_t pid; pid_t tgid; pid_t free_pid; pid_t free_tgid; }; struct cleancache_filekey; struct cleancache_ops { int (*init_fs)(size_t); int (*init_shared_fs)(uuid_t *, size_t); int (*get_page)(int, struct cleancache_filekey, unsigned long, struct page *); void (*put_page)(int, struct cleancache_filekey, unsigned long, struct page *); void (*invalidate_page)(int, struct cleancache_filekey, unsigned long); void (*invalidate_inode)(int, struct cleancache_filekey); void (*invalidate_fs)(int); }; struct cleancache_filekey { union { ino_t ino; __u32 fh[6]; u32 key[6]; } u; }; struct captured_pinner; struct page_pinner_buffer { spinlock_t lock; unsigned long index; struct captured_pinner *buffer; }; enum pp_state { PP_PUT = 0, PP_FREE = 1, PP_FAIL_DETECTED = 2, }; struct captured_pinner { depot_stack_handle_t handle; union { u64 ts_usec; u64 elapsed; }; unsigned long pfn; int count; int mapcount; struct address_space *mapping; unsigned long flags; enum pp_state state; }; struct page_pinner { depot_stack_handle_t handle; u64 ts_usec; atomic_t count; }; typedef void (*btf_trace_test_pages_isolated)(void *, unsigned long, unsigned long, unsigned long); struct trace_event_raw_test_pages_isolated { struct trace_entry ent; unsigned long start_pfn; unsigned long end_pfn; unsigned long fin_pfn; char __data[0]; }; struct trace_event_data_offsets_test_pages_isolated {}; typedef void (*btf_trace_cma_release)(void *, const char *, unsigned long, const struct page *, unsigned long); typedef void (*btf_trace_cma_alloc_start)(void *, const char 
*, unsigned long, unsigned int); typedef void (*btf_trace_cma_alloc_finish)(void *, const char *, unsigned long, const struct page *, unsigned long, unsigned int, int); typedef void (*btf_trace_cma_alloc_busy_retry)(void *, const char *, unsigned long, const struct page *, unsigned long, unsigned int); struct trace_event_raw_cma_release { struct trace_entry ent; u32 __data_loc_name; unsigned long pfn; const struct page *page; unsigned long count; char __data[0]; }; struct trace_event_raw_cma_alloc_start { struct trace_entry ent; u32 __data_loc_name; unsigned long count; unsigned int align; char __data[0]; }; struct trace_event_raw_cma_alloc_finish { struct trace_entry ent; u32 __data_loc_name; unsigned long pfn; const struct page *page; unsigned long count; unsigned int align; int errorno; char __data[0]; }; struct trace_event_raw_cma_alloc_busy_retry { struct trace_entry ent; u32 __data_loc_name; unsigned long pfn; const struct page *page; unsigned long count; unsigned int align; char __data[0]; }; struct trace_event_data_offsets_cma_release { u32 name; }; struct trace_event_data_offsets_cma_alloc_start { u32 name; }; struct trace_event_data_offsets_cma_alloc_finish { u32 name; }; struct trace_event_data_offsets_cma_alloc_busy_retry { u32 name; }; struct balloon_dev_info { unsigned long isolated_pages; spinlock_t pages_lock; struct list_head pages; int (*migratepage)(struct balloon_dev_info *, struct page *, struct page *, enum migrate_mode); }; struct cma_mem { struct hlist_node node; struct page *p; unsigned long n; }; enum { BAD_STACK = -1, NOT_STACK = 0, GOOD_FRAME = 1, GOOD_STACK = 2, }; struct page_reporting_dev_info { int (*report)(struct page_reporting_dev_info *, struct scatterlist *, unsigned int); struct delayed_work work; atomic_t state; unsigned int order; }; enum { PAGE_REPORTING_IDLE = 0, PAGE_REPORTING_REQUESTED = 1, PAGE_REPORTING_ACTIVE = 2, }; struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; }; struct nlm_lockowner; struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; }; struct nfs4_lock_state; struct nfs4_lock_info { struct nfs4_lock_state *owner; }; struct file_lock_operations; struct lock_manager_operations; struct file_lock { struct file_lock *fl_blocker; struct list_head fl_list; struct hlist_node fl_link; struct list_head fl_blocked_requests; struct list_head fl_blocked_member; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct { struct list_head link; int state; unsigned int debug_id; } afs; struct { struct inode *inode; } ceph; } fl_u; }; struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); }; struct lock_manager_operations { void *lm_mod_owner; fl_owner_t (*lm_get_owner)(fl_owner_t); void (*lm_put_owner)(fl_owner_t); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); bool 
(*lm_breaker_owns_lease)(struct file_lock *); bool (*lm_lock_expirable)(struct file_lock *); void (*lm_expire_lock)(); }; enum fsnotify_data_type { FSNOTIFY_EVENT_NONE = 0, FSNOTIFY_EVENT_PATH = 1, FSNOTIFY_EVENT_INODE = 2, FSNOTIFY_EVENT_DENTRY = 3, FSNOTIFY_EVENT_ERROR = 4, }; typedef s32 compat_off_t; struct open_flags { int open_flag; umode_t mode; int acc_mode; int intent; int lookup_flags; }; typedef __kernel_rwf_t rwf_t; typedef s64 compat_loff_t; struct files_stat_struct { unsigned long nr_files; unsigned long nr_free_files; unsigned long max_files; }; struct backing_file { struct file file; struct path real_path; }; struct fscrypt_policy_v1 { __u8 version; __u8 contents_encryption_mode; __u8 filenames_encryption_mode; __u8 flags; __u8 master_key_descriptor[8]; }; struct fscrypt_policy_v2 { __u8 version; __u8 contents_encryption_mode; __u8 filenames_encryption_mode; __u8 flags; __u8 log2_data_unit_size; __u8 __reserved[3]; __u8 master_key_identifier[16]; }; union fscrypt_policy { u8 version; struct fscrypt_policy_v1 v1; struct fscrypt_policy_v2 v2; }; struct char_device_struct { struct char_device_struct *next; unsigned int major; unsigned int baseminor; int minorct; char name[64]; struct cdev *cdev; }; typedef struct kobject *kobj_probe_t(dev_t, int *, void *); struct mount; struct mnt_namespace { struct ns_common ns; struct mount *root; struct list_head list; spinlock_t ns_lock; struct user_namespace *user_ns; struct ucounts *ucounts; u64 seq; wait_queue_head_t poll; u64 event; unsigned int mounts; unsigned int pending_mounts; }; struct mnt_pcp; struct mountpoint; struct mount { struct hlist_node mnt_hash; struct mount *mnt_parent; struct dentry *mnt_mountpoint; struct vfsmount mnt; union { struct callback_head mnt_rcu; struct llist_node mnt_llist; }; struct mnt_pcp __attribute__((btf_type_tag("percpu"))) *mnt_pcp; struct list_head mnt_mounts; struct list_head mnt_child; struct list_head mnt_instance; const char *mnt_devname; struct list_head mnt_list; struct list_head mnt_expire; struct list_head mnt_share; struct list_head mnt_slave_list; struct list_head mnt_slave; struct mount *mnt_master; struct mnt_namespace *mnt_ns; struct mountpoint *mnt_mp; union { struct hlist_node mnt_mp_list; struct hlist_node mnt_umount; }; struct list_head mnt_umounting; struct fsnotify_mark_connector __attribute__((btf_type_tag("rcu"))) *mnt_fsnotify_marks; __u32 mnt_fsnotify_mask; int mnt_id; int mnt_group_id; int mnt_expiry_mark; struct hlist_head mnt_pins; struct hlist_head mnt_stuck_children; }; struct mnt_pcp { int mnt_count; int mnt_writers; }; struct mountpoint { struct hlist_node m_hash; struct dentry *m_dentry; struct hlist_head m_list; int m_count; }; struct stat { unsigned long st_dev; unsigned long st_ino; unsigned int st_mode; unsigned int st_nlink; unsigned int st_uid; unsigned int st_gid; unsigned long st_rdev; unsigned long __pad1; long st_size; int st_blksize; int __pad2; long st_blocks; long st_atime; unsigned long st_atime_nsec; long st_mtime; unsigned long st_mtime_nsec; long st_ctime; unsigned long st_ctime_nsec; unsigned int __unused4; unsigned int __unused5; }; typedef s64 compat_s64; struct stat64 { compat_u64 st_dev; unsigned char __pad0[4]; compat_ulong_t __st_ino; compat_uint_t st_mode; compat_uint_t st_nlink; compat_ulong_t st_uid; compat_ulong_t st_gid; compat_u64 st_rdev; unsigned char __pad3[4]; compat_s64 st_size; compat_ulong_t st_blksize; compat_u64 st_blocks; compat_ulong_t st_atime; compat_ulong_t st_atime_nsec; compat_ulong_t st_mtime; compat_ulong_t 
st_mtime_nsec; compat_ulong_t st_ctime; compat_ulong_t st_ctime_nsec; compat_u64 st_ino; }; struct statx_timestamp { __s64 tv_sec; __u32 tv_nsec; __s32 __reserved; }; struct statx { __u32 stx_mask; __u32 stx_blksize; __u64 stx_attributes; __u32 stx_nlink; __u32 stx_uid; __u32 stx_gid; __u16 stx_mode; __u16 __spare0[1]; __u64 stx_ino; __u64 stx_size; __u64 stx_blocks; __u64 stx_attributes_mask; struct statx_timestamp stx_atime; struct statx_timestamp stx_btime; struct statx_timestamp stx_ctime; struct statx_timestamp stx_mtime; __u32 stx_rdev_major; __u32 stx_rdev_minor; __u32 stx_dev_major; __u32 stx_dev_minor; __u64 stx_mnt_id; __u32 stx_dio_mem_align; __u32 stx_dio_offset_align; __u64 __spare3[12]; }; typedef u32 compat_dev_t; typedef u32 compat_ino_t; typedef u16 compat_mode_t; typedef u16 compat_ushort_t; typedef u16 __compat_uid16_t; typedef u16 __compat_gid16_t; struct compat_stat { compat_dev_t st_dev; compat_ino_t st_ino; compat_mode_t st_mode; compat_ushort_t st_nlink; __compat_uid16_t st_uid; __compat_gid16_t st_gid; compat_dev_t st_rdev; compat_off_t st_size; compat_off_t st_blksize; compat_off_t st_blocks; old_time32_t st_atime; compat_ulong_t st_atime_nsec; old_time32_t st_mtime; compat_ulong_t st_mtime_nsec; old_time32_t st_ctime; compat_ulong_t st_ctime_nsec; compat_ulong_t __unused4[2]; }; typedef unsigned short ushort; struct user_arg_ptr { bool is_compat; union { const char __attribute__((btf_type_tag("user"))) * const __attribute__((btf_type_tag("user"))) *native; const compat_uptr_t __attribute__((btf_type_tag("user"))) *compat; } ptr; }; enum inode_i_mutex_lock_class { I_MUTEX_NORMAL = 0, I_MUTEX_PARENT = 1, I_MUTEX_CHILD = 2, I_MUTEX_XATTR = 3, I_MUTEX_NONDIR2 = 4, I_MUTEX_PARENT2 = 5, }; struct saved { struct path link; struct delayed_call done; const char *name; unsigned int seq; }; struct nameidata { struct path path; struct qstr last; struct path root; struct inode *inode; unsigned int flags; unsigned int state; unsigned int seq; unsigned int next_seq; unsigned int m_seq; unsigned int r_seq; int last_type; unsigned int depth; int total_link_count; struct saved *stack; struct saved internal[2]; struct filename *name; struct nameidata *saved; unsigned int root_seq; int dfd; vfsuid_t dir_vfsuid; umode_t dir_mode; }; enum { LAST_NORM = 0, LAST_ROOT = 1, LAST_DOT = 2, LAST_DOTDOT = 3, }; enum { WALK_TRAILING = 1, WALK_MORE = 2, WALK_NOFOLLOW = 4, }; struct word_at_a_time { const unsigned long one_bits; const unsigned long high_bits; }; struct name_snapshot { struct qstr name; unsigned char inline_name[32]; }; struct renamedata { struct mnt_idmap *old_mnt_idmap; struct inode *old_dir; struct dentry *old_dentry; struct mnt_idmap *new_mnt_idmap; struct inode *new_dir; struct dentry *new_dentry; struct inode **delegated_inode; unsigned int flags; }; struct f_owner_ex { int type; __kernel_pid_t pid; }; struct flock { short l_type; short l_whence; __kernel_off_t l_start; __kernel_off_t l_len; __kernel_pid_t l_pid; }; struct compat_flock64 { short l_type; short l_whence; compat_loff_t l_start; compat_loff_t l_len; compat_pid_t l_pid; }; struct compat_flock { short l_type; short l_whence; compat_off_t l_start; compat_off_t l_len; compat_pid_t l_pid; }; struct fiemap_extent; struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent __attribute__((btf_type_tag("user"))) *fi_extents_start; }; struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2]; __u32 
fe_flags; __u32 fe_reserved[3]; }; struct fsxattr { __u32 fsx_xflags; __u32 fsx_extsize; __u32 fsx_nextents; __u32 fsx_projid; __u32 fsx_cowextsize; unsigned char fsx_pad[8]; }; struct file_clone_range { __s64 src_fd; __u64 src_offset; __u64 src_length; __u64 dest_offset; }; struct fiemap { __u64 fm_start; __u64 fm_length; __u32 fm_flags; __u32 fm_mapped_extents; __u32 fm_extent_count; __u32 fm_reserved; struct fiemap_extent fm_extents[0]; }; struct file_dedupe_range_info { __s64 dest_fd; __u64 dest_offset; __u64 bytes_deduped; __s32 status; __u32 reserved; }; struct file_dedupe_range { __u64 src_offset; __u64 src_length; __u16 dest_count; __u16 reserved1; __u32 reserved2; struct file_dedupe_range_info info[0]; }; struct space_resv { __s16 l_type; __s16 l_whence; __s64 l_start; __s64 l_len; __s32 l_sysid; __u32 l_pid; __s32 l_pad[4]; }; struct linux_dirent { unsigned long d_ino; unsigned long d_off; unsigned short d_reclen; char d_name[0]; }; struct getdents_callback { struct dir_context ctx; struct linux_dirent __attribute__((btf_type_tag("user"))) *current_dir; int prev_reclen; int count; int error; }; struct linux_dirent64 { u64 d_ino; s64 d_off; unsigned short d_reclen; unsigned char d_type; char d_name[0]; }; struct getdents_callback64 { struct dir_context ctx; struct linux_dirent64 __attribute__((btf_type_tag("user"))) *current_dir; int prev_reclen; int count; int error; }; struct compat_old_linux_dirent { compat_ulong_t d_ino; compat_ulong_t d_offset; unsigned short d_namlen; char d_name[0]; }; struct compat_readdir_callback { struct dir_context ctx; struct compat_old_linux_dirent __attribute__((btf_type_tag("user"))) *dirent; int result; }; struct compat_linux_dirent { compat_ulong_t d_ino; compat_ulong_t d_off; unsigned short d_reclen; char d_name[0]; }; struct compat_getdents_callback { struct dir_context ctx; struct compat_linux_dirent __attribute__((btf_type_tag("user"))) *current_dir; int prev_reclen; int count; int error; }; enum poll_time_type { PT_TIMEVAL = 0, PT_OLD_TIMEVAL = 1, PT_TIMESPEC = 2, PT_OLD_TIMESPEC = 3, }; struct poll_table_entry { struct file *filp; __poll_t key; wait_queue_entry_t wait; wait_queue_head_t *wait_address; }; struct poll_table_page; struct poll_wqueues { poll_table pt; struct poll_table_page *table; struct task_struct *polling_task; int triggered; int error; int inline_index; struct poll_table_entry inline_entries[9]; }; struct poll_table_page { struct poll_table_page *next; struct poll_table_entry *entry; struct poll_table_entry entries[0]; }; typedef struct { unsigned long fds_bits[16]; } __kernel_fd_set; typedef __kernel_fd_set fd_set; struct poll_list { struct poll_list *next; int len; struct pollfd entries[0]; }; struct compat_sel_arg_struct { compat_ulong_t n; compat_uptr_t inp; compat_uptr_t outp; compat_uptr_t exp; compat_uptr_t tvp; }; typedef struct { unsigned long *in; unsigned long *out; unsigned long *ex; unsigned long *res_in; unsigned long *res_out; unsigned long *res_ex; } fd_set_bits; struct sigset_argpack { sigset_t __attribute__((btf_type_tag("user"))) *p; size_t size; }; struct compat_sigset_argpack { compat_uptr_t p; compat_size_t size; }; struct dentry_stat_t { long nr_dentry; long nr_unused; long age_limit; long want_pages; long nr_negative; long dummy; }; enum dentry_d_lock_class { DENTRY_D_LOCK_NORMAL = 0, DENTRY_D_LOCK_NESTED = 1, }; enum d_walk_ret { D_WALK_CONTINUE = 0, D_WALK_QUIT = 1, D_WALK_NORETRY = 2, D_WALK_SKIP = 3, }; struct external_name { union { atomic_t count; struct callback_head head; } u; unsigned char 
name[0]; }; struct check_mount { struct vfsmount *mnt; unsigned int mounted; }; struct select_data { struct dentry *start; union { long found; struct dentry *victim; }; struct list_head dispose; }; struct inodes_stat_t { long nr_inodes; long nr_unused; long dummy[5]; }; enum file_time_flags { S_ATIME = 1, S_MTIME = 2, S_CTIME = 4, S_VERSION = 8, }; enum umount_tree_flags { UMOUNT_SYNC = 1, UMOUNT_PROPAGATE = 2, UMOUNT_CONNECTED = 4, }; enum mnt_tree_flags_t { MNT_TREE_MOVE = 1, MNT_TREE_BENEATH = 2, }; struct mount_attr { __u64 attr_set; __u64 attr_clr; __u64 propagation; __u64 userns_fd; }; struct mount_kattr { unsigned int attr_set; unsigned int attr_clr; unsigned int propagation; unsigned int lookup_flags; bool recurse; struct user_namespace *mnt_userns; struct mnt_idmap *mnt_idmap; }; struct mnt_idmap { struct user_namespace *owner; refcount_t count; }; struct proc_mounts { struct mnt_namespace *ns; struct path root; int (*show)(struct seq_file *, struct vfsmount *); struct mount cursor; }; struct simple_xattr { struct rb_node rb_node; char *name; size_t size; char value[0]; }; struct xattr_name; struct xattr_ctx { union { const void __attribute__((btf_type_tag("user"))) *cvalue; void __attribute__((btf_type_tag("user"))) *value; }; void *kvalue; size_t size; struct xattr_name *kname; unsigned int flags; }; struct xattr_name { char name[256]; }; struct utf8data; struct utf8data_table; struct unicode_map { unsigned int version; const struct utf8data *ntab[2]; const struct utf8data_table *tables; }; struct utf8data { unsigned int maxage; unsigned int offset; }; struct utf8data_table { const unsigned int *utf8agetab; int utf8agetab_size; const struct utf8data *utf8nfdicfdata; int utf8nfdicfdata_size; const struct utf8data *utf8nfdidata; int utf8nfdidata_size; const unsigned char *utf8data; }; struct simple_transaction_argresp { ssize_t size; char data[0]; }; struct simple_attr { int (*get)(void *, u64 *); int (*set)(void *, u64); char get_buf[24]; char set_buf[24]; void *data; const char *fmt; struct mutex mutex; }; typedef void (*btf_trace_writeback_dirty_folio)(void *, struct folio *, struct address_space *); typedef void (*btf_trace_folio_wait_writeback)(void *, struct folio *, struct address_space *); typedef void (*btf_trace_writeback_mark_inode_dirty)(void *, struct inode *, int); typedef void (*btf_trace_writeback_dirty_inode_start)(void *, struct inode *, int); typedef void (*btf_trace_writeback_dirty_inode)(void *, struct inode *, int); typedef void (*btf_trace_inode_foreign_history)(void *, struct inode *, struct writeback_control *, unsigned int); typedef void (*btf_trace_inode_switch_wbs)(void *, struct inode *, struct bdi_writeback *, struct bdi_writeback *); typedef void (*btf_trace_track_foreign_dirty)(void *, struct folio *, struct bdi_writeback *); typedef void (*btf_trace_flush_foreign)(void *, struct bdi_writeback *, unsigned int, unsigned int); typedef void (*btf_trace_writeback_write_inode_start)(void *, struct inode *, struct writeback_control *); typedef void (*btf_trace_writeback_write_inode)(void *, struct inode *, struct writeback_control *); struct wb_writeback_work; typedef void (*btf_trace_writeback_queue)(void *, struct bdi_writeback *, struct wb_writeback_work *); struct wb_writeback_work { long nr_pages; struct super_block *sb; enum writeback_sync_modes sync_mode; unsigned int tagged_writepages: 1; unsigned int for_kupdate: 1; unsigned int range_cyclic: 1; unsigned int for_background: 1; unsigned int for_sync: 1; unsigned int auto_free: 1; enum wb_reason 
reason; struct list_head list; struct wb_completion *done; }; typedef void (*btf_trace_writeback_exec)(void *, struct bdi_writeback *, struct wb_writeback_work *); typedef void (*btf_trace_writeback_start)(void *, struct bdi_writeback *, struct wb_writeback_work *); typedef void (*btf_trace_writeback_written)(void *, struct bdi_writeback *, struct wb_writeback_work *); typedef void (*btf_trace_writeback_wait)(void *, struct bdi_writeback *, struct wb_writeback_work *); typedef void (*btf_trace_writeback_pages_written)(void *, long); typedef void (*btf_trace_writeback_wake_background)(void *, struct bdi_writeback *); typedef void (*btf_trace_writeback_bdi_register)(void *, struct backing_dev_info *); typedef void (*btf_trace_wbc_writepage)(void *, struct writeback_control *, struct backing_dev_info *); typedef void (*btf_trace_writeback_queue_io)(void *, struct bdi_writeback *, struct wb_writeback_work *, unsigned long, int); typedef void (*btf_trace_global_dirty_state)(void *, unsigned long, unsigned long); typedef void (*btf_trace_bdi_dirty_ratelimit)(void *, struct bdi_writeback *, unsigned long, unsigned long); typedef void (*btf_trace_balance_dirty_pages)(void *, struct bdi_writeback *, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, long, unsigned long); typedef void (*btf_trace_writeback_sb_inodes_requeue)(void *, struct inode *); typedef void (*btf_trace_writeback_single_inode_start)(void *, struct inode *, struct writeback_control *, unsigned long); typedef void (*btf_trace_writeback_single_inode)(void *, struct inode *, struct writeback_control *, unsigned long); typedef void (*btf_trace_writeback_lazytime)(void *, struct inode *); typedef void (*btf_trace_writeback_lazytime_iput)(void *, struct inode *); typedef void (*btf_trace_writeback_dirty_inode_enqueue)(void *, struct inode *); typedef void (*btf_trace_sb_mark_inode_writeback)(void *, struct inode *); typedef void (*btf_trace_sb_clear_inode_writeback)(void *, struct inode *); struct trace_event_raw_writeback_folio_template { struct trace_entry ent; char name[32]; ino_t ino; unsigned long index; char __data[0]; }; struct trace_event_raw_writeback_dirty_inode_template { struct trace_entry ent; char name[32]; ino_t ino; unsigned long state; unsigned long flags; char __data[0]; }; struct trace_event_raw_inode_foreign_history { struct trace_entry ent; char name[32]; ino_t ino; ino_t cgroup_ino; unsigned int history; char __data[0]; }; struct trace_event_raw_inode_switch_wbs { struct trace_entry ent; char name[32]; ino_t ino; ino_t old_cgroup_ino; ino_t new_cgroup_ino; char __data[0]; }; struct trace_event_raw_track_foreign_dirty { struct trace_entry ent; char name[32]; u64 bdi_id; ino_t ino; unsigned int memcg_id; ino_t cgroup_ino; ino_t page_cgroup_ino; char __data[0]; }; struct trace_event_raw_flush_foreign { struct trace_entry ent; char name[32]; ino_t cgroup_ino; unsigned int frn_bdi_id; unsigned int frn_memcg_id; char __data[0]; }; struct trace_event_raw_writeback_write_inode_template { struct trace_entry ent; char name[32]; ino_t ino; int sync_mode; ino_t cgroup_ino; char __data[0]; }; struct trace_event_raw_writeback_work_class { struct trace_entry ent; char name[32]; long nr_pages; dev_t sb_dev; int sync_mode; int for_kupdate; int range_cyclic; int for_background; int reason; ino_t cgroup_ino; char __data[0]; }; struct trace_event_raw_writeback_pages_written { struct trace_entry ent; long pages; char __data[0]; }; struct 
trace_event_raw_writeback_class { struct trace_entry ent; char name[32]; ino_t cgroup_ino; char __data[0]; }; struct trace_event_raw_writeback_bdi_register { struct trace_entry ent; char name[32]; char __data[0]; }; struct trace_event_raw_wbc_class { struct trace_entry ent; char name[32]; long nr_to_write; long pages_skipped; int sync_mode; int for_kupdate; int for_background; int for_reclaim; int range_cyclic; long range_start; long range_end; ino_t cgroup_ino; char __data[0]; }; struct trace_event_raw_writeback_queue_io { struct trace_entry ent; char name[32]; unsigned long older; long age; int moved; int reason; ino_t cgroup_ino; char __data[0]; }; struct trace_event_raw_global_dirty_state { struct trace_entry ent; unsigned long nr_dirty; unsigned long nr_writeback; unsigned long background_thresh; unsigned long dirty_thresh; unsigned long dirty_limit; unsigned long nr_dirtied; unsigned long nr_written; char __data[0]; }; struct trace_event_raw_bdi_dirty_ratelimit { struct trace_entry ent; char bdi[32]; unsigned long write_bw; unsigned long avg_write_bw; unsigned long dirty_rate; unsigned long dirty_ratelimit; unsigned long task_ratelimit; unsigned long balanced_dirty_ratelimit; ino_t cgroup_ino; char __data[0]; }; struct trace_event_raw_balance_dirty_pages { struct trace_entry ent; char bdi[32]; unsigned long limit; unsigned long setpoint; unsigned long dirty; unsigned long bdi_setpoint; unsigned long bdi_dirty; unsigned long dirty_ratelimit; unsigned long task_ratelimit; unsigned int dirtied; unsigned int dirtied_pause; unsigned long paused; long pause; unsigned long period; long think; ino_t cgroup_ino; char __data[0]; }; struct trace_event_raw_writeback_sb_inodes_requeue { struct trace_entry ent; char name[32]; ino_t ino; unsigned long state; unsigned long dirtied_when; ino_t cgroup_ino; char __data[0]; }; struct trace_event_raw_writeback_single_inode_template { struct trace_entry ent; char name[32]; ino_t ino; unsigned long state; unsigned long dirtied_when; unsigned long writeback_index; long nr_to_write; unsigned long wrote; ino_t cgroup_ino; char __data[0]; }; struct trace_event_raw_writeback_inode_template { struct trace_entry ent; dev_t dev; ino_t ino; unsigned long state; __u16 mode; unsigned long dirtied_when; char __data[0]; }; struct inode_switch_wbs_context { struct rcu_work work; struct bdi_writeback *new_wb; struct inode *inodes[0]; }; struct trace_event_data_offsets_writeback_folio_template {}; struct trace_event_data_offsets_writeback_dirty_inode_template {}; struct trace_event_data_offsets_inode_foreign_history {}; struct trace_event_data_offsets_inode_switch_wbs {}; struct trace_event_data_offsets_track_foreign_dirty {}; struct trace_event_data_offsets_flush_foreign {}; struct trace_event_data_offsets_writeback_write_inode_template {}; struct trace_event_data_offsets_writeback_work_class {}; struct trace_event_data_offsets_writeback_pages_written {}; struct trace_event_data_offsets_writeback_class {}; struct trace_event_data_offsets_writeback_bdi_register {}; struct trace_event_data_offsets_wbc_class {}; struct trace_event_data_offsets_writeback_queue_io {}; struct trace_event_data_offsets_global_dirty_state {}; struct trace_event_data_offsets_bdi_dirty_ratelimit {}; struct trace_event_data_offsets_balance_dirty_pages {}; struct trace_event_data_offsets_writeback_sb_inodes_requeue {}; struct trace_event_data_offsets_writeback_single_inode_template {}; struct trace_event_data_offsets_writeback_inode_template {}; struct splice_desc { size_t total_len; unsigned int 
len; unsigned int flags; union { void __attribute__((btf_type_tag("user"))) *userptr; struct file *file; void *data; } u; void (*splice_eof)(struct splice_desc *); loff_t pos; loff_t *opos; size_t num_spliced; bool need_wakeup; }; typedef int splice_actor(struct pipe_inode_info *, struct pipe_buffer *, struct splice_desc *); typedef int splice_direct_actor(struct pipe_inode_info *, struct splice_desc *); struct old_utimbuf32 { old_time32_t actime; old_time32_t modtime; }; struct prepend_buffer { char *buf; int len; }; struct statfs { __kernel_long_t f_type; __kernel_long_t f_bsize; __kernel_long_t f_blocks; __kernel_long_t f_bfree; __kernel_long_t f_bavail; __kernel_long_t f_files; __kernel_long_t f_ffree; __kernel_fsid_t f_fsid; __kernel_long_t f_namelen; __kernel_long_t f_frsize; __kernel_long_t f_flags; __kernel_long_t f_spare[4]; }; struct statfs64 { __kernel_long_t f_type; __kernel_long_t f_bsize; __u64 f_blocks; __u64 f_bfree; __u64 f_bavail; __u64 f_files; __u64 f_ffree; __kernel_fsid_t f_fsid; __kernel_long_t f_namelen; __kernel_long_t f_frsize; __kernel_long_t f_flags; __kernel_long_t f_spare[4]; }; typedef int __kernel_daddr_t; struct ustat { __kernel_daddr_t f_tfree; unsigned long f_tinode; char f_fname[6]; char f_fpack[6]; }; typedef __kernel_fsid_t compat_fsid_t; struct compat_statfs { int f_type; int f_bsize; int f_blocks; int f_bfree; int f_bavail; int f_files; int f_ffree; compat_fsid_t f_fsid; int f_namelen; int f_frsize; int f_flags; int f_spare[4]; }; typedef s32 compat_daddr_t; struct compat_ustat { compat_daddr_t f_tfree; compat_ino_t f_tinode; char f_fname[6]; char f_fpack[6]; }; struct fs_pin { wait_queue_head_t wait; int done; struct hlist_node s_list; struct hlist_node m_list; void (*kill)(struct fs_pin *); }; struct ns_get_path_task_args { const struct proc_ns_operations *ns_ops; struct task_struct *task; }; enum legacy_fs_param { LEGACY_FS_UNSET_PARAMS = 0, LEGACY_FS_MONOLITHIC_PARAMS = 1, LEGACY_FS_INDIVIDUAL_PARAMS = 2, }; struct legacy_fs_context { char *legacy_data; size_t data_size; enum legacy_fs_param param_type; }; enum fsconfig_command { FSCONFIG_SET_FLAG = 0, FSCONFIG_SET_STRING = 1, FSCONFIG_SET_BINARY = 2, FSCONFIG_SET_PATH = 3, FSCONFIG_SET_PATH_EMPTY = 4, FSCONFIG_SET_FD = 5, FSCONFIG_CMD_CREATE = 6, FSCONFIG_CMD_RECONFIGURE = 7, FSCONFIG_CMD_CREATE_EXCL = 8, }; struct iomap_ops { int (*iomap_begin)(struct inode *, loff_t, loff_t, unsigned int, struct iomap *, struct iomap *); int (*iomap_end)(struct inode *, loff_t, loff_t, ssize_t, unsigned int, struct iomap *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct bh_lru { struct buffer_head *bhs[16]; }; struct bh_accounting { int nr; int ratelimit; }; struct postprocess_bh_ctx { struct work_struct work; struct buffer_head *bh; }; struct dax_device; struct iomap_folio_ops; struct iomap { u64 addr; loff_t offset; u64 length; u16 type; u16 flags; struct block_device *bdev; struct dax_device *dax_dev; void *inline_data; void *private; const struct iomap_folio_ops *folio_ops; u64 validity_cookie; u64 android_kabi_reserved1; }; struct iomap_iter; struct iomap_folio_ops { struct folio * (*get_folio)(struct iomap_iter *, loff_t, unsigned int); void (*put_folio)(struct inode *, loff_t, unsigned int, struct folio *); bool (*iomap_valid)(struct inode *, const struct iomap *); }; struct iomap_iter { struct inode *inode; loff_t pos; u64 len; s64 processed; unsigned int flags; struct iomap iomap; struct iomap srcmap; void *private; }; typedef int get_block_t(struct inode *, sector_t, struct 
buffer_head *, int); struct folio_iter { struct folio *folio; size_t offset; size_t length; struct folio *_next; size_t _seg_count; int _i; }; struct mpage_readpage_args { struct bio *bio; struct folio *folio; unsigned int nr_pages; bool is_readahead; sector_t last_block_in_bio; struct buffer_head map_bh; unsigned long first_logical_block; get_block_t *get_block; }; struct mpage_data { struct bio *bio; sector_t last_block_in_bio; get_block_t *get_block; }; struct proc_fs_opts { int flag; const char *str; }; union proc_op { int (*proc_get_link)(struct dentry *, struct path *); int (*proc_show)(struct seq_file *, struct pid_namespace *, struct pid *, struct task_struct *); const char *lsm; }; struct proc_inode { struct pid *pid; unsigned int fd; union proc_op op; struct proc_dir_entry *pde; struct ctl_table_header *sysctl; struct ctl_table *sysctl_entry; struct hlist_node sibling_inodes; const struct proc_ns_operations *ns_ops; struct inode vfs_inode; }; typedef int (*proc_write_t)(struct file *, char *, size_t); typedef u32 nlink_t; struct proc_dir_entry { atomic_t in_use; refcount_t refcnt; struct list_head pde_openers; spinlock_t pde_unload_lock; struct completion *pde_unload_completion; const struct inode_operations *proc_iops; union { const struct proc_ops *proc_ops; const struct file_operations *proc_dir_ops; }; const struct dentry_operations *proc_dops; union { const struct seq_operations *seq_ops; int (*single_show)(struct seq_file *, void *); }; proc_write_t write; void *data; unsigned int state_size; unsigned int low_ino; nlink_t nlink; kuid_t uid; kgid_t gid; loff_t size; struct proc_dir_entry *parent; struct rb_root subdir; struct rb_node subdir_node; char *name; umode_t mode; u8 flags; u8 namelen; char inline_name[0]; }; enum { DIO_LOCKING = 1, DIO_SKIP_HOLES = 2, }; typedef int dio_iodone_t(struct kiocb *, loff_t, ssize_t, void *); struct dio { int flags; blk_opf_t opf; struct gendisk *bio_disk; struct inode *inode; loff_t i_size; dio_iodone_t *end_io; bool is_pinned; void *private; spinlock_t bio_lock; int page_errors; int is_async; bool defer_completion; bool should_dirty; int io_error; unsigned long refcount; struct bio *bio_list; struct task_struct *waiter; struct kiocb *iocb; ssize_t result; union { struct page *pages[64]; struct work_struct complete_work; }; long: 64; }; struct dio_submit { struct bio *bio; unsigned int blkbits; unsigned int blkfactor; unsigned int start_zero_done; int pages_in_io; sector_t block_in_file; unsigned int blocks_available; int reap_counter; sector_t final_block_in_request; int boundary; get_block_t *get_block; loff_t logical_offset_in_bio; sector_t final_block_in_bio; sector_t next_block_for_io; struct page *cur_page; unsigned int cur_page_offset; unsigned int cur_page_len; sector_t cur_page_block; loff_t cur_page_fs_offset; struct iov_iter *iter; unsigned int head; unsigned int tail; size_t from; size_t to; }; typedef unsigned int iov_iter_extraction_t; enum fsnotify_iter_type { FSNOTIFY_ITER_TYPE_INODE = 0, FSNOTIFY_ITER_TYPE_VFSMOUNT = 1, FSNOTIFY_ITER_TYPE_SB = 2, FSNOTIFY_ITER_TYPE_PARENT = 3, FSNOTIFY_ITER_TYPE_INODE2 = 4, FSNOTIFY_ITER_TYPE_COUNT = 5, }; struct fs_error_report { int error; struct inode *inode; struct super_block *sb; }; struct inotify_inode_mark { struct fsnotify_mark fsn_mark; int wd; }; struct file_handle { __u32 handle_bytes; int handle_type; unsigned char f_handle[0]; }; struct inotify_event_info { struct fsnotify_event fse; u32 mask; int wd; u32 sync_cookie; int name_len; char name[0]; }; struct inotify_event { 
__s32 wd; __u32 mask; __u32 cookie; __u32 len; char name[0]; }; struct epitem; struct eventpoll { struct mutex mtx; wait_queue_head_t wq; wait_queue_head_t poll_wait; struct list_head rdllist; rwlock_t lock; struct rb_root_cached rbr; struct epitem *ovflist; struct wakeup_source *ws; struct user_struct *user; struct file *file; u64 gen; struct hlist_head refs; refcount_t refcount; unsigned int napi_id; }; struct epoll_filefd { struct file *file; int fd; } __attribute__((packed)); struct epoll_event { __poll_t events; __u64 data; }; struct eppoll_entry; struct epitem { union { struct rb_node rbn; struct callback_head rcu; }; struct list_head rdllink; struct epitem *next; struct epoll_filefd ffd; bool dying; struct eppoll_entry *pwqlist; struct eventpoll *ep; struct hlist_node fllink; struct wakeup_source __attribute__((btf_type_tag("rcu"))) *ws; struct epoll_event event; }; struct eppoll_entry { struct eppoll_entry *next; struct epitem *base; wait_queue_entry_t wait; wait_queue_head_t *whead; }; struct epitems_head { struct hlist_head epitems; struct epitems_head *next; }; struct ep_pqueue { poll_table pt; struct epitem *epi; }; struct signalfd_siginfo { __u32 ssi_signo; __s32 ssi_errno; __s32 ssi_code; __u32 ssi_pid; __u32 ssi_uid; __s32 ssi_fd; __u32 ssi_tid; __u32 ssi_band; __u32 ssi_overrun; __u32 ssi_trapno; __s32 ssi_status; __s32 ssi_int; __u64 ssi_ptr; __u64 ssi_utime; __u64 ssi_stime; __u64 ssi_addr; __u16 ssi_addr_lsb; __u16 __pad2; __s32 ssi_syscall; __u64 ssi_call_addr; __u32 ssi_arch; __u8 __pad[28]; }; struct signalfd_ctx { sigset_t sigmask; }; struct timerfd_ctx { union { struct hrtimer tmr; struct alarm alarm; } t; ktime_t tintv; ktime_t moffs; wait_queue_head_t wqh; u64 ticks; int clockid; unsigned short expired; unsigned short settime_flags; struct callback_head rcu; struct list_head clist; spinlock_t cancel_lock; bool might_cancel; }; struct eventfd_ctx { struct kref kref; wait_queue_head_t wqh; __u64 count; unsigned int flags; int id; }; struct userfaultfd_fork_ctx { struct userfaultfd_ctx *orig; struct userfaultfd_ctx *new; struct list_head list; }; struct userfaultfd_unmap_ctx { struct userfaultfd_ctx *ctx; unsigned long start; unsigned long end; struct list_head list; }; struct uffd_msg { __u8 event; __u8 reserved1; __u16 reserved2; __u32 reserved3; union { struct { __u64 flags; __u64 address; union { __u32 ptid; } feat; } pagefault; struct { __u32 ufd; } fork; struct { __u64 from; __u64 to; __u64 len; } remap; struct { __u64 start; __u64 end; } remove; struct { __u64 reserved1; __u64 reserved2; __u64 reserved3; } reserved; } arg; }; struct userfaultfd_wait_queue { struct uffd_msg msg; wait_queue_entry_t wq; struct userfaultfd_ctx *ctx; bool waken; }; struct uffdio_range { __u64 start; __u64 len; }; struct uffdio_register { struct uffdio_range range; __u64 mode; __u64 ioctls; }; struct uffdio_copy { __u64 dst; __u64 src; __u64 len; __u64 mode; __s64 copy; }; struct uffdio_zeropage { struct uffdio_range range; __u64 mode; __s64 zeropage; }; struct uffdio_move { __u64 dst; __u64 src; __u64 len; __u64 mode; __s64 move; }; struct uffdio_writeprotect { struct uffdio_range range; __u64 mode; }; struct uffdio_continue { struct uffdio_range range; __u64 mode; __s64 mapped; }; struct uffdio_poison { struct uffdio_range range; __u64 mode; __s64 updated; }; struct uffdio_api { __u64 api; __u64 features; __u64 ioctls; }; struct userfaultfd_wake_range { unsigned long start; unsigned long len; }; struct kioctx_cpu; struct ctx_rq_wait; struct kioctx { struct percpu_ref users; 
atomic_t dead; struct percpu_ref reqs; unsigned long user_id; struct kioctx_cpu *cpu; unsigned int req_batch; unsigned int max_reqs; unsigned int nr_events; unsigned long mmap_base; unsigned long mmap_size; struct page **ring_pages; long nr_pages; struct rcu_work free_rwork; struct ctx_rq_wait *rq_wait; long: 64; struct { atomic_t reqs_available; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct { spinlock_t ctx_lock; struct list_head active_reqs; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct { struct mutex ring_lock; wait_queue_head_t wait; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct { unsigned int tail; unsigned int completed_events; spinlock_t completion_lock; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct page *internal_pages[8]; struct file *aio_ring_file; unsigned int id; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct kioctx_cpu { unsigned int reqs_available; }; struct ctx_rq_wait { struct completion comp; atomic_t count; }; enum { IOCB_CMD_PREAD = 0, IOCB_CMD_PWRITE = 1, IOCB_CMD_FSYNC = 2, IOCB_CMD_FDSYNC = 3, IOCB_CMD_POLL = 5, IOCB_CMD_NOOP = 6, IOCB_CMD_PREADV = 7, IOCB_CMD_PWRITEV = 8, }; struct fsync_iocb { struct file *file; struct work_struct work; bool datasync; struct cred *creds; }; struct poll_iocb { struct file *file; struct wait_queue_head *head; __poll_t events; bool cancelled; bool work_scheduled; bool work_need_resched; struct wait_queue_entry wait; struct work_struct work; }; typedef int kiocb_cancel_fn(struct kiocb *); struct io_event { __u64 data; __u64 obj; __s64 res; __s64 res2; }; struct aio_kiocb { union { struct file *ki_filp; struct kiocb rw; struct fsync_iocb fsync; struct poll_iocb poll; }; struct kioctx *ki_ctx; kiocb_cancel_fn *ki_cancel; struct io_event ki_res; struct list_head ki_list; refcount_t ki_refcnt; struct eventfd_ctx *ki_eventfd; }; typedef __kernel_ulong_t aio_context_t; struct iocb { __u64 aio_data; __u32 aio_key; __kernel_rwf_t aio_rw_flags; __u16 aio_lio_opcode; __s16 aio_reqprio; __u32 aio_fildes; __u64 aio_buf; __u64 aio_nbytes; __s64 aio_offset; __u64 aio_reserved2; __u32 aio_flags; __u32 aio_resfd; }; struct aio_poll_table { struct poll_table_struct pt; struct aio_kiocb *iocb; bool queued; int error; }; typedef u32 compat_aio_context_t; struct __aio_sigset { const sigset_t __attribute__((btf_type_tag("user"))) *sigmask; size_t sigsetsize; }; struct __compat_aio_sigset { compat_uptr_t sigmask; compat_size_t sigsetsize; }; struct aio_ring { unsigned int id; unsigned int nr; unsigned int head; unsigned int tail; unsigned int magic; unsigned int compat_features; unsigned int incompat_features; unsigned int header_length; struct io_event io_events[0]; }; struct fscrypt_prepared_key { struct crypto_skcipher *tfm; struct blk_crypto_key *blk_key; }; struct fscrypt_mode; struct fscrypt_master_key; struct fscrypt_direct_key; struct fscrypt_inode_info { struct fscrypt_prepared_key ci_enc_key; bool ci_owns_key; bool ci_inlinecrypt; u8 ci_data_unit_bits; u8 ci_data_units_per_block_bits; struct fscrypt_mode *ci_mode; struct inode *ci_inode; struct fscrypt_master_key *ci_master_key; struct list_head ci_master_key_link; struct fscrypt_direct_key *ci_direct_key; siphash_key_t ci_dirhash_key; bool ci_dirhash_key_initialized; union fscrypt_policy ci_policy; u8 ci_nonce[16]; u32 ci_hashed_ino; }; struct fscrypt_mode { const char *friendly_name; const char *cipher_str; int keysize; int security_strength; int ivsize; int 
logged_cryptoapi_impl; int logged_blk_crypto_native; int logged_blk_crypto_fallback; enum blk_crypto_mode_num blk_crypto_mode; }; struct fscrypt_hkdf { struct crypto_shash *hmac_tfm; }; struct fscrypt_master_key_secret { struct fscrypt_hkdf hkdf; bool is_hw_wrapped; u32 size; u8 raw[128]; }; struct fscrypt_key_specifier { __u32 type; __u32 __reserved; union { __u8 __reserved[32]; __u8 descriptor[8]; __u8 identifier[16]; } u; }; struct fscrypt_master_key { struct hlist_node mk_node; struct rw_semaphore mk_sem; refcount_t mk_active_refs; refcount_t mk_struct_refs; struct callback_head mk_rcu_head; struct fscrypt_master_key_secret mk_secret; struct fscrypt_key_specifier mk_spec; struct key *mk_users; struct list_head mk_decrypted_inodes; spinlock_t mk_decrypted_inodes_lock; struct fscrypt_prepared_key mk_direct_keys[11]; struct fscrypt_prepared_key mk_iv_ino_lblk_64_keys[11]; struct fscrypt_prepared_key mk_iv_ino_lblk_32_keys[11]; siphash_key_t mk_ino_hash_key; bool mk_ino_hash_key_initialized; bool mk_present; }; struct crypto_wait { struct completion completion; int err; }; union fscrypt_iv { struct { __le64 index; u8 nonce[16]; }; u8 raw[32]; __le64 dun[4]; }; typedef enum { FS_DECRYPT = 0, FS_ENCRYPT = 1, } fscrypt_direction_t; struct fscrypt_str { unsigned char *name; u32 len; }; struct fscrypt_nokey_name { u32 dirhash[2]; u8 bytes[149]; u8 sha256[32]; }; struct fscrypt_name { const struct qstr *usr_fname; struct fscrypt_str disk_name; u32 hash; u32 minor_hash; struct fscrypt_str crypto_buf; bool is_nokey_name; }; struct fscrypt_symlink_data { __le16 len; char encrypted_path[0]; }; struct fscrypt_keyring { spinlock_t lock; struct hlist_head key_hashtable[128]; }; enum key_state { KEY_IS_UNINSTANTIATED = 0, KEY_IS_POSITIVE = 1, }; struct fscrypt_add_key_arg { struct fscrypt_key_specifier key_spec; __u32 raw_size; __u32 key_id; __u32 __reserved[7]; __u32 __flags; __u8 raw[0]; }; struct fscrypt_provisioning_key_payload { __u32 type; __u32 __reserved; __u8 raw[0]; }; struct fscrypt_remove_key_arg { struct fscrypt_key_specifier key_spec; __u32 removal_status_flags; __u32 __reserved[5]; }; struct fscrypt_get_key_status_arg { struct fscrypt_key_specifier key_spec; __u32 __reserved[6]; __u32 status; __u32 status_flags; __u32 user_count; __u32 __out_reserved[13]; }; struct fscrypt_context_v1 { u8 version; u8 contents_encryption_mode; u8 filenames_encryption_mode; u8 flags; u8 master_key_descriptor[8]; u8 nonce[16]; }; struct fscrypt_context_v2 { u8 version; u8 contents_encryption_mode; u8 filenames_encryption_mode; u8 flags; u8 log2_data_unit_size; u8 __reserved[3]; u8 master_key_identifier[16]; u8 nonce[16]; }; union fscrypt_context { u8 version; struct fscrypt_context_v1 v1; struct fscrypt_context_v2 v2; }; struct fscrypt_direct_key { struct super_block *dk_sb; struct hlist_node dk_node; refcount_t dk_refcount; const struct fscrypt_mode *dk_mode; struct fscrypt_prepared_key dk_key; u8 dk_descriptor[8]; u8 dk_raw[64]; }; struct fscrypt_key { __u32 mode; __u8 raw[64]; __u32 size; }; struct user_key_payload { struct callback_head rcu; unsigned short datalen; long: 0; char data[0]; }; struct fscrypt_get_policy_ex_arg { __u64 policy_size; union { __u8 version; struct fscrypt_policy_v1 v1; struct fscrypt_policy_v2 v2; } policy; }; struct fscrypt_dummy_policy { const union fscrypt_policy *policy; }; enum hash_algo { HASH_ALGO_MD4 = 0, HASH_ALGO_MD5 = 1, HASH_ALGO_SHA1 = 2, HASH_ALGO_RIPE_MD_160 = 3, HASH_ALGO_SHA256 = 4, HASH_ALGO_SHA384 = 5, HASH_ALGO_SHA512 = 6, HASH_ALGO_SHA224 = 7, 
HASH_ALGO_RIPE_MD_128 = 8, HASH_ALGO_RIPE_MD_256 = 9, HASH_ALGO_RIPE_MD_320 = 10, HASH_ALGO_WP_256 = 11, HASH_ALGO_WP_384 = 12, HASH_ALGO_WP_512 = 13, HASH_ALGO_TGR_128 = 14, HASH_ALGO_TGR_160 = 15, HASH_ALGO_TGR_192 = 16, HASH_ALGO_SM3_256 = 17, HASH_ALGO_STREEBOG_256 = 18, HASH_ALGO_STREEBOG_512 = 19, HASH_ALGO__LAST = 20, }; struct fsverity_hash_alg; struct merkle_tree_params { const struct fsverity_hash_alg *hash_alg; const u8 *hashstate; unsigned int digest_size; unsigned int block_size; unsigned int hashes_per_block; unsigned int blocks_per_page; u8 log_digestsize; u8 log_blocksize; u8 log_arity; u8 log_blocks_per_page; unsigned int num_levels; u64 tree_size; unsigned long tree_pages; unsigned long level_start[8]; }; struct fsverity_info { struct merkle_tree_params tree_params; u8 root_hash[64]; u8 file_digest[64]; const struct inode *inode; unsigned long *hash_block_verified; }; struct fsverity_hash_alg { struct crypto_shash *tfm; const char *name; unsigned int digest_size; unsigned int block_size; enum hash_algo algo_id; int mb_max_msgs; }; struct block_buffer { u32 filled; bool is_root_hash; u8 *data; }; struct fsverity_descriptor { __u8 version; __u8 hash_algorithm; __u8 log_blocksize; __u8 salt_size; __le32 sig_size; __le64 data_size; __u8 root_hash[64]; __u8 salt[32]; __u8 __reserved[144]; __u8 signature[0]; }; struct fsverity_enable_arg { __u32 version; __u32 hash_algorithm; __u32 block_size; __u32 salt_size; __u64 salt_ptr; __u32 sig_size; __u32 __reserved1; __u64 sig_ptr; __u64 __reserved2[11]; }; struct fsverity_digest { __u16 digest_algorithm; __u16 digest_size; __u8 digest[0]; }; struct fsverity_read_metadata_arg { __u64 metadata_type; __u64 offset; __u64 length; __u64 buf_ptr; __u64 __reserved; }; struct fsverity_pending_block { const void *data; u64 pos; u8 real_hash[64]; }; struct fsverity_verification_context { struct inode *inode; struct fsverity_info *vi; unsigned long max_ra_pages; int num_pending; struct fsverity_pending_block pending_blocks[2]; }; typedef void (*btf_trace_locks_get_lock_context)(void *, struct inode *, int, struct file_lock_context *); typedef void (*btf_trace_posix_lock_inode)(void *, struct inode *, struct file_lock *, int); typedef void (*btf_trace_fcntl_setlk)(void *, struct inode *, struct file_lock *, int); typedef void (*btf_trace_locks_remove_posix)(void *, struct inode *, struct file_lock *, int); typedef void (*btf_trace_flock_lock_inode)(void *, struct inode *, struct file_lock *, int); typedef void (*btf_trace_break_lease_noblock)(void *, struct inode *, struct file_lock *); typedef void (*btf_trace_break_lease_block)(void *, struct inode *, struct file_lock *); typedef void (*btf_trace_break_lease_unblock)(void *, struct inode *, struct file_lock *); typedef void (*btf_trace_generic_delete_lease)(void *, struct inode *, struct file_lock *); typedef void (*btf_trace_time_out_leases)(void *, struct inode *, struct file_lock *); typedef void (*btf_trace_generic_add_lease)(void *, struct inode *, struct file_lock *); typedef void (*btf_trace_leases_conflict)(void *, bool, struct file_lock *, struct file_lock *); struct file_lock_list_struct { spinlock_t lock; struct hlist_head hlist; }; struct trace_event_raw_locks_get_lock_context { struct trace_entry ent; unsigned long i_ino; dev_t s_dev; unsigned char type; struct file_lock_context *ctx; char __data[0]; }; struct trace_event_raw_filelock_lock { struct trace_entry ent; struct file_lock *fl; unsigned long i_ino; dev_t s_dev; struct file_lock *fl_blocker; fl_owner_t fl_owner; unsigned 
int fl_pid; unsigned int fl_flags; unsigned char fl_type; loff_t fl_start; loff_t fl_end; int ret; char __data[0]; }; struct trace_event_raw_filelock_lease { struct trace_entry ent; struct file_lock *fl; unsigned long i_ino; dev_t s_dev; struct file_lock *fl_blocker; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned long fl_break_time; unsigned long fl_downgrade_time; char __data[0]; }; struct trace_event_raw_generic_add_lease { struct trace_entry ent; unsigned long i_ino; int wcount; int rcount; int icount; dev_t s_dev; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; char __data[0]; }; struct trace_event_raw_leases_conflict { struct trace_entry ent; void *lease; void *breaker; unsigned int l_fl_flags; unsigned int b_fl_flags; unsigned char l_fl_type; unsigned char b_fl_type; bool conflict; char __data[0]; }; struct flock64 { short l_type; short l_whence; __kernel_loff_t l_start; __kernel_loff_t l_len; __kernel_pid_t l_pid; }; struct trace_event_data_offsets_locks_get_lock_context {}; struct trace_event_data_offsets_filelock_lock {}; struct trace_event_data_offsets_filelock_lease {}; struct trace_event_data_offsets_generic_add_lease {}; struct trace_event_data_offsets_leases_conflict {}; struct locks_iterator { int li_cpu; loff_t li_pos; }; enum { Enabled = 0, Magic = 1, }; typedef struct { struct list_head list; unsigned long flags; int offset; int size; char *magic; char *mask; const char *interpreter; char *name; struct dentry *dentry; struct file *interp_file; } Node; struct gnu_property { u32 pr_type; u32 pr_datasz; }; struct elf64_note { Elf64_Word n_namesz; Elf64_Word n_descsz; Elf64_Word n_type; }; struct memelfnote { const char *name; int type; unsigned int datasz; void *data; }; struct elf_thread_core_info; struct elf_note_info { struct elf_thread_core_info *thread; struct memelfnote psinfo; struct memelfnote signote; struct memelfnote auxv; struct memelfnote files; siginfo_t csigdata; size_t size; int thread_notes; }; struct elf_siginfo { int si_signo; int si_code; int si_errno; }; struct elf_prstatus_common { struct elf_siginfo pr_info; short pr_cursig; unsigned long pr_sigpend; unsigned long pr_sighold; pid_t pr_pid; pid_t pr_ppid; pid_t pr_pgrp; pid_t pr_sid; struct __kernel_old_timeval pr_utime; struct __kernel_old_timeval pr_stime; struct __kernel_old_timeval pr_cutime; struct __kernel_old_timeval pr_cstime; }; typedef unsigned long elf_greg_t; typedef elf_greg_t elf_gregset_t[34]; struct elf_prstatus { struct elf_prstatus_common common; elf_gregset_t pr_reg; int pr_fpvalid; }; struct elf_thread_core_info { struct elf_thread_core_info *next; struct task_struct *task; struct elf_prstatus prstatus; struct memelfnote notes[0]; }; typedef unsigned int __kernel_uid_t; typedef unsigned int __kernel_gid_t; struct elf_prpsinfo { char pr_state; char pr_sname; char pr_zomb; char pr_nice; unsigned long pr_flag; __kernel_uid_t pr_uid; __kernel_gid_t pr_gid; pid_t pr_pid; pid_t pr_ppid; pid_t pr_pgrp; pid_t pr_sid; char pr_fname[16]; char pr_psargs[80]; }; struct elf32_phdr { Elf32_Word p_type; Elf32_Off p_offset; Elf32_Addr p_vaddr; Elf32_Addr p_paddr; Elf32_Word p_filesz; Elf32_Word p_memsz; Elf32_Word p_flags; Elf32_Word p_align; }; struct elf_thread_core_info___2; struct elf_note_info___2 { struct elf_thread_core_info___2 *thread; struct memelfnote psinfo; struct memelfnote signote; struct memelfnote auxv; struct memelfnote files; compat_siginfo_t csigdata; size_t size; int thread_notes; }; struct compat_elf_siginfo { compat_int_t 
si_signo; compat_int_t si_code; compat_int_t si_errno; }; struct compat_elf_prstatus_common { struct compat_elf_siginfo pr_info; short pr_cursig; compat_ulong_t pr_sigpend; compat_ulong_t pr_sighold; compat_pid_t pr_pid; compat_pid_t pr_ppid; compat_pid_t pr_pgrp; compat_pid_t pr_sid; struct old_timeval32 pr_utime; struct old_timeval32 pr_stime; struct old_timeval32 pr_cutime; struct old_timeval32 pr_cstime; }; typedef unsigned int compat_elf_greg_t; typedef compat_elf_greg_t compat_elf_gregset_t[18]; struct compat_elf_prstatus { struct compat_elf_prstatus_common common; compat_elf_gregset_t pr_reg; compat_int_t pr_fpvalid; }; struct elf_thread_core_info___2 { struct elf_thread_core_info___2 *next; struct task_struct *task; struct compat_elf_prstatus prstatus; struct memelfnote notes[0]; }; typedef u16 __compat_uid_t; typedef u16 __compat_gid_t; struct compat_elf_prpsinfo { char pr_state; char pr_sname; char pr_zomb; char pr_nice; compat_ulong_t pr_flag; __compat_uid_t pr_uid; __compat_gid_t pr_gid; compat_pid_t pr_pid; compat_pid_t pr_ppid; compat_pid_t pr_pgrp; compat_pid_t pr_sid; char pr_fname[16]; char pr_psargs[80]; }; struct elf32_shdr { Elf32_Word sh_name; Elf32_Word sh_type; Elf32_Word sh_flags; Elf32_Addr sh_addr; Elf32_Off sh_offset; Elf32_Word sh_size; Elf32_Word sh_link; Elf32_Word sh_info; Elf32_Word sh_addralign; Elf32_Word sh_entsize; }; enum { MBE_REFERENCED_B = 0, MBE_REUSABLE_B = 1, }; struct mb_cache_entry { struct list_head e_list; struct hlist_bl_node e_hash_list; atomic_t e_refcnt; u32 e_key; unsigned long e_flags; u64 e_value; }; struct mb_cache { struct hlist_bl_head *c_hash; int c_bucket_bits; unsigned long c_max_entries; spinlock_t c_list_lock; struct list_head c_list; unsigned long c_entry_count; struct shrinker c_shrink; struct work_struct c_shrink_work; }; struct posix_acl_xattr_header { __le32 a_version; }; struct posix_acl_xattr_entry { __le16 e_tag; __le16 e_perm; __le32 e_id; }; struct core_name { char *corename; int used; int size; }; typedef void (*btf_trace_iomap_readpage)(void *, struct inode *, int); typedef void (*btf_trace_iomap_readahead)(void *, struct inode *, int); typedef void (*btf_trace_iomap_writepage)(void *, struct inode *, loff_t, u64); typedef void (*btf_trace_iomap_release_folio)(void *, struct inode *, loff_t, u64); typedef void (*btf_trace_iomap_invalidate_folio)(void *, struct inode *, loff_t, u64); typedef void (*btf_trace_iomap_dio_invalidate_fail)(void *, struct inode *, loff_t, u64); typedef void (*btf_trace_iomap_dio_rw_queued)(void *, struct inode *, loff_t, u64); typedef void (*btf_trace_iomap_iter_dstmap)(void *, struct inode *, struct iomap *); typedef void (*btf_trace_iomap_iter_srcmap)(void *, struct inode *, struct iomap *); typedef void (*btf_trace_iomap_writepage_map)(void *, struct inode *, struct iomap *); typedef void (*btf_trace_iomap_iter)(void *, struct iomap_iter *, const void *, unsigned long); typedef void (*btf_trace_iomap_dio_rw_begin)(void *, struct kiocb *, struct iov_iter *, unsigned int, size_t); typedef void (*btf_trace_iomap_dio_complete)(void *, struct kiocb *, int, ssize_t); struct trace_event_raw_iomap_readpage_class { struct trace_entry ent; dev_t dev; u64 ino; int nr_pages; char __data[0]; }; struct trace_event_raw_iomap_range_class { struct trace_entry ent; dev_t dev; u64 ino; loff_t size; loff_t offset; u64 length; char __data[0]; }; struct trace_event_raw_iomap_class { struct trace_entry ent; dev_t dev; u64 ino; u64 addr; loff_t offset; u64 length; u16 type; u16 flags; dev_t bdev; char 
__data[0]; }; struct trace_event_raw_iomap_iter { struct trace_entry ent; dev_t dev; u64 ino; loff_t pos; u64 length; unsigned int flags; const void *ops; unsigned long caller; char __data[0]; }; struct trace_event_raw_iomap_dio_rw_begin { struct trace_entry ent; dev_t dev; ino_t ino; loff_t isize; loff_t pos; size_t count; size_t done_before; int ki_flags; unsigned int dio_flags; bool aio; char __data[0]; }; struct trace_event_raw_iomap_dio_complete { struct trace_entry ent; dev_t dev; ino_t ino; loff_t isize; loff_t pos; int ki_flags; bool aio; int error; ssize_t ret; char __data[0]; }; struct trace_event_data_offsets_iomap_readpage_class {}; struct trace_event_data_offsets_iomap_range_class {}; struct trace_event_data_offsets_iomap_class {}; struct trace_event_data_offsets_iomap_iter {}; struct trace_event_data_offsets_iomap_dio_rw_begin {}; struct trace_event_data_offsets_iomap_dio_complete {}; enum { BIOSET_NEED_BVECS = 1, BIOSET_NEED_RESCUER = 2, BIOSET_PERCPU_CACHE = 4, }; struct iomap_ioend { struct list_head io_list; u16 io_type; u16 io_flags; u32 io_folios; struct inode *io_inode; size_t io_size; loff_t io_offset; sector_t io_sector; struct bio *io_bio; struct bio io_inline_bio; }; struct iomap_readpage_ctx { struct folio *cur_folio; bool cur_folio_in_bio; struct bio *bio; struct readahead_control *rac; }; struct iomap_folio_state { atomic_t read_bytes_pending; atomic_t write_bytes_pending; spinlock_t state_lock; unsigned long state[0]; }; typedef int (*iomap_punch_t)(struct inode *, loff_t, loff_t); struct iomap_writeback_ops; struct iomap_writepage_ctx { struct iomap iomap; struct iomap_ioend *ioend; const struct iomap_writeback_ops *ops; }; struct iomap_writeback_ops { int (*map_blocks)(struct iomap_writepage_ctx *, struct inode *, loff_t); int (*prepare_ioend)(struct iomap_ioend *, int); void (*discard_folio)(struct folio *, loff_t); }; struct iomap_dio_ops; struct iomap_dio { struct kiocb *iocb; const struct iomap_dio_ops *dops; loff_t i_size; loff_t size; atomic_t ref; unsigned int flags; int error; size_t done_before; bool wait_for_completion; union { struct { struct iov_iter *iter; struct task_struct *waiter; } submit; struct { struct work_struct work; } aio; }; }; struct iomap_dio_ops { int (*end_io)(struct kiocb *, ssize_t, int, unsigned int); void (*submit_io)(const struct iomap_iter *, struct bio *, loff_t); struct bio_set *bio_set; }; struct iomap_swapfile_info { struct iomap iomap; struct swap_info_struct *sis; uint64_t lowest_ppage; uint64_t highest_ppage; unsigned long nr_pages; int nr_extents; struct file *file; }; struct dqstats { unsigned long stat[8]; struct percpu_counter counter[8]; }; struct quota_module_name { int qm_fmt_id; char *qm_mod_name; }; enum { DQF_INFO_DIRTY_B = 17, }; enum { DQST_LOOKUPS = 0, DQST_DROPS = 1, DQST_READS = 2, DQST_WRITES = 3, DQST_CACHE_HITS = 4, DQST_ALLOC_DQUOTS = 5, DQST_FREE_DQUOTS = 6, DQST_SYNCS = 7, _DQST_DQSTAT_LAST = 8, }; enum { DQF_ROOT_SQUASH_B = 0, DQF_SYS_FILE_B = 16, DQF_PRIVATE = 17, }; enum { QIF_BLIMITS_B = 0, QIF_SPACE_B = 1, QIF_ILIMITS_B = 2, QIF_INODES_B = 3, QIF_BTIME_B = 4, QIF_ITIME_B = 5, }; typedef __kernel_uid32_t qid_t; struct dquot_warn { struct super_block *w_sb; struct kqid w_dq_id; short w_type; }; struct qtree_fmt_operations { void (*mem2disk_dqblk)(void *, struct dquot *); void (*disk2mem_dqblk)(struct dquot *, void *); int (*is_id)(void *, struct dquot *); }; struct v2_disk_dqheader { __le32 dqh_magic; __le32 dqh_version; }; struct qtree_mem_dqinfo { struct super_block *dqi_sb; int dqi_type; 
unsigned int dqi_blocks; unsigned int dqi_free_blk; unsigned int dqi_free_entry; unsigned int dqi_blocksize_bits; unsigned int dqi_entry_size; unsigned int dqi_usable_bs; unsigned int dqi_qtree_depth; const struct qtree_fmt_operations *dqi_ops; }; struct v2_disk_dqinfo { __le32 dqi_bgrace; __le32 dqi_igrace; __le32 dqi_flags; __le32 dqi_blocks; __le32 dqi_free_blk; __le32 dqi_free_entry; }; struct v2r0_disk_dqblk { __le32 dqb_id; __le32 dqb_ihardlimit; __le32 dqb_isoftlimit; __le32 dqb_curinodes; __le32 dqb_bhardlimit; __le32 dqb_bsoftlimit; __le64 dqb_curspace; __le64 dqb_btime; __le64 dqb_itime; }; struct v2r1_disk_dqblk { __le32 dqb_id; __le32 dqb_pad; __le64 dqb_ihardlimit; __le64 dqb_isoftlimit; __le64 dqb_curinodes; __le64 dqb_bhardlimit; __le64 dqb_bsoftlimit; __le64 dqb_curspace; __le64 dqb_btime; __le64 dqb_itime; }; struct qt_disk_dqdbheader { __le32 dqdh_next_free; __le32 dqdh_prev_free; __le16 dqdh_entries; __le16 dqdh_pad1; __le32 dqdh_pad2; }; struct if_dqblk { __u64 dqb_bhardlimit; __u64 dqb_bsoftlimit; __u64 dqb_curspace; __u64 dqb_ihardlimit; __u64 dqb_isoftlimit; __u64 dqb_curinodes; __u64 dqb_btime; __u64 dqb_itime; __u32 dqb_valid; }; struct fs_qfilestat { __u64 qfs_ino; __u64 qfs_nblks; __u32 qfs_nextents; }; typedef struct fs_qfilestat fs_qfilestat_t; struct fs_quota_stat { __s8 qs_version; __u16 qs_flags; __s8 qs_pad; fs_qfilestat_t qs_uquota; fs_qfilestat_t qs_gquota; __u32 qs_incoredqs; __s32 qs_btimelimit; __s32 qs_itimelimit; __s32 qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; }; struct fs_qfilestatv { __u64 qfs_ino; __u64 qfs_nblks; __u32 qfs_nextents; __u32 qfs_pad; }; struct fs_quota_statv { __s8 qs_version; __u8 qs_pad1; __u16 qs_flags; __u32 qs_incoredqs; struct fs_qfilestatv qs_uquota; struct fs_qfilestatv qs_gquota; struct fs_qfilestatv qs_pquota; __s32 qs_btimelimit; __s32 qs_itimelimit; __s32 qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; __u16 qs_rtbwarnlimit; __u16 qs_pad3; __u32 qs_pad4; __u64 qs_pad2[7]; }; struct fs_disk_quota { __s8 d_version; __s8 d_flags; __u16 d_fieldmask; __u32 d_id; __u64 d_blk_hardlimit; __u64 d_blk_softlimit; __u64 d_ino_hardlimit; __u64 d_ino_softlimit; __u64 d_bcount; __u64 d_icount; __s32 d_itimer; __s32 d_btimer; __u16 d_iwarns; __u16 d_bwarns; __s8 d_itimer_hi; __s8 d_btimer_hi; __s8 d_rtbtimer_hi; __s8 d_padding2; __u64 d_rtb_hardlimit; __u64 d_rtb_softlimit; __u64 d_rtbcount; __s32 d_rtbtimer; __u16 d_rtbwarns; __s16 d_padding3; char d_padding4[8]; }; struct if_dqinfo { __u64 dqi_bgrace; __u64 dqi_igrace; __u32 dqi_flags; __u32 dqi_valid; }; struct if_nextdqblk { __u64 dqb_bhardlimit; __u64 dqb_bsoftlimit; __u64 dqb_curspace; __u64 dqb_ihardlimit; __u64 dqb_isoftlimit; __u64 dqb_curinodes; __u64 dqb_btime; __u64 dqb_itime; __u32 dqb_valid; __u32 dqb_id; }; enum clear_refs_types { CLEAR_REFS_ALL = 1, CLEAR_REFS_ANON = 2, CLEAR_REFS_MAPPED = 3, CLEAR_REFS_SOFT_DIRTY = 4, CLEAR_REFS_MM_HIWATER_RSS = 5, CLEAR_REFS_LAST = 6, }; struct proc_maps_private { struct inode *inode; struct task_struct *task; struct mm_struct *mm; struct vma_iterator iter; }; struct mem_size_stats { unsigned long resident; unsigned long shared_clean; unsigned long shared_dirty; unsigned long private_clean; unsigned long private_dirty; unsigned long referenced; unsigned long anonymous; unsigned long lazyfree; unsigned long anonymous_thp; unsigned long shmem_thp; unsigned long file_thp; unsigned long swap; unsigned long swap_shared; unsigned long writeback; unsigned long same; unsigned long huge; unsigned long shared_hugetlb; 
unsigned long private_hugetlb; unsigned long ksm; u64 pss; u64 pss_anon; u64 pss_file; u64 pss_shmem; u64 pss_dirty; u64 pss_locked; u64 swap_pss; }; typedef struct { u64 pme; } pagemap_entry_t; struct pagemapread { int pos; int len; pagemap_entry_t *buffer; bool show_pfn; }; struct clear_refs_private { enum clear_refs_types type; }; enum { BIAS = 2147483648, }; enum { PROC_ENTRY_PERMANENT = 1, }; struct pde_opener { struct list_head lh; struct file *file; bool closing; struct completion *c; }; enum proc_param { Opt_gid___2 = 0, Opt_hidepid = 1, Opt_subset = 2, }; struct proc_fs_context { struct pid_namespace *pid_ns; unsigned int mask; enum proc_hidepid hidepid; int gid; enum proc_pidonly pidonly; }; struct pid_entry { const char *name; unsigned int len; umode_t mode; const struct inode_operations *iop; const struct file_operations *fop; union proc_op op; }; struct limit_names { const char *name; const char *unit; }; struct map_files_info { unsigned long start; unsigned long end; fmode_t mode; }; struct syscall_info { __u64 sp; struct seccomp_data data; }; struct genradix_root; struct __genradix { struct genradix_root *root; }; struct tgid_iter { unsigned int tgid; struct task_struct *task; }; typedef struct dentry *instantiate_t(struct dentry *, struct task_struct *, const void *); struct fd_data { fmode_t mode; unsigned int fd; }; struct sysctl_alias { const char *kernel_param; const char *sysctl_param; }; struct seq_net_private { struct net *net; netns_tracker ns_tracker; }; struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct idr ino_idr; u32 last_id_lowbits; u32 id_highbits; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; struct rw_semaphore kernfs_rwsem; struct rw_semaphore kernfs_iattr_rwsem; struct rw_semaphore kernfs_supers_rwsem; struct callback_head rcu; }; struct kernfs_iattrs { kuid_t ia_uid; kgid_t ia_gid; struct timespec64 ia_atime; struct timespec64 ia_mtime; struct timespec64 ia_ctime; struct simple_xattrs xattrs; atomic_t nr_user_xattrs; atomic_t user_xattr_size; }; struct kernfs_global_locks { struct mutex open_file_mutex[1024]; }; struct kernfs_super_info { struct super_block *sb; struct kernfs_root *root; const void *ns; struct list_head node; }; enum kernfs_node_flag { KERNFS_ACTIVATED = 16, KERNFS_NS = 32, KERNFS_HAS_SEQ_SHOW = 64, KERNFS_HAS_MMAP = 128, KERNFS_LOCKDEP = 256, KERNFS_HIDDEN = 512, KERNFS_SUICIDAL = 1024, KERNFS_SUICIDED = 2048, KERNFS_EMPTY_DIR = 4096, KERNFS_HAS_RELEASE = 8192, KERNFS_REMOVING = 16384, }; struct configfs_fragment; struct configfs_dirent { atomic_t s_count; int s_dependent_count; struct list_head s_sibling; struct list_head s_children; int s_links; void *s_element; int s_type; umode_t s_mode; struct dentry *s_dentry; struct iattr *s_iattr; struct configfs_fragment *s_frag; }; struct configfs_fragment { atomic_t frag_count; struct rw_semaphore frag_sem; bool frag_dead; }; struct config_item; struct configfs_attribute { const char *ca_name; struct module *ca_owner; umode_t ca_mode; ssize_t (*show)(struct config_item *, char *); ssize_t (*store)(struct config_item *, const char *, size_t); }; struct config_group; struct config_item_type; struct config_item { char *ci_name; char ci_namebuf[20]; struct kref ci_kref; struct list_head ci_entry; struct config_item *ci_parent; struct config_group *ci_group; const struct config_item_type *ci_type; struct dentry *ci_dentry; }; struct configfs_subsystem; struct config_group { struct config_item cg_item; struct list_head 
cg_children; struct configfs_subsystem *cg_subsys; struct list_head default_groups; struct list_head group_entry; }; struct configfs_subsystem { struct config_group su_group; struct mutex su_mutex; }; struct configfs_item_operations; struct configfs_group_operations; struct configfs_bin_attribute; struct config_item_type { struct module *ct_owner; struct configfs_item_operations *ct_item_ops; struct configfs_group_operations *ct_group_ops; struct configfs_attribute **ct_attrs; struct configfs_bin_attribute **ct_bin_attrs; }; struct configfs_item_operations { void (*release)(struct config_item *); int (*allow_link)(struct config_item *, struct config_item *); void (*drop_link)(struct config_item *, struct config_item *); }; struct configfs_group_operations { struct config_item * (*make_item)(struct config_group *, const char *); struct config_group * (*make_group)(struct config_group *, const char *); void (*disconnect_notify)(struct config_group *, struct config_item *); void (*drop_item)(struct config_group *, struct config_item *); }; struct configfs_bin_attribute { struct configfs_attribute cb_attr; void *cb_private; size_t cb_max_size; ssize_t (*read)(struct config_item *, void *, size_t); ssize_t (*write)(struct config_item *, const void *, size_t); }; struct configfs_buffer { size_t count; loff_t pos; char *page; struct configfs_item_operations *ops; struct mutex mutex; int needs_read_fill; bool read_in_progress; bool write_in_progress; char *bin_buffer; int bin_buffer_size; int cb_max_size; struct config_item *item; struct module *owner; union { struct configfs_attribute *attr; struct configfs_bin_attribute *bin_attr; }; }; enum { Opt_uid___2 = 0, Opt_gid___3 = 1, Opt_mode___2 = 2, Opt_ptmxmode = 3, Opt_newinstance = 4, Opt_max = 5, Opt_err = 6, }; struct pts_mount_opts { int setuid; int setgid; kuid_t uid; kgid_t gid; umode_t mode; umode_t ptmxmode; int reserve; int max; }; struct pts_fs_info { struct ida allocated_ptys; struct pts_mount_opts mount_opts; struct super_block *sb; struct dentry *ptmx_dentry; }; typedef unsigned int tid_t; struct transaction_chp_stats_s { unsigned long cs_chp_time; __u32 cs_forced_to_close; __u32 cs_written; __u32 cs_dropped; }; struct journal_s; typedef struct journal_s journal_t; struct journal_head; struct transaction_s; typedef struct transaction_s transaction_t; struct transaction_s { journal_t *t_journal; tid_t t_tid; enum { T_RUNNING = 0, T_LOCKED = 1, T_SWITCH = 2, T_FLUSH = 3, T_COMMIT = 4, T_COMMIT_DFLUSH = 5, T_COMMIT_JFLUSH = 6, T_COMMIT_CALLBACK = 7, T_FINISHED = 8, } t_state; unsigned long t_log_start; int t_nr_buffers; struct journal_head *t_reserved_list; struct journal_head *t_buffers; struct journal_head *t_forget; struct journal_head *t_checkpoint_list; struct journal_head *t_shadow_list; struct list_head t_inode_list; unsigned long t_max_wait; unsigned long t_start; unsigned long t_requested; struct transaction_chp_stats_s t_chp_stats; atomic_t t_updates; atomic_t t_outstanding_credits; atomic_t t_outstanding_revokes; atomic_t t_handle_count; transaction_t *t_cpnext; transaction_t *t_cpprev; unsigned long t_expires; ktime_t t_start_time; unsigned int t_synchronous_commit: 1; int t_need_data_flush; struct list_head t_private_list; }; struct transaction_run_stats_s { unsigned long rs_wait; unsigned long rs_request_delay; unsigned long rs_running; unsigned long rs_locked; unsigned long rs_flushing; unsigned long rs_logging; __u32 rs_handle_count; __u32 rs_blocks; __u32 rs_blocks_logged; }; struct transaction_stats_s { unsigned long 
ts_tid; unsigned long ts_requested; struct transaction_run_stats_s run; }; enum passtype { PASS_SCAN = 0, PASS_REVOKE = 1, PASS_REPLAY = 2, }; struct journal_superblock_s; typedef struct journal_superblock_s journal_superblock_t; struct jbd2_revoke_table_s; struct jbd2_inode; struct journal_s { unsigned long j_flags; unsigned long j_atomic_flags; int j_errno; struct mutex j_abort_mutex; struct buffer_head *j_sb_buffer; journal_superblock_t *j_superblock; rwlock_t j_state_lock; int j_barrier_count; struct mutex j_barrier; transaction_t *j_running_transaction; transaction_t *j_committing_transaction; transaction_t *j_checkpoint_transactions; wait_queue_head_t j_wait_transaction_locked; wait_queue_head_t j_wait_done_commit; wait_queue_head_t j_wait_commit; wait_queue_head_t j_wait_updates; wait_queue_head_t j_wait_reserved; wait_queue_head_t j_fc_wait; struct mutex j_checkpoint_mutex; struct buffer_head *j_chkpt_bhs[64]; struct shrinker j_shrinker; struct percpu_counter j_checkpoint_jh_count; transaction_t *j_shrink_transaction; unsigned long j_head; unsigned long j_tail; unsigned long j_free; unsigned long j_first; unsigned long j_last; unsigned long j_fc_first; unsigned long j_fc_off; unsigned long j_fc_last; struct block_device *j_dev; int j_blocksize; unsigned long long j_blk_offset; char j_devname[56]; struct block_device *j_fs_dev; unsigned int j_total_len; atomic_t j_reserved_credits; spinlock_t j_list_lock; struct inode *j_inode; tid_t j_tail_sequence; tid_t j_transaction_sequence; tid_t j_commit_sequence; tid_t j_commit_request; __u8 j_uuid[16]; struct task_struct *j_task; int j_max_transaction_buffers; int j_revoke_records_per_block; unsigned long j_commit_interval; struct timer_list j_commit_timer; spinlock_t j_revoke_lock; struct jbd2_revoke_table_s *j_revoke; struct jbd2_revoke_table_s *j_revoke_table[2]; struct buffer_head **j_wbuf; struct buffer_head **j_fc_wbuf; int j_wbufsize; int j_fc_wbufsize; pid_t j_last_sync_writer; u64 j_average_commit_time; u32 j_min_batch_time; u32 j_max_batch_time; void (*j_commit_callback)(journal_t *, transaction_t *); int (*j_submit_inode_data_buffers)(struct jbd2_inode *); int (*j_finish_inode_data_buffers)(struct jbd2_inode *); spinlock_t j_history_lock; struct proc_dir_entry *j_proc_entry; struct transaction_stats_s j_stats; unsigned int j_failed_commit; void *j_private; struct crypto_shash *j_chksum_driver; __u32 j_csum_seed; void (*j_fc_cleanup_callback)(struct journal_s *, int, tid_t); int (*j_fc_replay_callback)(struct journal_s *, struct buffer_head *, enum passtype, int, tid_t); int (*j_bmap)(struct journal_s *, sector_t *); }; struct journal_header_s { __be32 h_magic; __be32 h_blocktype; __be32 h_sequence; }; typedef struct journal_header_s journal_header_t; struct journal_superblock_s { journal_header_t s_header; __be32 s_blocksize; __be32 s_maxlen; __be32 s_first; __be32 s_sequence; __be32 s_start; __be32 s_errno; __be32 s_feature_compat; __be32 s_feature_incompat; __be32 s_feature_ro_compat; __u8 s_uuid[16]; __be32 s_nr_users; __be32 s_dynsuper; __be32 s_max_transaction; __be32 s_max_trans_data; __u8 s_checksum_type; __u8 s_padding2[3]; __be32 s_num_fc_blks; __be32 s_head; __u32 s_padding[40]; __be32 s_checksum; __u8 s_users[768]; }; struct jbd2_inode { transaction_t *i_transaction; transaction_t *i_next_transaction; struct list_head i_list; struct inode *i_vfs_inode; unsigned long i_flags; loff_t i_dirty_start; loff_t i_dirty_end; }; struct jbd2_buffer_trigger_type; struct journal_head { struct buffer_head *b_bh; spinlock_t 
b_state_lock; int b_jcount; unsigned int b_jlist; unsigned int b_modified; char *b_frozen_data; char *b_committed_data; transaction_t *b_transaction; transaction_t *b_next_transaction; struct journal_head *b_tnext; struct journal_head *b_tprev; transaction_t *b_cp_transaction; struct journal_head *b_cpnext; struct journal_head *b_cpprev; struct jbd2_buffer_trigger_type *b_triggers; struct jbd2_buffer_trigger_type *b_frozen_triggers; }; struct jbd2_buffer_trigger_type { void (*t_frozen)(struct jbd2_buffer_trigger_type *, struct buffer_head *, void *, size_t); void (*t_abort)(struct jbd2_buffer_trigger_type *, struct buffer_head *); }; enum ext4_li_mode { EXT4_LI_MODE_PREFETCH_BBITMAP = 0, EXT4_LI_MODE_ITABLE = 1, }; enum jbd_state_bits { BH_JBD = 16, BH_JWrite = 17, BH_Freed = 18, BH_Revoked = 19, BH_RevokeValid = 20, BH_JBDDirty = 21, BH_JournalHead = 22, BH_Shadow = 23, BH_Verified = 24, BH_JBDPrivateStart = 25, }; struct ext4_group_desc { __le32 bg_block_bitmap_lo; __le32 bg_inode_bitmap_lo; __le32 bg_inode_table_lo; __le16 bg_free_blocks_count_lo; __le16 bg_free_inodes_count_lo; __le16 bg_used_dirs_count_lo; __le16 bg_flags; __le32 bg_exclude_bitmap_lo; __le16 bg_block_bitmap_csum_lo; __le16 bg_inode_bitmap_csum_lo; __le16 bg_itable_unused_lo; __le16 bg_checksum; __le32 bg_block_bitmap_hi; __le32 bg_inode_bitmap_hi; __le32 bg_inode_table_hi; __le16 bg_free_blocks_count_hi; __le16 bg_free_inodes_count_hi; __le16 bg_used_dirs_count_hi; __le16 bg_itable_unused_hi; __le32 bg_exclude_bitmap_hi; __le16 bg_block_bitmap_csum_hi; __le16 bg_inode_bitmap_csum_hi; __u32 bg_reserved; }; typedef int ext4_grpblk_t; typedef unsigned int ext4_group_t; struct ext4_group_info { unsigned long bb_state; struct rb_root bb_free_root; ext4_grpblk_t bb_first_free; ext4_grpblk_t bb_free; ext4_grpblk_t bb_fragments; int bb_avg_fragment_size_order; ext4_grpblk_t bb_largest_free_order; ext4_group_t bb_group; struct list_head bb_prealloc_list; struct rw_semaphore alloc_sem; struct list_head bb_avg_fragment_size_node; struct list_head bb_largest_free_order_node; ext4_grpblk_t bb_counters[0]; }; typedef unsigned long long ext4_fsblk_t; typedef __u32 ext4_lblk_t; struct extent_status; struct ext4_es_tree { struct rb_root root; struct extent_status *cache_es; }; struct ext4_pending_tree { struct rb_root root; }; struct ext4_inode_info { __le32 i_data[15]; __u32 i_dtime; ext4_fsblk_t i_file_acl; ext4_group_t i_block_group; ext4_lblk_t i_dir_start_lookup; unsigned long i_flags; struct rw_semaphore xattr_sem; union { struct list_head i_orphan; unsigned int i_orphan_idx; }; struct list_head i_fc_dilist; struct list_head i_fc_list; ext4_lblk_t i_fc_lblk_start; ext4_lblk_t i_fc_lblk_len; atomic_t i_fc_updates; wait_queue_head_t i_fc_wait; struct mutex i_fc_lock; loff_t i_disksize; struct rw_semaphore i_data_sem; struct inode vfs_inode; struct jbd2_inode *jinode; spinlock_t i_raw_lock; struct timespec64 i_crtime; atomic_t i_prealloc_active; struct rb_root i_prealloc_node; rwlock_t i_prealloc_lock; struct ext4_es_tree i_es_tree; rwlock_t i_es_lock; struct list_head i_es_list; unsigned int i_es_all_nr; unsigned int i_es_shk_nr; ext4_lblk_t i_es_shrink_lblk; ext4_group_t i_last_alloc_group; unsigned int i_reserved_data_blocks; struct ext4_pending_tree i_pending_tree; __u16 i_extra_isize; u16 i_inline_off; u16 i_inline_size; qsize_t i_reserved_quota; spinlock_t i_completed_io_lock; struct list_head i_rsv_conversion_list; struct work_struct i_rsv_conversion_work; atomic_t i_unwritten; spinlock_t i_block_reservation_lock; tid_t 
i_sync_tid; tid_t i_datasync_tid; struct dquot __attribute__((btf_type_tag("rcu"))) *i_dquot[3]; __u32 i_csum_seed; kprojid_t i_projid; }; struct extent_status { struct rb_node rb_node; ext4_lblk_t es_lblk; ext4_lblk_t es_len; ext4_fsblk_t es_pblk; }; struct ext4_orphan_block; struct ext4_orphan_info { int of_blocks; __u32 of_csum_seed; struct ext4_orphan_block *of_binfo; }; struct ext4_es_stats { unsigned long es_stats_shrunk; struct percpu_counter es_stats_cache_hits; struct percpu_counter es_stats_cache_misses; u64 es_stats_scan_time; u64 es_stats_max_scan_time; struct percpu_counter es_stats_all_cnt; struct percpu_counter es_stats_shk_cnt; }; struct ext4_journal_trigger { struct jbd2_buffer_trigger_type tr_triggers; struct super_block *sb; }; struct ext4_fc_stats { unsigned int fc_ineligible_reason_count[10]; unsigned long fc_num_commits; unsigned long fc_ineligible_commits; unsigned long fc_failed_commits; unsigned long fc_skipped_commits; unsigned long fc_numblks; u64 s_fc_avg_commit_time; }; struct ext4_fc_alloc_region; struct ext4_fc_replay_state { int fc_replay_num_tags; int fc_replay_expected_off; int fc_current_pass; int fc_cur_tag; int fc_crc; struct ext4_fc_alloc_region *fc_regions; int fc_regions_size; int fc_regions_used; int fc_regions_valid; int *fc_modified_inodes; int fc_modified_inodes_used; int fc_modified_inodes_size; }; struct ext4_super_block; struct blockgroup_lock; struct ext4_system_blocks; struct ext4_locality_group; struct flex_groups; struct ext4_li_request; struct ext4_sb_info { unsigned long s_desc_size; unsigned long s_inodes_per_block; unsigned long s_blocks_per_group; unsigned long s_clusters_per_group; unsigned long s_inodes_per_group; unsigned long s_itb_per_group; unsigned long s_gdb_count; unsigned long s_desc_per_block; ext4_group_t s_groups_count; ext4_group_t s_blockfile_groups; unsigned long s_overhead; unsigned int s_cluster_ratio; unsigned int s_cluster_bits; loff_t s_bitmap_maxbytes; struct buffer_head *s_sbh; struct ext4_super_block *s_es; struct buffer_head * __attribute__((btf_type_tag("rcu"))) *s_group_desc; unsigned int s_mount_opt; unsigned int s_mount_opt2; unsigned long s_mount_flags; unsigned int s_def_mount_opt; unsigned int s_def_mount_opt2; ext4_fsblk_t s_sb_block; atomic64_t s_resv_clusters; kuid_t s_resuid; kgid_t s_resgid; unsigned short s_mount_state; unsigned short s_pad; int s_addr_per_block_bits; int s_desc_per_block_bits; int s_inode_size; int s_first_ino; unsigned int s_inode_readahead_blks; unsigned int s_inode_goal; u32 s_hash_seed[4]; int s_def_hash_version; int s_hash_unsigned; struct percpu_counter s_freeclusters_counter; struct percpu_counter s_freeinodes_counter; struct percpu_counter s_dirs_counter; struct percpu_counter s_dirtyclusters_counter; struct percpu_counter s_sra_exceeded_retry_limit; struct blockgroup_lock *s_blockgroup_lock; struct proc_dir_entry *s_proc; struct kobject s_kobj; struct completion s_kobj_unregister; struct super_block *s_sb; struct buffer_head *s_mmp_bh; struct journal_s *s_journal; unsigned long s_ext4_flags; struct mutex s_orphan_lock; struct list_head s_orphan; struct ext4_orphan_info s_orphan_info; unsigned long s_commit_interval; u32 s_max_batch_time; u32 s_min_batch_time; struct block_device *s_journal_bdev; char __attribute__((btf_type_tag("rcu"))) *s_qf_names[3]; int s_jquota_fmt; unsigned int s_want_extra_isize; struct ext4_system_blocks __attribute__((btf_type_tag("rcu"))) *s_system_blks; struct ext4_group_info ** __attribute__((btf_type_tag("rcu"))) *s_group_info; struct inode 
*s_buddy_cache; spinlock_t s_md_lock; unsigned short *s_mb_offsets; unsigned int *s_mb_maxs; unsigned int s_group_info_size; unsigned int s_mb_free_pending; struct list_head s_freed_data_list; struct list_head s_discard_list; struct work_struct s_discard_work; atomic_t s_retry_alloc_pending; struct list_head *s_mb_avg_fragment_size; rwlock_t *s_mb_avg_fragment_size_locks; struct list_head *s_mb_largest_free_orders; rwlock_t *s_mb_largest_free_orders_locks; unsigned long s_stripe; unsigned int s_mb_max_linear_groups; unsigned int s_mb_stream_request; unsigned int s_mb_max_to_scan; unsigned int s_mb_min_to_scan; unsigned int s_mb_stats; unsigned int s_mb_order2_reqs; unsigned int s_mb_group_prealloc; unsigned int s_max_dir_size_kb; unsigned long s_mb_last_group; unsigned long s_mb_last_start; unsigned int s_mb_prefetch; unsigned int s_mb_prefetch_limit; unsigned int s_mb_best_avail_max_trim_order; atomic_t s_bal_reqs; atomic_t s_bal_success; atomic_t s_bal_allocated; atomic_t s_bal_ex_scanned; atomic_t s_bal_cX_ex_scanned[5]; atomic_t s_bal_groups_scanned; atomic_t s_bal_goals; atomic_t s_bal_len_goals; atomic_t s_bal_breaks; atomic_t s_bal_2orders; atomic_t s_bal_p2_aligned_bad_suggestions; atomic_t s_bal_goal_fast_bad_suggestions; atomic_t s_bal_best_avail_bad_suggestions; atomic64_t s_bal_cX_groups_considered[5]; atomic64_t s_bal_cX_hits[5]; atomic64_t s_bal_cX_failed[5]; atomic_t s_mb_buddies_generated; atomic64_t s_mb_generation_time; atomic_t s_mb_lost_chunks; atomic_t s_mb_preallocated; atomic_t s_mb_discarded; atomic_t s_lock_busy; struct ext4_locality_group __attribute__((btf_type_tag("percpu"))) *s_locality_groups; unsigned long s_sectors_written_start; u64 s_kbytes_written; unsigned int s_extent_max_zeroout_kb; unsigned int s_log_groups_per_flex; struct flex_groups * __attribute__((btf_type_tag("rcu"))) *s_flex_groups; ext4_group_t s_flex_groups_allocated; struct workqueue_struct *rsv_conversion_wq; struct timer_list s_err_report; struct ext4_li_request *s_li_request; unsigned int s_li_wait_mult; struct task_struct *s_mmp_tsk; unsigned long s_last_trim_minblks; struct crypto_shash *s_chksum_driver; __u32 s_csum_seed; struct shrinker s_es_shrinker; struct list_head s_es_list; long s_es_nr_inode; struct ext4_es_stats s_es_stats; struct mb_cache *s_ea_block_cache; struct mb_cache *s_ea_inode_cache; long: 64; long: 64; spinlock_t s_es_lock; struct ext4_journal_trigger s_journal_triggers[1]; struct ratelimit_state s_err_ratelimit_state; struct ratelimit_state s_warning_ratelimit_state; struct ratelimit_state s_msg_ratelimit_state; atomic_t s_warning_count; atomic_t s_msg_count; struct fscrypt_dummy_policy s_dummy_enc_policy; struct percpu_rw_semaphore s_writepages_rwsem; struct dax_device *s_daxdev; u64 s_dax_part_off; errseq_t s_bdev_wb_err; spinlock_t s_bdev_wb_lock; spinlock_t s_error_lock; int s_add_error_count; int s_first_error_code; __u32 s_first_error_line; __u32 s_first_error_ino; __u64 s_first_error_block; const char *s_first_error_func; time64_t s_first_error_time; int s_last_error_code; __u32 s_last_error_line; __u32 s_last_error_ino; __u64 s_last_error_block; const char *s_last_error_func; time64_t s_last_error_time; struct work_struct s_sb_upd_work; atomic_t s_fc_subtid; struct list_head s_fc_q[2]; struct list_head s_fc_dentry_q[2]; unsigned int s_fc_bytes; spinlock_t s_fc_lock; struct buffer_head *s_fc_bh; struct ext4_fc_stats s_fc_stats; tid_t s_fc_ineligible_tid; struct ext4_fc_replay_state s_fc_replay_state; long: 64; long: 64; long: 64; }; struct ext4_super_block { 
__le32 s_inodes_count; __le32 s_blocks_count_lo; __le32 s_r_blocks_count_lo; __le32 s_free_blocks_count_lo; __le32 s_free_inodes_count; __le32 s_first_data_block; __le32 s_log_block_size; __le32 s_log_cluster_size; __le32 s_blocks_per_group; __le32 s_clusters_per_group; __le32 s_inodes_per_group; __le32 s_mtime; __le32 s_wtime; __le16 s_mnt_count; __le16 s_max_mnt_count; __le16 s_magic; __le16 s_state; __le16 s_errors; __le16 s_minor_rev_level; __le32 s_lastcheck; __le32 s_checkinterval; __le32 s_creator_os; __le32 s_rev_level; __le16 s_def_resuid; __le16 s_def_resgid; __le32 s_first_ino; __le16 s_inode_size; __le16 s_block_group_nr; __le32 s_feature_compat; __le32 s_feature_incompat; __le32 s_feature_ro_compat; __u8 s_uuid[16]; char s_volume_name[16]; char s_last_mounted[64]; __le32 s_algorithm_usage_bitmap; __u8 s_prealloc_blocks; __u8 s_prealloc_dir_blocks; __le16 s_reserved_gdt_blocks; __u8 s_journal_uuid[16]; __le32 s_journal_inum; __le32 s_journal_dev; __le32 s_last_orphan; __le32 s_hash_seed[4]; __u8 s_def_hash_version; __u8 s_jnl_backup_type; __le16 s_desc_size; __le32 s_default_mount_opts; __le32 s_first_meta_bg; __le32 s_mkfs_time; __le32 s_jnl_blocks[17]; __le32 s_blocks_count_hi; __le32 s_r_blocks_count_hi; __le32 s_free_blocks_count_hi; __le16 s_min_extra_isize; __le16 s_want_extra_isize; __le32 s_flags; __le16 s_raid_stride; __le16 s_mmp_update_interval; __le64 s_mmp_block; __le32 s_raid_stripe_width; __u8 s_log_groups_per_flex; __u8 s_checksum_type; __u8 s_encryption_level; __u8 s_reserved_pad; __le64 s_kbytes_written; __le32 s_snapshot_inum; __le32 s_snapshot_id; __le64 s_snapshot_r_blocks_count; __le32 s_snapshot_list; __le32 s_error_count; __le32 s_first_error_time; __le32 s_first_error_ino; __le64 s_first_error_block; __u8 s_first_error_func[32]; __le32 s_first_error_line; __le32 s_last_error_time; __le32 s_last_error_ino; __le32 s_last_error_line; __le64 s_last_error_block; __u8 s_last_error_func[32]; __u8 s_mount_opts[64]; __le32 s_usr_quota_inum; __le32 s_grp_quota_inum; __le32 s_overhead_clusters; __le32 s_backup_bgs[2]; __u8 s_encrypt_algos[4]; __u8 s_encrypt_pw_salt[16]; __le32 s_lpf_ino; __le32 s_prj_quota_inum; __le32 s_checksum_seed; __u8 s_wtime_hi; __u8 s_mtime_hi; __u8 s_mkfs_time_hi; __u8 s_lastcheck_hi; __u8 s_first_error_time_hi; __u8 s_last_error_time_hi; __u8 s_first_error_errcode; __u8 s_last_error_errcode; __le16 s_encoding; __le16 s_encoding_flags; __le32 s_orphan_file_inum; __le32 s_reserved[94]; __le32 s_checksum; }; struct bgl_lock { spinlock_t lock; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct blockgroup_lock { struct bgl_lock locks[128]; }; struct ext4_orphan_block { atomic_t ob_free_entries; struct buffer_head *ob_bh; }; struct ext4_system_blocks { struct rb_root root; struct callback_head rcu; }; struct ext4_locality_group { struct mutex lg_mutex; struct list_head lg_prealloc_list[10]; spinlock_t lg_prealloc_lock; }; struct flex_groups { atomic64_t free_clusters; atomic_t free_inodes; atomic_t used_dirs; }; struct ext4_li_request { struct super_block *lr_super; enum ext4_li_mode lr_mode; ext4_group_t lr_first_not_zeroed; ext4_group_t lr_next_group; struct list_head lr_request; unsigned long lr_next_sched; unsigned long lr_timeout; }; struct ext4_fc_alloc_region { ext4_lblk_t lblk; ext4_fsblk_t pblk; int ino; int len; }; struct jbd2_journal_handle; typedef struct jbd2_journal_handle handle_t; struct jbd2_journal_handle { union { transaction_t *h_transaction; journal_t *h_journal; }; handle_t *h_rsv_handle; int 
h_total_credits; int h_revoke_credits; int h_revoke_credits_requested; int h_ref; int h_err; unsigned int h_sync: 1; unsigned int h_jdata: 1; unsigned int h_reserved: 1; unsigned int h_aborted: 1; unsigned int h_type: 8; unsigned int h_line_no: 16; unsigned long h_start_jiffies; unsigned int h_requested_credits; unsigned int saved_alloc_context; }; struct ext4_allocation_request { struct inode *inode; unsigned int len; ext4_lblk_t logical; ext4_lblk_t lleft; ext4_lblk_t lright; ext4_fsblk_t goal; ext4_fsblk_t pleft; ext4_fsblk_t pright; unsigned int flags; }; struct ext4_system_zone { struct rb_node node; ext4_fsblk_t start_blk; unsigned int count; u32 ino; }; struct ext4_map_blocks { ext4_fsblk_t m_pblk; ext4_lblk_t m_lblk; unsigned int m_len; unsigned int m_flags; }; typedef enum { EXT4_IGET_NORMAL = 0, EXT4_IGET_SPECIAL = 1, EXT4_IGET_HANDLE = 2, EXT4_IGET_BAD = 4, EXT4_IGET_EA_INODE = 8, } ext4_iget_flags; enum { EXT4_INODE_SECRM = 0, EXT4_INODE_UNRM = 1, EXT4_INODE_COMPR = 2, EXT4_INODE_SYNC = 3, EXT4_INODE_IMMUTABLE = 4, EXT4_INODE_APPEND = 5, EXT4_INODE_NODUMP = 6, EXT4_INODE_NOATIME = 7, EXT4_INODE_DIRTY = 8, EXT4_INODE_COMPRBLK = 9, EXT4_INODE_NOCOMPR = 10, EXT4_INODE_ENCRYPT = 11, EXT4_INODE_INDEX = 12, EXT4_INODE_IMAGIC = 13, EXT4_INODE_JOURNAL_DATA = 14, EXT4_INODE_NOTAIL = 15, EXT4_INODE_DIRSYNC = 16, EXT4_INODE_TOPDIR = 17, EXT4_INODE_HUGE_FILE = 18, EXT4_INODE_EXTENTS = 19, EXT4_INODE_VERITY = 20, EXT4_INODE_EA_INODE = 21, EXT4_INODE_DAX = 25, EXT4_INODE_INLINE_DATA = 28, EXT4_INODE_PROJINHERIT = 29, EXT4_INODE_CASEFOLD = 30, EXT4_INODE_RESERVED = 31, }; struct fname { __u32 hash; __u32 minor_hash; struct rb_node rb_hash; struct fname *next; __u32 inode; __u8 name_len; __u8 file_type; char name[0]; }; struct ext4_dir_entry_2 { __le32 inode; __le16 rec_len; __u8 name_len; __u8 file_type; char name[255]; }; struct ext4_dir_entry_hash { __le32 hash; __le32 minor_hash; }; struct dir_private_info { struct rb_root root; struct rb_node *curr_node; struct fname *extra_fname; loff_t last_pos; __u32 curr_hash; __u32 curr_minor_hash; __u32 next_hash; }; enum ext4_journal_trigger_type { EXT4_JTR_ORPHAN_FILE = 0, EXT4_JTR_NONE = 1, }; enum { EXT4_STATE_NEW = 0, EXT4_STATE_XATTR = 1, EXT4_STATE_NO_EXPAND = 2, EXT4_STATE_DA_ALLOC_CLOSE = 3, EXT4_STATE_EXT_MIGRATE = 4, EXT4_STATE_NEWENTRY = 5, EXT4_STATE_MAY_INLINE_DATA = 6, EXT4_STATE_EXT_PRECACHED = 7, EXT4_STATE_LUSTRE_EA_INODE = 8, EXT4_STATE_VERITY_IN_PROGRESS = 9, EXT4_STATE_FC_COMMITTING = 10, EXT4_STATE_ORPHAN_FILE = 11, }; struct partial_cluster { ext4_fsblk_t pclu; ext4_lblk_t lblk; enum { initial = 0, tofree = 1, nofree = 2, } state; }; enum { ES_WRITTEN_B = 0, ES_UNWRITTEN_B = 1, ES_DELAYED_B = 2, ES_HOLE_B = 3, ES_REFERENCED_B = 4, ES_FLAGS = 5, }; enum { EXT4_FC_REASON_XATTR = 0, EXT4_FC_REASON_CROSS_RENAME = 1, EXT4_FC_REASON_JOURNAL_FLAG_CHANGE = 2, EXT4_FC_REASON_NOMEM = 3, EXT4_FC_REASON_SWAP_BOOT = 4, EXT4_FC_REASON_RESIZE = 5, EXT4_FC_REASON_RENAME_DIR = 6, EXT4_FC_REASON_FALLOC_RANGE = 7, EXT4_FC_REASON_INODE_JOURNAL_DATA = 8, EXT4_FC_REASON_ENCRYPTED_FILENAME = 9, EXT4_FC_REASON_MAX = 10, }; enum SHIFT_DIRECTION { SHIFT_LEFT = 0, SHIFT_RIGHT = 1, }; struct ext4_extent_idx { __le32 ei_block; __le32 ei_leaf_lo; __le16 ei_leaf_hi; __u16 ei_unused; }; struct ext4_extent { __le32 ee_block; __le16 ee_len; __le16 ee_start_hi; __le32 ee_start_lo; }; struct ext4_io_end_vec { struct list_head list; loff_t offset; ssize_t size; }; struct ext4_extent_tail { __le32 et_checksum; }; struct ext4_extent_header { __le16 eh_magic; __le16 
eh_entries; __le16 eh_max; __le16 eh_depth; __le32 eh_generation; }; struct ext4_ext_path { ext4_fsblk_t p_block; __u16 p_depth; __u16 p_maxdepth; struct ext4_extent *p_ext; struct ext4_extent_idx *p_idx; struct ext4_extent_header *p_hdr; struct buffer_head *p_bh; }; struct ext4_iloc { struct buffer_head *bh; unsigned long offset; ext4_group_t block_group; }; struct ext4_io_end { struct list_head list; handle_t *handle; struct inode *inode; struct bio *bio; unsigned int flag; refcount_t count; struct list_head list_vec; }; typedef struct ext4_io_end ext4_io_end_t; struct pending_reservation { struct rb_node rb_node; ext4_lblk_t lclu; }; struct rsvd_count { int ndelonly; bool first_do_lblk_found; ext4_lblk_t first_do_lblk; ext4_lblk_t last_do_lblk; struct extent_status *left_es; bool partial; ext4_lblk_t lclu; }; enum { EXT4_MF_MNTDIR_SAMPLED = 0, EXT4_MF_FC_INELIGIBLE = 1, }; struct ext4_fsmap { struct list_head fmr_list; dev_t fmr_device; uint32_t fmr_flags; uint64_t fmr_physical; uint64_t fmr_owner; uint64_t fmr_length; }; typedef int (*ext4_mballoc_query_range_fn)(struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t, void *); typedef int (*ext4_fsmap_format_t)(struct ext4_fsmap *, void *); struct ext4_fsmap_head; struct ext4_getfsmap_info { struct ext4_fsmap_head *gfi_head; ext4_fsmap_format_t gfi_formatter; void *gfi_format_arg; ext4_fsblk_t gfi_next_fsblk; u32 gfi_dev; ext4_group_t gfi_agno; struct ext4_fsmap gfi_low; struct ext4_fsmap gfi_high; struct ext4_fsmap gfi_lastfree; struct list_head gfi_meta_list; bool gfi_last; }; struct ext4_fsmap_head { uint32_t fmh_iflags; uint32_t fmh_oflags; unsigned int fmh_count; unsigned int fmh_entries; struct ext4_fsmap fmh_keys[2]; }; struct fsmap { __u32 fmr_device; __u32 fmr_flags; __u64 fmr_physical; __u64 fmr_owner; __u64 fmr_offset; __u64 fmr_length; __u64 fmr_reserved[3]; }; struct ext4_getfsmap_dev { int (*gfd_fn)(struct super_block *, struct ext4_fsmap *, struct ext4_getfsmap_info *); u32 gfd_dev; }; struct dx_hash_info { u32 hash; u32 minor_hash; int hash_version; u32 *seed; }; struct ext4_inode { __le16 i_mode; __le16 i_uid; __le32 i_size_lo; __le32 i_atime; __le32 i_ctime; __le32 i_mtime; __le32 i_dtime; __le16 i_gid; __le16 i_links_count; __le32 i_blocks_lo; __le32 i_flags; union { struct { __le32 l_i_version; } linux1; struct { __u32 h_i_translator; } hurd1; struct { __u32 m_i_reserved1; } masix1; } osd1; __le32 i_block[15]; __le32 i_generation; __le32 i_file_acl_lo; __le32 i_size_high; __le32 i_obso_faddr; union { struct { __le16 l_i_blocks_high; __le16 l_i_file_acl_high; __le16 l_i_uid_high; __le16 l_i_gid_high; __le16 l_i_checksum_lo; __le16 l_i_reserved; } linux2; struct { __le16 h_i_reserved1; __u16 h_i_mode_high; __u16 h_i_uid_high; __u16 h_i_gid_high; __u32 h_i_author; } hurd2; struct { __le16 h_i_reserved1; __le16 m_i_file_acl_high; __u32 m_i_reserved2[2]; } masix2; } osd2; __le16 i_extra_isize; __le16 i_checksum_hi; __le32 i_ctime_extra; __le32 i_mtime_extra; __le32 i_atime_extra; __le32 i_crtime; __le32 i_crtime_extra; __le32 i_version_hi; __le32 i_projid; }; typedef unsigned int __kernel_mode_t; typedef __kernel_mode_t mode_t; struct orlov_stats { __u64 free_clusters; __u32 free_inodes; __u32 used_dirs; }; typedef struct { __le32 *p; __le32 key; struct buffer_head *bh; } Indirect; struct ext4_xattr_ibody_header { __le32 h_magic; }; struct ext4_xattr_entry { __u8 e_name_len; __u8 e_name_index; __le16 e_value_offs; __le32 e_value_inum; __le32 e_value_size; __le32 e_hash; char e_name[0]; }; struct ext4_xattr_info 
{ const char *name; const void *value; size_t value_len; int name_index; int in_inode; }; struct ext4_xattr_search { struct ext4_xattr_entry *first; void *base; void *end; struct ext4_xattr_entry *here; int not_found; }; struct ext4_xattr_ibody_find { struct ext4_xattr_search s; struct ext4_iloc iloc; }; struct ext4_filename { const struct qstr *usr_fname; struct fscrypt_str disk_name; struct dx_hash_info hinfo; struct fscrypt_str crypto_buf; struct fscrypt_str cf_name; }; typedef unsigned short __kernel_uid16_t; typedef __kernel_uid16_t uid16_t; typedef unsigned short __kernel_gid16_t; typedef __kernel_gid16_t gid16_t; struct ext4_xattr_inode_array { unsigned int count; struct inode *inodes[0]; }; struct ext4_io_submit { struct writeback_control *io_wbc; struct bio *io_bio; ext4_io_end_t *io_end; sector_t io_next_block; }; struct mpage_da_data { struct inode *inode; struct writeback_control *wbc; unsigned int can_map: 1; unsigned long first_page; unsigned long next_page; unsigned long last_page; struct ext4_map_blocks map; struct ext4_io_submit io_submit; unsigned int do_map: 1; unsigned int scanned_until_end: 1; unsigned int journalled_more_data: 1; }; struct move_extent { __u32 reserved; __u32 donor_fd; __u64 orig_start; __u64 donor_start; __u64 len; __u64 moved_len; }; struct ext4_new_group_input { __u32 group; __u64 block_bitmap; __u64 inode_bitmap; __u64 inode_table; __u32 blocks_count; __u16 reserved_blocks; __u16 unused; }; struct fstrim_range { __u64 start; __u64 len; __u64 minlen; }; struct ext4_new_group_data { __u32 group; __u64 block_bitmap; __u64 inode_bitmap; __u64 inode_table; __u32 blocks_count; __u16 reserved_blocks; __u16 mdata_blocks; __u32 free_clusters_count; }; struct fsmap_head { __u32 fmh_iflags; __u32 fmh_oflags; __u32 fmh_count; __u32 fmh_entries; __u64 fmh_reserved[6]; struct fsmap fmh_keys[2]; struct fsmap fmh_recs[0]; }; struct getfsmap_info { struct super_block *gi_sb; struct fsmap_head __attribute__((btf_type_tag("user"))) *gi_data; unsigned int gi_idx; __u32 gi_last_flags; }; struct fsuuid { __u32 fsu_len; __u32 fsu_flags; __u8 fsu_uuid[0]; }; typedef void ext4_update_sb_callback(struct ext4_super_block *, const void *); struct compat_ext4_new_group_input { u32 group; compat_u64 block_bitmap; compat_u64 inode_bitmap; compat_u64 inode_table; u32 blocks_count; u16 reserved_blocks; u16 unused; }; struct sg { struct ext4_group_info info; ext4_grpblk_t counters[18]; }; enum criteria { CR_POWER2_ALIGNED = 0, CR_GOAL_LEN_FAST = 1, CR_BEST_AVAIL_LEN = 2, CR_GOAL_LEN_SLOW = 3, CR_ANY_FREE = 4, EXT4_MB_NUM_CRS = 5, }; enum { MB_INODE_PA = 0, MB_GROUP_PA = 1, }; enum blk_default_limits { BLK_MAX_SEGMENTS = 128, BLK_SAFE_MAX_SECTORS = 255, BLK_MAX_SEGMENT_SIZE = 65536, BLK_SEG_BOUNDARY_MASK = 4294967295, }; struct ext4_free_data { struct list_head efd_list; struct rb_node efd_node; ext4_group_t efd_group; ext4_grpblk_t efd_start_cluster; ext4_grpblk_t efd_count; tid_t efd_tid; }; struct ext4_prealloc_space { union { struct rb_node inode_node; struct list_head lg_list; } pa_node; struct list_head pa_group_list; union { struct list_head pa_tmp_list; struct callback_head pa_rcu; } u; spinlock_t pa_lock; atomic_t pa_count; unsigned int pa_deleted; ext4_fsblk_t pa_pstart; ext4_lblk_t pa_lstart; ext4_grpblk_t pa_len; ext4_grpblk_t pa_free; unsigned short pa_type; union { rwlock_t *inode_lock; spinlock_t *lg_lock; } pa_node_lock; struct inode *pa_inode; }; struct ext4_buddy { struct page *bd_buddy_page; void *bd_buddy; struct page *bd_bitmap_page; void *bd_bitmap; struct 
ext4_group_info *bd_info; struct super_block *bd_sb; __u16 bd_blkbits; ext4_group_t bd_group; }; struct ext4_free_extent { ext4_lblk_t fe_logical; ext4_grpblk_t fe_start; ext4_group_t fe_group; ext4_grpblk_t fe_len; }; struct ext4_allocation_context { struct inode *ac_inode; struct super_block *ac_sb; struct ext4_free_extent ac_o_ex; struct ext4_free_extent ac_g_ex; struct ext4_free_extent ac_b_ex; struct ext4_free_extent ac_f_ex; ext4_grpblk_t ac_orig_goal_len; __u32 ac_groups_considered; __u32 ac_flags; __u16 ac_groups_scanned; __u16 ac_groups_linear_remaining; __u16 ac_found; __u16 ac_cX_found[5]; __u16 ac_tail; __u16 ac_buddy; __u8 ac_status; __u8 ac_criteria; __u8 ac_2order; __u8 ac_op; struct page *ac_bitmap_page; struct page *ac_buddy_page; struct ext4_prealloc_space *ac_pa; struct ext4_locality_group *ac_lg; }; struct migrate_struct { ext4_lblk_t first_block; ext4_lblk_t last_block; ext4_lblk_t curr_block; ext4_fsblk_t first_pblock; ext4_fsblk_t last_pblock; }; struct mmp_struct { __le32 mmp_magic; __le32 mmp_seq; __le64 mmp_time; char mmp_nodename[64]; char mmp_bdevname[32]; __le16 mmp_check_interval; __le16 mmp_pad1; __le32 mmp_pad2[226]; __le32 mmp_checksum; }; struct ext4_dir_entry_tail { __le32 det_reserved_zero1; __le16 det_rec_len; __u8 det_reserved_zero2; __u8 det_reserved_ft; __le32 det_checksum; }; struct dx_entry; struct dx_frame { struct buffer_head *bh; struct dx_entry *entries; struct dx_entry *at; }; struct dx_entry { __le32 hash; __le32 block; }; struct fake_dirent { __le32 inode; __le16 rec_len; u8 name_len; u8 file_type; }; struct dx_root_info { __le32 reserved_zero; u8 hash_version; u8 info_length; u8 indirect_levels; u8 unused_flags; }; struct dx_root { struct fake_dirent dot; char dot_name[4]; struct fake_dirent dotdot; char dotdot_name[4]; struct dx_root_info info; struct dx_entry entries[0]; }; struct dx_node { struct fake_dirent fake; struct dx_entry entries[0]; }; struct dx_countlimit { __le16 limit; __le16 count; }; struct ext4_dir_entry { __le32 inode; __le16 rec_len; __le16 name_len; char name[255]; }; struct dx_tail { u32 dt_reserved; __le32 dt_checksum; }; struct dx_map_entry { u32 hash; u16 offs; u16 size; }; struct ext4_renament { struct inode *dir; struct dentry *dentry; struct inode *inode; bool is_dir; int dir_nlink_delta; struct buffer_head *bh; struct ext4_dir_entry_2 *de; int inlined; struct buffer_head *dir_bh; struct ext4_dir_entry_2 *parent_de; int dir_inlined; }; typedef enum { EITHER = 0, INDEX = 1, DIRENT = 2, DIRENT_HTREE = 3, } dirblock_type_t; enum bio_post_read_step { STEP_INITIAL = 0, STEP_DECRYPT = 1, STEP_VERITY = 2, STEP_MAX = 3, }; struct bio_post_read_ctx { struct bio *bio; struct work_struct work; unsigned int cur_step; unsigned int enabled_steps; }; enum { BLOCK_BITMAP = 0, INODE_BITMAP = 1, INODE_TABLE = 2, GROUP_TABLE_COUNT = 3, }; struct ext4_rcu_ptr { struct callback_head rcu; void *ptr; }; struct ext4_new_flex_group_data { struct ext4_new_group_data *groups; __u16 *bg_flags; ext4_group_t resize_bg; ext4_group_t count; }; typedef void (*btf_trace_ext4_other_inode_update_time)(void *, struct inode *, ino_t); typedef void (*btf_trace_ext4_free_inode)(void *, struct inode *); typedef void (*btf_trace_ext4_request_inode)(void *, struct inode *, int); typedef void (*btf_trace_ext4_allocate_inode)(void *, struct inode *, struct inode *, int); typedef void (*btf_trace_ext4_evict_inode)(void *, struct inode *); typedef void (*btf_trace_ext4_drop_inode)(void *, struct inode *, int); typedef void 
(*btf_trace_ext4_nfs_commit_metadata)(void *, struct inode *); typedef void (*btf_trace_ext4_mark_inode_dirty)(void *, struct inode *, unsigned long); typedef void (*btf_trace_ext4_begin_ordered_truncate)(void *, struct inode *, loff_t); typedef void (*btf_trace_ext4_write_begin)(void *, struct inode *, loff_t, unsigned int); typedef void (*btf_trace_ext4_da_write_begin)(void *, struct inode *, loff_t, unsigned int); typedef void (*btf_trace_ext4_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); typedef void (*btf_trace_ext4_journalled_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); typedef void (*btf_trace_ext4_da_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); typedef void (*btf_trace_ext4_writepages)(void *, struct inode *, struct writeback_control *); typedef void (*btf_trace_ext4_da_write_pages)(void *, struct inode *, unsigned long, struct writeback_control *); typedef void (*btf_trace_ext4_da_write_pages_extent)(void *, struct inode *, struct ext4_map_blocks *); typedef void (*btf_trace_ext4_writepages_result)(void *, struct inode *, struct writeback_control *, int, int); typedef void (*btf_trace_ext4_read_folio)(void *, struct inode *, struct folio *); typedef void (*btf_trace_ext4_release_folio)(void *, struct inode *, struct folio *); typedef void (*btf_trace_ext4_invalidate_folio)(void *, struct folio *, size_t, size_t); typedef void (*btf_trace_ext4_journalled_invalidate_folio)(void *, struct folio *, size_t, size_t); typedef void (*btf_trace_ext4_discard_blocks)(void *, struct super_block *, unsigned long long, unsigned long long); typedef void (*btf_trace_ext4_mb_new_inode_pa)(void *, struct ext4_allocation_context *, struct ext4_prealloc_space *); typedef void (*btf_trace_ext4_mb_new_group_pa)(void *, struct ext4_allocation_context *, struct ext4_prealloc_space *); typedef void (*btf_trace_ext4_mb_release_inode_pa)(void *, struct ext4_prealloc_space *, unsigned long long, unsigned int); typedef void (*btf_trace_ext4_mb_release_group_pa)(void *, struct super_block *, struct ext4_prealloc_space *); typedef void (*btf_trace_ext4_discard_preallocations)(void *, struct inode *, unsigned int, unsigned int); typedef void (*btf_trace_ext4_mb_discard_preallocations)(void *, struct super_block *, int); typedef void (*btf_trace_ext4_request_blocks)(void *, struct ext4_allocation_request *); typedef void (*btf_trace_ext4_allocate_blocks)(void *, struct ext4_allocation_request *, unsigned long long); typedef void (*btf_trace_ext4_free_blocks)(void *, struct inode *, __u64, unsigned long, int); typedef void (*btf_trace_ext4_sync_file_enter)(void *, struct file *, int); typedef void (*btf_trace_ext4_sync_file_exit)(void *, struct inode *, int); typedef void (*btf_trace_ext4_sync_fs)(void *, struct super_block *, int); typedef void (*btf_trace_ext4_alloc_da_blocks)(void *, struct inode *); typedef void (*btf_trace_ext4_mballoc_alloc)(void *, struct ext4_allocation_context *); typedef void (*btf_trace_ext4_mballoc_prealloc)(void *, struct ext4_allocation_context *); typedef void (*btf_trace_ext4_mballoc_discard)(void *, struct super_block *, struct inode *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); typedef void (*btf_trace_ext4_mballoc_free)(void *, struct super_block *, struct inode *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); typedef void (*btf_trace_ext4_forget)(void *, struct inode *, int, __u64); typedef void (*btf_trace_ext4_da_update_reserve_space)(void *, struct inode *, int, int); typedef void 
(*btf_trace_ext4_da_reserve_space)(void *, struct inode *); typedef void (*btf_trace_ext4_da_release_space)(void *, struct inode *, int); typedef void (*btf_trace_ext4_mb_bitmap_load)(void *, struct super_block *, unsigned long); typedef void (*btf_trace_ext4_mb_buddy_bitmap_load)(void *, struct super_block *, unsigned long); typedef void (*btf_trace_ext4_load_inode_bitmap)(void *, struct super_block *, unsigned long); typedef void (*btf_trace_ext4_read_block_bitmap_load)(void *, struct super_block *, unsigned long, bool); typedef void (*btf_trace_ext4_fallocate_enter)(void *, struct inode *, loff_t, loff_t, int); typedef void (*btf_trace_ext4_punch_hole)(void *, struct inode *, loff_t, loff_t, int); typedef void (*btf_trace_ext4_zero_range)(void *, struct inode *, loff_t, loff_t, int); typedef void (*btf_trace_ext4_fallocate_exit)(void *, struct inode *, loff_t, unsigned int, int); typedef void (*btf_trace_ext4_unlink_enter)(void *, struct inode *, struct dentry *); typedef void (*btf_trace_ext4_unlink_exit)(void *, struct dentry *, int); typedef void (*btf_trace_ext4_truncate_enter)(void *, struct inode *); typedef void (*btf_trace_ext4_truncate_exit)(void *, struct inode *); typedef void (*btf_trace_ext4_ext_convert_to_initialized_enter)(void *, struct inode *, struct ext4_map_blocks *, struct ext4_extent *); typedef void (*btf_trace_ext4_ext_convert_to_initialized_fastpath)(void *, struct inode *, struct ext4_map_blocks *, struct ext4_extent *, struct ext4_extent *); typedef void (*btf_trace_ext4_ext_map_blocks_enter)(void *, struct inode *, ext4_lblk_t, unsigned int, unsigned int); typedef void (*btf_trace_ext4_ind_map_blocks_enter)(void *, struct inode *, ext4_lblk_t, unsigned int, unsigned int); typedef void (*btf_trace_ext4_ext_map_blocks_exit)(void *, struct inode *, unsigned int, struct ext4_map_blocks *, int); typedef void (*btf_trace_ext4_ind_map_blocks_exit)(void *, struct inode *, unsigned int, struct ext4_map_blocks *, int); typedef void (*btf_trace_ext4_ext_load_extent)(void *, struct inode *, ext4_lblk_t, ext4_fsblk_t); typedef void (*btf_trace_ext4_load_inode)(void *, struct super_block *, unsigned long); typedef void (*btf_trace_ext4_journal_start_sb)(void *, struct super_block *, int, int, int, int, unsigned long); typedef void (*btf_trace_ext4_journal_start_inode)(void *, struct inode *, int, int, int, int, unsigned long); typedef void (*btf_trace_ext4_journal_start_reserved)(void *, struct super_block *, int, unsigned long); typedef void (*btf_trace_ext4_trim_extent)(void *, struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); typedef void (*btf_trace_ext4_trim_all_free)(void *, struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); typedef void (*btf_trace_ext4_ext_handle_unwritten_extents)(void *, struct inode *, struct ext4_map_blocks *, int, unsigned int, ext4_fsblk_t); typedef void (*btf_trace_ext4_get_implied_cluster_alloc_exit)(void *, struct super_block *, struct ext4_map_blocks *, int); typedef void (*btf_trace_ext4_ext_show_extent)(void *, struct inode *, ext4_lblk_t, ext4_fsblk_t, unsigned short); typedef void (*btf_trace_ext4_remove_blocks)(void *, struct inode *, struct ext4_extent *, ext4_lblk_t, ext4_fsblk_t, struct partial_cluster *); typedef void (*btf_trace_ext4_ext_rm_leaf)(void *, struct inode *, ext4_lblk_t, struct ext4_extent *, struct partial_cluster *); typedef void (*btf_trace_ext4_ext_rm_idx)(void *, struct inode *, ext4_fsblk_t); typedef void (*btf_trace_ext4_ext_remove_space)(void *, struct inode *, ext4_lblk_t, 
ext4_lblk_t, int); typedef void (*btf_trace_ext4_ext_remove_space_done)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t, int, struct partial_cluster *, __le16); typedef void (*btf_trace_ext4_es_insert_extent)(void *, struct inode *, struct extent_status *); typedef void (*btf_trace_ext4_es_cache_extent)(void *, struct inode *, struct extent_status *); typedef void (*btf_trace_ext4_es_remove_extent)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t); typedef void (*btf_trace_ext4_es_find_extent_range_enter)(void *, struct inode *, ext4_lblk_t); typedef void (*btf_trace_ext4_es_find_extent_range_exit)(void *, struct inode *, struct extent_status *); typedef void (*btf_trace_ext4_es_lookup_extent_enter)(void *, struct inode *, ext4_lblk_t); typedef void (*btf_trace_ext4_es_lookup_extent_exit)(void *, struct inode *, struct extent_status *, int); typedef void (*btf_trace_ext4_es_shrink_count)(void *, struct super_block *, int, int); typedef void (*btf_trace_ext4_es_shrink_scan_enter)(void *, struct super_block *, int, int); typedef void (*btf_trace_ext4_es_shrink_scan_exit)(void *, struct super_block *, int, int); typedef void (*btf_trace_ext4_collapse_range)(void *, struct inode *, loff_t, loff_t); typedef void (*btf_trace_ext4_insert_range)(void *, struct inode *, loff_t, loff_t); typedef void (*btf_trace_ext4_es_shrink)(void *, struct super_block *, int, u64, int, int); typedef void (*btf_trace_ext4_es_insert_delayed_block)(void *, struct inode *, struct extent_status *, bool); typedef void (*btf_trace_ext4_fsmap_low_key)(void *, struct super_block *, u32, u32, u64, u64, u64); typedef void (*btf_trace_ext4_fsmap_high_key)(void *, struct super_block *, u32, u32, u64, u64, u64); typedef void (*btf_trace_ext4_fsmap_mapping)(void *, struct super_block *, u32, u32, u64, u64, u64); typedef void (*btf_trace_ext4_getfsmap_low_key)(void *, struct super_block *, struct ext4_fsmap *); typedef void (*btf_trace_ext4_getfsmap_high_key)(void *, struct super_block *, struct ext4_fsmap *); typedef void (*btf_trace_ext4_getfsmap_mapping)(void *, struct super_block *, struct ext4_fsmap *); typedef void (*btf_trace_ext4_shutdown)(void *, struct super_block *, unsigned long); typedef void (*btf_trace_ext4_error)(void *, struct super_block *, const char *, unsigned int); typedef void (*btf_trace_ext4_prefetch_bitmaps)(void *, struct super_block *, ext4_group_t, ext4_group_t, unsigned int); typedef void (*btf_trace_ext4_lazy_itable_init)(void *, struct super_block *, ext4_group_t); typedef void (*btf_trace_ext4_fc_replay_scan)(void *, struct super_block *, int, int); typedef void (*btf_trace_ext4_fc_replay)(void *, struct super_block *, int, int, int, int); typedef void (*btf_trace_ext4_fc_commit_start)(void *, struct super_block *, tid_t); typedef void (*btf_trace_ext4_fc_commit_stop)(void *, struct super_block *, int, int, tid_t); typedef void (*btf_trace_ext4_fc_stats)(void *, struct super_block *); typedef void (*btf_trace_ext4_fc_track_create)(void *, handle_t *, struct inode *, struct dentry *, int); typedef void (*btf_trace_ext4_fc_track_link)(void *, handle_t *, struct inode *, struct dentry *, int); typedef void (*btf_trace_ext4_fc_track_unlink)(void *, handle_t *, struct inode *, struct dentry *, int); typedef void (*btf_trace_ext4_fc_track_inode)(void *, handle_t *, struct inode *, int); typedef void (*btf_trace_ext4_fc_track_range)(void *, handle_t *, struct inode *, long, long, int); typedef void (*btf_trace_ext4_fc_cleanup)(void *, journal_t *, int, tid_t); typedef void 
(*btf_trace_ext4_update_sb)(void *, struct super_block *, ext4_fsblk_t, unsigned int); struct ext4_lazy_init { unsigned long li_state; struct list_head li_request_list; struct mutex li_list_mtx; }; struct ext4_err_translation { int code; int errno; }; struct mount_opts { int token; int mount_opt; int flags; }; struct ext4_sb_encodings { __u16 magic; char *name; unsigned int version; }; enum stat_group { STAT_READ = 0, STAT_WRITE = 1, STAT_DISCARD = 2, STAT_FLUSH = 3, NR_STAT_GROUPS = 4, }; enum { Opt_bsd_df = 0, Opt_minix_df = 1, Opt_grpid = 2, Opt_nogrpid = 3, Opt_resgid = 4, Opt_resuid = 5, Opt_sb = 6, Opt_nouid32 = 7, Opt_debug = 8, Opt_removed = 9, Opt_user_xattr = 10, Opt_acl = 11, Opt_auto_da_alloc = 12, Opt_noauto_da_alloc = 13, Opt_noload = 14, Opt_commit = 15, Opt_min_batch_time = 16, Opt_max_batch_time = 17, Opt_journal_dev = 18, Opt_journal_path = 19, Opt_journal_checksum = 20, Opt_journal_async_commit = 21, Opt_abort = 22, Opt_data_journal = 23, Opt_data_ordered = 24, Opt_data_writeback = 25, Opt_data_err_abort = 26, Opt_data_err_ignore = 27, Opt_test_dummy_encryption = 28, Opt_inlinecrypt = 29, Opt_usrjquota = 30, Opt_grpjquota = 31, Opt_quota___2 = 32, Opt_noquota = 33, Opt_barrier = 34, Opt_nobarrier = 35, Opt_err___2 = 36, Opt_usrquota___2 = 37, Opt_grpquota___2 = 38, Opt_prjquota = 39, Opt_dax = 40, Opt_dax_always = 41, Opt_dax_inode = 42, Opt_dax_never = 43, Opt_stripe = 44, Opt_delalloc = 45, Opt_nodelalloc = 46, Opt_warn_on_error = 47, Opt_nowarn_on_error = 48, Opt_mblk_io_submit = 49, Opt_debug_want_extra_isize = 50, Opt_nomblk_io_submit = 51, Opt_block_validity = 52, Opt_noblock_validity = 53, Opt_inode_readahead_blks = 54, Opt_journal_ioprio = 55, Opt_dioread_nolock = 56, Opt_dioread_lock = 57, Opt_discard = 58, Opt_nodiscard = 59, Opt_init_itable = 60, Opt_noinit_itable = 61, Opt_max_dir_size_kb = 62, Opt_nojournal_checksum = 63, Opt_nombcache = 64, Opt_no_prefetch_block_bitmaps = 65, Opt_mb_optimize_scan = 66, Opt_errors = 67, Opt_data = 68, Opt_data_err = 69, Opt_jqfmt = 70, Opt_dax_type = 71, }; enum { IOPRIO_CLASS_NONE = 0, IOPRIO_CLASS_RT = 1, IOPRIO_CLASS_BE = 2, IOPRIO_CLASS_IDLE = 3, IOPRIO_CLASS_INVALID = 7, }; enum { IOPRIO_HINT_NONE = 0, IOPRIO_HINT_DEV_DURATION_LIMIT_1 = 1, IOPRIO_HINT_DEV_DURATION_LIMIT_2 = 2, IOPRIO_HINT_DEV_DURATION_LIMIT_3 = 3, IOPRIO_HINT_DEV_DURATION_LIMIT_4 = 4, IOPRIO_HINT_DEV_DURATION_LIMIT_5 = 5, IOPRIO_HINT_DEV_DURATION_LIMIT_6 = 6, IOPRIO_HINT_DEV_DURATION_LIMIT_7 = 7, }; enum { I_DATA_SEM_NORMAL = 0, I_DATA_SEM_OTHER = 1, I_DATA_SEM_QUOTA = 2, I_DATA_SEM_EA = 3, }; struct trace_event_raw_ext4_other_inode_update_time { struct trace_entry ent; dev_t dev; ino_t ino; ino_t orig_ino; uid_t uid; gid_t gid; __u16 mode; char __data[0]; }; struct trace_event_raw_ext4_free_inode { struct trace_entry ent; dev_t dev; ino_t ino; uid_t uid; gid_t gid; __u64 blocks; __u16 mode; char __data[0]; }; struct trace_event_raw_ext4_request_inode { struct trace_entry ent; dev_t dev; ino_t dir; __u16 mode; char __data[0]; }; struct trace_event_raw_ext4_allocate_inode { struct trace_entry ent; dev_t dev; ino_t ino; ino_t dir; __u16 mode; char __data[0]; }; struct trace_event_raw_ext4_evict_inode { struct trace_entry ent; dev_t dev; ino_t ino; int nlink; char __data[0]; }; struct trace_event_raw_ext4_drop_inode { struct trace_entry ent; dev_t dev; ino_t ino; int drop; char __data[0]; }; struct trace_event_raw_ext4_nfs_commit_metadata { struct trace_entry ent; dev_t dev; ino_t ino; char __data[0]; }; struct trace_event_raw_ext4_mark_inode_dirty { 
struct trace_entry ent; dev_t dev; ino_t ino; unsigned long ip; char __data[0]; }; struct trace_event_raw_ext4_begin_ordered_truncate { struct trace_entry ent; dev_t dev; ino_t ino; loff_t new_size; char __data[0]; }; struct trace_event_raw_ext4__write_begin { struct trace_entry ent; dev_t dev; ino_t ino; loff_t pos; unsigned int len; char __data[0]; }; struct trace_event_raw_ext4__write_end { struct trace_entry ent; dev_t dev; ino_t ino; loff_t pos; unsigned int len; unsigned int copied; char __data[0]; }; struct trace_event_raw_ext4_writepages { struct trace_entry ent; dev_t dev; ino_t ino; long nr_to_write; long pages_skipped; loff_t range_start; loff_t range_end; unsigned long writeback_index; int sync_mode; char for_kupdate; char range_cyclic; char __data[0]; }; struct trace_event_raw_ext4_da_write_pages { struct trace_entry ent; dev_t dev; ino_t ino; unsigned long first_page; long nr_to_write; int sync_mode; char __data[0]; }; struct trace_event_raw_ext4_da_write_pages_extent { struct trace_entry ent; dev_t dev; ino_t ino; __u64 lblk; __u32 len; __u32 flags; char __data[0]; }; struct trace_event_raw_ext4_writepages_result { struct trace_entry ent; dev_t dev; ino_t ino; int ret; int pages_written; long pages_skipped; unsigned long writeback_index; int sync_mode; char __data[0]; }; struct trace_event_raw_ext4__folio_op { struct trace_entry ent; dev_t dev; ino_t ino; unsigned long index; char __data[0]; }; struct trace_event_raw_ext4_invalidate_folio_op { struct trace_entry ent; dev_t dev; ino_t ino; unsigned long index; size_t offset; size_t length; char __data[0]; }; struct trace_event_raw_ext4_discard_blocks { struct trace_entry ent; dev_t dev; __u64 blk; __u64 count; char __data[0]; }; struct trace_event_raw_ext4__mb_new_pa { struct trace_entry ent; dev_t dev; ino_t ino; __u64 pa_pstart; __u64 pa_lstart; __u32 pa_len; char __data[0]; }; struct trace_event_raw_ext4_mb_release_inode_pa { struct trace_entry ent; dev_t dev; ino_t ino; __u64 block; __u32 count; char __data[0]; }; struct trace_event_raw_ext4_mb_release_group_pa { struct trace_entry ent; dev_t dev; __u64 pa_pstart; __u32 pa_len; char __data[0]; }; struct trace_event_raw_ext4_discard_preallocations { struct trace_entry ent; dev_t dev; ino_t ino; unsigned int len; unsigned int needed; char __data[0]; }; struct trace_event_raw_ext4_mb_discard_preallocations { struct trace_entry ent; dev_t dev; int needed; char __data[0]; }; struct trace_event_raw_ext4_request_blocks { struct trace_entry ent; dev_t dev; ino_t ino; unsigned int len; __u32 logical; __u32 lleft; __u32 lright; __u64 goal; __u64 pleft; __u64 pright; unsigned int flags; char __data[0]; }; struct trace_event_raw_ext4_allocate_blocks { struct trace_entry ent; dev_t dev; ino_t ino; __u64 block; unsigned int len; __u32 logical; __u32 lleft; __u32 lright; __u64 goal; __u64 pleft; __u64 pright; unsigned int flags; char __data[0]; }; struct trace_event_raw_ext4_free_blocks { struct trace_entry ent; dev_t dev; ino_t ino; __u64 block; unsigned long count; int flags; __u16 mode; char __data[0]; }; struct trace_event_raw_ext4_sync_file_enter { struct trace_entry ent; dev_t dev; ino_t ino; ino_t parent; int datasync; char __data[0]; }; struct trace_event_raw_ext4_sync_file_exit { struct trace_entry ent; dev_t dev; ino_t ino; int ret; char __data[0]; }; struct trace_event_raw_ext4_sync_fs { struct trace_entry ent; dev_t dev; int wait; char __data[0]; }; struct trace_event_raw_ext4_alloc_da_blocks { struct trace_entry ent; dev_t dev; ino_t ino; unsigned int data_blocks; char 
__data[0]; }; struct trace_event_raw_ext4_mballoc_alloc { struct trace_entry ent; dev_t dev; ino_t ino; __u32 orig_logical; int orig_start; __u32 orig_group; int orig_len; __u32 goal_logical; int goal_start; __u32 goal_group; int goal_len; __u32 result_logical; int result_start; __u32 result_group; int result_len; __u16 found; __u16 groups; __u16 buddy; __u16 flags; __u16 tail; __u8 cr; char __data[0]; }; struct trace_event_raw_ext4_mballoc_prealloc { struct trace_entry ent; dev_t dev; ino_t ino; __u32 orig_logical; int orig_start; __u32 orig_group; int orig_len; __u32 result_logical; int result_start; __u32 result_group; int result_len; char __data[0]; }; struct trace_event_raw_ext4__mballoc { struct trace_entry ent; dev_t dev; ino_t ino; int result_start; __u32 result_group; int result_len; char __data[0]; }; struct trace_event_raw_ext4_forget { struct trace_entry ent; dev_t dev; ino_t ino; __u64 block; int is_metadata; __u16 mode; char __data[0]; }; struct trace_event_raw_ext4_da_update_reserve_space { struct trace_entry ent; dev_t dev; ino_t ino; __u64 i_blocks; int used_blocks; int reserved_data_blocks; int quota_claim; __u16 mode; char __data[0]; }; struct trace_event_raw_ext4_da_reserve_space { struct trace_entry ent; dev_t dev; ino_t ino; __u64 i_blocks; int reserved_data_blocks; __u16 mode; char __data[0]; }; struct trace_event_raw_ext4_da_release_space { struct trace_entry ent; dev_t dev; ino_t ino; __u64 i_blocks; int freed_blocks; int reserved_data_blocks; __u16 mode; char __data[0]; }; struct trace_event_raw_ext4__bitmap_load { struct trace_entry ent; dev_t dev; __u32 group; char __data[0]; }; struct trace_event_raw_ext4_read_block_bitmap_load { struct trace_entry ent; dev_t dev; __u32 group; bool prefetch; char __data[0]; }; struct trace_event_raw_ext4__fallocate_mode { struct trace_entry ent; dev_t dev; ino_t ino; loff_t offset; loff_t len; int mode; char __data[0]; }; struct trace_event_raw_ext4_fallocate_exit { struct trace_entry ent; dev_t dev; ino_t ino; loff_t pos; unsigned int blocks; int ret; char __data[0]; }; struct trace_event_raw_ext4_unlink_enter { struct trace_entry ent; dev_t dev; ino_t ino; ino_t parent; loff_t size; char __data[0]; }; struct trace_event_raw_ext4_unlink_exit { struct trace_entry ent; dev_t dev; ino_t ino; int ret; char __data[0]; }; struct trace_event_raw_ext4__truncate { struct trace_entry ent; dev_t dev; ino_t ino; __u64 blocks; char __data[0]; }; struct trace_event_raw_ext4_ext_convert_to_initialized_enter { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t m_lblk; unsigned int m_len; ext4_lblk_t u_lblk; unsigned int u_len; ext4_fsblk_t u_pblk; char __data[0]; }; struct trace_event_raw_ext4_ext_convert_to_initialized_fastpath { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t m_lblk; unsigned int m_len; ext4_lblk_t u_lblk; unsigned int u_len; ext4_fsblk_t u_pblk; ext4_lblk_t i_lblk; unsigned int i_len; ext4_fsblk_t i_pblk; char __data[0]; }; struct trace_event_raw_ext4__map_blocks_enter { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t lblk; unsigned int len; unsigned int flags; char __data[0]; }; struct trace_event_raw_ext4__map_blocks_exit { struct trace_entry ent; dev_t dev; ino_t ino; unsigned int flags; ext4_fsblk_t pblk; ext4_lblk_t lblk; unsigned int len; unsigned int mflags; int ret; char __data[0]; }; struct trace_event_raw_ext4_ext_load_extent { struct trace_entry ent; dev_t dev; ino_t ino; ext4_fsblk_t pblk; ext4_lblk_t lblk; char __data[0]; }; struct trace_event_raw_ext4_load_inode { struct trace_entry 
ent; dev_t dev; ino_t ino; char __data[0]; }; struct trace_event_raw_ext4_journal_start_sb { struct trace_entry ent; dev_t dev; unsigned long ip; int blocks; int rsv_blocks; int revoke_creds; int type; char __data[0]; }; struct trace_event_raw_ext4_journal_start_inode { struct trace_entry ent; unsigned long ino; dev_t dev; unsigned long ip; int blocks; int rsv_blocks; int revoke_creds; int type; char __data[0]; }; struct trace_event_raw_ext4_journal_start_reserved { struct trace_entry ent; dev_t dev; unsigned long ip; int blocks; char __data[0]; }; struct trace_event_raw_ext4__trim { struct trace_entry ent; int dev_major; int dev_minor; __u32 group; int start; int len; char __data[0]; }; struct trace_event_raw_ext4_ext_handle_unwritten_extents { struct trace_entry ent; dev_t dev; ino_t ino; int flags; ext4_lblk_t lblk; ext4_fsblk_t pblk; unsigned int len; unsigned int allocated; ext4_fsblk_t newblk; char __data[0]; }; struct trace_event_raw_ext4_get_implied_cluster_alloc_exit { struct trace_entry ent; dev_t dev; unsigned int flags; ext4_lblk_t lblk; ext4_fsblk_t pblk; unsigned int len; int ret; char __data[0]; }; struct trace_event_raw_ext4_ext_show_extent { struct trace_entry ent; dev_t dev; ino_t ino; ext4_fsblk_t pblk; ext4_lblk_t lblk; unsigned short len; char __data[0]; }; struct trace_event_raw_ext4_remove_blocks { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t from; ext4_lblk_t to; ext4_fsblk_t ee_pblk; ext4_lblk_t ee_lblk; unsigned short ee_len; ext4_fsblk_t pc_pclu; ext4_lblk_t pc_lblk; int pc_state; char __data[0]; }; struct trace_event_raw_ext4_ext_rm_leaf { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t start; ext4_lblk_t ee_lblk; ext4_fsblk_t ee_pblk; short ee_len; ext4_fsblk_t pc_pclu; ext4_lblk_t pc_lblk; int pc_state; char __data[0]; }; struct trace_event_raw_ext4_ext_rm_idx { struct trace_entry ent; dev_t dev; ino_t ino; ext4_fsblk_t pblk; char __data[0]; }; struct trace_event_raw_ext4_ext_remove_space { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t start; ext4_lblk_t end; int depth; char __data[0]; }; struct trace_event_raw_ext4_ext_remove_space_done { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t start; ext4_lblk_t end; int depth; ext4_fsblk_t pc_pclu; ext4_lblk_t pc_lblk; int pc_state; unsigned short eh_entries; char __data[0]; }; struct trace_event_raw_ext4__es_extent { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t lblk; ext4_lblk_t len; ext4_fsblk_t pblk; char status; char __data[0]; }; struct trace_event_raw_ext4_es_remove_extent { struct trace_entry ent; dev_t dev; ino_t ino; loff_t lblk; loff_t len; char __data[0]; }; struct trace_event_raw_ext4_es_find_extent_range_enter { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t lblk; char __data[0]; }; struct trace_event_raw_ext4_es_find_extent_range_exit { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t lblk; ext4_lblk_t len; ext4_fsblk_t pblk; char status; char __data[0]; }; struct trace_event_raw_ext4_es_lookup_extent_enter { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t lblk; char __data[0]; }; struct trace_event_raw_ext4_es_lookup_extent_exit { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t lblk; ext4_lblk_t len; ext4_fsblk_t pblk; char status; int found; char __data[0]; }; struct trace_event_raw_ext4__es_shrink_enter { struct trace_entry ent; dev_t dev; int nr_to_scan; int cache_cnt; char __data[0]; }; struct trace_event_raw_ext4_es_shrink_scan_exit { struct trace_entry ent; dev_t dev; int nr_shrunk; int cache_cnt; 
char __data[0]; }; struct trace_event_raw_ext4_collapse_range { struct trace_entry ent; dev_t dev; ino_t ino; loff_t offset; loff_t len; char __data[0]; }; struct trace_event_raw_ext4_insert_range { struct trace_entry ent; dev_t dev; ino_t ino; loff_t offset; loff_t len; char __data[0]; }; struct trace_event_raw_ext4_es_shrink { struct trace_entry ent; dev_t dev; int nr_shrunk; unsigned long long scan_time; int nr_skipped; int retried; char __data[0]; }; struct trace_event_raw_ext4_es_insert_delayed_block { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t lblk; ext4_lblk_t len; ext4_fsblk_t pblk; char status; bool allocated; char __data[0]; }; struct trace_event_raw_ext4_fsmap_class { struct trace_entry ent; dev_t dev; dev_t keydev; u32 agno; u64 bno; u64 len; u64 owner; char __data[0]; }; struct trace_event_raw_ext4_getfsmap_class { struct trace_entry ent; dev_t dev; dev_t keydev; u64 block; u64 len; u64 owner; u64 flags; char __data[0]; }; struct trace_event_raw_ext4_shutdown { struct trace_entry ent; dev_t dev; unsigned int flags; char __data[0]; }; struct trace_event_raw_ext4_error { struct trace_entry ent; dev_t dev; const char *function; unsigned int line; char __data[0]; }; struct trace_event_raw_ext4_prefetch_bitmaps { struct trace_entry ent; dev_t dev; __u32 group; __u32 next; __u32 ios; char __data[0]; }; struct trace_event_raw_ext4_lazy_itable_init { struct trace_entry ent; dev_t dev; __u32 group; char __data[0]; }; struct trace_event_raw_ext4_fc_replay_scan { struct trace_entry ent; dev_t dev; int error; int off; char __data[0]; }; struct trace_event_raw_ext4_fc_replay { struct trace_entry ent; dev_t dev; int tag; int ino; int priv1; int priv2; char __data[0]; }; struct trace_event_raw_ext4_fc_commit_start { struct trace_entry ent; dev_t dev; tid_t tid; char __data[0]; }; struct trace_event_raw_ext4_fc_commit_stop { struct trace_entry ent; dev_t dev; int nblks; int reason; int num_fc; int num_fc_ineligible; int nblks_agg; tid_t tid; char __data[0]; }; struct trace_event_raw_ext4_fc_stats { struct trace_entry ent; dev_t dev; unsigned int fc_ineligible_rc[10]; unsigned long fc_commits; unsigned long fc_ineligible_commits; unsigned long fc_numblks; char __data[0]; }; struct trace_event_raw_ext4_fc_track_dentry { struct trace_entry ent; dev_t dev; tid_t t_tid; ino_t i_ino; tid_t i_sync_tid; int error; char __data[0]; }; struct trace_event_raw_ext4_fc_track_inode { struct trace_entry ent; dev_t dev; tid_t t_tid; ino_t i_ino; tid_t i_sync_tid; int error; char __data[0]; }; struct trace_event_raw_ext4_fc_track_range { struct trace_entry ent; dev_t dev; tid_t t_tid; ino_t i_ino; tid_t i_sync_tid; long start; long end; int error; char __data[0]; }; struct trace_event_raw_ext4_fc_cleanup { struct trace_entry ent; dev_t dev; int j_fc_off; int full; tid_t tid; char __data[0]; }; struct trace_event_raw_ext4_update_sb { struct trace_entry ent; dev_t dev; ext4_fsblk_t fsblk; unsigned int flags; char __data[0]; }; struct ext4_journal_cb_entry { struct list_head jce_list; void (*jce_func)(struct super_block *, struct ext4_journal_cb_entry *, int); }; struct ext4_fs_context { char *s_qf_names[3]; struct fscrypt_dummy_policy dummy_enc_policy; int s_jquota_fmt; unsigned short qname_spec; unsigned long vals_s_flags; unsigned long mask_s_flags; unsigned long journal_devnum; unsigned long s_commit_interval; unsigned long s_stripe; unsigned int s_inode_readahead_blks; unsigned int s_want_extra_isize; unsigned int s_li_wait_mult; unsigned int s_max_dir_size_kb; unsigned int journal_ioprio; 
unsigned int vals_s_mount_opt; unsigned int mask_s_mount_opt; unsigned int vals_s_mount_opt2; unsigned int mask_s_mount_opt2; unsigned int opt_flags; unsigned int spec; u32 s_max_batch_time; u32 s_min_batch_time; kuid_t s_resuid; kgid_t s_resgid; ext4_fsblk_t s_sb_block; }; struct ext4_mount_options { unsigned long s_mount_opt; unsigned long s_mount_opt2; kuid_t s_resuid; kgid_t s_resgid; unsigned long s_commit_interval; u32 s_min_batch_time; u32 s_max_batch_time; int s_jquota_fmt; char *s_qf_names[3]; }; struct trace_event_data_offsets_ext4_other_inode_update_time {}; struct trace_event_data_offsets_ext4_free_inode {}; struct trace_event_data_offsets_ext4_request_inode {}; struct trace_event_data_offsets_ext4_allocate_inode {}; struct trace_event_data_offsets_ext4_evict_inode {}; struct trace_event_data_offsets_ext4_drop_inode {}; struct trace_event_data_offsets_ext4_nfs_commit_metadata {}; struct trace_event_data_offsets_ext4_mark_inode_dirty {}; struct trace_event_data_offsets_ext4_begin_ordered_truncate {}; struct trace_event_data_offsets_ext4__write_begin {}; struct trace_event_data_offsets_ext4__write_end {}; struct trace_event_data_offsets_ext4_writepages {}; struct trace_event_data_offsets_ext4_da_write_pages {}; struct trace_event_data_offsets_ext4_da_write_pages_extent {}; struct trace_event_data_offsets_ext4_writepages_result {}; struct trace_event_data_offsets_ext4__folio_op {}; struct trace_event_data_offsets_ext4_invalidate_folio_op {}; struct trace_event_data_offsets_ext4_discard_blocks {}; struct trace_event_data_offsets_ext4__mb_new_pa {}; struct trace_event_data_offsets_ext4_mb_release_inode_pa {}; struct trace_event_data_offsets_ext4_mb_release_group_pa {}; struct trace_event_data_offsets_ext4_discard_preallocations {}; struct trace_event_data_offsets_ext4_mb_discard_preallocations {}; struct trace_event_data_offsets_ext4_request_blocks {}; struct trace_event_data_offsets_ext4_allocate_blocks {}; struct trace_event_data_offsets_ext4_free_blocks {}; struct trace_event_data_offsets_ext4_sync_file_enter {}; struct trace_event_data_offsets_ext4_sync_file_exit {}; struct trace_event_data_offsets_ext4_sync_fs {}; struct trace_event_data_offsets_ext4_alloc_da_blocks {}; struct trace_event_data_offsets_ext4_mballoc_alloc {}; struct trace_event_data_offsets_ext4_mballoc_prealloc {}; struct trace_event_data_offsets_ext4__mballoc {}; struct trace_event_data_offsets_ext4_forget {}; struct trace_event_data_offsets_ext4_da_update_reserve_space {}; struct trace_event_data_offsets_ext4_da_reserve_space {}; struct trace_event_data_offsets_ext4_da_release_space {}; struct trace_event_data_offsets_ext4__bitmap_load {}; struct trace_event_data_offsets_ext4_read_block_bitmap_load {}; struct trace_event_data_offsets_ext4__fallocate_mode {}; struct trace_event_data_offsets_ext4_fallocate_exit {}; struct trace_event_data_offsets_ext4_unlink_enter {}; struct trace_event_data_offsets_ext4_unlink_exit {}; struct trace_event_data_offsets_ext4__truncate {}; struct trace_event_data_offsets_ext4_ext_convert_to_initialized_enter {}; struct trace_event_data_offsets_ext4_ext_convert_to_initialized_fastpath {}; struct trace_event_data_offsets_ext4__map_blocks_enter {}; struct trace_event_data_offsets_ext4__map_blocks_exit {}; struct trace_event_data_offsets_ext4_ext_load_extent {}; struct trace_event_data_offsets_ext4_load_inode {}; struct trace_event_data_offsets_ext4_journal_start_sb {}; struct trace_event_data_offsets_ext4_journal_start_inode {}; struct 
trace_event_data_offsets_ext4_journal_start_reserved {}; struct trace_event_data_offsets_ext4__trim {}; struct trace_event_data_offsets_ext4_ext_handle_unwritten_extents {}; struct trace_event_data_offsets_ext4_get_implied_cluster_alloc_exit {}; struct trace_event_data_offsets_ext4_ext_show_extent {}; struct trace_event_data_offsets_ext4_remove_blocks {}; struct trace_event_data_offsets_ext4_ext_rm_leaf {}; struct trace_event_data_offsets_ext4_ext_rm_idx {}; struct trace_event_data_offsets_ext4_ext_remove_space {}; struct trace_event_data_offsets_ext4_ext_remove_space_done {}; struct trace_event_data_offsets_ext4__es_extent {}; struct trace_event_data_offsets_ext4_es_remove_extent {}; struct trace_event_data_offsets_ext4_es_find_extent_range_enter {}; struct trace_event_data_offsets_ext4_es_find_extent_range_exit {}; struct trace_event_data_offsets_ext4_es_lookup_extent_enter {}; struct trace_event_data_offsets_ext4_es_lookup_extent_exit {}; struct trace_event_data_offsets_ext4__es_shrink_enter {}; struct trace_event_data_offsets_ext4_es_shrink_scan_exit {}; struct trace_event_data_offsets_ext4_collapse_range {}; struct trace_event_data_offsets_ext4_insert_range {}; struct trace_event_data_offsets_ext4_es_shrink {}; struct trace_event_data_offsets_ext4_es_insert_delayed_block {}; struct trace_event_data_offsets_ext4_fsmap_class {}; struct trace_event_data_offsets_ext4_getfsmap_class {}; struct trace_event_data_offsets_ext4_shutdown {}; struct trace_event_data_offsets_ext4_error {}; struct trace_event_data_offsets_ext4_prefetch_bitmaps {}; struct trace_event_data_offsets_ext4_lazy_itable_init {}; struct trace_event_data_offsets_ext4_fc_replay_scan {}; struct trace_event_data_offsets_ext4_fc_replay {}; struct trace_event_data_offsets_ext4_fc_commit_start {}; struct trace_event_data_offsets_ext4_fc_commit_stop {}; struct trace_event_data_offsets_ext4_fc_stats {}; struct trace_event_data_offsets_ext4_fc_track_dentry {}; struct trace_event_data_offsets_ext4_fc_track_inode {}; struct trace_event_data_offsets_ext4_fc_track_range {}; struct trace_event_data_offsets_ext4_fc_cleanup {}; struct trace_event_data_offsets_ext4_update_sb {}; struct ext4_attr { struct attribute attr; short attr_id; short attr_ptr; unsigned short attr_size; union { int offset; void *explicit_ptr; } u; }; enum { attr_noop = 0, attr_delayed_allocation_blocks = 1, attr_session_write_kbytes = 2, attr_lifetime_write_kbytes = 3, attr_reserved_clusters = 4, attr_sra_exceeded_retry_limit = 5, attr_inode_readahead = 6, attr_trigger_test_error = 7, attr_first_error_time = 8, attr_last_error_time = 9, attr_feature = 10, attr_pointer_ui = 11, attr_pointer_ul = 12, attr_pointer_u64 = 13, attr_pointer_u8 = 14, attr_pointer_string = 15, attr_pointer_atomic = 16, attr_journal_task = 17, }; enum { ptr_explicit = 0, ptr_ext4_sb_info_offset = 1, ptr_ext4_super_block_offset = 2, }; struct ext4_xattr_header { __le32 h_magic; __le32 h_refcount; __le32 h_blocks; __le32 h_hash; __le32 h_checksum; __u32 h_reserved[3]; }; struct ext4_xattr_block_find { struct ext4_xattr_search s; struct buffer_head *bh; }; enum { EXT4_FC_STATUS_OK = 0, EXT4_FC_STATUS_INELIGIBLE = 1, EXT4_FC_STATUS_SKIPPED = 2, EXT4_FC_STATUS_FAILED = 3, }; struct ext4_fc_dentry_update { int fcd_op; int fcd_parent; int fcd_ino; struct qstr fcd_name; unsigned char fcd_iname[32]; struct list_head fcd_list; struct list_head fcd_dilist; }; struct __track_dentry_update_args { struct dentry *dentry; int op; }; struct __track_range_args { ext4_lblk_t start; ext4_lblk_t end; }; struct 
ext4_fc_tl { __le16 fc_tag; __le16 fc_len; }; struct ext4_fc_head { __le32 fc_features; __le32 fc_tid; }; struct ext4_fc_tail { __le32 fc_tid; __le32 fc_crc; }; struct ext4_fc_tl_mem { u16 fc_tag; u16 fc_len; }; struct ext4_fc_add_range { __le32 fc_ino; __u8 fc_ex[12]; }; struct ext4_fc_del_range { __le32 fc_ino; __le32 fc_lblk; __le32 fc_len; }; struct dentry_info_args { int parent_ino; int dname_len; int ino; int inode_len; char *dname; }; struct ext4_fc_dentry_info { __le32 fc_parent_ino; __le32 fc_ino; __u8 fc_dname[0]; }; struct ext4_fc_inode { __le32 fc_ino; __u8 fc_raw_inode[0]; }; struct ext4_orphan_block_tail { __le32 ob_magic; __le32 ob_checksum; }; typedef struct { __le32 a_version; } ext4_acl_header; typedef struct { __le16 e_tag; __le16 e_perm; __le32 e_id; } ext4_acl_entry; struct journal_block_tag_s { __be32 t_blocknr; __be16 t_checksum; __be16 t_flags; __be32 t_blocknr_high; }; typedef struct journal_block_tag_s journal_block_tag_t; struct journal_block_tag3_s { __be32 t_blocknr; __be32 t_flags; __be32 t_blocknr_high; __be32 t_checksum; }; typedef struct journal_block_tag3_s journal_block_tag3_t; struct commit_header { __be32 h_magic; __be32 h_blocktype; __be32 h_sequence; unsigned char h_chksum_type; unsigned char h_chksum_size; unsigned char h_padding[2]; __be32 h_chksum[8]; __be64 h_commit_sec; __be32 h_commit_nsec; }; struct jbd2_journal_block_tail { __be32 t_checksum; }; struct jbd2_journal_revoke_header_s { journal_header_t r_header; __be32 r_count; }; typedef struct jbd2_journal_revoke_header_s jbd2_journal_revoke_header_t; struct recovery_info { tid_t start_transaction; tid_t end_transaction; unsigned long head_block; int nr_replays; int nr_revokes; int nr_revoke_hits; }; enum shrink_type { SHRINK_DESTROY = 0, SHRINK_BUSY_STOP = 1, SHRINK_BUSY_SKIP = 2, }; struct jbd2_revoke_table_s { int hash_size; int hash_shift; struct list_head *hash_table; }; struct jbd2_revoke_record_s { struct list_head hash; tid_t sequence; unsigned long long blocknr; }; typedef void (*btf_trace_jbd2_checkpoint)(void *, journal_t *, int); typedef void (*btf_trace_jbd2_start_commit)(void *, journal_t *, transaction_t *); typedef void (*btf_trace_jbd2_commit_locking)(void *, journal_t *, transaction_t *); typedef void (*btf_trace_jbd2_commit_flushing)(void *, journal_t *, transaction_t *); typedef void (*btf_trace_jbd2_commit_logging)(void *, journal_t *, transaction_t *); typedef void (*btf_trace_jbd2_drop_transaction)(void *, journal_t *, transaction_t *); typedef void (*btf_trace_jbd2_end_commit)(void *, journal_t *, transaction_t *); typedef void (*btf_trace_jbd2_submit_inode_data)(void *, struct inode *); typedef void (*btf_trace_jbd2_handle_start)(void *, dev_t, tid_t, unsigned int, unsigned int, int); typedef void (*btf_trace_jbd2_handle_restart)(void *, dev_t, tid_t, unsigned int, unsigned int, int); typedef void (*btf_trace_jbd2_handle_extend)(void *, dev_t, tid_t, unsigned int, unsigned int, int, int); typedef void (*btf_trace_jbd2_handle_stats)(void *, dev_t, tid_t, unsigned int, unsigned int, int, int, int, int); typedef void (*btf_trace_jbd2_run_stats)(void *, dev_t, tid_t, struct transaction_run_stats_s *); typedef void (*btf_trace_jbd2_checkpoint_stats)(void *, dev_t, tid_t, struct transaction_chp_stats_s *); typedef void (*btf_trace_jbd2_update_log_tail)(void *, journal_t *, tid_t, unsigned long, unsigned long); typedef void (*btf_trace_jbd2_write_superblock)(void *, journal_t *, blk_opf_t); typedef void (*btf_trace_jbd2_lock_buffer_stall)(void *, dev_t, unsigned long); typedef 
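/*
 * Illustrative sketch (editor's example, not generated): the ext4
 * fast-commit area is a stream of TLV records described by struct
 * ext4_fc_tl above: a little-endian tag and payload length, followed by
 * fc_len bytes of payload (struct ext4_fc_head, ext4_fc_add_range, and so
 * on). Assuming a little-endian host (the kernel uses le16_to_cpu()), a
 * walker might look roughly like:
 *
 *   const u8 *cur = area, *end = area + area_len;
 *   while (cur + sizeof(struct ext4_fc_tl) <= end) {
 *           const struct ext4_fc_tl *tl = (const void *)cur;
 *           u16 tag = tl->fc_tag;   // an EXT4_FC_TAG_* value
 *           u16 len = tl->fc_len;   // payload bytes, header not included
 *           // dispatch on tag; payload starts at cur + sizeof(*tl)
 *           cur += sizeof(struct ext4_fc_tl) + len;
 *   }
 */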
void (*btf_trace_jbd2_shrink_count)(void *, journal_t *, unsigned long, unsigned long); typedef void (*btf_trace_jbd2_shrink_scan_enter)(void *, journal_t *, unsigned long, unsigned long); typedef void (*btf_trace_jbd2_shrink_scan_exit)(void *, journal_t *, unsigned long, unsigned long, unsigned long); typedef void (*btf_trace_jbd2_shrink_checkpoint_list)(void *, journal_t *, tid_t, tid_t, tid_t, unsigned long, tid_t); struct trace_event_raw_jbd2_checkpoint { struct trace_entry ent; dev_t dev; int result; char __data[0]; }; struct trace_event_raw_jbd2_commit { struct trace_entry ent; dev_t dev; char sync_commit; tid_t transaction; char __data[0]; }; struct trace_event_raw_jbd2_end_commit { struct trace_entry ent; dev_t dev; char sync_commit; tid_t transaction; tid_t head; char __data[0]; }; struct trace_event_raw_jbd2_submit_inode_data { struct trace_entry ent; dev_t dev; ino_t ino; char __data[0]; }; struct trace_event_raw_jbd2_handle_start_class { struct trace_entry ent; dev_t dev; tid_t tid; unsigned int type; unsigned int line_no; int requested_blocks; char __data[0]; }; struct trace_event_raw_jbd2_handle_extend { struct trace_entry ent; dev_t dev; tid_t tid; unsigned int type; unsigned int line_no; int buffer_credits; int requested_blocks; char __data[0]; }; struct trace_event_raw_jbd2_handle_stats { struct trace_entry ent; dev_t dev; tid_t tid; unsigned int type; unsigned int line_no; int interval; int sync; int requested_blocks; int dirtied_blocks; char __data[0]; }; struct trace_event_raw_jbd2_run_stats { struct trace_entry ent; dev_t dev; tid_t tid; unsigned long wait; unsigned long request_delay; unsigned long running; unsigned long locked; unsigned long flushing; unsigned long logging; __u32 handle_count; __u32 blocks; __u32 blocks_logged; char __data[0]; }; struct trace_event_raw_jbd2_checkpoint_stats { struct trace_entry ent; dev_t dev; tid_t tid; unsigned long chp_time; __u32 forced_to_close; __u32 written; __u32 dropped; char __data[0]; }; struct trace_event_raw_jbd2_update_log_tail { struct trace_entry ent; dev_t dev; tid_t tail_sequence; tid_t first_tid; unsigned long block_nr; unsigned long freed; char __data[0]; }; struct trace_event_raw_jbd2_write_superblock { struct trace_entry ent; dev_t dev; blk_opf_t write_flags; char __data[0]; }; struct trace_event_raw_jbd2_lock_buffer_stall { struct trace_entry ent; dev_t dev; unsigned long stall_ms; char __data[0]; }; struct trace_event_raw_jbd2_journal_shrink { struct trace_entry ent; dev_t dev; unsigned long nr_to_scan; unsigned long count; char __data[0]; }; struct trace_event_raw_jbd2_shrink_scan_exit { struct trace_entry ent; dev_t dev; unsigned long nr_to_scan; unsigned long nr_shrunk; unsigned long count; char __data[0]; }; struct trace_event_raw_jbd2_shrink_checkpoint_list { struct trace_entry ent; dev_t dev; tid_t first_tid; tid_t tid; tid_t last_tid; unsigned long nr_freed; tid_t next_tid; char __data[0]; }; struct trace_event_data_offsets_jbd2_checkpoint {}; struct trace_event_data_offsets_jbd2_commit {}; struct trace_event_data_offsets_jbd2_end_commit {}; struct trace_event_data_offsets_jbd2_submit_inode_data {}; struct trace_event_data_offsets_jbd2_handle_start_class {}; struct trace_event_data_offsets_jbd2_handle_extend {}; struct trace_event_data_offsets_jbd2_handle_stats {}; struct trace_event_data_offsets_jbd2_run_stats {}; struct trace_event_data_offsets_jbd2_checkpoint_stats {}; struct trace_event_data_offsets_jbd2_update_log_tail {}; struct trace_event_data_offsets_jbd2_write_superblock {}; struct 
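/*
 * Illustrative sketch (editor's example): the trace_event_raw_jbd2_*
 * layouts above are the per-event record formats; the trailing
 * char __data[0] marks where dynamic data would follow. With libbpf CO-RE
 * they can be used directly as the context of a tracepoint program. SEC()
 * and bpf_printk() below come from <bpf/bpf_helpers.h>, which is not part
 * of this header:
 *
 *   SEC("tracepoint/jbd2/jbd2_run_stats")
 *   int count_jbd2_commits(struct trace_event_raw_jbd2_run_stats *ctx)
 *   {
 *           bpf_printk("dev=%u tid=%u handles=%u",
 *                      ctx->dev, ctx->tid, ctx->handle_count);
 *           return 0;
 *   }
 */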
trace_event_data_offsets_jbd2_lock_buffer_stall {}; struct trace_event_data_offsets_jbd2_journal_shrink {}; struct trace_event_data_offsets_jbd2_shrink_scan_exit {}; struct trace_event_data_offsets_jbd2_shrink_checkpoint_list {}; struct jbd2_stats_proc_session { journal_t *journal; struct transaction_stats_s *stats; int start; int max; }; enum ramfs_param { Opt_mode___3 = 0, }; struct ramfs_mount_opts { umode_t mode; }; struct ramfs_fs_info { struct ramfs_mount_opts mount_opts; }; struct fat_cache { struct list_head cache_list; int nr_contig; int fcluster; int dcluster; }; struct msdos_inode_info { spinlock_t cache_lru_lock; struct list_head cache_lru; int nr_caches; unsigned int cache_valid_id; loff_t mmu_private; int i_start; int i_logstart; int i_attrs; loff_t i_pos; struct hlist_node i_fat_hash; struct hlist_node i_dir_hash; struct rw_semaphore truncate_lock; struct timespec64 i_crtime; struct inode vfs_inode; }; struct fat_mount_options { kuid_t fs_uid; kgid_t fs_gid; unsigned short fs_fmask; unsigned short fs_dmask; unsigned short codepage; int time_offset; char *iocharset; unsigned short shortname; unsigned char name_check; unsigned char errors; unsigned char nfs; unsigned short allow_utime; unsigned int quiet: 1; unsigned int showexec: 1; unsigned int sys_immutable: 1; unsigned int dotsOK: 1; unsigned int isvfat: 1; unsigned int utf8: 1; unsigned int unicode_xlate: 1; unsigned int numtail: 1; unsigned int flush: 1; unsigned int nocase: 1; unsigned int usefree: 1; unsigned int tz_set: 1; unsigned int rodir: 1; unsigned int discard: 1; unsigned int dos1xfloppy: 1; }; struct nls_table; struct fatent_operations; struct msdos_sb_info { unsigned short sec_per_clus; unsigned short cluster_bits; unsigned int cluster_size; unsigned char fats; unsigned char fat_bits; unsigned short fat_start; unsigned long fat_length; unsigned long dir_start; unsigned short dir_entries; unsigned long data_start; unsigned long max_cluster; unsigned long root_cluster; unsigned long fsinfo_sector; struct mutex fat_lock; struct mutex nfs_build_inode_lock; struct mutex s_lock; unsigned int prev_free; unsigned int free_clusters; unsigned int free_clus_valid; struct fat_mount_options options; struct nls_table *nls_disk; struct nls_table *nls_io; const void *dir_ops; int dir_per_block; int dir_per_block_bits; unsigned int vol_id; int fatent_shift; const struct fatent_operations *fatent_ops; struct inode *fat_inode; struct inode *fsinfo_inode; struct ratelimit_state ratelimit; spinlock_t inode_hash_lock; struct hlist_head inode_hashtable[256]; spinlock_t dir_hash_lock; struct hlist_head dir_hashtable[256]; unsigned int dirty; struct callback_head rcu; }; typedef u16 wchar_t; struct nls_table { const char *charset; const char *alias; int (*uni2char)(wchar_t, unsigned char *, int); int (*char2uni)(const unsigned char *, int, wchar_t *); const unsigned char *charset2lower; const unsigned char *charset2upper; struct module *owner; struct nls_table *next; }; struct fat_entry; struct fatent_operations { void (*ent_blocknr)(struct super_block *, int, int *, sector_t *); void (*ent_set_ptr)(struct fat_entry *, int); int (*ent_bread)(struct super_block *, struct fat_entry *, int, sector_t); int (*ent_get)(struct fat_entry *); void (*ent_put)(struct fat_entry *, int); int (*ent_next)(struct fat_entry *); }; struct fat_cache_id { unsigned int id; int nr_contig; int fcluster; int dcluster; }; struct fat_entry { int entry; union { u8 *ent12_p[2]; __le16 *ent16_p; __le32 *ent32_p; } u; int nr_bhs; struct buffer_head *bhs[2]; 
struct inode *fat_inode; }; enum { PARSE_INVALID = 1, PARSE_NOT_LONGNAME = 2, PARSE_EOF = 3, }; enum utf16_endian { UTF16_HOST_ENDIAN = 0, UTF16_LITTLE_ENDIAN = 1, UTF16_BIG_ENDIAN = 2, }; struct msdos_dir_entry { __u8 name[11]; __u8 attr; __u8 lcase; __u8 ctime_cs; __le16 ctime; __le16 cdate; __le16 adate; __le16 starthi; __le16 time; __le16 date; __le16 start; __le32 size; }; typedef unsigned long long llu; struct msdos_dir_slot { __u8 id; __u8 name0_4[10]; __u8 attr; __u8 reserved; __u8 alias_checksum; __u8 name5_10[12]; __le16 start; __u8 name11_12[4]; }; struct __fat_dirent { long d_ino; __kernel_off_t d_off; unsigned short d_reclen; char d_name[256]; }; struct fat_ioctl_filldir_callback { struct dir_context ctx; void __attribute__((btf_type_tag("user"))) *dirent; int result; const char *longname; int long_len; const char *shortname; int short_len; }; struct fat_slot_info { loff_t i_pos; loff_t slot_off; int nr_slots; struct msdos_dir_entry *de; struct buffer_head *bh; }; struct compat_dirent { u32 d_ino; compat_off_t d_off; u16 d_reclen; char d_name[256]; }; struct fatent_ra { sector_t cur; sector_t limit; unsigned int ra_blocks; sector_t ra_advance; sector_t ra_next; sector_t ra_limit; }; struct fat_floppy_defaults { unsigned int nr_sectors; unsigned int sec_per_clus; unsigned int dir_entries; unsigned int media; unsigned int fat_length; }; enum { Opt_check_n = 0, Opt_check_r = 1, Opt_check_s = 2, Opt_uid___3 = 3, Opt_gid___4 = 4, Opt_umask = 5, Opt_dmask = 6, Opt_fmask = 7, Opt_allow_utime = 8, Opt_codepage = 9, Opt_usefree = 10, Opt_nocase = 11, Opt_quiet = 12, Opt_showexec = 13, Opt_debug___2 = 14, Opt_immutable = 15, Opt_dots = 16, Opt_nodots = 17, Opt_charset = 18, Opt_shortname_lower = 19, Opt_shortname_win95 = 20, Opt_shortname_winnt = 21, Opt_shortname_mixed = 22, Opt_utf8_no = 23, Opt_utf8_yes = 24, Opt_uni_xl_no = 25, Opt_uni_xl_yes = 26, Opt_nonumtail_no = 27, Opt_nonumtail_yes = 28, Opt_obsolete = 29, Opt_flush = 30, Opt_tz_utc = 31, Opt_rodir = 32, Opt_err_cont = 33, Opt_err_panic = 34, Opt_err_ro = 35, Opt_discard___2 = 36, Opt_nfs = 37, Opt_time_offset = 38, Opt_nfs_stale_rw = 39, Opt_nfs_nostale_ro = 40, Opt_err___3 = 41, Opt_dos1xfloppy = 42, }; struct fat_boot_sector { __u8 ignored[3]; __u8 system_id[8]; __u8 sector_size[2]; __u8 sec_per_clus; __le16 reserved; __u8 fats; __u8 dir_entries[2]; __u8 sectors[2]; __u8 media; __le16 fat_length; __le16 secs_track; __le16 heads; __le32 hidden; __le32 total_sect; union { struct { __u8 drive_number; __u8 state; __u8 signature; __u8 vol_id[4]; __u8 vol_label[11]; __u8 fs_type[8]; } fat16; struct { __le32 length; __le16 flags; __u8 version[2]; __le32 root_cluster; __le16 info_sector; __le16 backup_boot; __le16 reserved2[6]; __u8 drive_number; __u8 state; __u8 signature; __u8 vol_id[4]; __u8 vol_label[11]; __u8 fs_type[8]; } fat32; }; }; struct fat_boot_fsinfo { __le32 signature1; __le32 reserved1[120]; __le32 signature2; __le32 free_clusters; __le32 next_cluster; __le32 reserved2[4]; }; struct fat_bios_param_block { u16 fat_sector_size; u8 fat_sec_per_clus; u16 fat_reserved; u8 fat_fats; u16 fat_dir_entries; u16 fat_sectors; u16 fat_fat_length; u32 fat_total_sect; u8 fat16_state; u32 fat16_vol_id; u32 fat32_length; u32 fat32_root_cluster; u16 fat32_info_sector; u8 fat32_state; u32 fat32_vol_id; }; struct fat_fid { u32 i_gen; u32 i_pos_low; u16 i_pos_hi; u16 parent_i_pos_hi; u32 parent_i_pos_low; u32 parent_i_gen; }; struct shortname_info { unsigned char lower: 1; unsigned char upper: 1; unsigned char valid: 1; }; enum 
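/*
 * Illustrative sketch (editor's example): several struct fat_boot_sector
 * fields are plain byte arrays because the on-disk BPB is not naturally
 * aligned; the driver reads them with get_unaligned_le16(). An equivalent,
 * byte-order independent read of the logical sector size:
 *
 *   static inline u16 fat_bpb_sector_size(const struct fat_boot_sector *bs)
 *   {
 *           return (u16)bs->sector_size[0] | ((u16)bs->sector_size[1] << 8);
 *   }
 *
 * dir_entries[2] and sectors[2] are decoded the same way, and the starting
 * cluster of a msdos_dir_entry is the 16-bit start field, extended by
 * starthi as the high half on FAT32.
 */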
exfat_error_mode { EXFAT_ERRORS_CONT = 0, EXFAT_ERRORS_PANIC = 1, EXFAT_ERRORS_RO = 2, }; struct exfat_chain { unsigned int dir; unsigned int size; unsigned char flags; }; struct exfat_hint { unsigned int clu; union { unsigned int off; int eidx; }; }; struct exfat_hint_femp { int eidx; int count; struct exfat_chain cur; }; struct exfat_inode_info { struct exfat_chain dir; int entry; unsigned int type; unsigned short attr; unsigned int start_clu; unsigned char flags; unsigned int version; struct exfat_hint hint_bmap; struct exfat_hint hint_stat; struct exfat_hint_femp hint_femp; spinlock_t cache_lru_lock; struct list_head cache_lru; int nr_caches; unsigned int cache_valid_id; loff_t i_size_ondisk; loff_t i_size_aligned; loff_t i_pos; struct hlist_node i_hash_fat; struct rw_semaphore truncate_lock; struct inode vfs_inode; struct timespec64 i_crtime; }; struct exfat_mount_options { kuid_t fs_uid; kgid_t fs_gid; unsigned short fs_fmask; unsigned short fs_dmask; unsigned short allow_utime; char *iocharset; enum exfat_error_mode errors; unsigned int utf8: 1; unsigned int sys_tz: 1; unsigned int discard: 1; unsigned int keep_last_dots: 1; int time_offset; }; struct exfat_sb_info { unsigned long long num_sectors; unsigned int num_clusters; unsigned int cluster_size; unsigned int cluster_size_bits; unsigned int sect_per_clus; unsigned int sect_per_clus_bits; unsigned long long FAT1_start_sector; unsigned long long FAT2_start_sector; unsigned long long data_start_sector; unsigned int num_FAT_sectors; unsigned int root_dir; unsigned int dentries_per_clu; unsigned int vol_flags; unsigned int vol_flags_persistent; struct buffer_head *boot_bh; unsigned int map_clu; unsigned int map_sectors; struct buffer_head **vol_amap; unsigned short *vol_utbl; unsigned int clu_srch_ptr; unsigned int used_clusters; struct mutex s_lock; struct mutex bitmap_lock; struct exfat_mount_options options; struct nls_table *nls_io; struct ratelimit_state ratelimit; spinlock_t inode_hash_lock; struct hlist_head inode_hashtable[256]; }; struct exfat_entry_set_cache { struct super_block *sb; unsigned int start_off; int num_bh; struct buffer_head *__bh[3]; struct buffer_head **bh; unsigned int num_entries; bool modified; }; struct exfat_dentry { __u8 type; union { struct { __u8 num_ext; __le16 checksum; __le16 attr; __le16 reserved1; __le16 create_time; __le16 create_date; __le16 modify_time; __le16 modify_date; __le16 access_time; __le16 access_date; __u8 create_time_cs; __u8 modify_time_cs; __u8 create_tz; __u8 modify_tz; __u8 access_tz; __u8 reserved2[7]; } __attribute__((packed)) file; struct { __u8 flags; __u8 reserved1; __u8 name_len; __le16 name_hash; __le16 reserved2; __le64 valid_size; __le32 reserved3; __le32 start_clu; __le64 size; } __attribute__((packed)) stream; struct { __u8 flags; __le16 unicode_0_14[15]; } __attribute__((packed)) name; struct { __u8 flags; __u8 reserved[18]; __le32 start_clu; __le64 size; } __attribute__((packed)) bitmap; struct { __u8 reserved1[3]; __le32 checksum; __u8 reserved2[12]; __le32 start_clu; __le64 size; } __attribute__((packed)) upcase; struct { __u8 flags; __u8 vendor_guid[16]; __u8 vendor_defined[14]; } vendor_ext; struct { __u8 flags; __u8 vendor_guid[16]; __u8 vendor_defined[2]; __le32 start_clu; __le64 size; } __attribute__((packed)) vendor_alloc; struct { __u8 flags; __u8 custom_defined[18]; __le32 start_clu; __le64 size; } __attribute__((packed)) generic_secondary; } dentry; }; struct exfat_dentry_namebuf { char *lfn; int lfnbuf_len; }; struct exfat_dir_entry { struct 
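/*
 * Illustrative sketch (editor's example): struct exfat_sb_info keeps the
 * volume geometry as powers of two, so unit conversions are shifts. Data
 * clusters are numbered from 2, so the cluster-heap mapping used by the
 * exfat driver is essentially:
 *
 *   static inline unsigned long long
 *   exfat_cluster_to_sector(const struct exfat_sb_info *sbi, unsigned int clu)
 *   {
 *           return ((unsigned long long)(clu - 2) << sbi->sect_per_clus_bits)
 *                   + sbi->data_start_sector;
 *   }
 *
 * cluster_size caches the byte size of a cluster (1 << cluster_size_bits).
 */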
exfat_chain dir; int entry; unsigned int type; unsigned int start_clu; unsigned char flags; unsigned short attr; loff_t size; unsigned int num_subdirs; struct timespec64 atime; struct timespec64 mtime; struct timespec64 crtime; struct exfat_dentry_namebuf namebuf; }; enum { NLS_NAME_NO_LOSSY = 0, NLS_NAME_LOSSY = 1, NLS_NAME_OVERLEN = 2, }; typedef u32 unicode_t; struct exfat_uni_name { unsigned short name[258]; u16 name_hash; unsigned char name_len; }; enum exfat_validate_dentry_mode { ES_MODE_STARTED = 0, ES_MODE_GET_FILE_ENTRY = 1, ES_MODE_GET_STRM_ENTRY = 2, ES_MODE_GET_NAME_ENTRY = 3, ES_MODE_GET_CRITICAL_SEC_ENTRY = 4, ES_MODE_GET_BENIGN_SEC_ENTRY = 5, }; enum { DIRENT_STEP_FILE = 0, DIRENT_STEP_STRM = 1, DIRENT_STEP_NAME = 2, DIRENT_STEP_SECD = 3, }; enum { Opt_uid___4 = 0, Opt_gid___5 = 1, Opt_umask___2 = 2, Opt_dmask___2 = 3, Opt_fmask___2 = 4, Opt_allow_utime___2 = 5, Opt_charset___2 = 6, Opt_errors___2 = 7, Opt_discard___3 = 8, Opt_keep_last_dots = 9, Opt_sys_tz = 10, Opt_time_offset___2 = 11, Opt_utf8 = 12, Opt_debug___3 = 13, Opt_namecase = 14, Opt_codepage___2 = 15, }; struct boot_sector { __u8 jmp_boot[3]; __u8 fs_name[8]; __u8 must_be_zero[53]; __le64 partition_offset; __le64 vol_length; __le32 fat_offset; __le32 fat_length; __le32 clu_offset; __le32 clu_count; __le32 root_cluster; __le32 vol_serial; __u8 fs_revision[2]; __le16 vol_flags; __u8 sect_size_bits; __u8 sect_per_clus_bits; __u8 num_fats; __u8 drv_sel; __u8 percent_in_use; __u8 reserved[7]; __u8 boot_code[390]; __le16 signature; }; struct exfat_cache { struct list_head cache_list; unsigned int nr_contig; unsigned int fcluster; unsigned int dcluster; }; struct exfat_cache_id { unsigned int id; unsigned int nr_contig; unsigned int fcluster; unsigned int dcluster; }; struct getdents_callback___2 { struct dir_context ctx; char *name; u64 ino; int found; int sequence; }; struct utf8_table { int cmask; int cval; int shift; long lmask; long lval; }; enum utf8_normalization { UTF8_NFDI = 0, UTF8_NFDICF = 1, UTF8_NMAX = 2, }; typedef const unsigned char utf8leaf_t; typedef const unsigned char utf8trie_t; struct utf8cursor { const struct unicode_map *um; enum utf8_normalization n; const char *s; const char *p; const char *ss; const char *sp; unsigned int len; unsigned int slen; short ccc; short nccc; unsigned char hangul[12]; }; struct fuse_iqueue; struct fuse_iqueue_ops { void (*wake_forget_and_unlock)(struct fuse_iqueue *, bool); void (*wake_interrupt_and_unlock)(struct fuse_iqueue *, bool); void (*wake_pending_and_unlock)(struct fuse_iqueue *, bool); void (*release)(struct fuse_iqueue *); }; struct fuse_forget_one { uint64_t nodeid; uint64_t nlookup; }; struct fuse_forget_link { struct fuse_forget_one forget_one; struct fuse_forget_link *next; }; struct fuse_iqueue { unsigned int connected; spinlock_t lock; wait_queue_head_t waitq; u64 reqctr; struct list_head pending; struct list_head interrupts; struct fuse_forget_link forget_list_head; struct fuse_forget_link *forget_list_tail; int forget_batch; struct fasync_struct *fasync; const struct fuse_iqueue_ops *ops; void *priv; }; enum fuse_req_flag { FR_ISREPLY = 0, FR_FORCE = 1, FR_BACKGROUND = 2, FR_WAITING = 3, FR_ABORTED = 4, FR_INTERRUPTED = 5, FR_LOCKED = 6, FR_PENDING = 7, FR_SENT = 8, FR_FINISHED = 9, FR_PRIVATE = 10, FR_ASYNC = 11, }; enum fuse_opcode { FUSE_LOOKUP = 1, FUSE_FORGET = 2, FUSE_GETATTR = 3, FUSE_SETATTR = 4, FUSE_READLINK = 5, FUSE_SYMLINK = 6, FUSE_MKNOD = 8, FUSE_MKDIR = 9, FUSE_UNLINK = 10, FUSE_RMDIR = 11, FUSE_RENAME = 12, FUSE_LINK = 13, 
FUSE_OPEN = 14, FUSE_READ = 15, FUSE_WRITE = 16, FUSE_STATFS = 17, FUSE_RELEASE = 18, FUSE_FSYNC = 20, FUSE_SETXATTR = 21, FUSE_GETXATTR = 22, FUSE_LISTXATTR = 23, FUSE_REMOVEXATTR = 24, FUSE_FLUSH = 25, FUSE_INIT = 26, FUSE_OPENDIR = 27, FUSE_READDIR = 28, FUSE_RELEASEDIR = 29, FUSE_FSYNCDIR = 30, FUSE_GETLK = 31, FUSE_SETLK = 32, FUSE_SETLKW = 33, FUSE_ACCESS = 34, FUSE_CREATE = 35, FUSE_INTERRUPT = 36, FUSE_BMAP = 37, FUSE_DESTROY = 38, FUSE_IOCTL = 39, FUSE_POLL = 40, FUSE_NOTIFY_REPLY = 41, FUSE_BATCH_FORGET = 42, FUSE_FALLOCATE = 43, FUSE_READDIRPLUS = 44, FUSE_RENAME2 = 45, FUSE_LSEEK = 46, FUSE_COPY_FILE_RANGE = 47, FUSE_SETUPMAPPING = 48, FUSE_REMOVEMAPPING = 49, FUSE_SYNCFS = 50, FUSE_TMPFILE = 51, FUSE_STATX = 52, FUSE_CANONICAL_PATH = 2016, CUSE_INIT = 4096, CUSE_INIT_BSWAP_RESERVED = 1048576, FUSE_INIT_BSWAP_RESERVED = 436207616, }; enum fuse_notify_code { FUSE_NOTIFY_POLL = 1, FUSE_NOTIFY_INVAL_INODE = 2, FUSE_NOTIFY_INVAL_ENTRY = 3, FUSE_NOTIFY_STORE = 4, FUSE_NOTIFY_RETRIEVE = 5, FUSE_NOTIFY_DELETE = 6, FUSE_NOTIFY_CODE_MAX = 7, }; struct fuse_pqueue { unsigned int connected; spinlock_t lock; struct list_head *processing; struct list_head io; }; struct fuse_conn; struct fuse_dev { struct fuse_conn *fc; struct fuse_pqueue pq; struct list_head entry; }; struct fuse_sync_bucket; struct fuse_conn { spinlock_t lock; refcount_t count; atomic_t dev_count; struct callback_head rcu; kuid_t user_id; kgid_t group_id; struct pid_namespace *pid_ns; struct user_namespace *user_ns; unsigned int max_read; unsigned int max_write; unsigned int max_pages; unsigned int max_pages_limit; struct fuse_iqueue iq; atomic64_t khctr; struct rb_root polled_files; unsigned int max_background; unsigned int congestion_threshold; unsigned int num_background; unsigned int active_background; struct list_head bg_queue; spinlock_t bg_lock; int initialized; int blocked; wait_queue_head_t blocked_waitq; unsigned int connected; bool aborted; unsigned int conn_error: 1; unsigned int conn_init: 1; unsigned int async_read: 1; unsigned int abort_err: 1; unsigned int atomic_o_trunc: 1; unsigned int export_support: 1; unsigned int writeback_cache: 1; unsigned int parallel_dirops: 1; unsigned int handle_killpriv: 1; unsigned int cache_symlinks: 1; unsigned int legacy_opts_show: 1; unsigned int handle_killpriv_v2: 1; unsigned int no_open: 1; unsigned int no_opendir: 1; unsigned int no_fsync: 1; unsigned int no_fsyncdir: 1; unsigned int no_flush: 1; unsigned int no_setxattr: 1; unsigned int setxattr_ext: 1; unsigned int no_getxattr: 1; unsigned int no_listxattr: 1; unsigned int no_removexattr: 1; unsigned int no_lock: 1; unsigned int no_access: 1; unsigned int no_create: 1; unsigned int no_interrupt: 1; unsigned int no_bmap: 1; unsigned int no_poll: 1; unsigned int big_writes: 1; unsigned int dont_mask: 1; unsigned int no_flock: 1; unsigned int no_fallocate: 1; unsigned int no_rename2: 1; unsigned int auto_inval_data: 1; unsigned int explicit_inval_data: 1; unsigned int do_readdirplus: 1; unsigned int readdirplus_auto: 1; unsigned int async_dio: 1; unsigned int no_lseek: 1; unsigned int posix_acl: 1; unsigned int default_permissions: 1; unsigned int allow_other: 1; unsigned int no_copy_file_range: 1; unsigned int destroy: 1; unsigned int delete_stale: 1; unsigned int no_control: 1; unsigned int no_force_umount: 1; unsigned int auto_submounts: 1; unsigned int passthrough: 1; unsigned int sync_fs: 1; unsigned int init_security: 1; unsigned int create_supp_group: 1; unsigned int inode_dax: 1; unsigned int no_tmpfile: 1; 
unsigned int direct_io_allow_mmap: 1; unsigned int no_statx: 1; unsigned int no_daemon: 1; atomic_t num_waiting; unsigned int minor; struct list_head entry; dev_t dev; struct dentry *ctl_dentry[5]; int ctl_ndents; u32 scramble_key[4]; atomic64_t attr_version; void (*release)(struct fuse_conn *); struct rw_semaphore killsb; struct list_head devices; struct list_head mounts; struct fuse_sync_bucket __attribute__((btf_type_tag("rcu"))) *curr_bucket; struct idr passthrough_req; spinlock_t passthrough_req_lock; }; struct fuse_sync_bucket { atomic_t count; wait_queue_head_t waitq; struct callback_head rcu; }; struct fuse_in_header { uint32_t len; uint32_t opcode; uint64_t unique; uint64_t nodeid; uint32_t uid; uint32_t gid; uint32_t pid; union { struct { uint16_t total_extlen; uint16_t padding; }; uint32_t error_in; }; }; struct fuse_out_header { uint32_t len; int32_t error; uint64_t unique; }; struct fuse_args; struct fuse_mount; struct fuse_req { struct list_head list; struct list_head intr_entry; struct fuse_args *args; refcount_t count; unsigned long flags; struct { struct fuse_in_header h; } in; struct { struct fuse_out_header h; } out; wait_queue_head_t waitq; void *argbuf; struct fuse_mount *fm; }; struct fuse_in_arg { unsigned int size; const void *value; }; struct fuse_arg { unsigned int size; void *value; }; struct fuse_args { uint64_t nodeid; uint32_t opcode; uint32_t error_in; uint8_t in_numargs; uint8_t out_numargs; uint8_t ext_idx; bool force: 1; bool noreply: 1; bool nocreds: 1; bool in_pages: 1; bool out_pages: 1; bool user_pages: 1; bool out_argvar: 1; bool page_zeroing: 1; bool page_replace: 1; bool may_block: 1; bool is_ext: 1; struct fuse_in_arg in_args[5]; struct fuse_arg out_args[3]; void (*end)(struct fuse_mount *, struct fuse_args *, int); struct path *canonical_path; }; struct fuse_mount { struct fuse_conn *fc; struct super_block *sb; struct list_head fc_entry; }; struct fuse_passthrough { struct file *filp; struct cred *cred; }; struct fuse_release_args; struct fuse_file { struct fuse_mount *fm; struct fuse_release_args *release_args; u64 kh; u64 fh; u64 nodeid; refcount_t count; u32 open_flags; struct list_head write_entry; struct { struct mutex lock; loff_t pos; loff_t cache_off; u64 version; } readdir; struct fuse_passthrough passthrough; struct file *backing_file; struct rb_node polled_node; wait_queue_head_t poll_wait; bool flock: 1; }; struct fuse_page_desc; struct fuse_args_pages { struct fuse_args args; struct page **pages; struct fuse_page_desc *descs; unsigned int num_pages; }; struct fuse_page_desc { unsigned int length; unsigned int offset; }; struct fuse_entry_bpf_out { uint64_t backing_action; uint64_t backing_fd; uint64_t bpf_action; uint64_t bpf_fd; }; struct fuse_entry_bpf { struct fuse_entry_bpf_out out; struct file *backing_file; struct file *bpf_file; }; struct fuse_notify_retrieve_in { uint64_t dummy1; uint64_t offset; uint32_t size; uint32_t dummy2; uint64_t dummy3; uint64_t dummy4; }; struct fuse_retrieve_args { struct fuse_args_pages ap; struct fuse_notify_retrieve_in inarg; }; struct fuse_copy_state { int write; struct fuse_req *req; struct iov_iter *iter; struct pipe_buffer *pipebufs; struct pipe_buffer *currbuf; struct pipe_inode_info *pipe; unsigned long nr_segs; struct page *pg; unsigned int len; unsigned int offset; unsigned int move_pages: 1; }; struct fuse_batch_forget_in { uint32_t count; uint32_t dummy; }; struct fuse_forget_in { uint64_t nlookup; }; struct fuse_notify_poll_wakeup_out { uint64_t kh; }; struct fuse_notify_retrieve_out { 
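/*
 * Illustrative sketch (editor's example): every request a userspace server
 * reads from /dev/fuse begins with struct fuse_in_header and every reply it
 * writes begins with struct fuse_out_header; in both, len is the total
 * message size including the header itself, and unique ties a reply back to
 * its request. A minimal error reply is just a bare header:
 *
 *   struct fuse_out_header oh = {
 *           .len    = sizeof(oh),
 *           .error  = -ENOSYS,       // negative errno, 0 on success
 *           .unique = in.unique,
 *   };
 *
 * Answering an opcode with -ENOSYS is also how many of the no_* bits in
 * struct fuse_conn above end up set: the kernel records the refusal and
 * stops issuing that operation.
 */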
uint64_t notify_unique; uint64_t nodeid; uint64_t offset; uint32_t size; uint32_t padding; }; struct fuse_notify_inval_entry_out { uint64_t parent; uint32_t namelen; uint32_t flags; }; struct fuse_notify_store_out { uint64_t nodeid; uint64_t offset; uint32_t size; uint32_t padding; }; struct fuse_notify_inval_inode_out { uint64_t ino; int64_t off; int64_t len; }; struct fuse_notify_delete_out { uint64_t parent; uint64_t child; uint32_t namelen; uint32_t padding; }; struct fuse_interrupt_in { uint64_t unique; }; enum { FUSE_I_ADVISE_RDPLUS = 0, FUSE_I_INIT_RDPLUS = 1, FUSE_I_SIZE_UNSTABLE = 2, FUSE_I_BAD = 3, FUSE_I_BTIME = 4, }; enum fuse_ext_type { FUSE_MAX_NR_SECCTX = 31, FUSE_EXT_GROUPS = 32, }; struct fuse_dentry { union { u64 time; struct callback_head rcu; }; struct path backing_path; struct bpf_prog *bpf; }; struct fuse_submount_lookup; struct fuse_inode { struct inode inode; struct inode *backing_inode; struct bpf_prog *bpf; u64 nodeid; u64 nlookup; struct fuse_forget_link *forget; u64 i_time; u32 inval_mask; umode_t orig_i_mode; struct timespec64 i_btime; u64 orig_ino; u64 attr_version; union { struct { struct list_head write_files; struct list_head queued_writes; int writectr; wait_queue_head_t page_waitq; struct rb_root writepages; }; struct { bool cached; loff_t size; loff_t pos; u64 version; struct timespec64 mtime; u64 iversion; spinlock_t lock; } rdc; }; unsigned long state; struct mutex mutex; spinlock_t lock; struct fuse_submount_lookup *submount_lookup; }; struct fuse_submount_lookup { refcount_t count; u64 nodeid; struct fuse_forget_link *forget; }; struct fuse_supp_groups { uint32_t nr_groups; uint32_t groups[0]; }; struct fuse_attr { uint64_t ino; uint64_t size; uint64_t blocks; uint64_t atime; uint64_t mtime; uint64_t ctime; uint32_t atimensec; uint32_t mtimensec; uint32_t ctimensec; uint32_t mode; uint32_t nlink; uint32_t uid; uint32_t gid; uint32_t rdev; uint32_t blksize; uint32_t flags; }; struct fuse_entry_out { uint64_t nodeid; uint64_t generation; uint64_t entry_valid; uint64_t attr_valid; uint32_t entry_valid_nsec; uint32_t attr_valid_nsec; struct fuse_attr attr; }; struct fuse_sx_time { int64_t tv_sec; uint32_t tv_nsec; int32_t __reserved; }; struct fuse_statx { uint32_t mask; uint32_t blksize; uint64_t attributes; uint32_t nlink; uint32_t uid; uint32_t gid; uint16_t mode; uint16_t __spare0[1]; uint64_t ino; uint64_t size; uint64_t blocks; uint64_t attributes_mask; struct fuse_sx_time atime; struct fuse_sx_time btime; struct fuse_sx_time ctime; struct fuse_sx_time mtime; uint32_t rdev_major; uint32_t rdev_minor; uint32_t dev_major; uint32_t dev_minor; uint64_t __spare2[14]; }; struct fuse_dummy_io { int unused; }; struct fuse_statx_out { uint64_t attr_valid; uint32_t attr_valid_nsec; uint32_t flags; uint64_t spare[2]; struct fuse_statx stat; }; struct fuse_statx_in { uint32_t getattr_flags; uint32_t reserved; uint64_t fh; uint32_t sx_flags; uint32_t sx_mask; }; struct fuse_getattr_in { uint32_t getattr_flags; uint32_t dummy; uint64_t fh; }; struct fuse_attr_out { uint64_t attr_valid; uint32_t attr_valid_nsec; uint32_t dummy; struct fuse_attr attr; }; struct fuse_getattr_io { struct fuse_getattr_in fgi; struct fuse_attr_out fao; }; struct fuse_setattr_in { uint32_t valid; uint32_t padding; uint64_t fh; uint64_t size; uint64_t lock_owner; uint64_t atime; uint64_t mtime; uint64_t ctime; uint32_t atimensec; uint32_t mtimensec; uint32_t ctimensec; uint32_t mode; uint32_t unused4; uint32_t uid; uint32_t gid; uint32_t unused5; }; struct fuse_setattr_io { struct 
fuse_setattr_in fsi; struct fuse_attr_out fao; }; struct fuse_access_in { uint32_t mask; uint32_t padding; }; struct fuse_err_ret { void *result; bool ret; }; struct fuse_lookup_io { struct fuse_entry_out feo; struct fuse_entry_bpf feb; }; struct fuse_link_in { uint64_t oldnodeid; }; struct fuse_mkdir_in { uint32_t mode; uint32_t umask; }; struct fuse_mknod_in { uint32_t mode; uint32_t rdev; uint32_t umask; uint32_t padding; }; struct fuse_rename2_in { uint64_t newdir; uint32_t flags; uint32_t padding; }; struct fuse_rename_in { uint64_t newdir; }; struct fuse_secctx { uint32_t size; uint32_t padding; }; struct fuse_secctx_header { uint32_t size; uint32_t nr_secctx; }; struct fuse_ext_header { uint32_t size; uint32_t type; }; struct fuse_create_in { uint32_t flags; uint32_t mode; uint32_t umask; uint32_t open_flags; }; struct fuse_open_out { uint64_t fh; uint32_t open_flags; uint32_t passthrough_fh; }; struct fuse_create_open_io { struct fuse_create_in fci; struct fuse_entry_out feo; struct fuse_open_out foo; }; struct fuse_fsync_in { uint64_t fh; uint32_t fsync_flags; uint32_t padding; }; struct fuse_read_in { uint64_t fh; uint64_t offset; uint32_t size; uint32_t read_flags; uint64_t lock_owner; uint32_t flags; uint32_t padding; }; struct fuse_write_in { uint64_t fh; uint64_t offset; uint32_t size; uint32_t write_flags; uint64_t lock_owner; uint32_t flags; uint32_t padding; }; struct fuse_write_out { uint32_t size; uint32_t padding; }; struct fuse_io_priv; struct fuse_io_args { union { struct { struct fuse_read_in in; u64 attr_ver; } read; struct { struct fuse_write_in in; struct fuse_write_out out; bool page_locked; } write; }; struct fuse_args_pages ap; struct fuse_io_priv *io; struct fuse_file *ff; }; struct fuse_writepage_args { struct fuse_io_args ia; struct rb_node writepages_entry; struct list_head queue_entry; struct fuse_writepage_args *next; struct inode *inode; struct fuse_sync_bucket *bucket; }; struct fuse_io_priv { struct kref refcnt; int async; spinlock_t lock; unsigned int reqs; ssize_t bytes; size_t size; __u64 offset; bool write; bool should_dirty; int err; struct kiocb *iocb; struct completion *done; bool blocking; }; struct fuse_release_in { uint64_t fh; uint32_t flags; uint32_t release_flags; uint64_t lock_owner; }; struct fuse_release_args { struct fuse_args args; struct fuse_release_in inarg; struct inode *inode; }; struct fuse_open_in { uint32_t flags; uint32_t open_flags; }; struct fuse_open_io { struct fuse_open_in foi; struct fuse_open_out foo; }; struct fuse_lseek_out { uint64_t offset; }; struct fuse_lseek_in { uint64_t fh; uint64_t offset; uint32_t whence; uint32_t padding; }; struct fuse_lseek_io { struct fuse_lseek_in fli; struct fuse_lseek_out flo; }; struct fuse_read_iter_out { uint64_t ret; }; struct fuse_file_read_iter_io { struct fuse_read_in fri; struct fuse_read_iter_out frio; }; struct fuse_write_iter_out { uint64_t ret; }; struct fuse_file_write_iter_io { struct fuse_write_in fwi; struct fuse_write_out fwo; struct fuse_write_iter_out fwio; }; struct fuse_flush_in { uint64_t fh; uint32_t unused; uint32_t padding; uint64_t lock_owner; }; struct fuse_file_lock { uint64_t start; uint64_t end; uint32_t type; uint32_t pid; }; struct fuse_lk_out { struct fuse_file_lock lk; }; struct fuse_lk_in { uint64_t fh; uint64_t owner; struct fuse_file_lock lk; uint32_t lk_flags; uint32_t padding; }; struct fuse_fallocate_in { uint64_t fh; uint64_t offset; uint64_t length; uint32_t mode; uint32_t padding; }; struct fuse_copy_file_range_in { uint64_t fh_in; uint64_t 
off_in; uint64_t nodeid_out; uint64_t fh_out; uint64_t off_out; uint64_t len; uint64_t flags; }; struct fuse_copy_file_range_io { struct fuse_copy_file_range_in fci; struct fuse_write_out fwo; }; struct fuse_fill_wb_data { struct fuse_writepage_args *wpa; struct fuse_file *ff; struct inode *inode; struct page **orig_pages; unsigned int max_pages; }; struct fuse_poll_out { uint32_t revents; uint32_t padding; }; struct fuse_poll_in { uint64_t fh; uint64_t kh; uint32_t flags; uint32_t events; }; struct fuse_bmap_out { uint64_t block; }; struct fuse_bmap_in { uint64_t block; uint32_t blocksize; uint32_t padding; }; enum fuse_dax_mode { FUSE_DAX_INODE_DEFAULT = 0, FUSE_DAX_ALWAYS = 1, FUSE_DAX_NEVER = 2, FUSE_DAX_INODE_USER = 3, }; enum { OPT_SOURCE = 0, OPT_SUBTYPE = 1, OPT_FD = 2, OPT_ROOTMODE = 3, OPT_USER_ID = 4, OPT_GROUP_ID = 5, OPT_DEFAULT_PERMISSIONS = 6, OPT_ALLOW_OTHER = 7, OPT_MAX_READ = 8, OPT_BLKSIZE = 9, OPT_ROOT_BPF = 10, OPT_ROOT_DIR = 11, OPT_NO_DAEMON = 12, OPT_ERR = 13, }; struct fuse_inode_identifier { u64 nodeid; struct inode *backing_inode; }; struct fuse_init_in { uint32_t major; uint32_t minor; uint32_t max_readahead; uint32_t flags; uint32_t flags2; uint32_t unused[11]; }; struct fuse_init_out { uint32_t major; uint32_t minor; uint32_t max_readahead; uint32_t flags; uint16_t max_background; uint16_t congestion_threshold; uint32_t max_write; uint32_t time_gran; uint16_t max_pages; uint16_t map_alignment; uint32_t flags2; uint32_t unused[7]; }; struct fuse_init_args { struct fuse_args args; struct fuse_init_in in; struct fuse_init_out out; }; struct fuse_kstatfs { uint64_t blocks; uint64_t bfree; uint64_t bavail; uint64_t files; uint64_t ffree; uint32_t bsize; uint32_t namelen; uint32_t frsize; uint32_t padding; uint32_t spare[6]; }; struct fuse_statfs_out { struct fuse_kstatfs st; }; struct fuse_fs_context { int fd; struct file *file; unsigned int rootmode; kuid_t user_id; kgid_t group_id; bool is_bdev: 1; bool fd_present: 1; bool rootmode_present: 1; bool user_id_present: 1; bool group_id_present: 1; bool default_permissions: 1; bool allow_other: 1; bool destroy: 1; bool no_control: 1; bool no_force_umount: 1; bool legacy_opts_show: 1; enum fuse_dax_mode dax_mode; bool no_daemon: 1; unsigned int max_read; unsigned int blksize; const char *subtype; struct bpf_prog *root_bpf; struct file *root_dir; struct dax_device *dax_dev; void **fudptr; }; struct fuse_syncfs_in { uint64_t padding; }; struct fuse_inode_handle { u64 nodeid; u32 generation; }; struct fuse_getxattr_in { uint32_t size; uint32_t padding; }; struct fuse_getxattr_out { uint32_t size; uint32_t padding; }; struct fuse_getxattr_io { struct fuse_getxattr_in fgi; struct fuse_getxattr_out fgo; }; struct fuse_setxattr_in { uint32_t size; uint32_t flags; uint32_t setxattr_flags; uint32_t padding; }; enum fuse_parse_result { FOUND_ERR = -1, FOUND_NONE = 0, FOUND_SOME = 1, FOUND_ALL = 2, }; struct fuse_dirent { uint64_t ino; uint64_t off; uint32_t namelen; uint32_t type; char name[0]; }; struct fuse_direntplus { struct fuse_entry_out entry_out; struct fuse_dirent dirent; }; struct fuse_read_out { uint64_t offset; uint32_t again; uint32_t padding; }; struct fuse_read_io { struct fuse_read_in fri; struct fuse_read_out fro; }; struct fuse_ioctl_out { int32_t result; uint32_t flags; uint32_t in_iovs; uint32_t out_iovs; }; struct fuse_ioctl_iovec { uint64_t base; uint64_t len; }; struct fuse_ioctl_in { uint64_t fh; uint32_t flags; uint32_t cmd; uint64_t arg; uint32_t in_size; uint32_t out_size; }; struct fuse_aio_req { 
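/*
 * Illustrative sketch (editor's example): a FUSE_READDIR reply is a packed
 * sequence of struct fuse_dirent records, each followed by namelen name
 * bytes and padded so the next record starts on an 8-byte boundary
 * (FUSE_DIRENT_ALIGN in the UAPI header). The per-record size is therefore:
 *
 *   static inline size_t fuse_dirent_rec_size(u32 namelen)
 *   {
 *           size_t unpadded = sizeof(struct fuse_dirent) + namelen;
 *           return (unpadded + 7) & ~(size_t)7;
 *   }
 *
 * FUSE_READDIRPLUS streams struct fuse_direntplus records aligned the same
 * way, each carrying a fuse_entry_out ahead of the dirent.
 */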
struct kiocb iocb; struct kiocb *iocb_fuse; }; struct fuse_bpf_aio_req { struct kiocb iocb; refcount_t ref; struct kiocb *iocb_orig; }; struct extfuse_ctx { struct dir_context ctx; u8 *addr; size_t offset; }; struct virtio_device_id; struct virtio_device; struct virtio_driver { struct device_driver driver; const struct virtio_device_id *id_table; const unsigned int *feature_table; unsigned int feature_table_size; const unsigned int *feature_table_legacy; unsigned int feature_table_size_legacy; int (*validate)(struct virtio_device *); int (*probe)(struct virtio_device *); void (*scan)(struct virtio_device *); void (*remove)(struct virtio_device *); void (*config_changed)(struct virtio_device *); int (*freeze)(struct virtio_device *); int (*restore)(struct virtio_device *); }; struct virtio_device_id { __u32 device; __u32 vendor; }; struct vringh_config_ops; struct virtio_config_ops; struct virtio_device { int index; bool failed; bool config_enabled; bool config_change_pending; spinlock_t config_lock; spinlock_t vqs_list_lock; struct device dev; struct virtio_device_id id; const struct virtio_config_ops *config; const struct vringh_config_ops *vringh_config; struct list_head vqs; u64 features; void *priv; }; struct virtqueue; typedef void vq_callback_t(struct virtqueue *); struct virtio_shm_region; struct virtio_config_ops { void (*get)(struct virtio_device *, unsigned int, void *, unsigned int); void (*set)(struct virtio_device *, unsigned int, const void *, unsigned int); u32 (*generation)(struct virtio_device *); u8 (*get_status)(struct virtio_device *); void (*set_status)(struct virtio_device *, u8); void (*reset)(struct virtio_device *); int (*find_vqs)(struct virtio_device *, unsigned int, struct virtqueue **, vq_callback_t **, const char * const *, const bool *, struct irq_affinity *); void (*del_vqs)(struct virtio_device *); void (*synchronize_cbs)(struct virtio_device *); u64 (*get_features)(struct virtio_device *); int (*finalize_features)(struct virtio_device *); const char * (*bus_name)(struct virtio_device *); int (*set_vq_affinity)(struct virtqueue *, const struct cpumask *); const struct cpumask * (*get_vq_affinity)(struct virtio_device *, int); bool (*get_shm_region)(struct virtio_device *, struct virtio_shm_region *, u8); int (*disable_vq_and_reset)(struct virtqueue *); int (*enable_vq_after_reset)(struct virtqueue *); }; struct virtqueue { struct list_head list; void (*callback)(struct virtqueue *); const char *name; struct virtio_device *vdev; unsigned int index; unsigned int num_free; unsigned int num_max; bool reset; void *priv; }; struct virtio_shm_region { u64 addr; u64 len; }; enum { OPT_DAX = 0, OPT_DAX_ENUM = 1, }; enum { VQ_HIPRIO = 0, VQ_REQUEST = 1, }; struct virtio_fs_vq; struct virtio_fs { struct kref refcount; struct list_head list; char *tag; struct virtio_fs_vq *vqs; unsigned int nvqs; unsigned int num_request_queues; struct dax_device *dax_dev; void *window_kaddr; phys_addr_t window_phys_addr; size_t window_len; }; struct virtio_fs_vq { spinlock_t lock; struct virtqueue *vq; struct work_struct done_work; struct list_head queued_reqs; struct list_head end_reqs; struct delayed_work dispatch_work; struct fuse_dev *fud; bool connected; long in_flight; struct completion in_flight_zero; char name[24]; long: 64; }; struct virtio_fs_req_work { struct fuse_req *req; struct virtio_fs_vq *fsvq; struct work_struct done_work; }; struct virtio_fs_forget_req { struct fuse_in_header ih; struct fuse_forget_in arg; }; struct virtio_fs_forget { struct list_head list; struct 
virtio_fs_forget_req req; }; enum { OVL_XINO_OFF = 0, OVL_XINO_AUTO = 1, OVL_XINO_ON = 2, }; enum { OVL_UUID_OFF = 0, OVL_UUID_NULL = 1, OVL_UUID_AUTO = 2, OVL_UUID_ON = 3, }; enum ovl_path_type { __OVL_PATH_UPPER = 1, __OVL_PATH_MERGE = 2, __OVL_PATH_ORIGIN = 4, }; enum ovl_xattr { OVL_XATTR_OPAQUE = 0, OVL_XATTR_REDIRECT = 1, OVL_XATTR_ORIGIN = 2, OVL_XATTR_IMPURE = 3, OVL_XATTR_NLINK = 4, OVL_XATTR_UPPER = 5, OVL_XATTR_UUID = 6, OVL_XATTR_METACOPY = 7, OVL_XATTR_PROTATTR = 8, }; enum { OVL_REDIRECT_OFF = 0, OVL_REDIRECT_FOLLOW = 1, OVL_REDIRECT_NOFOLLOW = 2, OVL_REDIRECT_ON = 3, }; enum ovl_inode_flag { OVL_IMPURE = 0, OVL_WHITEOUTS = 1, OVL_INDEX = 2, OVL_UPPERDATA = 3, OVL_CONST_INO = 4, OVL_HAS_DIGEST = 5, OVL_VERIFIED_DIGEST = 6, }; enum ovl_entry_flag { OVL_E_UPPER_ALIAS = 0, OVL_E_OPAQUE = 1, OVL_E_CONNECTED = 2, }; struct ovl_dir_cache; struct ovl_entry; struct ovl_inode { union { struct ovl_dir_cache *cache; const char *lowerdata_redirect; }; const char *redirect; u64 version; unsigned long flags; struct inode vfs_inode; struct dentry *__upperdentry; struct ovl_entry *oe; struct mutex lock; }; struct ovl_layer; struct ovl_path { const struct ovl_layer *layer; struct dentry *dentry; }; struct ovl_entry { unsigned int __numlower; struct ovl_path __lowerstack[0]; }; struct ovl_sb; struct ovl_layer { struct vfsmount *mnt; struct inode *trap; struct ovl_sb *fs; int idx; int fsid; }; struct ovl_sb { struct super_block *sb; dev_t pseudo_dev; bool bad_uuid; bool is_lower; }; struct ovl_config { char *upperdir; char *workdir; char **lowerdirs; bool default_permissions; int redirect_mode; int verity_mode; bool index; int uuid; bool nfs_export; int xino; bool metacopy; bool userxattr; bool ovl_volatile; bool override_creds; }; struct ovl_fs { unsigned int numlayer; unsigned int numfs; unsigned int numdatalayer; const struct ovl_layer *layers; struct ovl_sb *fs; struct dentry *workbasedir; struct dentry *workdir; struct dentry *indexdir; long namelen; struct ovl_config config; const struct cred *creator_cred; bool tmpfile; bool noxattr; bool nofh; bool upperdir_locked; bool workdir_locked; struct inode *workbasedir_trap; struct inode *workdir_trap; struct inode *indexdir_trap; int xino_mode; atomic_long_t last_ino; struct dentry *whiteout; bool no_shared_whiteout; errseq_t errseq; }; struct ovl_opt_set { bool metacopy; bool redirect; bool nfs_export; bool index; }; struct ovl_fs_context_layer; struct ovl_fs_context { struct path upper; struct path work; size_t capacity; size_t nr; size_t nr_data; struct ovl_opt_set set; struct ovl_fs_context_layer *lower; char *lowerdir_all; }; struct ovl_fs_context_layer { char *name; struct path path; }; struct ovl_cattr { dev_t rdev; umode_t mode; const char *link; struct dentry *hardlink; }; struct ovl_inode_params { struct inode *newinode; struct dentry *upperdentry; struct ovl_entry *oe; bool index; char *redirect; char *lowerdata_redirect; }; enum { OVL_VERITY_OFF = 0, OVL_VERITY_ON = 1, OVL_VERITY_REQUIRE = 2, }; struct ovl_fb { u8 version; u8 magic; u8 len; u8 flags; u8 type; uuid_t uuid; u32 fid[0]; } __attribute__((packed)); struct ovl_fh { u8 padding[3]; union { struct ovl_fb fb; struct { struct {} __empty_buf; u8 buf[0]; }; }; }; struct ovl_metacopy { u8 version; u8 len; u8 flags; u8 digest_algo; u8 digest[64]; }; struct ovl_lookup_data { struct super_block *sb; struct vfsmount *mnt; struct qstr name; bool is_dir; bool opaque; bool stop; bool last; char *redirect; int metacopy; bool absolute_redirect; }; enum ovl_copyop { OVL_COPY = 0, 
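/*
 * Illustrative sketch (editor's example): enum ovl_path_type above is a bit
 * mask rather than a set of distinct states; overlayfs tests it with macros
 * equivalent to:
 *
 *   #define OVL_TYPE_UPPER(type)  ((type) & __OVL_PATH_UPPER)
 *   #define OVL_TYPE_MERGE(type)  ((type) & __OVL_PATH_MERGE)
 *   #define OVL_TYPE_ORIGIN(type) ((type) & __OVL_PATH_ORIGIN)
 *
 * so a merged directory backed by an upper layer reports
 * (__OVL_PATH_UPPER | __OVL_PATH_MERGE).
 */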
OVL_CLONE = 1, OVL_DEDUPE = 2, }; struct ovl_aio_req { struct kiocb iocb; refcount_t ref; struct kiocb *orig_iocb; }; struct ovl_cache_entry { unsigned int len; unsigned int type; u64 real_ino; u64 ino; struct list_head l_node; struct rb_node node; struct ovl_cache_entry *next_maybe_whiteout; bool is_upper; bool is_whiteout; char name[0]; }; struct ovl_dir_cache { long refcount; u64 version; struct list_head entries; struct rb_root root; }; struct ovl_readdir_translate { struct dir_context *orig_ctx; struct ovl_dir_cache *cache; struct dir_context ctx; u64 parent_ino; int fsid; int xinobits; bool xinowarn; }; struct ovl_readdir_data { struct dir_context ctx; struct dentry *dentry; bool is_lowest; struct rb_root *root; struct list_head *list; struct list_head middle; struct ovl_cache_entry *first_maybe_whiteout; int count; int err; bool is_upper; bool d_type_supported; }; struct ovl_dir_file { bool is_real; bool is_upper; struct ovl_dir_cache *cache; struct list_head *cursor; struct file *realfile; struct file *upperfile; }; struct ovl_copy_up_ctx { struct dentry *parent; struct dentry *dentry; struct path lowerpath; struct kstat stat; struct kstat pstat; const char *link; struct dentry *destdir; struct qstr destname; struct dentry *workdir; bool origin; bool indexed; bool metacopy; bool metacopy_digest; }; struct ovl_cu_creds { const struct cred *old; struct cred *new; }; enum ovl_opt { Opt_lowerdir = 0, Opt_lowerdir_add = 1, Opt_datadir_add = 2, Opt_upperdir = 3, Opt_workdir = 4, Opt_default_permissions = 5, Opt_redirect_dir = 6, Opt_index = 7, Opt_uuid = 8, Opt_nfs_export = 9, Opt_userxattr = 10, Opt_xino = 11, Opt_metacopy = 12, Opt_verity = 13, Opt_volatile = 14, Opt_override_creds = 15, }; enum LOG_RECORD_TYPE { FULL___2 = 0, SAME_FILE = 1, SAME_FILE_CLOSE_BLOCK = 2, SAME_FILE_CLOSE_BLOCK_SHORT = 3, SAME_FILE_NEXT_BLOCK = 4, SAME_FILE_NEXT_BLOCK_SHORT = 5, }; enum incfs_hash_tree_algorithm { INCFS_HASH_TREE_NONE = 0, INCFS_HASH_TREE_SHA256 = 1, }; enum incfs_file_header_flags { INCFS_FILE_MAPPED = 2, }; enum FILL_PERMISSION { CANT_FILL = 0, CAN_FILL = 1, }; enum incfs_compression_alg { COMPRESSION_NONE = 0, COMPRESSION_LZ4 = 1, COMPRESSION_ZSTD = 2, }; enum incfs_block_map_entry_flags { INCFS_BLOCK_COMPRESSED_LZ4 = 1, INCFS_BLOCK_COMPRESSED_ZSTD = 2, INCFS_BLOCK_COMPRESSED_MASK = 7, }; enum incfs_block_flags { INCFS_BLOCK_FLAGS_NONE = 0, INCFS_BLOCK_FLAGS_HASH = 1, }; typedef struct { __u8 bytes[16]; } incfs_uuid_t; struct pending_read { incfs_uuid_t file_id; s64 timestamp_us; atomic_t done; int block_index; int serial_number; uid_t uid; struct list_head mi_reads_list; struct list_head segment_reads_list; struct callback_head rcu; }; struct full_record { enum LOG_RECORD_TYPE type: 3; u32 block_index: 29; incfs_uuid_t file_id; u64 absolute_ts_us; uid_t uid; } __attribute__((packed)); struct read_log_state { u32 generation_id; u32 next_offset; u32 current_pass_no; struct full_record base_record; u64 current_record_no; }; struct read_log { void *rl_ring_buf; int rl_size; struct read_log_state rl_head; struct read_log_state rl_tail; spinlock_t rl_lock; wait_queue_head_t ml_notif_wq; struct delayed_work ml_wakeup_work; }; struct mount_options { unsigned int read_timeout_ms; unsigned int readahead_pages; unsigned int read_log_pages; unsigned int read_log_wakeup_count; bool report_uid; char *sysfs_name; }; struct mem_range { u8 *data; size_t len; }; struct incfs_per_uid_read_timeouts; struct ZSTD_DCtx_s; typedef struct ZSTD_DCtx_s ZSTD_DCtx; typedef ZSTD_DCtx ZSTD_DStream; struct 
incfs_sysfs_node; struct mount_info { struct super_block *mi_sb; struct path mi_backing_dir_path; struct dentry *mi_index_dir; bool mi_index_free; struct dentry *mi_incomplete_dir; bool mi_incomplete_free; const struct cred *mi_owner; struct mount_options mi_options; struct mutex mi_dir_struct_mutex; wait_queue_head_t mi_pending_reads_notif_wq; spinlock_t pending_read_lock; struct list_head mi_reads_list_head; int mi_pending_reads_count; int mi_last_pending_read_number; struct read_log mi_log; struct mem_range pseudo_file_xattr[3]; wait_queue_head_t mi_blocks_written_notif_wq; atomic_t mi_blocks_written; spinlock_t mi_per_uid_read_timeouts_lock; struct incfs_per_uid_read_timeouts *mi_per_uid_read_timeouts; int mi_per_uid_read_timeouts_size; struct mutex mi_zstd_workspace_mutex; void *mi_zstd_workspace; ZSTD_DStream *mi_zstd_stream; struct delayed_work mi_zstd_cleanup_work; struct incfs_sysfs_node *mi_sysfs_node; struct mutex mi_le_mutex; incfs_uuid_t mi_le_file_id; u64 mi_le_time_us; u32 mi_le_page; u32 mi_le_errno; uid_t mi_le_uid; u32 mi_reads_failed_timed_out; u32 mi_reads_failed_hash_verification; u32 mi_reads_failed_other; u32 mi_reads_delayed_pending; u64 mi_reads_delayed_pending_us; u32 mi_reads_delayed_min; u64 mi_reads_delayed_min_us; }; struct incfs_per_uid_read_timeouts { __u32 uid; __u32 min_time_us; __u32 min_pending_time_us; __u32 max_pending_time_us; }; typedef uint16_t U16; typedef uint8_t BYTE; typedef uint32_t U32; typedef struct { U16 nextState; BYTE nbAdditionalBits; BYTE nbBits; U32 baseValue; } ZSTD_seqSymbol; typedef U32 HUF_DTable; typedef struct { ZSTD_seqSymbol LLTable[513]; ZSTD_seqSymbol OFTable[257]; ZSTD_seqSymbol MLTable[513]; HUF_DTable hufTable[4097]; U32 rep[3]; U32 workspace[157]; } ZSTD_entropyDTables_t; typedef enum { ZSTD_frame = 0, ZSTD_skippableFrame = 1, } ZSTD_frameType_e; typedef struct { unsigned long long frameContentSize; unsigned long long windowSize; unsigned int blockSizeMax; ZSTD_frameType_e frameType; unsigned int headerSize; unsigned int dictID; unsigned int checksumFlag; } ZSTD_frameHeader; typedef uint64_t U64; typedef enum { bt_raw = 0, bt_rle = 1, bt_compressed = 2, bt_reserved = 3, } blockType_e; typedef enum { ZSTDds_getFrameHeaderSize = 0, ZSTDds_decodeFrameHeader = 1, ZSTDds_decodeBlockHeader = 2, ZSTDds_decompressBlock = 3, ZSTDds_decompressLastBlock = 4, ZSTDds_checkChecksum = 5, ZSTDds_decodeSkippableHeader = 6, ZSTDds_skipFrame = 7, } ZSTD_dStage; struct xxh64_state { uint64_t total_len; uint64_t v1; uint64_t v2; uint64_t v3; uint64_t v4; uint64_t mem64[4]; uint32_t memsize; }; typedef enum { ZSTD_f_zstd1 = 0, ZSTD_f_zstd1_magicless = 1, } ZSTD_format_e; typedef enum { ZSTD_d_validateChecksum = 0, ZSTD_d_ignoreChecksum = 1, } ZSTD_forceIgnoreChecksum_e; typedef void * (*ZSTD_allocFunction)(void *, size_t); typedef void (*ZSTD_freeFunction)(void *, void *); typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void *opaque; } ZSTD_customMem; typedef enum { ZSTD_use_indefinitely = -1, ZSTD_dont_use = 0, ZSTD_use_once = 1, } ZSTD_dictUses_e; struct ZSTD_DDict_s; typedef struct ZSTD_DDict_s ZSTD_DDict; typedef struct { const ZSTD_DDict **ddictPtrTable; size_t ddictPtrTableSize; size_t ddictPtrCount; } ZSTD_DDictHashSet; typedef enum { ZSTD_rmd_refSingleDDict = 0, ZSTD_rmd_refMultipleDDicts = 1, } ZSTD_refMultipleDDicts_e; typedef enum { zdss_init = 0, zdss_loadHeader = 1, zdss_read = 2, zdss_load = 3, zdss_flush = 4, } ZSTD_dStreamStage; typedef enum { ZSTD_bm_buffered = 0, ZSTD_bm_stable = 1, } 
ZSTD_bufferMode_e; struct ZSTD_outBuffer_s { void *dst; size_t size; size_t pos; }; typedef struct ZSTD_outBuffer_s ZSTD_outBuffer; typedef enum { ZSTD_not_in_dst = 0, ZSTD_in_dst = 1, ZSTD_split = 2, } ZSTD_litLocation_e; struct ZSTD_DCtx_s { const ZSTD_seqSymbol *LLTptr; const ZSTD_seqSymbol *MLTptr; const ZSTD_seqSymbol *OFTptr; const HUF_DTable *HUFptr; ZSTD_entropyDTables_t entropy; U32 workspace[640]; const void *previousDstEnd; const void *prefixStart; const void *virtualStart; const void *dictEnd; size_t expected; ZSTD_frameHeader fParams; U64 processedCSize; U64 decodedSize; blockType_e bType; ZSTD_dStage stage; U32 litEntropy; U32 fseEntropy; struct xxh64_state xxhState; size_t headerSize; ZSTD_format_e format; ZSTD_forceIgnoreChecksum_e forceIgnoreChecksum; U32 validateChecksum; const BYTE *litPtr; ZSTD_customMem customMem; size_t litSize; size_t rleSize; size_t staticSize; ZSTD_DDict *ddictLocal; const ZSTD_DDict *ddict; U32 dictID; int ddictIsCold; ZSTD_dictUses_e dictUses; ZSTD_DDictHashSet *ddictSet; ZSTD_refMultipleDDicts_e refMultipleDDicts; ZSTD_dStreamStage streamStage; char *inBuff; size_t inBuffSize; size_t inPos; size_t maxWindowSize; char *outBuff; size_t outBuffSize; size_t outStart; size_t outEnd; size_t lhSize; U32 hostageByte; int noForwardProgress; ZSTD_bufferMode_e outBufferMode; ZSTD_outBuffer expectedOutBuffer; BYTE *litBuffer; const BYTE *litBufferEnd; ZSTD_litLocation_e litBufferLocation; BYTE litExtraBuffer[65568]; BYTE headerBuffer[18]; size_t oversizedDuration; }; struct incfs_sysfs_node { struct kobject isn_sysfs_node; struct completion isn_completion; struct mount_info *isn_mi; }; struct data_file; struct inode_info { struct mount_info *n_mount_info; struct inode *n_backing_inode; struct data_file *n_file; struct inode n_vfs_inode; }; struct data_file_segment { wait_queue_head_t new_data_arrival_wq; struct rw_semaphore rwsem; struct list_head reads_list_head; }; struct backing_file_context; struct mtree; struct incfs_df_signature; struct incfs_df_verity_signature; struct data_file { struct backing_file_context *df_backing_file_context; struct mount_info *df_mount_info; incfs_uuid_t df_id; struct data_file_segment df_segments[3]; loff_t df_metadata_off; loff_t df_blockmap_off; loff_t df_size; u32 df_header_flags; int df_data_block_count; int df_total_block_count; loff_t df_mapped_offset; atomic_t df_data_blocks_written; u32 df_initial_data_blocks_written; atomic_t df_hash_blocks_written; u32 df_initial_hash_blocks_written; loff_t df_status_offset; struct mutex df_enable_verity; struct mtree *df_hash_tree; struct incfs_df_signature *df_signature; struct mem_range df_verity_file_digest; struct incfs_df_verity_signature *df_verity_signature; }; struct backing_file_context { struct mutex bc_mutex; struct file *bc_file; loff_t bc_last_md_record_offset; const struct cred *bc_cred; bool bc_has_bad_block; }; struct incfs_hash_alg; struct mtree { struct incfs_hash_alg *alg; u8 root_hash[32]; u32 hash_level_suboffset[8]; u32 hash_tree_area_size; int depth; }; struct incfs_hash_alg { const char *name; int digest_size; enum incfs_hash_tree_algorithm id; struct crypto_shash *shash; }; struct incfs_df_signature { u32 sig_size; u64 sig_offset; u32 hash_size; u64 hash_offset; }; struct incfs_df_verity_signature { u32 size; u64 offset; }; struct same_file { enum LOG_RECORD_TYPE type: 3; u32 block_index: 29; uid_t uid; u16 relative_ts_us; } __attribute__((packed)); struct same_file_close_block { enum LOG_RECORD_TYPE type: 3; u16 relative_ts_us: 13; s16 block_index_delta; 
}; struct same_file_close_block_short { enum LOG_RECORD_TYPE type: 3; u8 relative_ts_tens_us: 5; s8 block_index_delta; } __attribute__((packed)); struct same_file_next_block { enum LOG_RECORD_TYPE type: 3; u16 relative_ts_us: 13; } __attribute__((packed)); struct same_file_next_block_short { enum LOG_RECORD_TYPE type: 3; u8 relative_ts_tens_us: 5; } __attribute__((packed)); union log_record { struct full_record full_record; struct same_file same_file; struct same_file_close_block same_file_close_block; struct same_file_close_block_short same_file_close_block_short; struct same_file_next_block same_file_next_block; struct same_file_next_block_short same_file_next_block_short; }; struct incfs_md_header { __u8 h_md_entry_type; __le16 h_record_size; __le32 h_unused1; __le64 h_next_md_offset; __le64 h_unused2; } __attribute__((packed)); struct incfs_blockmap { struct incfs_md_header m_header; __le64 m_base_offset; __le32 m_block_count; } __attribute__((packed)); struct incfs_file_signature { struct incfs_md_header sg_header; __le32 sg_sig_size; __le64 sg_sig_offset; __le32 sg_hash_tree_size; __le64 sg_hash_tree_offset; } __attribute__((packed)); struct incfs_status { struct incfs_md_header is_header; __le32 is_data_blocks_written; __le32 is_hash_blocks_written; __le32 is_dummy[6]; } __attribute__((packed)); struct incfs_file_verity_signature { struct incfs_md_header vs_header; __le32 vs_size; __le64 vs_offset; } __attribute__((packed)); struct metadata_handler { loff_t md_record_offset; loff_t md_prev_record_offset; void *context; union { struct incfs_md_header md_header; struct incfs_blockmap blockmap; struct incfs_file_signature signature; struct incfs_status status; struct incfs_file_verity_signature verity_signature; } md_buffer; int (*handle_blockmap)(struct incfs_blockmap *, struct metadata_handler *); int (*handle_signature)(struct incfs_file_signature *, struct metadata_handler *); int (*handle_status)(struct incfs_status *, struct metadata_handler *); int (*handle_verity_signature)(struct incfs_file_verity_signature *, struct metadata_handler *); }; struct incfs_blockmap_entry { __le32 me_data_offset_lo; __le16 me_data_offset_hi; __le16 me_data_size; __le16 me_flags; } __attribute__((packed)); struct data_file_block { loff_t db_backing_file_data_offset; size_t db_stored_size; enum incfs_compression_alg db_comp_alg; }; struct incfs_read_data_file_timeouts { u32 min_time_us; u32 min_pending_time_us; u32 max_pending_time_us; }; struct ZSTD_inBuffer_s { const void *src; size_t size; size_t pos; }; typedef struct ZSTD_inBuffer_s ZSTD_inBuffer; typedef ZSTD_DStream zstd_dstream; typedef ZSTD_outBuffer zstd_out_buffer; typedef ZSTD_inBuffer zstd_in_buffer; struct dir_file { struct mount_info *mount_info; struct file *backing_dir; }; struct incfs_file_data { enum FILL_PERMISSION fd_fill_permission; int fd_get_block_pos; int fd_filled_data_blocks; int fd_filled_hash_blocks; }; struct incfs_get_filled_blocks_args { __u64 range_buffer; __u32 range_buffer_size; __u32 start_index; __u32 end_index; __u32 total_blocks_out; __u32 data_blocks_out; __u32 range_buffer_size_out; __u32 index_out; }; struct incfs_filled_range { __u32 begin; __u32 end; }; struct incfs_fill_block { __u32 block_index; __u32 data_len; __u64 data; __u8 compression; __u8 flags; __u16 reserved1; __u32 reserved2; __u64 reserved3; }; struct incfs_pending_read_info { incfs_uuid_t file_id; __u64 timestamp_us; __u32 block_index; __u32 serial_number; }; struct incfs_pending_read_info2 { incfs_uuid_t file_id; __u64 timestamp_us; __u32 
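/*
 * Illustrative sketch (editor's example): struct incfs_blockmap_entry packs
 * a 48-bit backing-file offset into me_data_offset_lo/_hi. Assuming a
 * little-endian host (the driver uses le32_to_cpu()/le16_to_cpu()):
 *
 *   static inline loff_t incfs_bme_offset(const struct incfs_blockmap_entry *e)
 *   {
 *           return (loff_t)e->me_data_offset_lo |
 *                  ((loff_t)e->me_data_offset_hi << 32);
 *   }
 *
 * me_flags carries the INCFS_BLOCK_COMPRESSED_* bits; ZSTD-compressed
 * blocks are inflated through the kernel's streaming API, which
 * communicates only via the zstd_in_buffer/zstd_out_buffer cursors defined
 * above, e.g. (assuming the <linux/zstd.h> wrappers):
 *
 *   zstd_in_buffer  in  = { .src = src, .size = src_len, .pos = 0 };
 *   zstd_out_buffer out = { .dst = dst, .size = dst_len, .pos = 0 };
 *   zstd_decompress_stream(mi->mi_zstd_stream, &out, &in);
 *   // on success, out.pos holds the number of bytes produced
 */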
block_index; __u32 serial_number; __u32 uid; __u32 reserved; }; enum incfs_metadata_type { INCFS_MD_NONE = 0, INCFS_MD_BLOCK_MAP = 1, INCFS_MD_FILE_ATTR = 2, INCFS_MD_SIGNATURE = 3, INCFS_MD_STATUS = 4, INCFS_MD_VERITY_SIGNATURE = 5, }; struct incfs_file_header { __le64 fh_magic; __le64 fh_version; __le16 fh_header_size; __le16 fh_data_block_size; __le32 fh_flags; union { struct { __le64 fh_first_md_offset; __le64 fh_file_size; incfs_uuid_t fh_uuid; }; struct { __le64 fh_original_offset; __le64 fh_mapped_file_size; incfs_uuid_t fh_original_uuid; }; }; }; struct signature_info { u32 version; enum incfs_hash_tree_algorithm hash_algorithm; u8 log2_blocksize; struct mem_range salt; struct mem_range root_hash; }; struct inode_search { unsigned long ino; }; struct incfs_new_file_args { incfs_uuid_t file_id; __u64 size; __u16 mode; __u16 reserved1; __u32 reserved2; __u64 directory_path; __u64 file_name; __u64 file_attr; __u32 file_attr_len; __u32 reserved4; __u64 signature_info; __u64 signature_size; __u64 reserved6; }; struct incfs_get_last_read_error_args { incfs_uuid_t file_id_out; __u64 time_us_out; __u32 page_out; __u32 errno_out; __u32 uid_out; __u32 reserved1; __u64 reserved2; }; struct incfs_get_read_timeouts_args { __u64 timeouts_array; __u32 timeouts_array_size; __u32 timeouts_array_size_out; }; struct incfs_permit_fill { __u32 file_descriptor; }; struct incfs_create_mapped_file_args { __u64 size; __u16 mode; __u16 reserved1; __u32 reserved2; __u64 directory_path; __u64 file_name; incfs_uuid_t source_file_id; __u64 source_offset; }; struct incfs_set_read_timeouts_args { __u64 timeouts_array; __u32 timeouts_array_size; }; struct pending_reads_state { int last_pending_read_sn; }; struct log_file_state { struct read_log_state state; }; struct blocks_written_file_state { unsigned long blocks_written; }; enum parse_parameter { Opt_read_timeout = 0, Opt_readahead_pages = 1, Opt_rlog_pages = 2, Opt_rlog_wakeup_cnt = 3, Opt_report_uid = 4, Opt_sysfs_name = 5, Opt_err___4 = 6, }; struct dentry_info { struct path backing_path; }; struct incfs_get_file_sig_args { __u64 file_signature; __u32 file_signature_buf_size; __u32 file_signature_len_out; }; struct incfs_get_block_count_args { __u32 total_data_blocks_out; __u32 filled_data_blocks_out; __u32 total_hash_blocks_out; __u32 filled_hash_blocks_out; }; struct incfs_fill_blocks { __u64 count; __u64 fill_blocks; }; struct inode_search___2 { unsigned long ino; struct dentry *backing_dentry; size_t size; bool verity; }; enum { Opt_uid___5 = 0, Opt_gid___6 = 1, Opt_mode___4 = 2, Opt_err___5 = 3, }; struct debugfs_fsdata { const struct file_operations *real_fops; union { debugfs_automount_t automount; struct { refcount_t active_users; struct completion active_users_drained; }; }; }; struct debugfs_mount_opts { kuid_t uid; kgid_t gid; umode_t mode; unsigned int opts; }; struct debugfs_fs_info { struct debugfs_mount_opts mount_opts; }; struct debugfs_reg32 { char *name; unsigned long offset; }; struct debugfs_blob_wrapper { void *data; unsigned long size; }; struct debugfs_regset32 { const struct debugfs_reg32 *regs; int nregs; void *base; struct device *dev; }; struct debugfs_devm_entry { int (*read)(struct seq_file *, void *); struct device *dev; }; struct tracefs_dir_ops { int (*mkdir)(const char *); int (*rmdir)(const char *); }; enum { TRACEFS_EVENT_INODE = 2, TRACEFS_EVENT_TOP_INODE = 4, TRACEFS_GID_PERM_SET = 8, TRACEFS_UID_PERM_SET = 16, TRACEFS_INSTANCE_INODE = 32, }; struct tracefs_inode { struct inode vfs_inode; unsigned long flags; void 
*private; }; struct tracefs_mount_opts { kuid_t uid; kgid_t gid; umode_t mode; unsigned int opts; }; struct tracefs_fs_info { struct tracefs_mount_opts mount_opts; }; struct eventfs_attr { int mode; kuid_t uid; kgid_t gid; }; struct eventfs_inode { union { struct list_head list; struct callback_head rcu; }; struct list_head children; const struct eventfs_entry *entries; const char *name; struct dentry *events_dir; struct eventfs_attr *entry_attrs; void *data; struct eventfs_attr attr; struct kref kref; unsigned int is_freed: 1; unsigned int is_events: 1; unsigned int nr_entries: 30; unsigned int ino; }; enum { EVENTFS_SAVE_MODE = 65536, EVENTFS_SAVE_UID = 131072, EVENTFS_SAVE_GID = 262144, EVENTFS_TOPLEVEL = 524288, }; typedef __le32 f2fs_hash_t; enum page_type { DATA = 0, NODE = 1, META = 2, NR_PAGE_TYPE = 3, META_FLUSH = 4, IPU = 5, OPU = 6, }; enum temp_type { HOT = 0, WARM = 1, COLD = 2, NR_TEMP_TYPE = 3, }; enum iostat_type { APP_DIRECT_IO = 0, APP_BUFFERED_IO = 1, APP_WRITE_IO = 2, APP_MAPPED_IO = 3, APP_BUFFERED_CDATA_IO = 4, APP_MAPPED_CDATA_IO = 5, FS_DATA_IO = 6, FS_CDATA_IO = 7, FS_NODE_IO = 8, FS_META_IO = 9, FS_GC_DATA_IO = 10, FS_GC_NODE_IO = 11, FS_CP_DATA_IO = 12, FS_CP_NODE_IO = 13, FS_CP_META_IO = 14, APP_DIRECT_READ_IO = 15, APP_BUFFERED_READ_IO = 16, APP_READ_IO = 17, APP_MAPPED_READ_IO = 18, APP_BUFFERED_CDATA_READ_IO = 19, APP_MAPPED_CDATA_READ_IO = 20, FS_DATA_READ_IO = 21, FS_GDATA_READ_IO = 22, FS_CDATA_READ_IO = 23, FS_NODE_READ_IO = 24, FS_META_READ_IO = 25, FS_DISCARD_IO = 26, FS_FLUSH_IO = 27, FS_ZONE_RESET_IO = 28, NR_IO_TYPE = 29, }; enum { FI_NEW_INODE = 0, FI_DIRTY_INODE = 1, FI_AUTO_RECOVER = 2, FI_DIRTY_DIR = 3, FI_INC_LINK = 4, FI_ACL_MODE = 5, FI_NO_ALLOC = 6, FI_FREE_NID = 7, FI_NO_EXTENT = 8, FI_INLINE_XATTR = 9, FI_INLINE_DATA = 10, FI_INLINE_DENTRY = 11, FI_APPEND_WRITE = 12, FI_UPDATE_WRITE = 13, FI_NEED_IPU = 14, FI_ATOMIC_FILE = 15, FI_DATA_EXIST = 16, FI_INLINE_DOTS = 17, FI_SKIP_WRITES = 18, FI_OPU_WRITE = 19, FI_DIRTY_FILE = 20, FI_PREALLOCATED_ALL = 21, FI_HOT_DATA = 22, FI_EXTRA_ATTR = 23, FI_PROJ_INHERIT = 24, FI_PIN_FILE = 25, FI_VERITY_IN_PROGRESS = 26, FI_COMPRESSED_FILE = 27, FI_COMPRESS_CORRUPT = 28, FI_MMAP_FILE = 29, FI_ENABLE_COMPRESS = 30, FI_COMPRESS_RELEASED = 31, FI_ALIGNED_WRITE = 32, FI_COW_FILE = 33, FI_ATOMIC_COMMITTED = 34, FI_ATOMIC_REPLACE = 35, FI_OPENED_FILE = 36, FI_MAX = 37, }; enum { FAULT_KMALLOC = 0, FAULT_KVMALLOC = 1, FAULT_PAGE_ALLOC = 2, FAULT_PAGE_GET = 3, FAULT_ALLOC_BIO = 4, FAULT_ALLOC_NID = 5, FAULT_ORPHAN = 6, FAULT_BLOCK = 7, FAULT_DIR_DEPTH = 8, FAULT_EVICT_INODE = 9, FAULT_TRUNCATE = 10, FAULT_READ_IO = 11, FAULT_CHECKPOINT = 12, FAULT_DISCARD = 13, FAULT_WRITE_IO = 14, FAULT_SLAB_ALLOC = 15, FAULT_DQUOT_INIT = 16, FAULT_LOCK_OP = 17, FAULT_BLKADDR_VALIDITY = 18, FAULT_BLKADDR_CONSISTENCE = 19, FAULT_NO_SEGMENT = 20, FAULT_MAX = 21, }; enum { CP_TIME = 0, REQ_TIME = 1, DISCARD_TIME = 2, GC_TIME = 3, DISABLE_TIME = 4, UMOUNT_DISCARD_TIMEOUT = 5, MAX_TIME = 6, }; enum fsync_mode { FSYNC_MODE_POSIX = 0, FSYNC_MODE_STRICT = 1, FSYNC_MODE_NOBARRIER = 2, }; enum { ORPHAN_INO = 0, APPEND_INO = 1, UPDATE_INO = 2, TRANS_DIR_INO = 3, FLUSH_INO = 4, MAX_INO_ENTRY = 5, }; enum { SBI_IS_DIRTY = 0, SBI_IS_CLOSE = 1, SBI_NEED_FSCK = 2, SBI_POR_DOING = 3, SBI_NEED_SB_WRITE = 4, SBI_NEED_CP = 5, SBI_IS_SHUTDOWN = 6, SBI_IS_RECOVERED = 7, SBI_CP_DISABLED = 8, SBI_CP_DISABLED_QUICK = 9, SBI_QUOTA_NEED_FLUSH = 10, SBI_QUOTA_SKIP_FLUSH = 11, SBI_QUOTA_NEED_REPAIR = 12, SBI_IS_RESIZEFS = 13, SBI_IS_FREEZING = 14, 
SBI_IS_WRITABLE = 15, MAX_SBI_FLAG = 16, }; enum f2fs_error { ERROR_CORRUPTED_CLUSTER = 0, ERROR_FAIL_DECOMPRESSION = 1, ERROR_INVALID_BLKADDR = 2, ERROR_CORRUPTED_DIRENT = 3, ERROR_CORRUPTED_INODE = 4, ERROR_INCONSISTENT_SUMMARY = 5, ERROR_INCONSISTENT_FOOTER = 6, ERROR_INCONSISTENT_SUM_TYPE = 7, ERROR_CORRUPTED_JOURNAL = 8, ERROR_INCONSISTENT_NODE_COUNT = 9, ERROR_INCONSISTENT_BLOCK_COUNT = 10, ERROR_INVALID_CURSEG = 11, ERROR_INCONSISTENT_SIT = 12, ERROR_CORRUPTED_VERITY_XATTR = 13, ERROR_CORRUPTED_XATTR = 14, ERROR_INVALID_NODE_REFERENCE = 15, ERROR_INCONSISTENT_NAT = 16, ERROR_MAX = 17, }; enum extent_type { EX_READ = 0, EX_BLOCK_AGE = 1, NR_EXTENT_CACHES = 2, }; enum { PAGE_PRIVATE_NOT_POINTER = 0, PAGE_PRIVATE_ONGOING_MIGRATION = 1, PAGE_PRIVATE_INLINE_INODE = 2, PAGE_PRIVATE_REF_RESOURCE = 3, PAGE_PRIVATE_MAX = 4, }; enum count_type { F2FS_DIRTY_DENTS = 0, F2FS_DIRTY_DATA = 1, F2FS_DIRTY_QDATA = 2, F2FS_DIRTY_NODES = 3, F2FS_DIRTY_META = 4, F2FS_DIRTY_IMETA = 5, F2FS_WB_CP_DATA = 6, F2FS_WB_DATA = 7, F2FS_RD_DATA = 8, F2FS_RD_NODE = 9, F2FS_RD_META = 10, F2FS_DIO_WRITE = 11, F2FS_DIO_READ = 12, NR_COUNT_TYPE = 13, }; struct f2fs_rwsem { struct rw_semaphore internal_rwsem; wait_queue_head_t read_waiters; }; typedef u32 nid_t; struct extent_tree; struct f2fs_inode_info { struct inode vfs_inode; unsigned long i_flags; unsigned char i_advise; unsigned char i_dir_level; union { unsigned int i_current_depth; unsigned short i_gc_failures; }; unsigned int i_pino; umode_t i_acl_mode; unsigned long flags[1]; struct f2fs_rwsem i_sem; atomic_t dirty_pages; f2fs_hash_t chash; unsigned int clevel; struct task_struct *task; struct task_struct *cp_task; struct task_struct *wb_task; nid_t i_xattr_nid; loff_t last_disk_size; spinlock_t i_size_lock; struct dquot __attribute__((btf_type_tag("rcu"))) *i_dquot[3]; qsize_t i_reserved_quota; struct list_head dirty_list; struct list_head gdirty_list; struct task_struct *atomic_write_task; struct extent_tree *extent_tree[2]; union { struct inode *cow_inode; struct inode *atomic_inode; }; struct f2fs_rwsem i_gc_rwsem[2]; struct f2fs_rwsem i_xattr_sem; int i_extra_isize; kprojid_t i_projid; int i_inline_xattr_size; struct timespec64 i_crtime; struct timespec64 i_disk_time[3]; atomic_t i_compr_blocks; unsigned char i_compress_algorithm; unsigned char i_log_cluster_size; unsigned char i_compress_level; unsigned char i_compress_flag; unsigned int i_cluster_size; unsigned int atomic_write_cnt; loff_t original_i_size; }; typedef u32 block_t; struct extent_info { unsigned int fofs; unsigned int len; union { struct { block_t blk; unsigned int c_len; }; struct { unsigned long long age; unsigned long long last_blocks; }; }; }; struct extent_node; struct extent_tree { nid_t ino; enum extent_type type; struct rb_root_cached root; struct extent_node *cached_en; struct list_head list; rwlock_t lock; atomic_t node_cnt; bool largest_updated; struct extent_info largest; }; struct extent_node { struct rb_node rb_node; struct extent_info ei; struct list_head list; struct extent_tree *et; }; struct f2fs_dir_entry { __le32 hash_code; __le32 ino; __le16 name_len; __u8 file_type; } __attribute__((packed)); struct f2fs_dentry_block { __u8 dentry_bitmap[27]; __u8 reserved[3]; struct f2fs_dir_entry dentry[214]; __u8 filename[1712]; }; struct f2fs_extent { __le32 fofs; __le32 blk; __le32 len; }; struct f2fs_inode { __le16 i_mode; __u8 i_advise; __u8 i_inline; __le32 i_uid; __le32 i_gid; __le32 i_links; __le64 i_size; __le64 i_blocks; __le64 i_atime; __le64 i_ctime; __le64 i_mtime; 
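/* The _nsec members that follow carry the nanosecond parts of the i_atime/i_ctime/i_mtime second counters above (on-disk f2fs inode layout). */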
__le32 i_atime_nsec; __le32 i_ctime_nsec; __le32 i_mtime_nsec; __le32 i_generation; union { __le32 i_current_depth; __le16 i_gc_failures; }; __le32 i_xattr_nid; __le32 i_flags; __le32 i_pino; __le32 i_namelen; __u8 i_name[255]; __u8 i_dir_level; struct f2fs_extent i_ext; union { struct { __le16 i_extra_isize; __le16 i_inline_xattr_size; __le32 i_projid; __le32 i_inode_checksum; __le64 i_crtime; __le32 i_crtime_nsec; __le64 i_compr_blocks; __u8 i_compress_algorithm; __u8 i_log_cluster_size; __le16 i_compress_flag; __le32 i_extra_end[0]; } __attribute__((packed)); __le32 i_addr[923]; }; __le32 i_nid[5]; }; struct direct_node { __le32 addr[1018]; }; struct indirect_node { __le32 nid[1018]; }; struct node_footer { __le32 nid; __le32 ino; __le32 flag; __le64 cp_ver; __le32 next_blkaddr; } __attribute__((packed)); struct f2fs_node { union { struct f2fs_inode i; struct direct_node dn; struct indirect_node in; }; struct node_footer footer; }; struct ckpt_req_control { struct task_struct *f2fs_issue_ckpt; int ckpt_thread_ioprio; wait_queue_head_t ckpt_wait_queue; atomic_t issued_ckpt; atomic_t total_ckpt; atomic_t queued_ckpt; struct llist_head issue_list; spinlock_t stat_lock; unsigned int cur_time; unsigned int peak_time; }; struct inode_management { struct xarray ino_root; spinlock_t ino_lock; struct list_head ino_list; unsigned long ino_num; }; struct extent_tree_info { struct xarray extent_tree_root; struct mutex extent_tree_lock; struct list_head extent_list; spinlock_t extent_lock; atomic_t total_ext_tree; struct list_head zombie_list; atomic_t total_zombie_tree; atomic_t total_ext_node; }; struct f2fs_mount_info { unsigned int opt; block_t root_reserved_blocks; kuid_t s_resuid; kgid_t s_resgid; int active_logs; int inline_xattr_size; char *s_qf_names[3]; int s_jquota_fmt; int alloc_mode; int fsync_mode; int fs_mode; int bggc_mode; int memory_mode; int errors; int discard_unit; struct fscrypt_dummy_policy dummy_enc_policy; block_t unusable_cap_perc; block_t unusable_cap; unsigned char compress_algorithm; unsigned char compress_log_size; unsigned char compress_level; bool compress_chksum; unsigned char compress_ext_cnt; unsigned char nocompress_ext_cnt; int compress_mode; unsigned char extensions[128]; unsigned char noextensions[128]; }; struct atgc_management { bool atgc_enabled; struct rb_root_cached root; struct list_head victim_list; unsigned int victim_count; unsigned int candidate_ratio; unsigned int max_candidate_count; unsigned int age_weight; unsigned long long age_threshold; }; struct f2fs_super_block; struct f2fs_nm_info; struct f2fs_sm_info; struct f2fs_bio_info; struct f2fs_checkpoint; struct f2fs_gc_kthread; struct f2fs_stat_info; struct f2fs_dev_info; struct iostat_lat_info; struct f2fs_sb_info { struct super_block *sb; struct proc_dir_entry *s_proc; struct f2fs_super_block *raw_super; struct f2fs_rwsem sb_lock; int valid_super_block; unsigned long s_flag; struct mutex writepages; unsigned int blocks_per_blkz; unsigned int max_open_zones; struct f2fs_nm_info *nm_info; struct inode *node_inode; struct f2fs_sm_info *sm_info; struct f2fs_bio_info *write_io[3]; struct f2fs_rwsem io_order_lock; unsigned long page_eio_ofs[3]; int page_eio_cnt[3]; struct f2fs_checkpoint *ckpt; int cur_cp_pack; spinlock_t cp_lock; struct inode *meta_inode; struct f2fs_rwsem cp_global_sem; struct f2fs_rwsem cp_rwsem; struct f2fs_rwsem node_write; struct f2fs_rwsem node_change; wait_queue_head_t cp_wait; unsigned long last_time[6]; long interval_time[6]; struct ckpt_req_control cprc_info; struct 
inode_management im[5]; spinlock_t fsync_node_lock; struct list_head fsync_node_list; unsigned int fsync_seg_id; unsigned int fsync_node_num; unsigned int max_orphans; struct list_head inode_list[3]; spinlock_t inode_lock[3]; struct mutex flush_lock; struct extent_tree_info extent_tree[2]; atomic64_t allocated_data_blocks; unsigned int hot_data_age_threshold; unsigned int warm_data_age_threshold; unsigned int last_age_weight; unsigned int log_sectors_per_block; unsigned int log_blocksize; unsigned int blocksize; unsigned int root_ino_num; unsigned int node_ino_num; unsigned int meta_ino_num; unsigned int log_blocks_per_seg; unsigned int blocks_per_seg; unsigned int unusable_blocks_per_sec; unsigned int segs_per_sec; unsigned int secs_per_zone; unsigned int total_sections; unsigned int total_node_count; unsigned int total_valid_node_count; int dir_level; bool readdir_ra; u64 max_io_bytes; block_t user_block_count; block_t total_valid_block_count; block_t discard_blks; block_t last_valid_block_count; block_t reserved_blocks; block_t current_reserved_blocks; block_t unusable_block_count; unsigned int nquota_files; struct f2fs_rwsem quota_sem; atomic_t nr_pages[13]; struct percpu_counter alloc_valid_block_count; struct percpu_counter rf_node_block_count; atomic_t wb_sync_req[2]; struct percpu_counter total_valid_inode_count; struct f2fs_mount_info mount_opt; struct f2fs_rwsem gc_lock; struct f2fs_gc_kthread *gc_thread; struct atgc_management am; unsigned int cur_victim_sec; unsigned int gc_mode; unsigned int next_victim_seg[2]; spinlock_t gc_remaining_trials_lock; unsigned int gc_remaining_trials; unsigned long long skipped_gc_rwsem; unsigned short gc_pin_file_threshold; struct f2fs_rwsem pin_sem; unsigned int max_victim_search; unsigned int migration_granularity; struct f2fs_stat_info *stat_info; atomic_t meta_count[4]; unsigned int segment_count[2]; unsigned int block_count[2]; atomic_t inplace_count; atomic64_t total_hit_ext[2]; atomic64_t read_hit_rbtree[2]; atomic64_t read_hit_cached[2]; atomic64_t read_hit_largest; atomic_t inline_xattr; atomic_t inline_inode; atomic_t inline_dir; atomic_t compr_inode; atomic64_t compr_blocks; atomic_t swapfile_inode; atomic_t atomic_files; atomic_t max_aw_cnt; unsigned int io_skip_bggc; unsigned int other_skip_bggc; unsigned int ndirty_inode[3]; atomic_t cp_call_count[2]; spinlock_t stat_lock; unsigned int data_io_flag; unsigned int node_io_flag; struct kobject s_kobj; struct completion s_kobj_unregister; struct kobject s_stat_kobj; struct completion s_stat_kobj_unregister; struct kobject s_feature_list_kobj; struct completion s_feature_list_kobj_unregister; struct list_head s_list; struct mutex umount_mutex; unsigned int shrinker_run_no; int s_ndevs; struct f2fs_dev_info *devs; unsigned int dirty_device; spinlock_t dev_lock; bool aligned_blksize; u64 sectors_written_start; u64 kbytes_written; struct crypto_shash *s_chksum_driver; __u32 s_chksum_seed; struct workqueue_struct *post_read_wq; struct work_struct s_error_work; unsigned char errors[16]; unsigned char stop_reason[32]; spinlock_t error_lock; bool error_dirty; struct kmem_cache *inline_xattr_slab; unsigned int inline_xattr_slab_size; unsigned int gc_segment_mode; unsigned int gc_reclaimed_segs[7]; unsigned long seq_file_ra_mul; int max_fragment_chunk; int max_fragment_hole; atomic64_t current_atomic_write; s64 peak_atomic_write; u64 committed_atomic_block; u64 revoked_atomic_block; struct kmem_cache *page_array_slab; unsigned int page_array_slab_size; u64 compr_written_block; u64 
compr_saved_block; u32 compr_new_inode; struct inode *compress_inode; unsigned int compress_percent; unsigned int compress_watermark; atomic_t compress_page_hit; spinlock_t iostat_lock; unsigned long long iostat_count[29]; unsigned long long iostat_bytes[29]; unsigned long long prev_iostat_bytes[29]; bool iostat_enable; unsigned long iostat_next_period; unsigned int iostat_period_ms; spinlock_t iostat_lat_lock; struct iostat_lat_info *iostat_io_lat; }; struct f2fs_device { __u8 path[64]; __le32 total_segments; }; struct f2fs_super_block { __le32 magic; __le16 major_ver; __le16 minor_ver; __le32 log_sectorsize; __le32 log_sectors_per_block; __le32 log_blocksize; __le32 log_blocks_per_seg; __le32 segs_per_sec; __le32 secs_per_zone; __le32 checksum_offset; __le64 block_count; __le32 section_count; __le32 segment_count; __le32 segment_count_ckpt; __le32 segment_count_sit; __le32 segment_count_nat; __le32 segment_count_ssa; __le32 segment_count_main; __le32 segment0_blkaddr; __le32 cp_blkaddr; __le32 sit_blkaddr; __le32 nat_blkaddr; __le32 ssa_blkaddr; __le32 main_blkaddr; __le32 root_ino; __le32 node_ino; __le32 meta_ino; __u8 uuid[16]; __le16 volume_name[512]; __le32 extension_count; __u8 extension_list[512]; __le32 cp_payload; __u8 version[256]; __u8 init_version[256]; __le32 feature; __u8 encryption_level; __u8 encrypt_pw_salt[16]; struct f2fs_device devs[8]; __le32 qf_ino[3]; __u8 hot_ext_count; __le16 s_encoding; __le16 s_encoding_flags; __u8 s_stop_reason[32]; __u8 s_errors[16]; __u8 reserved[258]; __le32 crc; } __attribute__((packed)); struct f2fs_nm_info { block_t nat_blkaddr; nid_t max_nid; nid_t available_nids; nid_t next_scan_nid; nid_t max_rf_node_blocks; unsigned int ram_thresh; unsigned int ra_nid_pages; unsigned int dirty_nats_ratio; struct xarray nat_root; struct xarray nat_set_root; struct f2fs_rwsem nat_tree_lock; struct list_head nat_entries; spinlock_t nat_list_lock; unsigned int nat_cnt[3]; unsigned int nat_blocks; struct xarray free_nid_root; struct list_head free_nid_list; unsigned int nid_cnt[2]; spinlock_t nid_list_lock; struct mutex build_lock; unsigned char **free_nid_bitmap; unsigned char *nat_block_bitmap; unsigned short *free_nid_count; char *nat_bitmap; unsigned int nat_bits_blocks; unsigned char *nat_bits; unsigned char *full_nat_bits; unsigned char *empty_nat_bits; int bitmap_size; }; struct sit_info; struct free_segmap_info; struct dirty_seglist_info; struct curseg_info; struct flush_cmd_control; struct discard_cmd_control; struct f2fs_sm_info { struct sit_info *sit_info; struct free_segmap_info *free_info; struct dirty_seglist_info *dirty_info; struct curseg_info *curseg_array; struct f2fs_rwsem curseg_lock; block_t seg0_blkaddr; block_t main_blkaddr; block_t ssa_blkaddr; unsigned int segment_count; unsigned int main_segments; unsigned int reserved_segments; unsigned int additional_reserved_segments; unsigned int ovp_segments; unsigned int rec_prefree_segments; struct list_head sit_entry_set; unsigned int ipu_policy; unsigned int min_ipu_util; unsigned int min_fsync_blocks; unsigned int min_seq_blocks; unsigned int min_hot_blocks; unsigned int min_ssr_sections; struct flush_cmd_control *fcc_info; struct discard_cmd_control *dcc_info; }; struct flush_cmd_control { struct task_struct *f2fs_issue_flush; wait_queue_head_t flush_wait_queue; atomic_t issued_flush; atomic_t queued_flush; struct llist_head issue_list; struct llist_node *dispatch_list; }; struct discard_cmd_control { struct task_struct *f2fs_issue_discard; struct list_head entry_list; struct list_head 
pend_list[512]; struct list_head wait_list; struct list_head fstrim_list; wait_queue_head_t discard_wait_queue; struct mutex cmd_lock; unsigned int nr_discards; unsigned int max_discards; unsigned int max_discard_request; unsigned int min_discard_issue_time; unsigned int mid_discard_issue_time; unsigned int max_discard_issue_time; unsigned int discard_io_aware_gran; unsigned int discard_urgent_util; unsigned int discard_granularity; unsigned int max_ordered_discard; unsigned int discard_io_aware; unsigned int undiscard_blks; unsigned int next_pos; atomic_t issued_discard; atomic_t queued_discard; atomic_t discard_cmd_cnt; struct rb_root_cached root; bool rbtree_check; bool discard_wake; }; struct f2fs_io_info { struct f2fs_sb_info *sbi; nid_t ino; enum page_type type; enum temp_type temp; enum req_op op; blk_opf_t op_flags; block_t new_blkaddr; block_t old_blkaddr; struct page *page; struct page *encrypted_page; struct page *compressed_page; struct list_head list; unsigned int compr_blocks; unsigned int need_lock: 8; unsigned int version: 8; unsigned int submitted: 1; unsigned int in_list: 1; unsigned int is_por: 1; unsigned int encrypted: 1; unsigned int meta_gc: 1; enum iostat_type io_type; struct writeback_control *io_wbc; struct bio **bio; sector_t *last_block; }; struct f2fs_bio_info { struct f2fs_sb_info *sbi; struct bio *bio; sector_t last_block_in_bio; struct f2fs_io_info fio; struct completion zone_wait; struct bio *zone_pending_bio; void *bi_private; struct f2fs_rwsem io_rwsem; spinlock_t io_lock; struct list_head io_list; struct list_head bio_list; struct f2fs_rwsem bio_list_lock; }; struct f2fs_checkpoint { __le64 checkpoint_ver; __le64 user_block_count; __le64 valid_block_count; __le32 rsvd_segment_count; __le32 overprov_segment_count; __le32 free_segment_count; __le32 cur_node_segno[8]; __le16 cur_node_blkoff[8]; __le32 cur_data_segno[8]; __le16 cur_data_blkoff[8]; __le32 ckpt_flags; __le32 cp_pack_total_block_count; __le32 cp_pack_start_sum; __le32 valid_node_count; __le32 valid_inode_count; __le32 next_free_nid; __le32 sit_ver_bitmap_bytesize; __le32 nat_ver_bitmap_bytesize; __le32 checksum_offset; __le64 elapsed_time; unsigned char alloc_type[16]; unsigned char sit_nat_version_bitmap[0]; }; struct f2fs_stat_info { struct list_head stat_list; struct f2fs_sb_info *sbi; int all_area_segs; int sit_area_segs; int nat_area_segs; int ssa_area_segs; int main_area_segs; int main_area_sections; int main_area_zones; unsigned long long hit_cached[2]; unsigned long long hit_rbtree[2]; unsigned long long total_ext[2]; unsigned long long hit_total[2]; int ext_tree[2]; int zombie_tree[2]; int ext_node[2]; unsigned long long ext_mem[2]; unsigned long long hit_largest; unsigned long long allocated_data_blocks; int ndirty_node; int ndirty_dent; int ndirty_meta; int ndirty_imeta; int ndirty_data; int ndirty_qdata; unsigned int ndirty_dirs; unsigned int ndirty_files; unsigned int nquota_files; unsigned int ndirty_all; int nats; int dirty_nats; int sits; int dirty_sits; int free_nids; int avail_nids; int alloc_nids; int total_count; int utilization; int nr_wb_cp_data; int nr_wb_data; int nr_rd_data; int nr_rd_node; int nr_rd_meta; int nr_dio_read; int nr_dio_write; unsigned int io_skip_bggc; unsigned int other_skip_bggc; int nr_flushing; int nr_flushed; int flush_list_empty; int nr_discarding; int nr_discarded; int nr_discard_cmd; unsigned int undiscard_blks; int nr_issued_ckpt; int nr_total_ckpt; int nr_queued_ckpt; unsigned int cur_ckpt_time; unsigned int peak_ckpt_time; int inline_xattr; int 
inline_inode; int inline_dir; int append; int update; int orphans; int compr_inode; int swapfile_inode; unsigned long long compr_blocks; int aw_cnt; int max_aw_cnt; unsigned int valid_count; unsigned int valid_node_count; unsigned int valid_inode_count; unsigned int discard_blks; unsigned int bimodal; unsigned int avg_vblocks; int util_free; int util_valid; int util_invalid; int rsvd_segs; int overp_segs; int dirty_count; int node_pages; int meta_pages; int compress_pages; int compress_page_hit; int prefree_count; int free_segs; int free_secs; int cp_call_count[2]; int cp_count; int gc_call_count[2]; int gc_segs[4]; int gc_secs[4]; int tot_blks; int data_blks; int node_blks; int bg_data_blks; int bg_node_blks; int curseg[8]; int cursec[8]; int curzone[8]; unsigned int dirty_seg[8]; unsigned int full_seg[8]; unsigned int valid_blks[8]; unsigned int meta_count[4]; unsigned int segment_count[2]; unsigned int block_count[2]; unsigned int inplace_count; unsigned long long base_mem; unsigned long long cache_mem; unsigned long long page_mem; }; struct f2fs_dev_info { struct block_device *bdev; char path[64]; unsigned int total_segments; block_t start_blk; block_t end_blk; unsigned int nr_blkz; unsigned long *blkz_seq; }; struct f2fs_filename { const struct qstr *usr_fname; struct fscrypt_str disk_name; f2fs_hash_t hash; struct fscrypt_str crypto_buf; struct fscrypt_str cf_name; }; struct f2fs_dentry_ptr { struct inode *inode; void *bitmap; struct f2fs_dir_entry *dentry; __u8 (*filename)[8]; int max; int nr_bitmap; }; enum { META_CP = 0, META_NAT = 1, META_SIT = 2, META_SSA = 3, META_MAX = 4, META_POR = 5, DATA_GENERIC = 6, DATA_GENERIC_ENHANCE = 7, DATA_GENERIC_ENHANCE_READ = 8, DATA_GENERIC_ENHANCE_UPDATE = 9, META_GENERIC = 10, }; enum { ALLOC_NODE = 0, LOOKUP_NODE = 1, LOOKUP_NODE_RA = 2, }; enum stop_cp_reason { STOP_CP_REASON_SHUTDOWN = 0, STOP_CP_REASON_FAULT_INJECT = 1, STOP_CP_REASON_META_PAGE = 2, STOP_CP_REASON_WRITE_FAIL = 3, STOP_CP_REASON_CORRUPTED_SUMMARY = 4, STOP_CP_REASON_UPDATE_INODE = 5, STOP_CP_REASON_FLUSH_FAIL = 6, STOP_CP_REASON_NO_SEGMENT = 7, STOP_CP_REASON_MAX = 8, }; enum { CURSEG_HOT_DATA = 0, CURSEG_WARM_DATA = 1, CURSEG_COLD_DATA = 2, CURSEG_HOT_NODE = 3, CURSEG_WARM_NODE = 4, CURSEG_COLD_NODE = 5, NR_PERSISTENT_LOG = 6, CURSEG_COLD_DATA_PINNED = 6, CURSEG_ALL_DATA_ATGC = 7, NO_CHECK_TYPE = 8, }; enum { F2FS_GET_BLOCK_DEFAULT = 0, F2FS_GET_BLOCK_FIEMAP = 1, F2FS_GET_BLOCK_BMAP = 2, F2FS_GET_BLOCK_DIO = 3, F2FS_GET_BLOCK_PRE_DIO = 4, F2FS_GET_BLOCK_PRE_AIO = 5, F2FS_GET_BLOCK_PRECACHE = 6, }; enum cp_reason_type { CP_NO_NEEDED = 0, CP_NON_REGULAR = 1, CP_COMPRESSED = 2, CP_HARDLINK = 3, CP_SB_NEED_CP = 4, CP_WRONG_PINO = 5, CP_NO_SPC_ROLL = 6, CP_NODE_NEED_CP = 7, CP_FASTBOOT_MODE = 8, CP_SPEC_LOG_NUM = 9, CP_RECOVER_DIR = 10, }; enum inode_type { DIR_INODE = 0, FILE_INODE = 1, DIRTY_META = 2, NR_INODE_TYPE = 3, }; enum { COLD_BIT_SHIFT = 0, FSYNC_BIT_SHIFT = 1, DENT_BIT_SHIFT = 2, OFFSET_BIT_SHIFT = 3, }; enum compress_flag { COMPRESS_CHKSUM = 0, COMPRESS_MAX_FLAG = 1, }; enum compress_algorithm_type { COMPRESS_LZO = 0, COMPRESS_LZ4 = 1, COMPRESS_ZSTD = 2, COMPRESS_LZORLE = 3, COMPRESS_MAX = 4, }; enum { BG_GC = 0, FG_GC = 1, }; enum { BACKGROUND = 0, FOREGROUND = 1, MAX_CALL_TYPE = 2, TOTAL_CALL = 1, }; enum { FS_MODE_ADAPTIVE = 0, FS_MODE_LFS = 1, FS_MODE_FRAGMENT_SEG = 2, FS_MODE_FRAGMENT_BLK = 3, }; enum { GC_CB = 0, GC_GREEDY = 1, GC_AT = 2, ALLOC_NEXT = 3, FLUSH_DEVICE = 4, MAX_GC_POLICY = 5, }; enum { COMPR_MODE_FS = 0, COMPR_MODE_USER = 1, }; struct 
seg_entry; struct sec_entry; struct sit_info { block_t sit_base_addr; block_t sit_blocks; block_t written_valid_blocks; char *bitmap; char *sit_bitmap; unsigned int bitmap_size; unsigned long *tmp_map; unsigned long *dirty_sentries_bitmap; unsigned int dirty_sentries; unsigned int sents_per_block; struct rw_semaphore sentry_lock; struct seg_entry *sentries; struct sec_entry *sec_entries; unsigned long long elapsed_time; unsigned long long mounted_time; unsigned long long min_mtime; unsigned long long max_mtime; unsigned long long dirty_min_mtime; unsigned long long dirty_max_mtime; unsigned int last_victim[5]; }; struct seg_entry { unsigned int type: 6; unsigned int valid_blocks: 10; unsigned int ckpt_valid_blocks: 10; unsigned int padding: 6; unsigned char *cur_valid_map; unsigned char *ckpt_valid_map; unsigned char *discard_map; unsigned long long mtime; }; struct sec_entry { unsigned int valid_blocks; }; struct free_segmap_info { unsigned int start_segno; unsigned int free_segments; unsigned int free_sections; spinlock_t segmap_lock; unsigned long *free_segmap; unsigned long *free_secmap; }; struct dirty_seglist_info { unsigned long *dirty_segmap[8]; unsigned long *dirty_secmap; struct mutex seglist_lock; int nr_dirty[8]; unsigned long *victim_secmap; unsigned long *pinned_secmap; unsigned int pinned_secmap_cnt; bool enable_pin_section; }; struct f2fs_summary_block; struct f2fs_journal; struct curseg_info { struct mutex curseg_mutex; struct f2fs_summary_block *sum_blk; struct rw_semaphore journal_rwsem; struct f2fs_journal *journal; unsigned char alloc_type; unsigned short seg_type; unsigned int segno; unsigned short next_blkoff; unsigned int zone; unsigned int next_segno; int fragment_remained_chunk; bool inited; }; struct f2fs_summary { __le32 nid; union { __u8 reserved[3]; struct { __u8 version; __le16 ofs_in_node; } __attribute__((packed)); }; } __attribute__((packed)); struct f2fs_nat_entry { __u8 version; __le32 ino; __le32 block_addr; } __attribute__((packed)); struct nat_journal_entry { __le32 nid; struct f2fs_nat_entry ne; } __attribute__((packed)); struct nat_journal { struct nat_journal_entry entries[38]; __u8 reserved[11]; }; struct f2fs_sit_entry { __le16 vblocks; __u8 valid_map[64]; __le64 mtime; } __attribute__((packed)); struct sit_journal_entry { __le32 segno; struct f2fs_sit_entry se; } __attribute__((packed)); struct sit_journal { struct sit_journal_entry entries[6]; __u8 reserved[37]; }; struct f2fs_extra_info { __le64 kbytes_written; __u8 reserved[497]; } __attribute__((packed)); struct f2fs_journal { union { __le16 n_nats; __le16 n_sits; }; union { struct nat_journal nat_j; struct sit_journal sit_j; struct f2fs_extra_info info; }; } __attribute__((packed)); struct summary_footer { unsigned char entry_type; __le32 check_sum; } __attribute__((packed)); struct f2fs_summary_block { struct f2fs_summary entries[512]; struct f2fs_journal journal; struct summary_footer footer; }; struct f2fs_gc_kthread { struct task_struct *f2fs_gc_task; wait_queue_head_t gc_wait_queue_head; unsigned int urgent_sleep_time; unsigned int min_sleep_time; unsigned int max_sleep_time; unsigned int no_gc_sleep_time; bool gc_wake; wait_queue_head_t fggc_wq; }; struct iostat_lat_info { unsigned long sum_lat[9]; unsigned long peak_lat[9]; unsigned int bio_cnt[9]; }; struct f2fs_gc_range { __u32 sync; __u64 start; __u64 len; }; struct f2fs_defragment { __u64 start; __u64 len; }; struct f2fs_move_range { __u32 dst_fd; __u64 pos_in; __u64 pos_out; __u64 len; }; struct f2fs_flush_device { __u32 
dev_num; __u32 segments; }; struct f2fs_sectrim_range { __u64 start; __u64 len; __u64 flags; }; struct f2fs_comp_option { __u8 algorithm; __u8 log_cluster_size; }; struct dnode_of_data { struct inode *inode; struct page *inode_page; struct page *node_page; nid_t nid; unsigned int ofs_in_node; bool inode_page_locked; bool node_changed; char cur_level; char max_level; block_t data_blkaddr; }; struct f2fs_map_blocks { struct block_device *m_bdev; block_t m_pblk; block_t m_lblk; unsigned int m_len; unsigned int m_flags; unsigned long *m_next_pgofs; unsigned long *m_next_extent; int m_seg_type; bool m_may_create; bool m_multidev_dio; }; struct f2fs_gc_control { unsigned int victim_segno; int init_gc_type; bool no_bg_gc; bool should_migrate_blocks; bool err_gc_skipped; unsigned int nr_free_secs; }; struct node_info { nid_t nid; nid_t ino; block_t blk_addr; unsigned char version; unsigned char flag; }; struct compat_f2fs_gc_range { u32 sync; compat_u64 start; compat_u64 len; }; struct compat_f2fs_move_range { u32 dst_fd; compat_u64 pos_in; compat_u64 pos_out; compat_u64 len; }; typedef void (*btf_trace_f2fs_sync_file_enter)(void *, struct inode *); typedef void (*btf_trace_f2fs_sync_file_exit)(void *, struct inode *, int, int, int); typedef void (*btf_trace_f2fs_sync_fs)(void *, struct super_block *, int); typedef void (*btf_trace_f2fs_iget)(void *, struct inode *); typedef void (*btf_trace_f2fs_iget_exit)(void *, struct inode *, int); typedef void (*btf_trace_f2fs_evict_inode)(void *, struct inode *); typedef void (*btf_trace_f2fs_new_inode)(void *, struct inode *, int); typedef void (*btf_trace_f2fs_unlink_enter)(void *, struct inode *, struct dentry *); typedef void (*btf_trace_f2fs_unlink_exit)(void *, struct inode *, int); typedef void (*btf_trace_f2fs_drop_inode)(void *, struct inode *, int); typedef void (*btf_trace_f2fs_truncate)(void *, struct inode *); typedef void (*btf_trace_f2fs_truncate_data_blocks_range)(void *, struct inode *, nid_t, unsigned int, int); typedef void (*btf_trace_f2fs_truncate_blocks_enter)(void *, struct inode *, u64); typedef void (*btf_trace_f2fs_truncate_blocks_exit)(void *, struct inode *, int); typedef void (*btf_trace_f2fs_truncate_inode_blocks_enter)(void *, struct inode *, u64); typedef void (*btf_trace_f2fs_truncate_inode_blocks_exit)(void *, struct inode *, int); typedef void (*btf_trace_f2fs_truncate_nodes_enter)(void *, struct inode *, nid_t, block_t); typedef void (*btf_trace_f2fs_truncate_nodes_exit)(void *, struct inode *, int); typedef void (*btf_trace_f2fs_truncate_node)(void *, struct inode *, nid_t, block_t); typedef void (*btf_trace_f2fs_truncate_partial_nodes)(void *, struct inode *, nid_t *, int, int); typedef void (*btf_trace_f2fs_file_write_iter)(void *, struct inode *, loff_t, size_t, ssize_t); typedef void (*btf_trace_f2fs_map_blocks)(void *, struct inode *, struct f2fs_map_blocks *, int, int); typedef void (*btf_trace_f2fs_background_gc)(void *, struct super_block *, unsigned int, unsigned int, unsigned int); typedef void (*btf_trace_f2fs_gc_begin)(void *, struct super_block *, int, bool, unsigned int, long long, long long, long long, unsigned int, unsigned int, int, unsigned int); typedef void (*btf_trace_f2fs_gc_end)(void *, struct super_block *, int, int, int, long long, long long, long long, unsigned int, unsigned int, int, unsigned int); struct victim_sel_policy; typedef void (*btf_trace_f2fs_get_victim)(void *, struct super_block *, int, int, struct victim_sel_policy *, unsigned int, unsigned int, unsigned int); struct 
victim_sel_policy { int alloc_mode; int gc_mode; unsigned long *dirty_bitmap; unsigned int max_search; unsigned int offset; unsigned int ofs_unit; unsigned int min_cost; unsigned long long oldest_age; unsigned int min_segno; unsigned long long age; unsigned long long age_threshold; }; typedef void (*btf_trace_f2fs_lookup_start)(void *, struct inode *, struct dentry *, unsigned int); typedef void (*btf_trace_f2fs_lookup_end)(void *, struct inode *, struct dentry *, nid_t, int); typedef void (*btf_trace_f2fs_rename_start)(void *, struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); typedef void (*btf_trace_f2fs_rename_end)(void *, struct dentry *, struct dentry *, unsigned int, int); typedef void (*btf_trace_f2fs_readdir)(void *, struct inode *, loff_t, loff_t, int); typedef void (*btf_trace_f2fs_fallocate)(void *, struct inode *, int, loff_t, loff_t, int); typedef void (*btf_trace_f2fs_direct_IO_enter)(void *, struct inode *, struct kiocb *, long, int); typedef void (*btf_trace_f2fs_direct_IO_exit)(void *, struct inode *, loff_t, unsigned long, int, int); typedef void (*btf_trace_f2fs_reserve_new_blocks)(void *, struct inode *, nid_t, unsigned int, blkcnt_t); typedef void (*btf_trace_f2fs_submit_page_bio)(void *, struct page *, struct f2fs_io_info *); typedef void (*btf_trace_f2fs_submit_page_write)(void *, struct page *, struct f2fs_io_info *); typedef void (*btf_trace_f2fs_prepare_write_bio)(void *, struct super_block *, int, struct bio *); typedef void (*btf_trace_f2fs_prepare_read_bio)(void *, struct super_block *, int, struct bio *); typedef void (*btf_trace_f2fs_submit_read_bio)(void *, struct super_block *, int, struct bio *); typedef void (*btf_trace_f2fs_submit_write_bio)(void *, struct super_block *, int, struct bio *); typedef void (*btf_trace_f2fs_write_begin)(void *, struct inode *, loff_t, unsigned int); typedef void (*btf_trace_f2fs_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); typedef void (*btf_trace_f2fs_writepage)(void *, struct folio *, int); typedef void (*btf_trace_f2fs_do_write_data_page)(void *, struct folio *, int); typedef void (*btf_trace_f2fs_readpage)(void *, struct folio *, int); typedef void (*btf_trace_f2fs_set_page_dirty)(void *, struct folio *, int); typedef void (*btf_trace_f2fs_replace_atomic_write_block)(void *, struct inode *, struct inode *, unsigned long, block_t, block_t, bool); typedef void (*btf_trace_f2fs_filemap_fault)(void *, struct inode *, unsigned long, vm_flags_t, vm_fault_t); typedef void (*btf_trace_f2fs_vm_page_mkwrite)(void *, struct inode *, unsigned long, vm_flags_t, vm_fault_t); typedef void (*btf_trace_f2fs_writepages)(void *, struct inode *, struct writeback_control *, int); typedef void (*btf_trace_f2fs_readpages)(void *, struct inode *, unsigned long, unsigned int); typedef void (*btf_trace_f2fs_write_checkpoint)(void *, struct super_block *, int, const char *); typedef void (*btf_trace_f2fs_queue_discard)(void *, struct block_device *, block_t, block_t); typedef void (*btf_trace_f2fs_issue_discard)(void *, struct block_device *, block_t, block_t); typedef void (*btf_trace_f2fs_remove_discard)(void *, struct block_device *, block_t, block_t); typedef void (*btf_trace_f2fs_queue_reset_zone)(void *, struct block_device *, block_t); typedef void (*btf_trace_f2fs_issue_reset_zone)(void *, struct block_device *, block_t); typedef void (*btf_trace_f2fs_issue_flush)(void *, struct block_device *, unsigned int, unsigned int, int); typedef void 
(*btf_trace_f2fs_lookup_extent_tree_start)(void *, struct inode *, unsigned int, enum extent_type); typedef void (*btf_trace_f2fs_lookup_read_extent_tree_end)(void *, struct inode *, unsigned int, struct extent_info *); typedef void (*btf_trace_f2fs_lookup_age_extent_tree_end)(void *, struct inode *, unsigned int, struct extent_info *); typedef void (*btf_trace_f2fs_update_read_extent_tree_range)(void *, struct inode *, unsigned int, unsigned int, block_t, unsigned int); typedef void (*btf_trace_f2fs_update_age_extent_tree_range)(void *, struct inode *, unsigned int, unsigned int, unsigned long long, unsigned long long); typedef void (*btf_trace_f2fs_shrink_extent_tree)(void *, struct f2fs_sb_info *, unsigned int, unsigned int, enum extent_type); typedef void (*btf_trace_f2fs_destroy_extent_tree)(void *, struct inode *, unsigned int, enum extent_type); typedef void (*btf_trace_f2fs_sync_dirty_inodes_enter)(void *, struct super_block *, int, s64); typedef void (*btf_trace_f2fs_sync_dirty_inodes_exit)(void *, struct super_block *, int, s64); typedef void (*btf_trace_f2fs_shutdown)(void *, struct f2fs_sb_info *, unsigned int, int); typedef void (*btf_trace_f2fs_compress_pages_start)(void *, struct inode *, unsigned long, unsigned int, unsigned char); typedef void (*btf_trace_f2fs_decompress_pages_start)(void *, struct inode *, unsigned long, unsigned int, unsigned char); typedef void (*btf_trace_f2fs_compress_pages_end)(void *, struct inode *, unsigned long, unsigned int, int); typedef void (*btf_trace_f2fs_decompress_pages_end)(void *, struct inode *, unsigned long, unsigned int, int); typedef void (*btf_trace_f2fs_iostat)(void *, struct f2fs_sb_info *, unsigned long long *); struct f2fs_iostat_latency { unsigned int peak_lat; unsigned int avg_lat; unsigned int cnt; }; typedef void (*btf_trace_f2fs_iostat_latency)(void *, struct f2fs_sb_info *, struct f2fs_iostat_latency(*)[3]); typedef void (*btf_trace_f2fs_bmap)(void *, struct inode *, sector_t, sector_t); typedef void (*btf_trace_f2fs_fiemap)(void *, struct inode *, sector_t, sector_t, unsigned long long, unsigned int, int); typedef void (*btf_trace_f2fs_dataread_start)(void *, struct inode *, loff_t, int, pid_t, char *, char *); typedef void (*btf_trace_f2fs_dataread_end)(void *, struct inode *, loff_t, int); typedef void (*btf_trace_f2fs_datawrite_start)(void *, struct inode *, loff_t, int, pid_t, char *, char *); typedef void (*btf_trace_f2fs_datawrite_end)(void *, struct inode *, loff_t, int); struct f2fs_sb_encodings { __u16 magic; char *name; unsigned int version; }; enum errors_option { MOUNT_ERRORS_READONLY = 0, MOUNT_ERRORS_CONTINUE = 1, MOUNT_ERRORS_PANIC = 2, }; enum iostat_lat_type { READ_IO = 0, WRITE_SYNC_IO = 1, WRITE_ASYNC_IO = 2, MAX_IO_TYPE = 3, }; enum { BGGC_MODE_ON = 0, BGGC_MODE_OFF = 1, BGGC_MODE_SYNC = 2, }; enum { DISCARD_UNIT_BLOCK = 0, DISCARD_UNIT_SEGMENT = 1, DISCARD_UNIT_SECTION = 2, }; enum { ALLOC_MODE_DEFAULT = 0, ALLOC_MODE_REUSE = 1, }; enum { MEMORY_MODE_NORMAL = 0, MEMORY_MODE_LOW = 1, }; enum { Opt_gc_background = 0, Opt_disable_roll_forward = 1, Opt_norecovery = 2, Opt_discard___4 = 3, Opt_nodiscard___2 = 4, Opt_noheap = 5, Opt_heap = 6, Opt_user_xattr___2 = 7, Opt_nouser_xattr = 8, Opt_acl___2 = 9, Opt_noacl = 10, Opt_active_logs = 11, Opt_disable_ext_identify = 12, Opt_inline_xattr = 13, Opt_noinline_xattr = 14, Opt_inline_xattr_size = 15, Opt_inline_data = 16, Opt_inline_dentry = 17, Opt_noinline_dentry = 18, Opt_flush_merge = 19, Opt_noflush_merge = 20, Opt_barrier___2 = 21, Opt_nobarrier___2 = 
22, Opt_fastboot = 23, Opt_extent_cache = 24, Opt_noextent_cache = 25, Opt_noinline_data = 26, Opt_data_flush = 27, Opt_reserve_root = 28, Opt_resgid___2 = 29, Opt_resuid___2 = 30, Opt_mode___5 = 31, Opt_fault_injection = 32, Opt_fault_type = 33, Opt_lazytime = 34, Opt_nolazytime = 35, Opt_quota___3 = 36, Opt_noquota___2 = 37, Opt_usrquota___3 = 38, Opt_grpquota___3 = 39, Opt_prjquota___2 = 40, Opt_usrjquota___2 = 41, Opt_grpjquota___2 = 42, Opt_prjjquota = 43, Opt_offusrjquota = 44, Opt_offgrpjquota = 45, Opt_offprjjquota = 46, Opt_jqfmt_vfsold = 47, Opt_jqfmt_vfsv0 = 48, Opt_jqfmt_vfsv1 = 49, Opt_alloc = 50, Opt_fsync = 51, Opt_test_dummy_encryption___2 = 52, Opt_inlinecrypt___2 = 53, Opt_checkpoint_disable = 54, Opt_checkpoint_disable_cap = 55, Opt_checkpoint_disable_cap_perc = 56, Opt_checkpoint_enable = 57, Opt_checkpoint_merge = 58, Opt_nocheckpoint_merge = 59, Opt_compress_algorithm = 60, Opt_compress_log_size = 61, Opt_compress_extension = 62, Opt_nocompress_extension = 63, Opt_compress_chksum = 64, Opt_compress_mode = 65, Opt_compress_cache = 66, Opt_atgc = 67, Opt_gc_merge = 68, Opt_nogc_merge = 69, Opt_discard_unit = 70, Opt_memory_mode = 71, Opt_age_extent_cache = 72, Opt_errors___3 = 73, Opt_err___6 = 74, }; enum { GC_NORMAL = 0, GC_IDLE_CB = 1, GC_IDLE_GREEDY = 2, GC_IDLE_AT = 3, GC_URGENT_HIGH = 4, GC_URGENT_LOW = 5, GC_URGENT_MID = 6, MAX_GC_MODE = 7, }; enum blk_zone_type { BLK_ZONE_TYPE_CONVENTIONAL = 1, BLK_ZONE_TYPE_SEQWRITE_REQ = 2, BLK_ZONE_TYPE_SEQWRITE_PREF = 3, }; enum { F2FS_IPU_FORCE = 0, F2FS_IPU_SSR = 1, F2FS_IPU_UTIL = 2, F2FS_IPU_SSR_UTIL = 3, F2FS_IPU_FSYNC = 4, F2FS_IPU_ASYNC = 5, F2FS_IPU_NOCACHE = 6, F2FS_IPU_HONOR_OPU_WRITE = 7, F2FS_IPU_MAX = 8, }; struct trace_event_raw_f2fs__inode { struct trace_entry ent; dev_t dev; ino_t ino; ino_t pino; umode_t mode; loff_t size; unsigned int nlink; blkcnt_t blocks; __u8 advise; char __data[0]; }; struct trace_event_raw_f2fs_sync_file_exit { struct trace_entry ent; dev_t dev; ino_t ino; int cp_reason; int datasync; int ret; char __data[0]; }; struct trace_event_raw_f2fs_sync_fs { struct trace_entry ent; dev_t dev; int dirty; int wait; char __data[0]; }; struct trace_event_raw_f2fs__inode_exit { struct trace_entry ent; dev_t dev; ino_t ino; umode_t mode; int ret; char __data[0]; }; struct trace_event_raw_f2fs_unlink_enter { struct trace_entry ent; dev_t dev; ino_t ino; loff_t size; blkcnt_t blocks; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_f2fs_truncate_data_blocks_range { struct trace_entry ent; dev_t dev; ino_t ino; nid_t nid; unsigned int ofs; int free; char __data[0]; }; struct trace_event_raw_f2fs__truncate_op { struct trace_entry ent; dev_t dev; ino_t ino; loff_t size; blkcnt_t blocks; u64 from; char __data[0]; }; struct trace_event_raw_f2fs__truncate_node { struct trace_entry ent; dev_t dev; ino_t ino; nid_t nid; block_t blk_addr; char __data[0]; }; struct trace_event_raw_f2fs_truncate_partial_nodes { struct trace_entry ent; dev_t dev; ino_t ino; nid_t nid[3]; int depth; int err; char __data[0]; }; struct trace_event_raw_f2fs_file_write_iter { struct trace_entry ent; dev_t dev; ino_t ino; loff_t offset; size_t length; ssize_t ret; char __data[0]; }; struct trace_event_raw_f2fs_map_blocks { struct trace_entry ent; dev_t dev; ino_t ino; block_t m_lblk; block_t m_pblk; unsigned int m_len; unsigned int m_flags; int m_seg_type; bool m_may_create; bool m_multidev_dio; int flag; int ret; char __data[0]; }; struct trace_event_raw_f2fs_background_gc { struct trace_entry ent; dev_t dev; unsigned 
int wait_ms; unsigned int prefree; unsigned int free; char __data[0]; }; struct trace_event_raw_f2fs_gc_begin { struct trace_entry ent; dev_t dev; int gc_type; bool no_bg_gc; unsigned int nr_free_secs; long long dirty_nodes; long long dirty_dents; long long dirty_imeta; unsigned int free_sec; unsigned int free_seg; int reserved_seg; unsigned int prefree_seg; char __data[0]; }; struct trace_event_raw_f2fs_gc_end { struct trace_entry ent; dev_t dev; int ret; int seg_freed; int sec_freed; long long dirty_nodes; long long dirty_dents; long long dirty_imeta; unsigned int free_sec; unsigned int free_seg; int reserved_seg; unsigned int prefree_seg; char __data[0]; }; struct trace_event_raw_f2fs_get_victim { struct trace_entry ent; dev_t dev; int type; int gc_type; int alloc_mode; int gc_mode; unsigned int victim; unsigned int cost; unsigned int ofs_unit; unsigned int pre_victim; unsigned int prefree; unsigned int free; char __data[0]; }; struct trace_event_raw_f2fs_lookup_start { struct trace_entry ent; dev_t dev; ino_t ino; u32 __data_loc_name; unsigned int flags; char __data[0]; }; struct trace_event_raw_f2fs_lookup_end { struct trace_entry ent; dev_t dev; ino_t ino; u32 __data_loc_name; nid_t cino; int err; char __data[0]; }; struct trace_event_raw_f2fs_rename_start { struct trace_entry ent; dev_t dev; ino_t ino; u32 __data_loc_old_name; ino_t new_pino; u32 __data_loc_new_name; unsigned int flags; char __data[0]; }; struct trace_event_raw_f2fs_rename_end { struct trace_entry ent; dev_t dev; ino_t ino; u32 __data_loc_old_name; u32 __data_loc_new_name; unsigned int flags; int ret; char __data[0]; }; struct trace_event_raw_f2fs_readdir { struct trace_entry ent; dev_t dev; ino_t ino; loff_t start; loff_t end; int err; char __data[0]; }; struct trace_event_raw_f2fs_fallocate { struct trace_entry ent; dev_t dev; ino_t ino; int mode; loff_t offset; loff_t len; loff_t size; blkcnt_t blocks; int ret; char __data[0]; }; struct trace_event_raw_f2fs_direct_IO_enter { struct trace_entry ent; dev_t dev; ino_t ino; loff_t ki_pos; int ki_flags; u16 ki_ioprio; unsigned long len; int rw; char __data[0]; }; struct trace_event_raw_f2fs_direct_IO_exit { struct trace_entry ent; dev_t dev; ino_t ino; loff_t pos; unsigned long len; int rw; int ret; char __data[0]; }; struct trace_event_raw_f2fs_reserve_new_blocks { struct trace_entry ent; dev_t dev; nid_t nid; unsigned int ofs_in_node; blkcnt_t count; char __data[0]; }; struct trace_event_raw_f2fs__submit_page_bio { struct trace_entry ent; dev_t dev; ino_t ino; unsigned long index; block_t old_blkaddr; block_t new_blkaddr; enum req_op op; blk_opf_t op_flags; int temp; int type; char __data[0]; }; struct trace_event_raw_f2fs__bio { struct trace_entry ent; dev_t dev; dev_t target; enum req_op op; blk_opf_t op_flags; int type; sector_t sector; unsigned int size; char __data[0]; }; struct trace_event_raw_f2fs_write_begin { struct trace_entry ent; dev_t dev; ino_t ino; loff_t pos; unsigned int len; char __data[0]; }; struct trace_event_raw_f2fs_write_end { struct trace_entry ent; dev_t dev; ino_t ino; loff_t pos; unsigned int len; unsigned int copied; char __data[0]; }; struct trace_event_raw_f2fs__folio { struct trace_entry ent; dev_t dev; ino_t ino; int type; int dir; unsigned long index; int dirty; int uptodate; char __data[0]; }; struct trace_event_raw_f2fs_replace_atomic_write_block { struct trace_entry ent; dev_t dev; ino_t ino; ino_t cow_ino; unsigned long index; block_t old_addr; block_t new_addr; bool recovery; char __data[0]; }; struct trace_event_raw_f2fs_mmap 
{ struct trace_entry ent; dev_t dev; ino_t ino; unsigned long index; vm_flags_t flags; vm_fault_t ret; char __data[0]; }; struct trace_event_raw_f2fs_writepages { struct trace_entry ent; dev_t dev; ino_t ino; int type; int dir; long nr_to_write; long pages_skipped; loff_t range_start; loff_t range_end; unsigned long writeback_index; int sync_mode; char for_kupdate; char for_background; char tagged_writepages; char for_reclaim; char range_cyclic; char for_sync; char __data[0]; }; struct trace_event_raw_f2fs_readpages { struct trace_entry ent; dev_t dev; ino_t ino; unsigned long start; unsigned int nrpage; char __data[0]; }; struct trace_event_raw_f2fs_write_checkpoint { struct trace_entry ent; dev_t dev; int reason; u32 __data_loc_dest_msg; char __data[0]; }; struct trace_event_raw_f2fs_discard { struct trace_entry ent; dev_t dev; block_t blkstart; block_t blklen; char __data[0]; }; struct trace_event_raw_f2fs_reset_zone { struct trace_entry ent; dev_t dev; block_t blkstart; char __data[0]; }; struct trace_event_raw_f2fs_issue_flush { struct trace_entry ent; dev_t dev; unsigned int nobarrier; unsigned int flush_merge; int ret; char __data[0]; }; struct trace_event_raw_f2fs_lookup_extent_tree_start { struct trace_entry ent; dev_t dev; ino_t ino; unsigned int pgofs; enum extent_type type; char __data[0]; }; struct trace_event_raw_f2fs_lookup_read_extent_tree_end { struct trace_entry ent; dev_t dev; ino_t ino; unsigned int pgofs; unsigned int fofs; unsigned int len; u32 blk; char __data[0]; }; struct trace_event_raw_f2fs_lookup_age_extent_tree_end { struct trace_entry ent; dev_t dev; ino_t ino; unsigned int pgofs; unsigned int fofs; unsigned int len; unsigned long long age; unsigned long long blocks; char __data[0]; }; struct trace_event_raw_f2fs_update_read_extent_tree_range { struct trace_entry ent; dev_t dev; ino_t ino; unsigned int pgofs; u32 blk; unsigned int len; unsigned int c_len; char __data[0]; }; struct trace_event_raw_f2fs_update_age_extent_tree_range { struct trace_entry ent; dev_t dev; ino_t ino; unsigned int pgofs; unsigned int len; unsigned long long age; unsigned long long blocks; char __data[0]; }; struct trace_event_raw_f2fs_shrink_extent_tree { struct trace_entry ent; dev_t dev; unsigned int node_cnt; unsigned int tree_cnt; enum extent_type type; char __data[0]; }; struct trace_event_raw_f2fs_destroy_extent_tree { struct trace_entry ent; dev_t dev; ino_t ino; unsigned int node_cnt; enum extent_type type; char __data[0]; }; struct trace_event_raw_f2fs_sync_dirty_inodes { struct trace_entry ent; dev_t dev; int type; s64 count; char __data[0]; }; struct trace_event_raw_f2fs_shutdown { struct trace_entry ent; dev_t dev; unsigned int mode; int ret; char __data[0]; }; struct trace_event_raw_f2fs_zip_start { struct trace_entry ent; dev_t dev; ino_t ino; unsigned long idx; unsigned int size; unsigned int algtype; char __data[0]; }; struct trace_event_raw_f2fs_zip_end { struct trace_entry ent; dev_t dev; ino_t ino; unsigned long idx; unsigned int size; unsigned int ret; char __data[0]; }; struct trace_event_raw_f2fs_iostat { struct trace_entry ent; dev_t dev; unsigned long long app_dio; unsigned long long app_bio; unsigned long long app_wio; unsigned long long app_mio; unsigned long long app_bcdio; unsigned long long app_mcdio; unsigned long long fs_dio; unsigned long long fs_cdio; unsigned long long fs_nio; unsigned long long fs_mio; unsigned long long fs_gc_dio; unsigned long long fs_gc_nio; unsigned long long fs_cp_dio; unsigned long long fs_cp_nio; unsigned long long fs_cp_mio; 
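/* Write-path byte counters end here; the members that follow are the read-, discard- and zone-reset-path counters of the f2fs_iostat event. */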
unsigned long long app_drio; unsigned long long app_brio; unsigned long long app_rio; unsigned long long app_mrio; unsigned long long app_bcrio; unsigned long long app_mcrio; unsigned long long fs_drio; unsigned long long fs_gdrio; unsigned long long fs_cdrio; unsigned long long fs_nrio; unsigned long long fs_mrio; unsigned long long fs_discard; unsigned long long fs_reset_zone; char __data[0]; }; struct trace_event_raw_f2fs_iostat_latency { struct trace_entry ent; dev_t dev; unsigned int d_rd_peak; unsigned int d_rd_avg; unsigned int d_rd_cnt; unsigned int n_rd_peak; unsigned int n_rd_avg; unsigned int n_rd_cnt; unsigned int m_rd_peak; unsigned int m_rd_avg; unsigned int m_rd_cnt; unsigned int d_wr_s_peak; unsigned int d_wr_s_avg; unsigned int d_wr_s_cnt; unsigned int n_wr_s_peak; unsigned int n_wr_s_avg; unsigned int n_wr_s_cnt; unsigned int m_wr_s_peak; unsigned int m_wr_s_avg; unsigned int m_wr_s_cnt; unsigned int d_wr_as_peak; unsigned int d_wr_as_avg; unsigned int d_wr_as_cnt; unsigned int n_wr_as_peak; unsigned int n_wr_as_avg; unsigned int n_wr_as_cnt; unsigned int m_wr_as_peak; unsigned int m_wr_as_avg; unsigned int m_wr_as_cnt; char __data[0]; }; struct trace_event_raw_f2fs_bmap { struct trace_entry ent; dev_t dev; ino_t ino; sector_t lblock; sector_t pblock; char __data[0]; }; struct trace_event_raw_f2fs_fiemap { struct trace_entry ent; dev_t dev; ino_t ino; sector_t lblock; sector_t pblock; unsigned long long len; unsigned int flags; int ret; char __data[0]; }; struct trace_event_raw_f2fs__rw_start { struct trace_entry ent; u32 __data_loc_pathbuf; loff_t offset; int bytes; loff_t i_size; u32 __data_loc_cmdline; pid_t pid; ino_t ino; char __data[0]; }; struct trace_event_raw_f2fs__rw_end { struct trace_entry ent; ino_t ino; loff_t offset; int bytes; char __data[0]; }; struct trace_event_data_offsets_f2fs_unlink_enter { u32 name; }; struct trace_event_data_offsets_f2fs_lookup_start { u32 name; }; struct trace_event_data_offsets_f2fs_lookup_end { u32 name; }; struct trace_event_data_offsets_f2fs_write_checkpoint { u32 dest_msg; }; struct cp_control { int reason; __u64 trim_start; __u64 trim_end; __u64 trim_minlen; }; struct f2fs_report_zones_args { struct f2fs_sb_info *sbi; struct f2fs_dev_info *dev; }; struct trace_event_data_offsets_f2fs__inode {}; struct trace_event_data_offsets_f2fs__inode_exit {}; struct trace_event_data_offsets_f2fs_sync_file_exit {}; struct trace_event_data_offsets_f2fs_sync_fs {}; struct trace_event_data_offsets_f2fs_truncate_data_blocks_range {}; struct trace_event_data_offsets_f2fs__truncate_op {}; struct trace_event_data_offsets_f2fs__truncate_node {}; struct trace_event_data_offsets_f2fs_truncate_partial_nodes {}; struct trace_event_data_offsets_f2fs_file_write_iter {}; struct trace_event_data_offsets_f2fs_map_blocks {}; struct trace_event_data_offsets_f2fs_background_gc {}; struct trace_event_data_offsets_f2fs_gc_begin {}; struct trace_event_data_offsets_f2fs_gc_end {}; struct trace_event_data_offsets_f2fs_get_victim {}; struct trace_event_data_offsets_f2fs_rename_start { u32 old_name; u32 new_name; }; struct trace_event_data_offsets_f2fs_rename_end { u32 old_name; u32 new_name; }; struct trace_event_data_offsets_f2fs_readdir {}; struct trace_event_data_offsets_f2fs_fallocate {}; struct trace_event_data_offsets_f2fs_direct_IO_enter {}; struct trace_event_data_offsets_f2fs_direct_IO_exit {}; struct trace_event_data_offsets_f2fs_reserve_new_blocks {}; struct trace_event_data_offsets_f2fs__submit_page_bio {}; struct trace_event_data_offsets_f2fs__bio {}; 
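/*
 * Usage sketch (illustrative, not part of the generated definitions): the
 * btf_trace_f2fs_* typedefs above spell out the argument lists that
 * tp_btf/fentry-style BPF programs receive after the implicit context. A
 * minimal libbpf CO-RE consumer of btf_trace_f2fs_write_checkpoint could
 * look like the following, assuming bpf_helpers.h/bpf_tracing.h from libbpf
 * (the program name is arbitrary):
 *
 *   #include "vmlinux.h"
 *   #include <bpf/bpf_helpers.h>
 *   #include <bpf/bpf_tracing.h>
 *
 *   char LICENSE[] SEC("license") = "GPL";
 *
 *   SEC("tp_btf/f2fs_write_checkpoint")
 *   int BPF_PROG(on_f2fs_cp, struct super_block *sb, int reason, const char *msg)
 *   {
 *           bpf_printk("f2fs checkpoint: dev %u reason %d", sb->s_dev, reason);
 *           return 0;
 *   }
 *
 * Classic SEC("tp/f2fs/...") programs would instead typically declare their
 * context as a pointer to the matching trace_event_raw_f2fs_* record.
 */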
struct trace_event_data_offsets_f2fs_write_begin {}; struct trace_event_data_offsets_f2fs_write_end {}; struct trace_event_data_offsets_f2fs__folio {}; struct trace_event_data_offsets_f2fs_replace_atomic_write_block {}; struct trace_event_data_offsets_f2fs_mmap {}; struct trace_event_data_offsets_f2fs_writepages {}; struct trace_event_data_offsets_f2fs_readpages {}; struct trace_event_data_offsets_f2fs_discard {}; struct trace_event_data_offsets_f2fs_reset_zone {}; struct trace_event_data_offsets_f2fs_issue_flush {}; struct trace_event_data_offsets_f2fs_lookup_extent_tree_start {}; struct trace_event_data_offsets_f2fs_lookup_read_extent_tree_end {}; struct trace_event_data_offsets_f2fs_lookup_age_extent_tree_end {}; struct trace_event_data_offsets_f2fs_update_read_extent_tree_range {}; struct trace_event_data_offsets_f2fs_update_age_extent_tree_range {}; struct trace_event_data_offsets_f2fs_shrink_extent_tree {}; struct trace_event_data_offsets_f2fs_destroy_extent_tree {}; struct trace_event_data_offsets_f2fs_sync_dirty_inodes {}; struct trace_event_data_offsets_f2fs_shutdown {}; struct trace_event_data_offsets_f2fs_zip_start {}; struct trace_event_data_offsets_f2fs_zip_end {}; struct trace_event_data_offsets_f2fs_iostat {}; struct trace_event_data_offsets_f2fs_iostat_latency {}; struct trace_event_data_offsets_f2fs_bmap {}; struct trace_event_data_offsets_f2fs_fiemap {}; struct trace_event_data_offsets_f2fs__rw_start { u32 pathbuf; u32 cmdline; }; struct trace_event_data_offsets_f2fs__rw_end {}; enum nat_state { TOTAL_NAT = 0, DIRTY_NAT = 1, RECLAIMABLE_NAT = 2, MAX_NAT_STATE = 3, }; enum nid_state { FREE_NID = 0, PREALLOC_NID = 1, MAX_NID_STATE = 2, }; enum dirty_type { DIRTY_HOT_DATA = 0, DIRTY_WARM_DATA = 1, DIRTY_COLD_DATA = 2, DIRTY_HOT_NODE = 3, DIRTY_WARM_NODE = 4, DIRTY_COLD_NODE = 5, DIRTY = 6, PRE = 7, NR_DIRTY_TYPE = 8, }; enum { NAT_BITMAP = 0, SIT_BITMAP = 1, }; struct ino_entry { struct list_head list; nid_t ino; unsigned int dirty_device; }; struct f2fs_orphan_block { __le32 ino[1020]; __le32 reserved; __le16 blk_addr; __le16 blk_count; __le32 entry_count; __le32 check_sum; }; struct free_nid { struct list_head list; nid_t nid; int state; }; struct ckpt_req { struct completion wait; struct llist_node llnode; int ret; ktime_t queue_time; }; enum { LFS = 0, SSR = 1, AT_SSR = 2, }; enum need_lock_type { LOCK_REQ = 0, LOCK_DONE = 1, LOCK_RETRY = 2, }; struct victim_entry { struct rb_node rb_node; unsigned long long mtime; unsigned int segno; struct list_head list; }; struct inode_entry { struct list_head list; struct inode *inode; }; struct gc_inode_list { struct list_head ilist; struct xarray iroot; }; enum bio_post_read_step___2 { STEP_DECRYPT___2 = 1, STEP_DECOMPRESS = 2, STEP_VERITY___2 = 4, }; enum mem_type { FREE_NIDS = 0, NAT_ENTRIES = 1, DIRTY_DENTS = 2, INO_ENTRIES = 3, READ_EXTENT_CACHE = 4, AGE_EXTENT_CACHE = 5, DISCARD_CACHE = 6, COMPRESS_PAGE = 7, BASE_CHECK = 8, }; struct bio_entry { struct bio *bio; struct list_head list; }; struct bio_post_read_ctx___2 { struct bio *bio; struct f2fs_sb_info *sbi; struct work_struct work; unsigned int enabled_steps; bool decompression_attempted; block_t fs_blkaddr; }; struct bio_iostat_ctx { struct f2fs_sb_info *sbi; unsigned long submit_ts; enum page_type type; struct bio_post_read_ctx___2 *post_read_ctx; }; struct bvec_iter_all { struct bio_vec bv; int idx; unsigned int done; }; struct compress_data; struct compress_ctx { struct inode *inode; unsigned long cluster_idx; unsigned int cluster_size; unsigned int log_cluster_size; 
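/* rpages/cpages below hold the raw (uncompressed) cluster pages and the compressed output pages, respectively. */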
struct page **rpages; unsigned int nr_rpages; struct page **cpages; unsigned int nr_cpages; unsigned int valid_nr_cpages; void *rbuf; struct compress_data *cbuf; size_t rlen; size_t clen; void *private; void *private2; }; struct compress_data { __le32 clen; __le32 chksum; __le32 reserved[4]; u8 cdata[0]; }; struct decompress_io_ctx { u32 magic; struct inode *inode; unsigned long cluster_idx; unsigned int cluster_size; unsigned int log_cluster_size; struct page **rpages; unsigned int nr_rpages; struct page **cpages; unsigned int nr_cpages; struct page **tpages; void *rbuf; struct compress_data *cbuf; size_t rlen; size_t clen; atomic_t remaining_pages; refcount_t refcnt; bool failed; bool need_verity; void *private; void *private2; struct work_struct verity_work; struct work_struct free_work; }; enum { IS_CHECKPOINTED = 0, HAS_FSYNCED_INODE = 1, HAS_LAST_FSYNC = 2, IS_DIRTY = 3, IS_PREALLOC = 4, }; enum { NAT_JOURNAL = 0, SIT_JOURNAL = 1, }; struct fsync_node_entry { struct list_head list; struct page *page; unsigned int seq_id; }; struct nat_entry { struct list_head list; struct node_info ni; }; struct f2fs_nat_block { struct f2fs_nat_entry entries[455]; }; struct nat_entry_set { struct list_head set_list; struct list_head entry_list; nid_t set; unsigned int entry_cnt; }; enum { DPOLICY_BG = 0, DPOLICY_FORCE = 1, DPOLICY_FSTRIM = 2, DPOLICY_UMOUNT = 3, MAX_DPOLICY = 4, }; enum { D_PREP = 0, D_PARTIAL = 1, D_SUBMIT = 2, D_DONE = 3, }; enum { DPOLICY_IO_AWARE_DISABLE = 0, DPOLICY_IO_AWARE_ENABLE = 1, DPOLICY_IO_AWARE_MAX = 2, }; enum blk_zone_cond { BLK_ZONE_COND_NOT_WP = 0, BLK_ZONE_COND_EMPTY = 1, BLK_ZONE_COND_IMP_OPEN = 2, BLK_ZONE_COND_EXP_OPEN = 3, BLK_ZONE_COND_CLOSED = 4, BLK_ZONE_COND_READONLY = 13, BLK_ZONE_COND_FULL = 14, BLK_ZONE_COND_OFFLINE = 15, }; struct flush_cmd { struct completion wait; struct llist_node llnode; nid_t ino; int ret; }; struct discard_entry { struct list_head list; block_t start_blkaddr; unsigned char discard_map[64]; }; struct sit_entry_set { struct list_head set_list; unsigned int start_segno; unsigned int entry_cnt; }; struct revoke_entry { struct list_head list; block_t old_addr; unsigned long index; }; struct discard_info { block_t lstart; block_t len; block_t start; }; struct discard_cmd { struct rb_node rb_node; struct discard_info di; struct list_head list; struct completion wait; struct block_device *bdev; unsigned short ref; unsigned char state; unsigned char queued; int error; spinlock_t lock; unsigned short bio_ref; }; struct f2fs_sit_block { struct f2fs_sit_entry entries[55]; }; struct check_zone_write_pointer_args { struct f2fs_sb_info *sbi; struct f2fs_dev_info *fdev; }; struct discard_policy { int type; unsigned int min_interval; unsigned int mid_interval; unsigned int max_interval; unsigned int max_requests; unsigned int io_aware_gran; bool io_aware; bool sync; bool ordered; bool timeout; unsigned int granularity; }; struct fsync_inode_entry { struct list_head list; struct inode *inode; block_t blkaddr; block_t last_dentry; }; struct f2fs_attr { struct attribute attr; ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *); ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *, const char *, size_t); int struct_type; int offset; int id; }; enum { GC_THREAD = 0, SM_INFO = 1, DCC_INFO = 2, NM_INFO = 3, F2FS_SBI = 4, STAT_INFO = 5, RESERVED_BLOCKS = 6, CPRC_INFO = 7, ATGC_INFO = 8, }; struct f2fs_xattr_entry { __u8 e_name_index; __u8 e_name_len; __le16 e_value_size; char e_name[0]; }; struct f2fs_xattr_header { __le32 h_magic; 
__le32 h_refcount; __u32 h_reserved[4]; }; struct f2fs_acl_header { __le32 a_version; }; struct f2fs_acl_entry { __le16 e_tag; __le16 e_perm; __le32 e_id; }; struct fsverity_descriptor_location { __le32 version; __le32 size; __le64 pos; }; struct f2fs_compress_ops { int (*init_compress_ctx)(struct compress_ctx *); void (*destroy_compress_ctx)(struct compress_ctx *); int (*compress_pages)(struct compress_ctx *); int (*init_decompress_ctx)(struct decompress_io_ctx *); void (*destroy_decompress_ctx)(struct decompress_io_ctx *); int (*decompress_pages)(struct decompress_io_ctx *); bool (*is_level_valid)(int); }; struct compress_io_ctx { u32 magic; struct inode *inode; struct page **rpages; unsigned int nr_rpages; atomic_t pending_pages; }; typedef enum { ZSTD_fast = 1, ZSTD_dfast = 2, ZSTD_greedy = 3, ZSTD_lazy = 4, ZSTD_lazy2 = 5, ZSTD_btlazy2 = 6, ZSTD_btopt = 7, ZSTD_btultra = 8, ZSTD_btultra2 = 9, } ZSTD_strategy; typedef struct { unsigned int windowLog; unsigned int chainLog; unsigned int hashLog; unsigned int searchLog; unsigned int minMatch; unsigned int targetLength; ZSTD_strategy strategy; } ZSTD_compressionParameters; typedef struct { int contentSizeFlag; int checksumFlag; int noDictIDFlag; } ZSTD_frameParameters; typedef struct { ZSTD_compressionParameters cParams; ZSTD_frameParameters fParams; } ZSTD_parameters; typedef ZSTD_parameters zstd_parameters; typedef ZSTD_compressionParameters zstd_compression_parameters; typedef enum { ZSTDcs_created = 0, ZSTDcs_init = 1, ZSTDcs_ongoing = 2, ZSTDcs_ending = 3, } ZSTD_compressionStage_e; typedef enum { ZSTD_dictDefaultAttach = 0, ZSTD_dictForceAttach = 1, ZSTD_dictForceCopy = 2, ZSTD_dictForceLoad = 3, } ZSTD_dictAttachPref_e; typedef enum { ZSTD_ps_auto = 0, ZSTD_ps_enable = 1, ZSTD_ps_disable = 2, } ZSTD_paramSwitch_e; typedef struct { ZSTD_paramSwitch_e enableLdm; U32 hashLog; U32 bucketSizeLog; U32 minMatchLength; U32 hashRateLog; U32 windowLog; } ldmParams_t; typedef enum { ZSTD_sf_noBlockDelimiters = 0, ZSTD_sf_explicitBlockDelimiters = 1, } ZSTD_sequenceFormat_e; struct ZSTD_CCtx_params_s { ZSTD_format_e format; ZSTD_compressionParameters cParams; ZSTD_frameParameters fParams; int compressionLevel; int forceWindow; size_t targetCBlockSize; int srcSizeHint; ZSTD_dictAttachPref_e attachDictPref; ZSTD_paramSwitch_e literalCompressionMode; int nbWorkers; size_t jobSize; int overlapLog; int rsyncable; ldmParams_t ldmParams; int enableDedicatedDictSearch; ZSTD_bufferMode_e inBufferMode; ZSTD_bufferMode_e outBufferMode; ZSTD_sequenceFormat_e blockDelimiters; int validateSequences; ZSTD_paramSwitch_e useBlockSplitter; ZSTD_paramSwitch_e useRowMatchFinder; int deterministicRefPrefix; ZSTD_customMem customMem; }; typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params; typedef enum { ZSTD_cwksp_alloc_objects = 0, ZSTD_cwksp_alloc_buffers = 1, ZSTD_cwksp_alloc_aligned = 2, } ZSTD_cwksp_alloc_phase_e; typedef enum { ZSTD_cwksp_dynamic_alloc = 0, ZSTD_cwksp_static_alloc = 1, } ZSTD_cwksp_static_alloc_e; typedef struct { void *workspace; void *workspaceEnd; void *objectEnd; void *tableEnd; void *tableValidEnd; void *allocStart; BYTE allocFailed; int workspaceOversizedDuration; ZSTD_cwksp_alloc_phase_e phase; ZSTD_cwksp_static_alloc_e isStatic; } ZSTD_cwksp; struct POOL_ctx_s; typedef struct POOL_ctx_s ZSTD_threadPool; typedef struct { unsigned int offset; unsigned int litLength; unsigned int matchLength; unsigned int rep; } ZSTD_Sequence; typedef struct { int collectSequences; ZSTD_Sequence *seqStart; size_t seqIndex; size_t maxSequences; } 
SeqCollector; typedef enum { ZSTD_llt_none = 0, ZSTD_llt_literalLength = 1, ZSTD_llt_matchLength = 2, } ZSTD_longLengthType_e; struct seqDef_s; typedef struct seqDef_s seqDef; typedef struct { seqDef *sequencesStart; seqDef *sequences; BYTE *litStart; BYTE *lit; BYTE *llCode; BYTE *mlCode; BYTE *ofCode; size_t maxNbSeq; size_t maxNbLit; ZSTD_longLengthType_e longLengthType; U32 longLengthPos; } seqStore_t; typedef struct { const BYTE *nextSrc; const BYTE *base; const BYTE *dictBase; U32 dictLimit; U32 lowLimit; U32 nbOverflowCorrections; } ZSTD_window_t; typedef struct { U32 offset; U32 checksum; } ldmEntry_t; typedef struct { const BYTE *split; U32 hash; U32 checksum; ldmEntry_t *bucket; } ldmMatchCandidate_t; typedef struct { ZSTD_window_t window; ldmEntry_t *hashTable; U32 loadedDictEnd; BYTE *bucketOffsets; size_t splitIndices[64]; ldmMatchCandidate_t matchCandidates[64]; } ldmState_t; typedef struct { U32 offset; U32 litLength; U32 matchLength; } rawSeq; typedef struct { rawSeq *seq; size_t pos; size_t posInSequence; size_t size; size_t capacity; } rawSeqStore_t; typedef size_t HUF_CElt; typedef enum { HUF_repeat_none = 0, HUF_repeat_check = 1, HUF_repeat_valid = 2, } HUF_repeat; typedef struct { HUF_CElt CTable[257]; HUF_repeat repeatMode; } ZSTD_hufCTables_t; typedef unsigned int FSE_CTable; typedef enum { FSE_repeat_none = 0, FSE_repeat_check = 1, FSE_repeat_valid = 2, } FSE_repeat; typedef struct { FSE_CTable offcodeCTable[193]; FSE_CTable matchlengthCTable[363]; FSE_CTable litlengthCTable[329]; FSE_repeat offcode_repeatMode; FSE_repeat matchlength_repeatMode; FSE_repeat litlength_repeatMode; } ZSTD_fseCTables_t; typedef struct { ZSTD_hufCTables_t huf; ZSTD_fseCTables_t fse; } ZSTD_entropyCTables_t; typedef struct { ZSTD_entropyCTables_t entropy; U32 rep[3]; } ZSTD_compressedBlockState_t; typedef struct { U32 off; U32 len; } ZSTD_match_t; typedef struct { int price; U32 off; U32 mlen; U32 litlen; U32 rep[3]; } ZSTD_optimal_t; typedef enum { zop_dynamic = 0, zop_predef = 1, } ZSTD_OptPrice_e; typedef struct { unsigned int *litFreq; unsigned int *litLengthFreq; unsigned int *matchLengthFreq; unsigned int *offCodeFreq; ZSTD_match_t *matchTable; ZSTD_optimal_t *priceTable; U32 litSum; U32 litLengthSum; U32 matchLengthSum; U32 offCodeSum; U32 litSumBasePrice; U32 litLengthSumBasePrice; U32 matchLengthSumBasePrice; U32 offCodeSumBasePrice; ZSTD_OptPrice_e priceType; const ZSTD_entropyCTables_t *symbolCosts; ZSTD_paramSwitch_e literalCompressionMode; } optState_t; struct ZSTD_matchState_t; typedef struct ZSTD_matchState_t ZSTD_matchState_t; struct ZSTD_matchState_t { ZSTD_window_t window; U32 loadedDictEnd; U32 nextToUpdate; U32 hashLog3; U32 rowHashLog; U16 *tagTable; U32 hashCache[8]; U32 *hashTable; U32 *hashTable3; U32 *chainTable; U32 forceNonContiguous; int dedicatedDictSearch; optState_t opt; const ZSTD_matchState_t *dictMatchState; ZSTD_compressionParameters cParams; const rawSeqStore_t *ldmSeqStore; }; typedef struct { ZSTD_compressedBlockState_t *prevCBlock; ZSTD_compressedBlockState_t *nextCBlock; ZSTD_matchState_t matchState; } ZSTD_blockState_t; typedef enum { ZSTDb_not_buffered = 0, ZSTDb_buffered = 1, } ZSTD_buffered_policy_e; typedef enum { zcss_init = 0, zcss_load = 1, zcss_flush = 2, } ZSTD_cStreamStage; typedef enum { ZSTD_dct_auto = 0, ZSTD_dct_rawContent = 1, ZSTD_dct_fullDict = 2, } ZSTD_dictContentType_e; struct ZSTD_CDict_s; typedef struct ZSTD_CDict_s ZSTD_CDict; typedef struct { void *dictBuffer; const void *dict; size_t dictSize; ZSTD_dictContentType_e 
dictContentType; ZSTD_CDict *cdict; } ZSTD_localDict; struct ZSTD_prefixDict_s { const void *dict; size_t dictSize; ZSTD_dictContentType_e dictContentType; }; typedef struct ZSTD_prefixDict_s ZSTD_prefixDict; typedef enum { set_basic = 0, set_rle = 1, set_compressed = 2, set_repeat = 3, } symbolEncodingType_e; typedef struct { symbolEncodingType_e hType; BYTE hufDesBuffer[128]; size_t hufDesSize; } ZSTD_hufCTablesMetadata_t; typedef struct { symbolEncodingType_e llType; symbolEncodingType_e ofType; symbolEncodingType_e mlType; BYTE fseTablesBuffer[133]; size_t fseTablesSize; size_t lastCountSize; } ZSTD_fseCTablesMetadata_t; typedef struct { ZSTD_hufCTablesMetadata_t hufMetadata; ZSTD_fseCTablesMetadata_t fseMetadata; } ZSTD_entropyCTablesMetadata_t; typedef struct { seqStore_t fullSeqStoreChunk; seqStore_t firstHalfSeqStore; seqStore_t secondHalfSeqStore; seqStore_t currSeqStore; seqStore_t nextSeqStore; U32 partitions[196]; ZSTD_entropyCTablesMetadata_t entropyMetadata; } ZSTD_blockSplitCtx; struct ZSTD_CCtx_s { ZSTD_compressionStage_e stage; int cParamsChanged; int bmi2; ZSTD_CCtx_params requestedParams; ZSTD_CCtx_params appliedParams; ZSTD_CCtx_params simpleApiParams; U32 dictID; size_t dictContentSize; ZSTD_cwksp workspace; size_t blockSize; unsigned long long pledgedSrcSizePlusOne; unsigned long long consumedSrcSize; unsigned long long producedCSize; struct xxh64_state xxhState; ZSTD_customMem customMem; ZSTD_threadPool *pool; size_t staticSize; SeqCollector seqCollector; int isFirstBlock; int initialized; seqStore_t seqStore; ldmState_t ldmState; rawSeq *ldmSequences; size_t maxNbLdmSequences; rawSeqStore_t externSeqStore; ZSTD_blockState_t blockState; U32 *entropyWorkspace; ZSTD_buffered_policy_e bufferedPolicy; char *inBuff; size_t inBuffSize; size_t inToCompress; size_t inBuffPos; size_t inBuffTarget; char *outBuff; size_t outBuffSize; size_t outBuffContentSize; size_t outBuffFlushedSize; ZSTD_cStreamStage streamStage; U32 frameEnded; ZSTD_inBuffer expectedInBuffer; size_t expectedOutBufferSize; ZSTD_localDict localDict; const ZSTD_CDict *cdict; ZSTD_prefixDict prefixDict; ZSTD_blockSplitCtx blockSplitCtx; }; typedef struct ZSTD_CCtx_s ZSTD_CCtx; typedef ZSTD_CCtx ZSTD_CStream; typedef ZSTD_CStream zstd_cstream; typedef enum { ZSTD_error_no_error = 0, ZSTD_error_GENERIC = 1, ZSTD_error_prefix_unknown = 10, ZSTD_error_version_unsupported = 12, ZSTD_error_frameParameter_unsupported = 14, ZSTD_error_frameParameter_windowTooLarge = 16, ZSTD_error_corruption_detected = 20, ZSTD_error_checksum_wrong = 22, ZSTD_error_dictionary_corrupted = 30, ZSTD_error_dictionary_wrong = 32, ZSTD_error_dictionaryCreation_failed = 34, ZSTD_error_parameter_unsupported = 40, ZSTD_error_parameter_outOfBound = 42, ZSTD_error_tableLog_tooLarge = 44, ZSTD_error_maxSymbolValue_tooLarge = 46, ZSTD_error_maxSymbolValue_tooSmall = 48, ZSTD_error_stage_wrong = 60, ZSTD_error_init_missing = 62, ZSTD_error_memory_allocation = 64, ZSTD_error_workSpace_tooSmall = 66, ZSTD_error_dstSize_tooSmall = 70, ZSTD_error_srcSize_wrong = 72, ZSTD_error_dstBuffer_null = 74, ZSTD_error_frameIndex_tooLarge = 100, ZSTD_error_seekableIO = 102, ZSTD_error_dstBuffer_wrong = 104, ZSTD_error_srcBuffer_wrong = 105, ZSTD_error_maxCode = 120, } ZSTD_ErrorCode; typedef ZSTD_ErrorCode zstd_error_code; enum pstore_type_id { PSTORE_TYPE_DMESG = 0, PSTORE_TYPE_MCE = 1, PSTORE_TYPE_CONSOLE = 2, PSTORE_TYPE_FTRACE = 3, PSTORE_TYPE_PPC_RTAS = 4, PSTORE_TYPE_PPC_OF = 5, PSTORE_TYPE_PPC_COMMON = 6, PSTORE_TYPE_PMSG = 7, PSTORE_TYPE_PPC_OPAL = 8, 
PSTORE_TYPE_MAX = 9, }; enum { Opt_kmsg_bytes = 0, Opt_err___7 = 1, }; struct pstore_record; struct pstore_private { struct list_head list; struct dentry *dentry; struct pstore_record *record; size_t total_size; }; struct pstore_info; struct pstore_record { struct pstore_info *psi; enum pstore_type_id type; u64 id; struct timespec64 time; char *buf; ssize_t size; ssize_t ecc_notice_size; void *priv; int count; enum kmsg_dump_reason reason; unsigned int part; bool compressed; }; struct pstore_info { struct module *owner; const char *name; spinlock_t buf_lock; char *buf; size_t bufsize; struct mutex read_mutex; int flags; int max_reason; void *data; int (*open)(struct pstore_info *); int (*close)(struct pstore_info *); ssize_t (*read)(struct pstore_record *); int (*write)(struct pstore_record *); int (*write_user)(struct pstore_record *, const char __attribute__((btf_type_tag("user"))) *); int (*erase)(struct pstore_record *); }; struct pstore_ftrace_record { unsigned long ip; unsigned long parent_ip; u64 ts; }; struct pstore_ftrace_seq_data { const void *ptr; size_t off; size_t size; }; typedef unsigned char Byte; typedef unsigned long uLong; struct internal_state; struct z_stream_s { const Byte *next_in; uLong avail_in; uLong total_in; Byte *next_out; uLong avail_out; uLong total_out; char *msg; struct internal_state *state; void *workspace; int data_type; uLong adler; uLong reserved; }; struct internal_state { int dummy; }; typedef struct z_stream_s z_stream; typedef z_stream *z_streamp; struct persistent_ram_ecc_info { int block_size; int ecc_size; int symsize; int poly; uint16_t *par; }; struct persistent_ram_zone; struct ramoops_context { struct persistent_ram_zone **dprzs; struct persistent_ram_zone *cprz; struct persistent_ram_zone **fprzs; struct persistent_ram_zone *mprz; phys_addr_t phys_addr; unsigned long size; unsigned int memtype; size_t record_size; size_t console_size; size_t ftrace_size; size_t pmsg_size; u32 flags; struct persistent_ram_ecc_info ecc_info; unsigned int max_dump_cnt; unsigned int dump_write_cnt; unsigned int dump_read_cnt; unsigned int console_read_cnt; unsigned int max_ftrace_cnt; unsigned int ftrace_read_cnt; unsigned int pmsg_read_cnt; struct pstore_info pstore; }; struct persistent_ram_buffer; struct rs_control; struct persistent_ram_zone { phys_addr_t paddr; size_t size; void *vaddr; char *label; enum pstore_type_id type; u32 flags; raw_spinlock_t buffer_lock; struct persistent_ram_buffer *buffer; size_t buffer_size; char *par_buffer; char *par_header; struct rs_control *rs_decoder; int corrected_bytes; int bad_blocks; struct persistent_ram_ecc_info ecc_info; char *old_log; size_t old_log_size; }; struct ramoops_platform_data { unsigned long mem_size; phys_addr_t mem_address; unsigned int mem_type; unsigned long record_size; unsigned long console_size; unsigned long ftrace_size; unsigned long pmsg_size; int max_reason; u32 flags; struct persistent_ram_ecc_info ecc_info; }; struct persistent_ram_buffer { uint32_t sig; atomic_t start; atomic_t size; uint8_t data[0]; }; struct rs_codec; struct rs_control { struct rs_codec *codec; uint16_t buffers[0]; }; struct rs_codec { int mm; int nn; uint16_t *alpha_to; uint16_t *index_of; uint16_t *genpoly; int nroots; int fcr; int prim; int iprim; int gfpoly; int (*gffunc)(int); int users; struct list_head list; }; typedef void (*btf_trace_erofs_lookup)(void *, struct inode *, struct dentry *, unsigned int); typedef void (*btf_trace_erofs_fill_inode)(void *, struct inode *); typedef void 
(*btf_trace_erofs_read_folio)(void *, struct folio *, bool); typedef void (*btf_trace_erofs_readpages)(void *, struct inode *, unsigned long, unsigned int, bool); struct erofs_map_blocks; typedef void (*btf_trace_erofs_map_blocks_enter)(void *, struct inode *, struct erofs_map_blocks *, unsigned int); enum erofs_kmap_type { EROFS_NO_KMAP = 0, EROFS_KMAP = 1, }; struct erofs_buf { struct inode *inode; struct page *page; void *base; enum erofs_kmap_type kmap_type; }; typedef u64 erofs_off_t; struct erofs_map_blocks { struct erofs_buf buf; erofs_off_t m_pa; erofs_off_t m_la; u64 m_plen; u64 m_llen; unsigned short m_deviceid; char m_algorithmformat; unsigned int m_flags; }; typedef void (*btf_trace_z_erofs_map_blocks_iter_enter)(void *, struct inode *, struct erofs_map_blocks *, unsigned int); typedef void (*btf_trace_erofs_map_blocks_exit)(void *, struct inode *, struct erofs_map_blocks *, unsigned int, int); typedef void (*btf_trace_z_erofs_map_blocks_iter_exit)(void *, struct inode *, struct erofs_map_blocks *, unsigned int, int); typedef void (*btf_trace_erofs_destroy_inode)(void *, struct inode *); enum { EROFS_ZIP_CACHE_DISABLED = 0, EROFS_ZIP_CACHE_READAHEAD = 1, EROFS_ZIP_CACHE_READAROUND = 2, }; enum { EROFS_SYNC_DECOMPRESS_AUTO = 0, EROFS_SYNC_DECOMPRESS_FORCE_ON = 1, EROFS_SYNC_DECOMPRESS_FORCE_OFF = 2, }; enum { Opt_user_xattr___3 = 0, Opt_acl___3 = 1, Opt_cache_strategy = 2, Opt_dax___2 = 3, Opt_dax_enum = 4, Opt_device = 5, Opt_fsid = 6, Opt_domain_id = 7, Opt_err___8 = 8, }; typedef u32 erofs_blk_t; typedef u64 erofs_nid_t; struct erofs_inode { erofs_nid_t nid; unsigned long flags; unsigned char datalayout; unsigned char inode_isize; unsigned int xattr_isize; unsigned int xattr_name_filter; unsigned int xattr_shared_count; unsigned int *xattr_shared_xattrs; union { erofs_blk_t raw_blkaddr; struct { unsigned short chunkformat; unsigned char chunkbits; }; struct { unsigned short z_advise; unsigned char z_algorithmtype[2]; unsigned char z_logical_clusterbits; unsigned long z_tailextent_headlcn; union { struct { erofs_off_t z_idataoff; unsigned short z_idata_size; }; erofs_off_t z_fragmentoff; }; }; }; struct inode vfs_inode; }; struct trace_event_raw_erofs_lookup { struct trace_entry ent; dev_t dev; erofs_nid_t nid; u32 __data_loc_name; unsigned int flags; char __data[0]; }; struct erofs_mount_opts { unsigned char cache_strategy; unsigned int sync_decompress; unsigned int max_sync_decompress_pages; unsigned int mount_opt; }; struct erofs_sb_lz4_info { u16 max_distance_pages; u16 max_pclusterblks; }; struct fscache_volume; struct erofs_dev_context; struct erofs_xattr_prefix_item; struct erofs_fscache; struct erofs_domain; struct erofs_sb_info { struct erofs_mount_opts opt; struct list_head list; struct mutex umount_mutex; struct xarray managed_pslots; unsigned int shrinker_run_no; u16 available_compr_algs; struct inode *managed_cache; struct erofs_sb_lz4_info lz4; struct inode *packed_inode; struct erofs_dev_context *devs; struct dax_device *dax_dev; u64 dax_part_off; u64 total_blocks; u32 primarydevice_blocks; u32 meta_blkaddr; u32 xattr_blkaddr; u32 xattr_prefix_start; u8 xattr_prefix_count; struct erofs_xattr_prefix_item *xattr_prefixes; unsigned int xattr_filter_reserved; u16 device_id_mask; unsigned char islotbits; unsigned char blkszbits; u32 sb_size; u32 build_time_nsec; u64 build_time; erofs_nid_t root_nid; erofs_nid_t packed_nid; u64 inos; u8 uuid[16]; u8 volume_name[16]; u32 feature_compat; u32 feature_incompat; struct kobject s_kobj; struct completion s_kobj_unregister; 
struct fscache_volume *volume; struct erofs_fscache *s_fscache; struct erofs_domain *domain; char *fsid; char *domain_id; }; struct erofs_dev_context { struct idr tree; struct rw_semaphore rwsem; unsigned int extra_devices; bool flatdev; }; struct erofs_xattr_long_prefix; struct erofs_xattr_prefix_item { struct erofs_xattr_long_prefix *prefix; u8 infix_len; }; struct erofs_xattr_long_prefix { __u8 base_index; char infix[0]; }; struct fscache_cookie; struct erofs_fscache { struct fscache_cookie *cookie; struct inode *inode; struct erofs_domain *domain; struct list_head node; refcount_t ref; char *name; }; struct erofs_domain { refcount_t ref; struct list_head list; struct fscache_volume *volume; char *domain_id; }; struct trace_event_raw_erofs_fill_inode { struct trace_entry ent; dev_t dev; erofs_nid_t nid; erofs_blk_t blkaddr; unsigned int ofs; char __data[0]; }; struct trace_event_raw_erofs_read_folio { struct trace_entry ent; dev_t dev; erofs_nid_t nid; int dir; unsigned long index; int uptodate; bool raw; char __data[0]; }; struct trace_event_raw_erofs_readpages { struct trace_entry ent; dev_t dev; erofs_nid_t nid; unsigned long start; unsigned int nrpage; bool raw; char __data[0]; }; struct trace_event_raw_erofs__map_blocks_enter { struct trace_entry ent; dev_t dev; erofs_nid_t nid; erofs_off_t la; u64 llen; unsigned int flags; char __data[0]; }; struct trace_event_raw_erofs__map_blocks_exit { struct trace_entry ent; dev_t dev; erofs_nid_t nid; unsigned int flags; erofs_off_t la; erofs_off_t pa; u64 llen; u64 plen; unsigned int mflags; int ret; char __data[0]; }; struct trace_event_raw_erofs_destroy_inode { struct trace_entry ent; dev_t dev; erofs_nid_t nid; char __data[0]; }; struct erofs_super_block { __le32 magic; __le32 checksum; __le32 feature_compat; __u8 blkszbits; __u8 sb_extslots; __le16 root_nid; __le64 inos; __le64 build_time; __le32 build_time_nsec; __le32 blocks; __le32 meta_blkaddr; __le32 xattr_blkaddr; __u8 uuid[16]; __u8 volume_name[16]; __le32 feature_incompat; union { __le16 available_compr_algs; __le16 lz4_max_distance; } u1; __le16 extra_devices; __le16 devt_slotoff; __u8 dirblkbits; __u8 xattr_prefix_count; __le32 xattr_prefix_start; __le64 packed_nid; __u8 xattr_filter_reserved; __u8 reserved2[23]; }; struct trace_event_data_offsets_erofs_lookup { u32 name; }; struct erofs_fs_context { struct erofs_mount_opts opt; struct erofs_dev_context *devs; char *fsid; char *domain_id; }; struct bdev_handle { struct block_device *bdev; void *holder; }; struct trace_event_data_offsets_erofs_fill_inode {}; struct trace_event_data_offsets_erofs_read_folio {}; struct trace_event_data_offsets_erofs_readpages {}; struct trace_event_data_offsets_erofs__map_blocks_enter {}; struct trace_event_data_offsets_erofs__map_blocks_exit {}; struct trace_event_data_offsets_erofs_destroy_inode {}; struct erofs_device_info { char *path; struct erofs_fscache *fscache; struct bdev_handle *bdev_handle; struct dax_device *dax_dev; u64 dax_part_off; u32 blocks; u32 mapped_blkaddr; }; struct erofs_deviceslot { u8 tag[64]; __le32 blocks; __le32 mapped_blkaddr; u8 reserved[56]; }; enum { EROFS_INODE_FLAT_PLAIN = 0, EROFS_INODE_COMPRESSED_FULL = 1, EROFS_INODE_FLAT_INLINE = 2, EROFS_INODE_COMPRESSED_COMPACT = 3, EROFS_INODE_CHUNK_BASED = 4, EROFS_INODE_DATALAYOUT_MAX = 5, }; struct erofs_inode_chunk_info { __le16 format; __le16 reserved; }; union erofs_inode_i_u { __le32 compressed_blocks; __le32 raw_blkaddr; __le32 rdev; struct erofs_inode_chunk_info c; }; struct erofs_inode_extended { __le16 i_format; 
__le16 i_xattr_icount; __le16 i_mode; __le16 i_reserved; __le64 i_size; union erofs_inode_i_u i_u; __le32 i_ino; __le32 i_uid; __le32 i_gid; __le64 i_mtime; __le32 i_mtime_nsec; __le32 i_nlink; __u8 i_reserved2[16]; }; struct erofs_inode_compact { __le16 i_format; __le16 i_xattr_icount; __le16 i_mode; __le16 i_nlink; __le32 i_size; __le32 i_reserved; union erofs_inode_i_u i_u; __le32 i_ino; __le16 i_uid; __le16 i_gid; __le32 i_reserved2; }; struct erofs_inode_chunk_index { __le16 advise; __le16 device_id; __le32 blkaddr; }; struct erofs_map_dev { struct erofs_fscache *m_fscache; struct block_device *m_bdev; struct dax_device *m_daxdev; u64 m_dax_part_off; erofs_off_t m_pa; unsigned int m_deviceid; }; struct erofs_dirent { __le64 nid; __le16 nameoff; __u8 file_type; __u8 reserved; } __attribute__((packed)); struct erofs_qstr { const unsigned char *name; const unsigned char *end; }; struct erofs_attr { struct attribute attr; short attr_id; int struct_type; int offset; }; enum { attr_feature___2 = 0, attr_pointer_ui___2 = 1, attr_pointer_bool = 2, }; enum { struct_erofs_sb_info = 0, struct_erofs_mount_opts = 1, }; struct erofs_xattr_entry { __u8 e_name_len; __u8 e_name_index; __le16 e_value_size; char e_name[0]; }; struct erofs_xattr_iter { struct super_block *sb; struct erofs_buf buf; erofs_off_t pos; void *kaddr; char *buffer; int buffer_size; int buffer_ofs; int index; int infix_len; struct qstr name; struct dentry *dentry; }; struct erofs_xattr_ibody_header { __le32 h_name_filter; __u8 h_shared_count; __u8 h_reserved2[7]; __le32 h_shared_xattrs[0]; }; struct z_erofs_decompress_req; struct z_erofs_decompressor { int (*config)(struct super_block *, struct erofs_super_block *, void *, int); int (*decompress)(struct z_erofs_decompress_req *, struct page **); char *name; }; struct z_erofs_decompress_req { struct super_block *sb; struct page **in; struct page **out; unsigned short pageofs_in; unsigned short pageofs_out; unsigned int inputsize; unsigned int outputsize; unsigned int alg; bool inplace_io; bool partial_decoding; bool fillgaps; gfp_t gfp; }; enum { Z_EROFS_COMPRESSION_LZ4 = 0, Z_EROFS_COMPRESSION_LZMA = 1, Z_EROFS_COMPRESSION_DEFLATE = 2, Z_EROFS_COMPRESSION_MAX = 3, }; enum { Z_EROFS_COMPRESSION_SHIFTED = 3, Z_EROFS_COMPRESSION_INTERLACED = 4, Z_EROFS_COMPRESSION_RUNTIME_MAX = 5, }; struct z_erofs_lz4_decompress_ctx { struct z_erofs_decompress_req *rq; unsigned int inpages; unsigned int outpages; unsigned int oend; }; struct z_erofs_lz4_cfgs { __le16 max_distance; __le16 max_pclusterblks; u8 reserved[10]; }; enum { Z_EROFS_LCLUSTER_TYPE_PLAIN = 0, Z_EROFS_LCLUSTER_TYPE_HEAD1 = 1, Z_EROFS_LCLUSTER_TYPE_NONHEAD = 2, Z_EROFS_LCLUSTER_TYPE_HEAD2 = 3, Z_EROFS_LCLUSTER_TYPE_MAX = 4, }; struct z_erofs_map_header { union { __le32 h_fragmentoff; struct { __le16 h_reserved1; __le16 h_idata_size; }; }; __le16 h_advise; __u8 h_algorithmtype; __u8 h_clusterbits; }; struct z_erofs_maprecorder { struct inode *inode; struct erofs_map_blocks *map; void *kaddr; unsigned long lcn; u8 type; u8 headtype; u16 clusterofs; u16 delta[2]; erofs_blk_t pblk; erofs_blk_t compressedblks; erofs_off_t nextpackoff; bool partialref; }; struct z_erofs_lcluster_index { __le16 di_advise; __le16 di_clusterofs; union { __le32 blkaddr; __le16 delta[2]; } di_u; }; struct z_erofs_pcluster_slab { struct kmem_cache *slab; unsigned int maxpages; char name[48]; }; enum z_erofs_pclustermode { Z_EROFS_PCLUSTER_INFLIGHT = 0, Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE = 1, Z_EROFS_PCLUSTER_FOLLOWED = 2, }; enum { JQ_BYPASS = 0, JQ_SUBMIT 
= 1, NR_JOBQUEUES = 2, }; struct z_erofs_bvec { struct page *page; int offset; unsigned int end; }; struct z_erofs_bvset_inline { struct page *nextpage; struct z_erofs_bvec bvec[2]; }; struct erofs_workgroup { unsigned long index; struct lockref lockref; }; typedef void *z_erofs_next_pcluster_t; struct z_erofs_pcluster { struct erofs_workgroup obj; struct mutex lock; z_erofs_next_pcluster_t next; unsigned int length; unsigned int vcnt; unsigned int pclustersize; unsigned short pageofs_out; unsigned short pageofs_in; union { struct z_erofs_bvset_inline bvset; struct callback_head rcu; }; unsigned char algorithmformat; bool partial; bool multibases; bool besteffort; struct z_erofs_bvec compressed_bvecs[0]; }; struct z_erofs_bvset { struct page *nextpage; struct z_erofs_bvec bvec[0]; }; struct z_erofs_decompressqueue { struct super_block *sb; atomic_t pending_bios; z_erofs_next_pcluster_t head; union { struct completion done; struct work_struct work; struct kthread_work kthread_work; } u; bool eio; bool sync; }; struct z_erofs_bvec_item { struct z_erofs_bvec bvec; struct list_head list; }; struct z_erofs_bvec_iter { struct page *bvpage; struct z_erofs_bvset *bvset; unsigned int nr; unsigned int cur; }; struct z_erofs_decompress_frontend { struct inode * const inode; struct erofs_map_blocks map; struct z_erofs_bvec_iter biter; struct page *pagepool; struct page *candidate_bvpage; struct z_erofs_pcluster *pcl; z_erofs_next_pcluster_t owned_head; enum z_erofs_pclustermode mode; erofs_off_t headoffset; unsigned int icur; }; struct z_erofs_decompress_backend { struct page *onstack_pages[32]; struct super_block *sb; struct z_erofs_pcluster *pcl; struct page **decompressed_pages; struct page **compressed_pages; struct list_head decompressed_secondary_bvecs; struct page **pagepool; unsigned int onstack_used; unsigned int nr_pages; }; struct z_erofs_gbuf { spinlock_t lock; void *ptr; struct page **pages; unsigned int nrpages; }; struct key_user { struct rb_node node; struct mutex cons_lock; spinlock_t lock; refcount_t usage; atomic_t nkeys; atomic_t nikeys; kuid_t uid; int qnkeys; int qnbytes; }; enum key_notification_subtype { NOTIFY_KEY_INSTANTIATED = 0, NOTIFY_KEY_UPDATED = 1, NOTIFY_KEY_LINKED = 2, NOTIFY_KEY_UNLINKED = 3, NOTIFY_KEY_CLEARED = 4, NOTIFY_KEY_REVOKED = 5, NOTIFY_KEY_INVALIDATED = 6, NOTIFY_KEY_SETATTR = 7, }; struct assoc_array_ops { unsigned long (*get_key_chunk)(const void *, int); unsigned long (*get_object_key_chunk)(const void *, int); bool (*compare_object)(const void *, const void *); int (*diff_objects)(const void *, const void *); void (*free_object)(void *); }; struct assoc_array_shortcut { struct assoc_array_ptr *back_pointer; int parent_slot; int skip_to_level; struct assoc_array_ptr *next_node; unsigned long index_key[0]; }; struct assoc_array_node { struct assoc_array_ptr *back_pointer; u8 parent_slot; struct assoc_array_ptr *slots[16]; unsigned long nr_leaves_on_branch; }; struct assoc_array_edit { struct callback_head rcu; struct assoc_array *array; const struct assoc_array_ops *ops; const struct assoc_array_ops *ops_for_excised_subtree; struct assoc_array_ptr *leaf; struct assoc_array_ptr **leaf_p; struct assoc_array_ptr *dead_leaf; struct assoc_array_ptr *new_meta[3]; struct assoc_array_ptr *excised_meta[1]; struct assoc_array_ptr *excised_subtree; struct assoc_array_ptr **set_backpointers[16]; struct assoc_array_ptr *set_backpointers_to; struct assoc_array_node *adjust_count_on; long adjust_count_by; struct { struct assoc_array_ptr **ptr; struct assoc_array_ptr 
*to; } set[2]; struct { u8 *p; u8 to; } set_parent_slot[1]; u8 segment_cache[17]; }; struct keyring_search_context { struct keyring_index_key index_key; const struct cred *cred; struct key_match_data match_data; unsigned int flags; int (*iterator)(const void *, void *); int skipped_ret; bool possessed; key_ref_t result; time64_t now; }; struct keyring_read_iterator_context { size_t buflen; size_t count; key_serial_t *buffer; }; struct keyctl_dh_params { union { __s32 private; __s32 priv; }; __s32 prime; __s32 base; }; struct keyctl_kdf_params { char __attribute__((btf_type_tag("user"))) *hashname; char __attribute__((btf_type_tag("user"))) *otherinfo; __u32 otherinfolen; __u32 __spare[8]; }; struct keyctl_pkey_query { __u32 supported_ops; __u32 key_size; __u16 max_data_size; __u16 max_sig_size; __u16 max_enc_size; __u16 max_dec_size; __u32 __spare[10]; }; struct keyctl_pkey_params { __s32 key_id; __u32 in_len; union { __u32 out_len; __u32 in2_len; }; __u32 __spare[7]; }; struct request_key_auth { struct callback_head rcu; struct key *target_key; struct key *dest_keyring; const struct cred *cred; void *callout_info; size_t callout_len; pid_t pid; char op[8]; }; enum { Opt_err___9 = 0, Opt_enc = 1, Opt_hash = 2, }; enum lsm_order { LSM_ORDER_FIRST = -1, LSM_ORDER_MUTABLE = 0, LSM_ORDER_LAST = 1, }; struct lsm_blob_sizes; struct lsm_info { const char *name; enum lsm_order order; unsigned long flags; int *enabled; int (*init)(); struct lsm_blob_sizes *blobs; }; struct lsm_blob_sizes { int lbs_cred; int lbs_file; int lbs_inode; int lbs_superblock; int lbs_ipc; int lbs_msg_msg; int lbs_task; int lbs_xattr_count; }; struct msg_msg; struct sctp_association; union security_list_options { int (*binder_set_context_mgr)(const struct cred *); int (*binder_transaction)(const struct cred *, const struct cred *); int (*binder_transfer_binder)(const struct cred *, const struct cred *); int (*binder_transfer_file)(const struct cred *, const struct cred *, const struct file *); int (*ptrace_access_check)(struct task_struct *, unsigned int); int (*ptrace_traceme)(struct task_struct *); int (*capget)(const struct task_struct *, kernel_cap_t *, kernel_cap_t *, kernel_cap_t *); int (*capset)(struct cred *, const struct cred *, const kernel_cap_t *, const kernel_cap_t *, const kernel_cap_t *); int (*capable)(const struct cred *, struct user_namespace *, int, unsigned int); int (*quotactl)(int, int, int, struct super_block *); int (*quota_on)(struct dentry *); int (*syslog)(int); int (*settime)(const struct timespec64 *, const struct timezone *); int (*vm_enough_memory)(struct mm_struct *, long); int (*bprm_creds_for_exec)(struct linux_binprm *); int (*bprm_creds_from_file)(struct linux_binprm *, struct file *); int (*bprm_check_security)(struct linux_binprm *); void (*bprm_committing_creds)(struct linux_binprm *); void (*bprm_committed_creds)(struct linux_binprm *); int (*fs_context_submount)(struct fs_context *, struct super_block *); int (*fs_context_dup)(struct fs_context *, struct fs_context *); int (*fs_context_parse_param)(struct fs_context *, struct fs_parameter *); int (*sb_alloc_security)(struct super_block *); void (*sb_delete)(struct super_block *); void (*sb_free_security)(struct super_block *); void (*sb_free_mnt_opts)(void *); int (*sb_eat_lsm_opts)(char *, void **); int (*sb_mnt_opts_compat)(struct super_block *, void *); int (*sb_remount)(struct super_block *, void *); int (*sb_kern_mount)(struct super_block *); int (*sb_show_options)(struct seq_file *, struct super_block *); int 
(*sb_statfs)(struct dentry *); int (*sb_mount)(const char *, const struct path *, const char *, unsigned long, void *); int (*sb_umount)(struct vfsmount *, int); int (*sb_pivotroot)(const struct path *, const struct path *); int (*sb_set_mnt_opts)(struct super_block *, void *, unsigned long, unsigned long *); int (*sb_clone_mnt_opts)(const struct super_block *, struct super_block *, unsigned long, unsigned long *); int (*move_mount)(const struct path *, const struct path *); int (*dentry_init_security)(struct dentry *, int, const struct qstr *, const char **, void **, u32 *); int (*dentry_create_files_as)(struct dentry *, int, struct qstr *, const struct cred *, struct cred *); int (*path_notify)(const struct path *, u64, unsigned int); int (*inode_alloc_security)(struct inode *); void (*inode_free_security)(struct inode *); int (*inode_init_security)(struct inode *, struct inode *, const struct qstr *, struct xattr *, int *); int (*inode_init_security_anon)(struct inode *, const struct qstr *, const struct inode *); int (*inode_create)(struct inode *, struct dentry *, umode_t); int (*inode_link)(struct dentry *, struct inode *, struct dentry *); int (*inode_unlink)(struct inode *, struct dentry *); int (*inode_symlink)(struct inode *, struct dentry *, const char *); int (*inode_mkdir)(struct inode *, struct dentry *, umode_t); int (*inode_rmdir)(struct inode *, struct dentry *); int (*inode_mknod)(struct inode *, struct dentry *, umode_t, dev_t); int (*inode_rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); int (*inode_readlink)(struct dentry *); int (*inode_follow_link)(struct dentry *, struct inode *, bool); int (*inode_permission)(struct inode *, int); int (*inode_setattr)(struct dentry *, struct iattr *); int (*inode_getattr)(const struct path *); int (*inode_setxattr)(struct mnt_idmap *, struct dentry *, const char *, const void *, size_t, int); void (*inode_post_setxattr)(struct dentry *, const char *, const void *, size_t, int); int (*inode_getxattr)(struct dentry *, const char *); int (*inode_listxattr)(struct dentry *); int (*inode_removexattr)(struct mnt_idmap *, struct dentry *, const char *); int (*inode_set_acl)(struct mnt_idmap *, struct dentry *, const char *, struct posix_acl *); int (*inode_get_acl)(struct mnt_idmap *, struct dentry *, const char *); int (*inode_remove_acl)(struct mnt_idmap *, struct dentry *, const char *); int (*inode_need_killpriv)(struct dentry *); int (*inode_killpriv)(struct mnt_idmap *, struct dentry *); int (*inode_getsecurity)(struct mnt_idmap *, struct inode *, const char *, void **, bool); int (*inode_setsecurity)(struct inode *, const char *, const void *, size_t, int); int (*inode_listsecurity)(struct inode *, char *, size_t); void (*inode_getsecid)(struct inode *, u32 *); int (*inode_copy_up)(struct dentry *, struct cred **); int (*inode_copy_up_xattr)(const char *); int (*kernfs_init_security)(struct kernfs_node *, struct kernfs_node *); int (*file_permission)(struct file *, int); int (*file_alloc_security)(struct file *); void (*file_free_security)(struct file *); int (*file_ioctl)(struct file *, unsigned int, unsigned long); int (*file_ioctl_compat)(struct file *, unsigned int, unsigned long); int (*mmap_addr)(unsigned long); int (*mmap_file)(struct file *, unsigned long, unsigned long, unsigned long); int (*file_mprotect)(struct vm_area_struct *, unsigned long, unsigned long); int (*file_lock)(struct file *, unsigned int); int (*file_fcntl)(struct file *, unsigned int, unsigned long); void 
(*file_set_fowner)(struct file *); int (*file_send_sigiotask)(struct task_struct *, struct fown_struct *, int); int (*file_receive)(struct file *); int (*file_open)(struct file *); int (*file_truncate)(struct file *); int (*task_alloc)(struct task_struct *, unsigned long); void (*task_free)(struct task_struct *); int (*cred_alloc_blank)(struct cred *, gfp_t); void (*cred_free)(struct cred *); int (*cred_prepare)(struct cred *, const struct cred *, gfp_t); void (*cred_transfer)(struct cred *, const struct cred *); void (*cred_getsecid)(const struct cred *, u32 *); int (*kernel_act_as)(struct cred *, u32); int (*kernel_create_files_as)(struct cred *, struct inode *); int (*kernel_module_request)(char *); int (*kernel_load_data)(enum kernel_load_data_id, bool); int (*kernel_post_load_data)(char *, loff_t, enum kernel_load_data_id, char *); int (*kernel_read_file)(struct file *, enum kernel_read_file_id, bool); int (*kernel_post_read_file)(struct file *, char *, loff_t, enum kernel_read_file_id); int (*task_fix_setuid)(struct cred *, const struct cred *, int); int (*task_fix_setgid)(struct cred *, const struct cred *, int); int (*task_fix_setgroups)(struct cred *, const struct cred *); int (*task_setpgid)(struct task_struct *, pid_t); int (*task_getpgid)(struct task_struct *); int (*task_getsid)(struct task_struct *); void (*current_getsecid_subj)(u32 *); void (*task_getsecid_obj)(struct task_struct *, u32 *); int (*task_setnice)(struct task_struct *, int); int (*task_setioprio)(struct task_struct *, int); int (*task_getioprio)(struct task_struct *); int (*task_prlimit)(const struct cred *, const struct cred *, unsigned int); int (*task_setrlimit)(struct task_struct *, unsigned int, struct rlimit *); int (*task_setscheduler)(struct task_struct *); int (*task_getscheduler)(struct task_struct *); int (*task_movememory)(struct task_struct *); int (*task_kill)(struct task_struct *, struct kernel_siginfo *, int, const struct cred *); int (*task_prctl)(int, unsigned long, unsigned long, unsigned long, unsigned long); void (*task_to_inode)(struct task_struct *, struct inode *); int (*userns_create)(const struct cred *); int (*ipc_permission)(struct kern_ipc_perm *, short); void (*ipc_getsecid)(struct kern_ipc_perm *, u32 *); int (*msg_msg_alloc_security)(struct msg_msg *); void (*msg_msg_free_security)(struct msg_msg *); int (*msg_queue_alloc_security)(struct kern_ipc_perm *); void (*msg_queue_free_security)(struct kern_ipc_perm *); int (*msg_queue_associate)(struct kern_ipc_perm *, int); int (*msg_queue_msgctl)(struct kern_ipc_perm *, int); int (*msg_queue_msgsnd)(struct kern_ipc_perm *, struct msg_msg *, int); int (*msg_queue_msgrcv)(struct kern_ipc_perm *, struct msg_msg *, struct task_struct *, long, int); int (*shm_alloc_security)(struct kern_ipc_perm *); void (*shm_free_security)(struct kern_ipc_perm *); int (*shm_associate)(struct kern_ipc_perm *, int); int (*shm_shmctl)(struct kern_ipc_perm *, int); int (*shm_shmat)(struct kern_ipc_perm *, char __attribute__((btf_type_tag("user"))) *, int); int (*sem_alloc_security)(struct kern_ipc_perm *); void (*sem_free_security)(struct kern_ipc_perm *); int (*sem_associate)(struct kern_ipc_perm *, int); int (*sem_semctl)(struct kern_ipc_perm *, int); int (*sem_semop)(struct kern_ipc_perm *, struct sembuf *, unsigned int, int); int (*netlink_send)(struct sock *, struct sk_buff *); void (*d_instantiate)(struct dentry *, struct inode *); int (*getprocattr)(struct task_struct *, const char *, char **); int (*setprocattr)(const char *, void *, size_t); int 
(*ismaclabel)(const char *); int (*secid_to_secctx)(u32, char **, u32 *); int (*secctx_to_secid)(const char *, u32, u32 *); void (*release_secctx)(char *, u32); void (*inode_invalidate_secctx)(struct inode *); int (*inode_notifysecctx)(struct inode *, void *, u32); int (*inode_setsecctx)(struct dentry *, void *, u32); int (*inode_getsecctx)(struct inode *, void **, u32 *); int (*unix_stream_connect)(struct sock *, struct sock *, struct sock *); int (*unix_may_send)(struct socket *, struct socket *); int (*socket_create)(int, int, int, int); int (*socket_post_create)(struct socket *, int, int, int, int); int (*socket_socketpair)(struct socket *, struct socket *); int (*socket_bind)(struct socket *, struct sockaddr *, int); int (*socket_connect)(struct socket *, struct sockaddr *, int); int (*socket_listen)(struct socket *, int); int (*socket_accept)(struct socket *, struct socket *); int (*socket_sendmsg)(struct socket *, struct msghdr *, int); int (*socket_recvmsg)(struct socket *, struct msghdr *, int, int); int (*socket_getsockname)(struct socket *); int (*socket_getpeername)(struct socket *); int (*socket_getsockopt)(struct socket *, int, int); int (*socket_setsockopt)(struct socket *, int, int); int (*socket_shutdown)(struct socket *, int); int (*socket_sock_rcv_skb)(struct sock *, struct sk_buff *); int (*socket_getpeersec_stream)(struct socket *, sockptr_t, sockptr_t, unsigned int); int (*socket_getpeersec_dgram)(struct socket *, struct sk_buff *, u32 *); int (*sk_alloc_security)(struct sock *, int, gfp_t); void (*sk_free_security)(struct sock *); void (*sk_clone_security)(const struct sock *, struct sock *); void (*sk_getsecid)(const struct sock *, u32 *); void (*sock_graft)(struct sock *, struct socket *); int (*inet_conn_request)(const struct sock *, struct sk_buff *, struct request_sock *); void (*inet_csk_clone)(struct sock *, const struct request_sock *); void (*inet_conn_established)(struct sock *, struct sk_buff *); int (*secmark_relabel_packet)(u32); void (*secmark_refcount_inc)(); void (*secmark_refcount_dec)(); void (*req_classify_flow)(const struct request_sock *, struct flowi_common *); int (*tun_dev_alloc_security)(void **); void (*tun_dev_free_security)(void *); int (*tun_dev_create)(); int (*tun_dev_attach_queue)(void *); int (*tun_dev_attach)(struct sock *, void *); int (*tun_dev_open)(void *); int (*sctp_assoc_request)(struct sctp_association *, struct sk_buff *); int (*sctp_bind_connect)(struct sock *, int, struct sockaddr *, int); void (*sctp_sk_clone)(struct sctp_association *, struct sock *, struct sock *); int (*sctp_assoc_established)(struct sctp_association *, struct sk_buff *); int (*mptcp_add_subflow)(struct sock *, struct sock *); int (*key_alloc)(struct key *, const struct cred *, unsigned long); void (*key_free)(struct key *); int (*key_permission)(key_ref_t, const struct cred *, enum key_need_perm); int (*key_getsecurity)(struct key *, char **); int (*audit_rule_init)(u32, u32, char *, void **); int (*audit_rule_known)(struct audit_krule *); int (*audit_rule_match)(u32, u32, u32, void *); void (*audit_rule_free)(void *); int (*bpf)(int, union bpf_attr *, unsigned int); int (*bpf_map)(struct bpf_map *, fmode_t); int (*bpf_prog)(struct bpf_prog *); int (*bpf_map_alloc_security)(struct bpf_map *); void (*bpf_map_free_security)(struct bpf_map *); int (*bpf_prog_alloc_security)(struct bpf_prog_aux *); void (*bpf_prog_free_security)(struct bpf_prog_aux *); int (*locked_down)(enum lockdown_reason); int (*perf_event_open)(struct perf_event_attr *, int); int 
(*perf_event_alloc)(struct perf_event *); void (*perf_event_free)(struct perf_event *); int (*perf_event_read)(struct perf_event *); int (*perf_event_write)(struct perf_event *); int (*uring_override_creds)(const struct cred *); int (*uring_sqpoll)(); int (*uring_cmd)(struct io_uring_cmd *); }; struct security_hook_list { struct hlist_node list; struct hlist_head *head; union security_list_options hook; const char *lsm; }; struct vfs_cap_data { __le32 magic_etc; struct { __le32 permitted; __le32 inheritable; } data[2]; }; struct vfs_ns_cap_data { __le32 magic_etc; struct { __le32 permitted; __le32 inheritable; } data[2]; __le32 rootid; }; struct security_hook_heads { struct hlist_head binder_set_context_mgr; struct hlist_head binder_transaction; struct hlist_head binder_transfer_binder; struct hlist_head binder_transfer_file; struct hlist_head ptrace_access_check; struct hlist_head ptrace_traceme; struct hlist_head capget; struct hlist_head capset; struct hlist_head capable; struct hlist_head quotactl; struct hlist_head quota_on; struct hlist_head syslog; struct hlist_head settime; struct hlist_head vm_enough_memory; struct hlist_head bprm_creds_for_exec; struct hlist_head bprm_creds_from_file; struct hlist_head bprm_check_security; struct hlist_head bprm_committing_creds; struct hlist_head bprm_committed_creds; struct hlist_head fs_context_submount; struct hlist_head fs_context_dup; struct hlist_head fs_context_parse_param; struct hlist_head sb_alloc_security; struct hlist_head sb_delete; struct hlist_head sb_free_security; struct hlist_head sb_free_mnt_opts; struct hlist_head sb_eat_lsm_opts; struct hlist_head sb_mnt_opts_compat; struct hlist_head sb_remount; struct hlist_head sb_kern_mount; struct hlist_head sb_show_options; struct hlist_head sb_statfs; struct hlist_head sb_mount; struct hlist_head sb_umount; struct hlist_head sb_pivotroot; struct hlist_head sb_set_mnt_opts; struct hlist_head sb_clone_mnt_opts; struct hlist_head move_mount; struct hlist_head dentry_init_security; struct hlist_head dentry_create_files_as; struct hlist_head path_notify; struct hlist_head inode_alloc_security; struct hlist_head inode_free_security; struct hlist_head inode_init_security; struct hlist_head inode_init_security_anon; struct hlist_head inode_create; struct hlist_head inode_link; struct hlist_head inode_unlink; struct hlist_head inode_symlink; struct hlist_head inode_mkdir; struct hlist_head inode_rmdir; struct hlist_head inode_mknod; struct hlist_head inode_rename; struct hlist_head inode_readlink; struct hlist_head inode_follow_link; struct hlist_head inode_permission; struct hlist_head inode_setattr; struct hlist_head inode_getattr; struct hlist_head inode_setxattr; struct hlist_head inode_post_setxattr; struct hlist_head inode_getxattr; struct hlist_head inode_listxattr; struct hlist_head inode_removexattr; struct hlist_head inode_set_acl; struct hlist_head inode_get_acl; struct hlist_head inode_remove_acl; struct hlist_head inode_need_killpriv; struct hlist_head inode_killpriv; struct hlist_head inode_getsecurity; struct hlist_head inode_setsecurity; struct hlist_head inode_listsecurity; struct hlist_head inode_getsecid; struct hlist_head inode_copy_up; struct hlist_head inode_copy_up_xattr; struct hlist_head kernfs_init_security; struct hlist_head file_permission; struct hlist_head file_alloc_security; struct hlist_head file_free_security; struct hlist_head file_ioctl; struct hlist_head file_ioctl_compat; struct hlist_head mmap_addr; struct hlist_head mmap_file; struct hlist_head 
file_mprotect; struct hlist_head file_lock; struct hlist_head file_fcntl; struct hlist_head file_set_fowner; struct hlist_head file_send_sigiotask; struct hlist_head file_receive; struct hlist_head file_open; struct hlist_head file_truncate; struct hlist_head task_alloc; struct hlist_head task_free; struct hlist_head cred_alloc_blank; struct hlist_head cred_free; struct hlist_head cred_prepare; struct hlist_head cred_transfer; struct hlist_head cred_getsecid; struct hlist_head kernel_act_as; struct hlist_head kernel_create_files_as; struct hlist_head kernel_module_request; struct hlist_head kernel_load_data; struct hlist_head kernel_post_load_data; struct hlist_head kernel_read_file; struct hlist_head kernel_post_read_file; struct hlist_head task_fix_setuid; struct hlist_head task_fix_setgid; struct hlist_head task_fix_setgroups; struct hlist_head task_setpgid; struct hlist_head task_getpgid; struct hlist_head task_getsid; struct hlist_head current_getsecid_subj; struct hlist_head task_getsecid_obj; struct hlist_head task_setnice; struct hlist_head task_setioprio; struct hlist_head task_getioprio; struct hlist_head task_prlimit; struct hlist_head task_setrlimit; struct hlist_head task_setscheduler; struct hlist_head task_getscheduler; struct hlist_head task_movememory; struct hlist_head task_kill; struct hlist_head task_prctl; struct hlist_head task_to_inode; struct hlist_head userns_create; struct hlist_head ipc_permission; struct hlist_head ipc_getsecid; struct hlist_head msg_msg_alloc_security; struct hlist_head msg_msg_free_security; struct hlist_head msg_queue_alloc_security; struct hlist_head msg_queue_free_security; struct hlist_head msg_queue_associate; struct hlist_head msg_queue_msgctl; struct hlist_head msg_queue_msgsnd; struct hlist_head msg_queue_msgrcv; struct hlist_head shm_alloc_security; struct hlist_head shm_free_security; struct hlist_head shm_associate; struct hlist_head shm_shmctl; struct hlist_head shm_shmat; struct hlist_head sem_alloc_security; struct hlist_head sem_free_security; struct hlist_head sem_associate; struct hlist_head sem_semctl; struct hlist_head sem_semop; struct hlist_head netlink_send; struct hlist_head d_instantiate; struct hlist_head getprocattr; struct hlist_head setprocattr; struct hlist_head ismaclabel; struct hlist_head secid_to_secctx; struct hlist_head secctx_to_secid; struct hlist_head release_secctx; struct hlist_head inode_invalidate_secctx; struct hlist_head inode_notifysecctx; struct hlist_head inode_setsecctx; struct hlist_head inode_getsecctx; struct hlist_head unix_stream_connect; struct hlist_head unix_may_send; struct hlist_head socket_create; struct hlist_head socket_post_create; struct hlist_head socket_socketpair; struct hlist_head socket_bind; struct hlist_head socket_connect; struct hlist_head socket_listen; struct hlist_head socket_accept; struct hlist_head socket_sendmsg; struct hlist_head socket_recvmsg; struct hlist_head socket_getsockname; struct hlist_head socket_getpeername; struct hlist_head socket_getsockopt; struct hlist_head socket_setsockopt; struct hlist_head socket_shutdown; struct hlist_head socket_sock_rcv_skb; struct hlist_head socket_getpeersec_stream; struct hlist_head socket_getpeersec_dgram; struct hlist_head sk_alloc_security; struct hlist_head sk_free_security; struct hlist_head sk_clone_security; struct hlist_head sk_getsecid; struct hlist_head sock_graft; struct hlist_head inet_conn_request; struct hlist_head inet_csk_clone; struct hlist_head inet_conn_established; struct hlist_head 
secmark_relabel_packet; struct hlist_head secmark_refcount_inc; struct hlist_head secmark_refcount_dec; struct hlist_head req_classify_flow; struct hlist_head tun_dev_alloc_security; struct hlist_head tun_dev_free_security; struct hlist_head tun_dev_create; struct hlist_head tun_dev_attach_queue; struct hlist_head tun_dev_attach; struct hlist_head tun_dev_open; struct hlist_head sctp_assoc_request; struct hlist_head sctp_bind_connect; struct hlist_head sctp_sk_clone; struct hlist_head sctp_assoc_established; struct hlist_head mptcp_add_subflow; struct hlist_head key_alloc; struct hlist_head key_free; struct hlist_head key_permission; struct hlist_head key_getsecurity; struct hlist_head audit_rule_init; struct hlist_head audit_rule_known; struct hlist_head audit_rule_match; struct hlist_head audit_rule_free; struct hlist_head bpf; struct hlist_head bpf_map; struct hlist_head bpf_prog; struct hlist_head bpf_map_alloc_security; struct hlist_head bpf_map_free_security; struct hlist_head bpf_prog_alloc_security; struct hlist_head bpf_prog_free_security; struct hlist_head locked_down; struct hlist_head perf_event_open; struct hlist_head perf_event_alloc; struct hlist_head perf_event_free; struct hlist_head perf_event_read; struct hlist_head perf_event_write; struct hlist_head uring_override_creds; struct hlist_head uring_sqpoll; struct hlist_head uring_cmd; }; enum lsm_event { LSM_POLICY_CHANGE = 0, }; struct msg_msgseg; struct msg_msg { struct list_head m_list; long m_type; size_t m_ts; struct msg_msgseg *next; void *security; }; struct security_class_mapping { const char *name; const char *perms[33]; }; struct ethtool_drvinfo { __u32 cmd; char driver[32]; char version[32]; char fw_version[32]; char bus_info[32]; char erom_version[32]; char reserved2[12]; __u32 n_priv_flags; __u32 n_stats; __u32 testinfo_len; __u32 eedump_len; __u32 regdump_len; }; struct ethtool_regs { __u32 cmd; __u32 version; __u32 len; __u8 data[0]; }; struct ethtool_wolinfo { __u32 cmd; __u32 supported; __u32 wolopts; __u8 sopass[6]; }; enum ethtool_link_ext_substate_autoneg { ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED = 1, ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED = 2, ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED = 3, ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE = 4, ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE = 5, ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD = 6, }; enum ethtool_link_ext_substate_link_training { ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED = 1, ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT = 2, ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY = 3, ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT = 4, }; enum ethtool_link_ext_substate_link_logical_mismatch { ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK = 1, ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK = 2, ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS = 3, ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED = 4, ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED = 5, }; enum ethtool_link_ext_substate_bad_signal_integrity { ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1, ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE = 2, ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_REFERENCE_CLOCK_LOST = 3, ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS = 4, }; enum ethtool_link_ext_substate_cable_issue { ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 1, ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE = 2, }; enum ethtool_link_ext_substate_module { 
ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY = 1, }; enum ethtool_link_ext_state { ETHTOOL_LINK_EXT_STATE_AUTONEG = 0, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE = 1, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH = 2, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY = 3, ETHTOOL_LINK_EXT_STATE_NO_CABLE = 4, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE = 5, ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE = 6, ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE = 7, ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED = 8, ETHTOOL_LINK_EXT_STATE_OVERHEAT = 9, ETHTOOL_LINK_EXT_STATE_MODULE = 10, }; struct ethtool_link_ext_state_info { enum ethtool_link_ext_state link_ext_state; union { enum ethtool_link_ext_substate_autoneg autoneg; enum ethtool_link_ext_substate_link_training link_training; enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch; enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity; enum ethtool_link_ext_substate_cable_issue cable_issue; enum ethtool_link_ext_substate_module module; u32 __link_ext_substate; }; }; struct ethtool_link_ext_stats { u64 link_down_events; }; struct ethtool_eeprom { __u32 cmd; __u32 magic; __u32 offset; __u32 len; __u8 data[0]; }; struct ethtool_coalesce { __u32 cmd; __u32 rx_coalesce_usecs; __u32 rx_max_coalesced_frames; __u32 rx_coalesce_usecs_irq; __u32 rx_max_coalesced_frames_irq; __u32 tx_coalesce_usecs; __u32 tx_max_coalesced_frames; __u32 tx_coalesce_usecs_irq; __u32 tx_max_coalesced_frames_irq; __u32 stats_block_coalesce_usecs; __u32 use_adaptive_rx_coalesce; __u32 use_adaptive_tx_coalesce; __u32 pkt_rate_low; __u32 rx_coalesce_usecs_low; __u32 rx_max_coalesced_frames_low; __u32 tx_coalesce_usecs_low; __u32 tx_max_coalesced_frames_low; __u32 pkt_rate_high; __u32 rx_coalesce_usecs_high; __u32 rx_max_coalesced_frames_high; __u32 tx_coalesce_usecs_high; __u32 tx_max_coalesced_frames_high; __u32 rate_sample_interval; }; struct kernel_ethtool_coalesce { u8 use_cqe_mode_tx; u8 use_cqe_mode_rx; u32 tx_aggr_max_bytes; u32 tx_aggr_max_frames; u32 tx_aggr_time_usecs; }; struct ethtool_ringparam { __u32 cmd; __u32 rx_max_pending; __u32 rx_mini_max_pending; __u32 rx_jumbo_max_pending; __u32 tx_max_pending; __u32 rx_pending; __u32 rx_mini_pending; __u32 rx_jumbo_pending; __u32 tx_pending; }; struct kernel_ethtool_ringparam { u32 rx_buf_len; u8 tcp_data_split; u8 tx_push; u8 rx_push; u32 cqe_size; u32 tx_push_buf_len; u32 tx_push_buf_max_len; }; enum ethtool_mac_stats_src { ETHTOOL_MAC_STATS_SRC_AGGREGATE = 0, ETHTOOL_MAC_STATS_SRC_EMAC = 1, ETHTOOL_MAC_STATS_SRC_PMAC = 2, }; struct ethtool_pause_stats { enum ethtool_mac_stats_src src; union { struct { u64 tx_pause_frames; u64 rx_pause_frames; }; struct { u64 tx_pause_frames; u64 rx_pause_frames; } stats; }; }; struct ethtool_pauseparam { __u32 cmd; __u32 autoneg; __u32 rx_pause; __u32 tx_pause; }; struct ethtool_test { __u32 cmd; __u32 flags; __u32 reserved; __u32 len; __u64 data[0]; }; struct ethtool_stats { __u32 cmd; __u32 n_stats; __u64 data[0]; }; struct ethtool_tcpip4_spec { __be32 ip4src; __be32 ip4dst; __be16 psrc; __be16 pdst; __u8 tos; }; struct ethtool_ah_espip4_spec { __be32 ip4src; __be32 ip4dst; __be32 spi; __u8 tos; }; struct ethtool_usrip4_spec { __be32 ip4src; __be32 ip4dst; __be32 l4_4_bytes; __u8 tos; __u8 ip_ver; __u8 proto; }; struct ethtool_tcpip6_spec { __be32 ip6src[4]; __be32 ip6dst[4]; __be16 psrc; __be16 pdst; __u8 tclass; }; struct ethtool_ah_espip6_spec { __be32 ip6src[4]; __be32 ip6dst[4]; __be32 spi; __u8 tclass; }; struct ethtool_usrip6_spec { __be32 ip6src[4]; __be32 
ip6dst[4]; __be32 l4_4_bytes; __u8 tclass; __u8 l4_proto; }; struct ethhdr { unsigned char h_dest[6]; unsigned char h_source[6]; __be16 h_proto; }; union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec; struct ethtool_tcpip4_spec udp_ip4_spec; struct ethtool_tcpip4_spec sctp_ip4_spec; struct ethtool_ah_espip4_spec ah_ip4_spec; struct ethtool_ah_espip4_spec esp_ip4_spec; struct ethtool_usrip4_spec usr_ip4_spec; struct ethtool_tcpip6_spec tcp_ip6_spec; struct ethtool_tcpip6_spec udp_ip6_spec; struct ethtool_tcpip6_spec sctp_ip6_spec; struct ethtool_ah_espip6_spec ah_ip6_spec; struct ethtool_ah_espip6_spec esp_ip6_spec; struct ethtool_usrip6_spec usr_ip6_spec; struct ethhdr ether_spec; __u8 hdata[52]; }; struct ethtool_flow_ext { __u8 padding[2]; unsigned char h_dest[6]; __be16 vlan_etype; __be16 vlan_tci; __be32 data[2]; }; struct ethtool_rx_flow_spec { __u32 flow_type; union ethtool_flow_union h_u; struct ethtool_flow_ext h_ext; union ethtool_flow_union m_u; struct ethtool_flow_ext m_ext; __u64 ring_cookie; __u32 location; }; struct ethtool_rxnfc { __u32 cmd; __u32 flow_type; __u64 data; struct ethtool_rx_flow_spec fs; union { __u32 rule_cnt; __u32 rss_context; }; __u32 rule_locs[0]; }; struct ethtool_flash { __u32 cmd; __u32 region; char data[128]; }; struct ethtool_channels { __u32 cmd; __u32 max_rx; __u32 max_tx; __u32 max_other; __u32 max_combined; __u32 rx_count; __u32 tx_count; __u32 other_count; __u32 combined_count; }; struct ethtool_dump { __u32 cmd; __u32 version; __u32 flag; __u32 len; __u8 data[0]; }; struct ethtool_ts_info { __u32 cmd; __u32 so_timestamping; __s32 phc_index; __u32 tx_types; __u32 tx_reserved[3]; __u32 rx_filters; __u32 rx_reserved[3]; }; struct ethtool_modinfo { __u32 cmd; __u32 type; __u32 eeprom_len; __u32 reserved[8]; }; struct ethtool_eee { __u32 cmd; __u32 supported; __u32 advertised; __u32 lp_advertised; __u32 eee_active; __u32 eee_enabled; __u32 tx_lpi_enabled; __u32 tx_lpi_timer; __u32 reserved[2]; }; struct ethtool_tunable { __u32 cmd; __u32 id; __u32 type_id; __u32 len; void *data[0]; }; struct ethtool_link_settings { __u32 cmd; __u32 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 autoneg; __u8 mdio_support; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __s8 link_mode_masks_nwords; __u8 transceiver; __u8 master_slave_cfg; __u8 master_slave_state; __u8 rate_matching; __u32 reserved[7]; __u32 link_mode_masks[0]; }; struct ethtool_link_ksettings { struct ethtool_link_settings base; struct { unsigned long supported[2]; unsigned long advertising[2]; unsigned long lp_advertising[2]; } link_modes; u32 lanes; }; struct ethtool_fec_stat { u64 total; u64 lanes[8]; }; struct ethtool_fec_stats { struct ethtool_fec_stat corrected_blocks; struct ethtool_fec_stat uncorrectable_blocks; struct ethtool_fec_stat corrected_bits; }; struct ethtool_fecparam { __u32 cmd; __u32 active_fec; __u32 fec; __u32 reserved; }; struct ethtool_module_eeprom { u32 offset; u32 length; u8 page; u8 bank; u8 i2c_address; u8 *data; }; struct ethtool_eth_phy_stats { enum ethtool_mac_stats_src src; union { struct { u64 SymbolErrorDuringCarrier; }; struct { u64 SymbolErrorDuringCarrier; } stats; }; }; struct ethtool_eth_mac_stats { enum ethtool_mac_stats_src src; union { struct { u64 FramesTransmittedOK; u64 SingleCollisionFrames; u64 MultipleCollisionFrames; u64 FramesReceivedOK; u64 FrameCheckSequenceErrors; u64 AlignmentErrors; u64 OctetsTransmittedOK; u64 FramesWithDeferredXmissions; u64 LateCollisions; u64 FramesAbortedDueToXSColls; u64 FramesLostDueToIntMACXmitError; u64 
CarrierSenseErrors; u64 OctetsReceivedOK; u64 FramesLostDueToIntMACRcvError; u64 MulticastFramesXmittedOK; u64 BroadcastFramesXmittedOK; u64 FramesWithExcessiveDeferral; u64 MulticastFramesReceivedOK; u64 BroadcastFramesReceivedOK; u64 InRangeLengthErrors; u64 OutOfRangeLengthField; u64 FrameTooLongErrors; }; struct { u64 FramesTransmittedOK; u64 SingleCollisionFrames; u64 MultipleCollisionFrames; u64 FramesReceivedOK; u64 FrameCheckSequenceErrors; u64 AlignmentErrors; u64 OctetsTransmittedOK; u64 FramesWithDeferredXmissions; u64 LateCollisions; u64 FramesAbortedDueToXSColls; u64 FramesLostDueToIntMACXmitError; u64 CarrierSenseErrors; u64 OctetsReceivedOK; u64 FramesLostDueToIntMACRcvError; u64 MulticastFramesXmittedOK; u64 BroadcastFramesXmittedOK; u64 FramesWithExcessiveDeferral; u64 MulticastFramesReceivedOK; u64 BroadcastFramesReceivedOK; u64 InRangeLengthErrors; u64 OutOfRangeLengthField; u64 FrameTooLongErrors; } stats; }; }; struct ethtool_eth_ctrl_stats { enum ethtool_mac_stats_src src; union { struct { u64 MACControlFramesTransmitted; u64 MACControlFramesReceived; u64 UnsupportedOpcodesReceived; }; struct { u64 MACControlFramesTransmitted; u64 MACControlFramesReceived; u64 UnsupportedOpcodesReceived; } stats; }; }; struct ethtool_rmon_stats { enum ethtool_mac_stats_src src; union { struct { u64 undersize_pkts; u64 oversize_pkts; u64 fragments; u64 jabbers; u64 hist[10]; u64 hist_tx[10]; }; struct { u64 undersize_pkts; u64 oversize_pkts; u64 fragments; u64 jabbers; u64 hist[10]; u64 hist_tx[10]; } stats; }; }; struct ethtool_rmon_hist_range { u16 low; u16 high; }; enum ethtool_module_power_mode_policy { ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH = 1, ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO = 2, }; enum ethtool_module_power_mode { ETHTOOL_MODULE_POWER_MODE_LOW = 1, ETHTOOL_MODULE_POWER_MODE_HIGH = 2, }; struct ethtool_module_power_mode_params { enum ethtool_module_power_mode_policy policy; enum ethtool_module_power_mode mode; }; enum ethtool_mm_verify_status { ETHTOOL_MM_VERIFY_STATUS_UNKNOWN = 0, ETHTOOL_MM_VERIFY_STATUS_INITIAL = 1, ETHTOOL_MM_VERIFY_STATUS_VERIFYING = 2, ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED = 3, ETHTOOL_MM_VERIFY_STATUS_FAILED = 4, ETHTOOL_MM_VERIFY_STATUS_DISABLED = 5, }; struct ethtool_mm_state { u32 verify_time; u32 max_verify_time; enum ethtool_mm_verify_status verify_status; bool tx_enabled; bool tx_active; bool pmac_enabled; bool verify_enabled; u32 tx_min_frag_size; u32 rx_min_frag_size; }; struct ethtool_mm_cfg { u32 verify_time; bool verify_enabled; bool tx_enabled; bool pmac_enabled; u32 tx_min_frag_size; }; struct ethtool_mm_stats { u64 MACMergeFrameAssErrorCount; u64 MACMergeFrameSmdErrorCount; u64 MACMergeFrameAssOkCount; u64 MACMergeFragCountRx; u64 MACMergeFragCountTx; u64 MACMergeHoldCount; }; struct selinux_audit_data; typedef void (*btf_trace_selinux_audited)(void *, struct selinux_audit_data *, char *, char *, const char *); struct selinux_audit_data { u32 ssid; u32 tsid; u16 tclass; u32 requested; u32 audited; u32 denied; int result; }; struct avc_cache_stats { unsigned int lookups; unsigned int misses; unsigned int allocations; unsigned int reclaims; unsigned int frees; }; struct avc_cache { struct hlist_head slots[512]; spinlock_t slots_lock[512]; atomic_t lru_hint; atomic_t active_nodes; u32 latest_notif; }; struct selinux_avc { unsigned int avc_cache_threshold; struct avc_cache avc_cache; }; struct avc_callback_node { int (*callback)(u32); u32 events; struct avc_callback_node *next; }; struct av_decision { u32 allowed; u32 auditallow; u32 
auditdeny; u32 seqno; u32 flags; }; struct avc_xperms_node; struct avc_entry { u32 ssid; u32 tsid; u16 tclass; struct av_decision avd; struct avc_xperms_node *xp_node; }; struct avc_node { struct avc_entry ae; struct hlist_node list; struct callback_head rhead; }; struct extended_perms_data { u32 p[8]; }; struct extended_perms { u16 len; struct extended_perms_data drivers; }; struct avc_xperms_node { struct extended_perms xp; struct list_head xpd_head; }; struct trace_event_raw_selinux_audited { struct trace_entry ent; u32 requested; u32 denied; u32 audited; int result; u32 __data_loc_scontext; u32 __data_loc_tcontext; u32 __data_loc_tclass; char __data[0]; }; struct extended_perms_decision { u8 used; u8 driver; struct extended_perms_data *allowed; struct extended_perms_data *auditallow; struct extended_perms_data *dontaudit; }; struct avc_xperms_decision_node { struct extended_perms_decision xpd; struct list_head xpd_list; }; struct lsm_network_audit; struct lsm_ioctlop_audit; struct lsm_ibpkey_audit; struct lsm_ibendport_audit; struct common_audit_data { char type; union { struct path path; struct dentry *dentry; struct inode *inode; struct lsm_network_audit *net; int cap; int ipc_id; struct task_struct *tsk; struct { key_serial_t key; char *key_desc; } key_struct; char *kmod_name; struct lsm_ioctlop_audit *op; struct file *file; struct lsm_ibpkey_audit *ibpkey; struct lsm_ibendport_audit *ibendport; int reason; const char *anonclass; } u; union { struct selinux_audit_data *selinux_audit_data; }; }; struct lsm_network_audit { int netif; const struct sock *sk; u16 family; __be16 dport; __be16 sport; union { struct { __be32 daddr; __be32 saddr; } v4; struct { struct in6_addr daddr; struct in6_addr saddr; } v6; } fam; }; struct lsm_ioctlop_audit { struct path path; u16 cmd; }; struct lsm_ibpkey_audit { u64 subnet_prefix; u16 pkey; }; struct lsm_ibendport_audit { const char *dev_name; u8 port; }; struct trace_event_data_offsets_selinux_audited { u32 scontext; u32 tcontext; u32 tclass; }; struct selinux_policy; struct selinux_state { bool enforcing; bool initialized; bool policycap[8]; bool android_netlink_route; bool android_netlink_getneigh; struct page *status_page; struct mutex status_lock; struct selinux_policy __attribute__((btf_type_tag("rcu"))) *policy; struct mutex policy_mutex; }; struct xfrm_address_filter; struct xfrm_state_walk { struct list_head all; u8 state; u8 dying; u8 proto; u32 seq; struct xfrm_address_filter *filter; }; struct xfrm_replay_state { __u32 oseq; __u32 seq; __u32 bitmap; }; enum xfrm_replay_mode { XFRM_REPLAY_MODE_LEGACY = 0, XFRM_REPLAY_MODE_BMP = 1, XFRM_REPLAY_MODE_ESN = 2, }; struct xfrm_stats { __u32 replay_window; __u32 replay; __u32 integrity_failed; }; struct xfrm_mode { u8 encap; u8 family; u8 flags; }; struct xfrm_algo_auth; struct xfrm_algo; struct xfrm_algo_aead; struct xfrm_encap_tmpl; struct xfrm_replay_state_esn; struct xfrm_type; struct xfrm_type_offload; struct xfrm_state { possible_net_t xs_net; union { struct hlist_node gclist; struct hlist_node bydst; }; struct hlist_node bysrc; struct hlist_node byspi; struct hlist_node byseq; refcount_t refcnt; spinlock_t lock; struct xfrm_id id; struct xfrm_selector sel; struct xfrm_mark mark; u32 if_id; u32 tfcpad; u32 genid; struct xfrm_state_walk km; struct { u32 reqid; u8 mode; u8 replay_window; u8 aalgo; u8 ealgo; u8 calgo; u8 flags; u16 family; xfrm_address_t saddr; int header_len; int trailer_len; u32 extra_flags; struct xfrm_mark smark; } props; struct xfrm_lifetime_cfg lft; struct xfrm_algo_auth 
*aalg; struct xfrm_algo *ealg; struct xfrm_algo *calg; struct xfrm_algo_aead *aead; const char *geniv; __be16 new_mapping_sport; u32 new_mapping; u32 mapping_maxage; struct xfrm_encap_tmpl *encap; struct sock __attribute__((btf_type_tag("rcu"))) *encap_sk; xfrm_address_t *coaddr; struct xfrm_state *tunnel; atomic_t tunnel_users; struct xfrm_replay_state replay; struct xfrm_replay_state_esn *replay_esn; struct xfrm_replay_state preplay; struct xfrm_replay_state_esn *preplay_esn; enum xfrm_replay_mode repl_mode; u32 xflags; u32 replay_maxage; u32 replay_maxdiff; struct timer_list rtimer; struct xfrm_stats stats; struct xfrm_lifetime_cur curlft; struct hrtimer mtimer; struct xfrm_dev_offload xso; long saved_tmo; time64_t lastused; struct page_frag xfrag; const struct xfrm_type *type; struct xfrm_mode inner_mode; struct xfrm_mode inner_mode_iaf; struct xfrm_mode outer_mode; const struct xfrm_type_offload *type_offload; struct xfrm_sec_ctx *security; void *data; }; struct xfrm_address_filter { xfrm_address_t saddr; xfrm_address_t daddr; __u16 family; __u8 splen; __u8 dplen; }; struct xfrm_algo_auth { char alg_name[64]; unsigned int alg_key_len; unsigned int alg_trunc_len; char alg_key[0]; }; struct xfrm_algo { char alg_name[64]; unsigned int alg_key_len; char alg_key[0]; }; struct xfrm_algo_aead { char alg_name[64]; unsigned int alg_key_len; unsigned int alg_icv_len; char alg_key[0]; }; struct xfrm_encap_tmpl { __u16 encap_type; __be16 encap_sport; __be16 encap_dport; xfrm_address_t encap_oa; }; struct xfrm_replay_state_esn { unsigned int bmp_len; __u32 oseq; __u32 seq; __u32 oseq_hi; __u32 seq_hi; __u32 replay_window; __u32 bmp[0]; }; struct xfrm_type { struct module *owner; u8 proto; u8 flags; int (*init_state)(struct xfrm_state *, struct netlink_ext_ack *); void (*destructor)(struct xfrm_state *); int (*input)(struct xfrm_state *, struct sk_buff *); int (*output)(struct xfrm_state *, struct sk_buff *); int (*reject)(struct xfrm_state *, struct sk_buff *, const struct flowi *); }; struct xfrm_type_offload { struct module *owner; u8 proto; void (*encap)(struct xfrm_state *, struct sk_buff *); int (*input_tail)(struct xfrm_state *, struct sk_buff *); int (*xmit)(struct xfrm_state *, struct sk_buff *, netdev_features_t); }; struct xfrm_sec_ctx { __u8 ctx_doi; __u8 ctx_alg; __u16 ctx_len; __u32 ctx_sid; char ctx_str[0]; }; struct rt6key { struct in6_addr addr; int plen; }; struct rtable; struct fnhe_hash_bucket; struct fib_nh_common { struct net_device *nhc_dev; netdevice_tracker nhc_dev_tracker; int nhc_oif; unsigned char nhc_scope; u8 nhc_family; u8 nhc_gw_family; unsigned char nhc_flags; struct lwtunnel_state *nhc_lwtstate; union { __be32 ipv4; struct in6_addr ipv6; } nhc_gw; int nhc_weight; atomic_t nhc_upper_bound; struct rtable __attribute__((btf_type_tag("rcu"))) * __attribute__((btf_type_tag("percpu"))) *nhc_pcpu_rth_output; struct rtable __attribute__((btf_type_tag("rcu"))) *nhc_rth_input; struct fnhe_hash_bucket __attribute__((btf_type_tag("rcu"))) *nhc_exceptions; }; struct rt6_exception_bucket; struct fib6_nh { struct fib_nh_common nh_common; unsigned long last_probe; struct rt6_info * __attribute__((btf_type_tag("percpu"))) *rt6i_pcpu; struct rt6_exception_bucket __attribute__((btf_type_tag("rcu"))) *rt6i_exception_bucket; }; struct fib6_node; struct dst_metrics; struct nexthop; struct fib6_info { struct fib6_table *fib6_table; struct fib6_info __attribute__((btf_type_tag("rcu"))) *fib6_next; struct fib6_node __attribute__((btf_type_tag("rcu"))) *fib6_node; union { struct list_head 
fib6_siblings; struct list_head nh_list; }; unsigned int fib6_nsiblings; refcount_t fib6_ref; unsigned long expires; struct dst_metrics *fib6_metrics; struct rt6key fib6_dst; u32 fib6_flags; struct rt6key fib6_src; struct rt6key fib6_prefsrc; u32 fib6_metric; u8 fib6_protocol; u8 fib6_type; u8 offload; u8 trap; u8 offload_failed; u8 should_flush: 1; u8 dst_nocount: 1; u8 dst_nopolicy: 1; u8 fib6_destroying: 1; u8 unused: 4; struct callback_head rcu; struct nexthop *nh; u64 android_kabi_reserved1; struct fib6_nh fib6_nh[0]; }; struct fib6_node { struct fib6_node __attribute__((btf_type_tag("rcu"))) *parent; struct fib6_node __attribute__((btf_type_tag("rcu"))) *left; struct fib6_node __attribute__((btf_type_tag("rcu"))) *right; struct fib6_info __attribute__((btf_type_tag("rcu"))) *leaf; __u16 fn_bit; __u16 fn_flags; int fn_sernum; struct fib6_info __attribute__((btf_type_tag("rcu"))) *rr_ptr; struct callback_head rcu; u64 android_kabi_reserved1; }; struct fib6_table { struct hlist_node tb6_hlist; u32 tb6_id; spinlock_t tb6_lock; struct fib6_node tb6_root; struct inet_peer_base tb6_peers; unsigned int flags; unsigned int fib_seq; }; struct dst_metrics { u32 metrics[17]; refcount_t refcnt; }; struct rtable { struct dst_entry dst; int rt_genid; unsigned int rt_flags; __u16 rt_type; __u8 rt_is_input; __u8 rt_uses_gateway; int rt_iif; u8 rt_gw_family; union { __be32 rt_gw4; struct in6_addr rt_gw6; }; u32 rt_mtu_locked: 1; u32 rt_pmtu: 31; }; struct fib_nh_exception; struct fnhe_hash_bucket { struct fib_nh_exception __attribute__((btf_type_tag("rcu"))) *chain; }; struct fib_nh_exception { struct fib_nh_exception __attribute__((btf_type_tag("rcu"))) *fnhe_next; int fnhe_genid; __be32 fnhe_daddr; u32 fnhe_pmtu; bool fnhe_mtu_locked; __be32 fnhe_gw; unsigned long fnhe_expires; struct rtable __attribute__((btf_type_tag("rcu"))) *fnhe_rth_input; struct rtable __attribute__((btf_type_tag("rcu"))) *fnhe_rth_output; unsigned long fnhe_stamp; struct callback_head rcu; }; struct rt6_info { struct dst_entry dst; struct fib6_info __attribute__((btf_type_tag("rcu"))) *from; int sernum; struct rt6key rt6i_dst; struct rt6key rt6i_src; struct in6_addr rt6i_gateway; struct inet6_dev *rt6i_idev; u32 rt6i_flags; unsigned short rt6i_nfheader_len; u64 android_kabi_reserved1; }; struct rt6_exception_bucket { struct hlist_head chain; int depth; }; struct rt6_statistics { __u32 fib_nodes; __u32 fib_route_nodes; __u32 fib_rt_entries; __u32 fib_rt_cache; __u32 fib_discarded_routes; atomic_t fib_rt_alloc; }; struct hashtab_node; struct hashtab { struct hashtab_node **htable; u32 size; u32 nel; }; struct symtab { struct hashtab table; u32 nprim; }; struct avtab_node; struct avtab { struct avtab_node **htable; u32 nel; u32 nslot; u32 mask; }; struct ebitmap_node; struct ebitmap { struct ebitmap_node *node; u32 highbit; }; struct class_datum; struct role_datum; struct user_datum; struct type_datum; struct cond_bool_datum; struct cond_node; struct role_allow; struct ocontext; struct genfs; struct policydb { int mls_enabled; int android_netlink_route; int android_netlink_getneigh; struct symtab symtab[8]; char **sym_val_to_name[8]; struct class_datum **class_val_to_struct; struct role_datum **role_val_to_struct; struct user_datum **user_val_to_struct; struct type_datum **type_val_to_struct; struct avtab te_avtab; struct hashtab role_tr; struct ebitmap filename_trans_ttypes; struct hashtab filename_trans; u32 compat_filename_trans_count; struct cond_bool_datum **bool_val_to_struct; struct avtab te_cond_avtab; struct cond_node 
*cond_list; u32 cond_list_len; struct role_allow *role_allow; struct ocontext *ocontexts[9]; struct genfs *genfs; struct hashtab range_tr; struct ebitmap *type_attr_map_array; struct ebitmap policycaps; struct ebitmap permissive_map; size_t len; unsigned int policyvers; unsigned int reject_unknown: 1; unsigned int allow_unknown: 1; u16 process_class; u32 process_trans_perms; }; struct selinux_mapping; struct selinux_map { struct selinux_mapping *mapping; u16 size; }; struct sidtab; struct selinux_policy { struct sidtab *sidtab; struct policydb policydb; struct selinux_map map; u32 latest_granting; }; struct in_addr { __be32 s_addr; }; struct sockaddr_in { __kernel_sa_family_t sin_family; __be16 sin_port; struct in_addr sin_addr; unsigned char __pad[8]; }; struct sockaddr_in6 { unsigned short sin6_family; __be16 sin6_port; __be32 sin6_flowinfo; struct in6_addr sin6_addr; __u32 sin6_scope_id; }; union sctp_addr { struct sockaddr_in v4; struct sockaddr_in6 v6; struct sockaddr sa; }; struct sctp_tsnmap { unsigned long *tsn_map; __u32 base_tsn; __u32 cumulative_tsn_ack_point; __u32 max_tsn_seen; __u16 len; __u16 pending_data; __u16 num_dup_tsns; __be32 dup_tsns[16]; }; struct sctp_inithdr_host { __u32 init_tag; __u32 a_rwnd; __u16 num_outbound_streams; __u16 num_inbound_streams; __u32 initial_tsn; }; enum sctp_endpoint_type { SCTP_EP_TYPE_SOCKET = 0, SCTP_EP_TYPE_ASSOCIATION = 1, }; struct sctp_chunk; struct sctp_inq { struct list_head in_chunk_list; struct sctp_chunk *in_progress; struct work_struct immediate; }; struct sctp_bind_addr { __u16 port; struct list_head address_list; }; struct sctp_ep_common { enum sctp_endpoint_type type; refcount_t refcnt; bool dead; struct sock *sk; struct net *net; struct sctp_inq inqueue; struct sctp_bind_addr bind_addr; }; typedef __s32 sctp_assoc_t; struct sctp_cookie { __u32 my_vtag; __u32 peer_vtag; __u32 my_ttag; __u32 peer_ttag; ktime_t expiration; __u16 sinit_num_ostreams; __u16 sinit_max_instreams; __u32 initial_tsn; union sctp_addr peer_addr; __u16 my_port; __u8 prsctp_capable; __u8 padding; __u32 adaptation_ind; __u8 auth_random[36]; __u8 auth_hmacs[10]; __u8 auth_chunks[20]; __u32 raw_addr_list_len; }; enum sctp_state { SCTP_STATE_CLOSED = 0, SCTP_STATE_COOKIE_WAIT = 1, SCTP_STATE_COOKIE_ECHOED = 2, SCTP_STATE_ESTABLISHED = 3, SCTP_STATE_SHUTDOWN_PENDING = 4, SCTP_STATE_SHUTDOWN_SENT = 5, SCTP_STATE_SHUTDOWN_RECEIVED = 6, SCTP_STATE_SHUTDOWN_ACK_SENT = 7, }; struct sctp_stream_out_ext; struct sctp_stream_out { union { __u32 mid; __u16 ssn; }; __u32 mid_uo; struct sctp_stream_out_ext *ext; __u8 state; }; struct sctp_stream_in { union { __u32 mid; __u16 ssn; }; __u32 mid_uo; __u32 fsn; __u32 fsn_uo; char pd_mode; char pd_mode_uo; }; struct sctp_stream_interleave; struct sctp_stream { struct { struct __genradix tree; struct sctp_stream_out type[0]; } out; struct { struct __genradix tree; struct sctp_stream_in type[0]; } in; __u16 outcnt; __u16 incnt; struct sctp_stream_out *out_curr; union { struct { struct list_head prio_list; }; struct { struct list_head rr_list; struct sctp_stream_out_ext *rr_next; }; struct { struct list_head fc_list; }; }; struct sctp_stream_interleave *si; }; struct sctp_sched_ops; struct sctp_outq { struct sctp_association *asoc; struct list_head out_chunk_list; struct sctp_sched_ops *sched; unsigned int out_qlen; unsigned int error; struct list_head control_chunk_list; struct list_head sacked; struct list_head retransmit; struct list_head abandoned; __u32 outstanding_bytes; char fast_rtx; char cork; }; struct sctp_ulpq { char 
pd_mode; struct sctp_association *asoc; struct sk_buff_head reasm; struct sk_buff_head reasm_uo; struct sk_buff_head lobby; }; struct sctp_priv_assoc_stats { struct __kernel_sockaddr_storage obs_rto_ipaddr; __u64 max_obs_rto; __u64 isacks; __u64 osacks; __u64 opackets; __u64 ipackets; __u64 rtxchunks; __u64 outofseqtsns; __u64 idupchunks; __u64 gapcnt; __u64 ouodchunks; __u64 iuodchunks; __u64 oodchunks; __u64 iodchunks; __u64 octrlchunks; __u64 ictrlchunks; }; struct sctp_endpoint; struct sctp_transport; struct sctp_random_param; struct sctp_chunks_param; struct sctp_hmac_algo_param; struct sctp_auth_bytes; struct sctp_shared_key; struct sctp_association { struct sctp_ep_common base; struct list_head asocs; sctp_assoc_t assoc_id; struct sctp_endpoint *ep; struct sctp_cookie c; struct { struct list_head transport_addr_list; __u32 rwnd; __u16 transport_count; __u16 port; struct sctp_transport *primary_path; union sctp_addr primary_addr; struct sctp_transport *active_path; struct sctp_transport *retran_path; struct sctp_transport *last_sent_to; struct sctp_transport *last_data_from; struct sctp_tsnmap tsn_map; __be16 addip_disabled_mask; __u16 ecn_capable: 1; __u16 ipv4_address: 1; __u16 ipv6_address: 1; __u16 asconf_capable: 1; __u16 prsctp_capable: 1; __u16 reconf_capable: 1; __u16 intl_capable: 1; __u16 auth_capable: 1; __u16 sack_needed: 1; __u16 sack_generation: 1; __u16 zero_window_announced: 1; __u32 sack_cnt; __u32 adaptation_ind; struct sctp_inithdr_host i; void *cookie; int cookie_len; __u32 addip_serial; struct sctp_random_param *peer_random; struct sctp_chunks_param *peer_chunks; struct sctp_hmac_algo_param *peer_hmacs; } peer; enum sctp_state state; int overall_error_count; ktime_t cookie_life; unsigned long rto_initial; unsigned long rto_max; unsigned long rto_min; int max_burst; int max_retrans; __u16 pf_retrans; __u16 ps_retrans; __u16 max_init_attempts; __u16 init_retries; unsigned long max_init_timeo; unsigned long hbinterval; unsigned long probe_interval; __be16 encap_port; __u16 pathmaxrxt; __u32 flowlabel; __u8 dscp; __u8 pmtu_pending; __u32 pathmtu; __u32 param_flags; __u32 sackfreq; unsigned long sackdelay; unsigned long timeouts[12]; struct timer_list timers[12]; struct sctp_transport *shutdown_last_sent_to; struct sctp_transport *init_last_sent_to; int shutdown_retries; __u32 next_tsn; __u32 ctsn_ack_point; __u32 adv_peer_ack_point; __u32 highest_sacked; __u32 fast_recovery_exit; __u8 fast_recovery; __u16 unack_data; __u32 rtx_data_chunks; __u32 rwnd; __u32 a_rwnd; __u32 rwnd_over; __u32 rwnd_press; int sndbuf_used; atomic_t rmem_alloc; wait_queue_head_t wait; __u32 frag_point; __u32 user_frag; int init_err_counter; int init_cycle; __u16 default_stream; __u16 default_flags; __u32 default_ppid; __u32 default_context; __u32 default_timetolive; __u32 default_rcv_context; struct sctp_stream stream; struct sctp_outq outqueue; struct sctp_ulpq ulpq; __u32 last_ecne_tsn; __u32 last_cwr_tsn; int numduptsns; struct sctp_chunk *addip_last_asconf; struct list_head asconf_ack_list; struct list_head addip_chunk_list; __u32 addip_serial; int src_out_of_asoc_ok; union sctp_addr *asconf_addr_del_pending; struct sctp_transport *new_transport; struct list_head endpoint_shared_keys; struct sctp_auth_bytes *asoc_shared_key; struct sctp_shared_key *shkey; __u16 default_hmac_id; __u16 active_key_id; __u8 need_ecne: 1; __u8 temp: 1; __u8 pf_expose: 2; __u8 force_delay: 1; __u8 strreset_enable; __u8 strreset_outstanding; __u32 strreset_outseq; __u32 strreset_inseq; __u32 strreset_result[2]; 
struct sctp_chunk *strreset_chunk; struct sctp_priv_assoc_stats stats; int sent_cnt_removable; __u16 subscribe; __u64 abandoned_unsent[3]; __u64 abandoned_sent[3]; u32 secid; u32 peer_secid; struct callback_head rcu; }; struct sctp_paramhdr; struct sctp_cookie_preserve_param; struct sctp_hostname_param; struct sctp_cookie_param; struct sctp_supported_addrs_param; struct sctp_ipv4addr_param; struct sctp_ipv6addr_param; union sctp_addr_param; struct sctp_adaptation_ind_param; struct sctp_supported_ext_param; struct sctp_addip_param; union sctp_params { void *v; struct sctp_paramhdr *p; struct sctp_cookie_preserve_param *life; struct sctp_hostname_param *dns; struct sctp_cookie_param *cookie; struct sctp_supported_addrs_param *sat; struct sctp_ipv4addr_param *v4; struct sctp_ipv6addr_param *v6; union sctp_addr_param *addr; struct sctp_adaptation_ind_param *aind; struct sctp_supported_ext_param *ext; struct sctp_random_param *random; struct sctp_chunks_param *chunks; struct sctp_hmac_algo_param *hmac_algo; struct sctp_addip_param *addip; }; struct sctp_sndrcvinfo { __u16 sinfo_stream; __u16 sinfo_ssn; __u16 sinfo_flags; __u32 sinfo_ppid; __u32 sinfo_context; __u32 sinfo_timetolive; __u32 sinfo_tsn; __u32 sinfo_cumtsn; sctp_assoc_t sinfo_assoc_id; }; struct sctp_datahdr; struct sctp_inithdr; struct sctp_sackhdr; struct sctp_heartbeathdr; struct sctp_sender_hb_info; struct sctp_shutdownhdr; struct sctp_signed_cookie; struct sctp_ecnehdr; struct sctp_cwrhdr; struct sctp_errhdr; struct sctp_addiphdr; struct sctp_fwdtsn_hdr; struct sctp_authhdr; struct sctp_idatahdr; struct sctp_ifwdtsn_hdr; struct sctp_chunkhdr; struct sctphdr; struct sctp_datamsg; struct sctp_chunk { struct list_head list; refcount_t refcnt; int sent_count; union { struct list_head transmitted_list; struct list_head stream_list; }; struct list_head frag_list; struct sk_buff *skb; union { struct sk_buff *head_skb; struct sctp_shared_key *shkey; }; union sctp_params param_hdr; union { __u8 *v; struct sctp_datahdr *data_hdr; struct sctp_inithdr *init_hdr; struct sctp_sackhdr *sack_hdr; struct sctp_heartbeathdr *hb_hdr; struct sctp_sender_hb_info *hbs_hdr; struct sctp_shutdownhdr *shutdown_hdr; struct sctp_signed_cookie *cookie_hdr; struct sctp_ecnehdr *ecne_hdr; struct sctp_cwrhdr *ecn_cwr_hdr; struct sctp_errhdr *err_hdr; struct sctp_addiphdr *addip_hdr; struct sctp_fwdtsn_hdr *fwdtsn_hdr; struct sctp_authhdr *auth_hdr; struct sctp_idatahdr *idata_hdr; struct sctp_ifwdtsn_hdr *ifwdtsn_hdr; } subh; __u8 *chunk_end; struct sctp_chunkhdr *chunk_hdr; struct sctphdr *sctp_hdr; struct sctp_sndrcvinfo sinfo; struct sctp_association *asoc; struct sctp_ep_common *rcvr; unsigned long sent_at; union sctp_addr source; union sctp_addr dest; struct sctp_datamsg *msg; struct sctp_transport *transport; struct sk_buff *auth_chunk; __u16 rtt_in_progress: 1; __u16 has_tsn: 1; __u16 has_ssn: 1; __u16 singleton: 1; __u16 end_of_packet: 1; __u16 ecn_ce_done: 1; __u16 pdiscard: 1; __u16 tsn_gap_acked: 1; __u16 data_accepted: 1; __u16 auth: 1; __u16 has_asconf: 1; __u16 pmtu_probe: 1; __u16 tsn_missing_report: 2; __u16 fast_retransmit: 2; }; struct sctp_shared_key { struct list_head key_list; struct sctp_auth_bytes *key; refcount_t refcnt; __u16 key_id; __u8 deactivated; }; struct sctp_auth_bytes { refcount_t refcnt; __u32 len; __u8 data[0]; }; struct sctp_paramhdr { __be16 type; __be16 length; }; struct sctp_cookie_preserve_param { struct sctp_paramhdr param_hdr; __be32 lifespan_increment; }; struct sctp_hostname_param { struct sctp_paramhdr param_hdr; 
uint8_t hostname[0]; }; struct sctp_cookie_param { struct sctp_paramhdr p; __u8 body[0]; }; struct sctp_supported_addrs_param { struct sctp_paramhdr param_hdr; __be16 types[0]; }; struct sctp_ipv4addr_param { struct sctp_paramhdr param_hdr; struct in_addr addr; }; struct sctp_ipv6addr_param { struct sctp_paramhdr param_hdr; struct in6_addr addr; }; union sctp_addr_param { struct sctp_paramhdr p; struct sctp_ipv4addr_param v4; struct sctp_ipv6addr_param v6; }; struct sctp_adaptation_ind_param { struct sctp_paramhdr param_hdr; __be32 adaptation_ind; }; struct sctp_supported_ext_param { struct sctp_paramhdr param_hdr; __u8 chunks[0]; }; struct sctp_random_param { struct sctp_paramhdr param_hdr; __u8 random_val[0]; }; struct sctp_chunks_param { struct sctp_paramhdr param_hdr; __u8 chunks[0]; }; struct sctp_hmac_algo_param { struct sctp_paramhdr param_hdr; __be16 hmac_ids[0]; }; struct sctp_addip_param { struct sctp_paramhdr param_hdr; __be32 crr_id; }; struct sctp_datahdr { __be32 tsn; __be16 stream; __be16 ssn; __u32 ppid; }; struct sctp_inithdr { __be32 init_tag; __be32 a_rwnd; __be16 num_outbound_streams; __be16 num_inbound_streams; __be32 initial_tsn; }; struct sctp_sackhdr { __be32 cum_tsn_ack; __be32 a_rwnd; __be16 num_gap_ack_blocks; __be16 num_dup_tsns; }; struct sctp_heartbeathdr { struct sctp_paramhdr info; }; struct sctp_sender_hb_info { struct sctp_paramhdr param_hdr; union sctp_addr daddr; unsigned long sent_at; __u64 hb_nonce; __u32 probe_size; }; struct sctp_shutdownhdr { __be32 cum_tsn_ack; }; struct sctp_signed_cookie { __u8 signature[32]; __u32 __pad; struct sctp_cookie c; } __attribute__((packed)); struct sctp_ecnehdr { __be32 lowest_tsn; }; struct sctp_cwrhdr { __be32 lowest_tsn; }; struct sctp_errhdr { __be16 cause; __be16 length; }; struct sctp_addiphdr { __be32 serial; }; struct sctp_fwdtsn_hdr { __be32 new_cum_tsn; }; struct sctp_authhdr { __be16 shkey_id; __be16 hmac_id; }; struct sctp_idatahdr { __be32 tsn; __be16 stream; __be16 reserved; __be32 mid; union { __u32 ppid; __be32 fsn; }; __u8 payload[0]; }; struct sctp_ifwdtsn_hdr { __be32 new_cum_tsn; }; struct sctp_chunkhdr { __u8 type; __u8 flags; __be16 length; }; struct sctphdr { __be16 source; __be16 dest; __be32 vtag; __le32 checksum; }; struct sctp_datamsg { struct list_head chunks; refcount_t refcnt; unsigned long expires_at; int send_error; u8 send_failed: 1; u8 can_delay: 1; u8 abandoned: 1; }; struct sctp_packet { __u16 source_port; __u16 destination_port; __u32 vtag; struct list_head chunk_list; size_t overhead; size_t size; size_t max_size; struct sctp_transport *transport; struct sctp_chunk *auth; u8 has_cookie_echo: 1; u8 has_sack: 1; u8 has_auth: 1; u8 has_data: 1; u8 ipfragok: 1; }; struct sctp_af; struct sctp_transport { struct list_head transports; struct rhlist_head node; refcount_t refcnt; __u32 rto_pending: 1; __u32 hb_sent: 1; __u32 pmtu_pending: 1; __u32 dst_pending_confirm: 1; __u32 sack_generation: 1; u32 dst_cookie; struct flowi fl; union sctp_addr ipaddr; struct sctp_af *af_specific; struct sctp_association *asoc; unsigned long rto; __u32 rtt; __u32 rttvar; __u32 srtt; __u32 cwnd; __u32 ssthresh; __u32 partial_bytes_acked; __u32 flight_size; __u32 burst_limited; struct dst_entry *dst; union sctp_addr saddr; unsigned long hbinterval; unsigned long probe_interval; unsigned long sackdelay; __u32 sackfreq; atomic_t mtu_info; ktime_t last_time_heard; unsigned long last_time_sent; unsigned long last_time_ecne_reduced; __be16 encap_port; __u16 pathmaxrxt; __u32 flowlabel; __u8 dscp; __u16 
pf_retrans; __u16 ps_retrans; __u32 pathmtu; __u32 param_flags; int init_sent_count; int state; unsigned short error_count; struct timer_list T3_rtx_timer; struct timer_list hb_timer; struct timer_list proto_unreach_timer; struct timer_list reconf_timer; struct timer_list probe_timer; struct list_head transmitted; struct sctp_packet packet; struct list_head send_ready; struct { __u32 next_tsn_at_change; char changeover_active; char cycling_changeover; char cacc_saw_newack; } cacc; struct { __u16 pmtu; __u16 probe_size; __u16 probe_high; __u8 probe_count; __u8 state; } pl; __u64 hb_nonce; struct callback_head rcu; }; enum sctp_scope { SCTP_SCOPE_GLOBAL = 0, SCTP_SCOPE_PRIVATE = 1, SCTP_SCOPE_LINK = 2, SCTP_SCOPE_LOOPBACK = 3, SCTP_SCOPE_UNUSABLE = 4, }; struct sctp_sock; struct sctp_af { int (*sctp_xmit)(struct sk_buff *, struct sctp_transport *); int (*setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); int (*getsockopt)(struct sock *, int, int, char __attribute__((btf_type_tag("user"))) *, int __attribute__((btf_type_tag("user"))) *); void (*get_dst)(struct sctp_transport *, union sctp_addr *, struct flowi *, struct sock *); void (*get_saddr)(struct sctp_sock *, struct sctp_transport *, struct flowi *); void (*copy_addrlist)(struct list_head *, struct net_device *); int (*cmp_addr)(const union sctp_addr *, const union sctp_addr *); void (*addr_copy)(union sctp_addr *, union sctp_addr *); void (*from_skb)(union sctp_addr *, struct sk_buff *, int); void (*from_sk)(union sctp_addr *, struct sock *); bool (*from_addr_param)(union sctp_addr *, union sctp_addr_param *, __be16, int); int (*to_addr_param)(const union sctp_addr *, union sctp_addr_param *); int (*addr_valid)(union sctp_addr *, struct sctp_sock *, const struct sk_buff *); enum sctp_scope (*scope)(union sctp_addr *); void (*inaddr_any)(union sctp_addr *, __be16); int (*is_any)(const union sctp_addr *); int (*available)(union sctp_addr *, struct sctp_sock *); int (*skb_iif)(const struct sk_buff *); int (*skb_sdif)(const struct sk_buff *); int (*is_ce)(const struct sk_buff *); void (*seq_dump_addr)(struct seq_file *, union sctp_addr *); void (*ecn_capable)(struct sock *); __u16 net_header_len; int sockaddr_len; int (*ip_options_len)(struct sock *); sa_family_t sa_family; struct list_head list; }; struct ip_options; struct inet_cork { unsigned int flags; __be32 addr; struct ip_options *opt; unsigned int fragsize; int length; struct dst_entry *dst; u8 tx_flags; __u8 ttl; __s16 tos; char priority; __u16 gso_size; u64 transmit_time; u32 mark; }; struct inet_cork_full { struct inet_cork base; struct flowi fl; }; struct ipv6_pinfo; struct ip_options_rcu; struct ip_mc_socklist; struct inet_sock { struct sock sk; struct ipv6_pinfo *pinet6; unsigned long inet_flags; __be32 inet_saddr; __s16 uc_ttl; __be16 inet_sport; struct ip_options_rcu __attribute__((btf_type_tag("rcu"))) *inet_opt; atomic_t inet_id; __u8 tos; __u8 min_ttl; __u8 mc_ttl; __u8 pmtudisc; __u8 rcv_tos; __u8 convert_csum; int uc_index; int mc_index; __be32 mc_addr; struct { __u16 lo; __u16 hi; } local_port_range; struct ip_mc_socklist __attribute__((btf_type_tag("rcu"))) *mc_list; struct inet_cork_full cork; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; enum sctp_socket_type { SCTP_SOCKET_UDP = 0, SCTP_SOCKET_UDP_HIGH_BANDWIDTH = 1, SCTP_SOCKET_TCP = 2, }; struct sctp_rtoinfo { sctp_assoc_t srto_assoc_id; __u32 srto_initial; __u32 srto_max; __u32 srto_min; }; struct sctp_paddrparams { sctp_assoc_t spp_assoc_id; struct __kernel_sockaddr_storage spp_address; 
__u32 spp_hbinterval; __u16 spp_pathmaxrxt; __u32 spp_pathmtu; __u32 spp_sackdelay; __u32 spp_flags; __u32 spp_ipv6_flowlabel; __u8 spp_dscp; int: 0; } __attribute__((packed)); struct sctp_assocparams { sctp_assoc_t sasoc_assoc_id; __u16 sasoc_asocmaxrxt; __u16 sasoc_number_peer_destinations; __u32 sasoc_peer_rwnd; __u32 sasoc_local_rwnd; __u32 sasoc_cookie_life; }; struct sctp_initmsg { __u16 sinit_num_ostreams; __u16 sinit_max_instreams; __u16 sinit_max_attempts; __u16 sinit_max_init_timeo; }; struct sctp_pf; struct sctp_bind_bucket; struct sctp_sock { struct inet_sock inet; enum sctp_socket_type type; struct sctp_pf *pf; struct crypto_shash *hmac; char *sctp_hmac_alg; struct sctp_endpoint *ep; struct sctp_bind_bucket *bind_hash; __u16 default_stream; __u32 default_ppid; __u16 default_flags; __u32 default_context; __u32 default_timetolive; __u32 default_rcv_context; int max_burst; __u32 hbinterval; __u32 probe_interval; __be16 udp_port; __be16 encap_port; __u16 pathmaxrxt; __u32 flowlabel; __u8 dscp; __u16 pf_retrans; __u16 ps_retrans; __u32 pathmtu; __u32 sackdelay; __u32 sackfreq; __u32 param_flags; __u32 default_ss; struct sctp_rtoinfo rtoinfo; struct sctp_paddrparams paddrparam; struct sctp_assocparams assocparams; __u16 subscribe; struct sctp_initmsg initmsg; int user_frag; __u32 autoclose; __u32 adaptation_ind; __u32 pd_point; __u16 nodelay: 1; __u16 pf_expose: 2; __u16 reuse: 1; __u16 disable_fragments: 1; __u16 v4mapped: 1; __u16 frag_interleave: 1; __u16 recvrcvinfo: 1; __u16 recvnxtinfo: 1; __u16 data_ready_signalled: 1; atomic_t pd_mode; struct sk_buff_head pd_lobby; struct list_head auto_asconf_list; int do_auto_asconf; }; struct in6_pktinfo { struct in6_addr ipi6_addr; int ipi6_ifindex; }; struct ipv6_txoptions; struct inet6_cork { struct ipv6_txoptions *opt; u8 hop_limit; u8 tclass; }; struct ipv6_mc_socklist; struct ipv6_ac_socklist; struct ipv6_fl_socklist; struct ipv6_pinfo { struct in6_addr saddr; struct in6_pktinfo sticky_pktinfo; const struct in6_addr *daddr_cache; __be32 flow_label; __u32 frag_size; __u16 __unused_1: 7; __s16 hop_limit: 9; __u16 mc_loop: 1; __u16 __unused_2: 6; __s16 mcast_hops: 9; int ucast_oif; int mcast_oif; union { struct { __u16 srcrt: 1; __u16 osrcrt: 1; __u16 rxinfo: 1; __u16 rxoinfo: 1; __u16 rxhlim: 1; __u16 rxohlim: 1; __u16 hopopts: 1; __u16 ohopopts: 1; __u16 dstopts: 1; __u16 odstopts: 1; __u16 rxflow: 1; __u16 rxtclass: 1; __u16 rxpmtu: 1; __u16 rxorigdstaddr: 1; __u16 recvfragsize: 1; } bits; __u16 all; } rxopt; __u16 recverr: 1; __u16 sndflow: 1; __u16 repflow: 1; __u16 pmtudisc: 3; __u16 padding: 1; __u16 srcprefs: 3; __u16 dontfrag: 1; __u16 autoflowlabel: 1; __u16 autoflowlabel_set: 1; __u16 mc_all: 1; __u16 recverr_rfc4884: 1; __u16 rtalert_isolate: 1; __u8 min_hopcount; __u8 tclass; __be32 rcv_flowinfo; __u32 dst_cookie; struct ipv6_mc_socklist __attribute__((btf_type_tag("rcu"))) *ipv6_mc_list; struct ipv6_ac_socklist *ipv6_ac_list; struct ipv6_fl_socklist __attribute__((btf_type_tag("rcu"))) *ipv6_fl_list; struct ipv6_txoptions __attribute__((btf_type_tag("rcu"))) *opt; struct sk_buff *pktoptions; struct sk_buff *rxpmtu; struct inet6_cork cork; }; struct ip6_sf_socklist; struct ipv6_mc_socklist { struct in6_addr addr; int ifindex; unsigned int sfmode; struct ipv6_mc_socklist __attribute__((btf_type_tag("rcu"))) *next; struct ip6_sf_socklist __attribute__((btf_type_tag("rcu"))) *sflist; struct callback_head rcu; }; struct ip6_sf_socklist { unsigned int sl_max; unsigned int sl_count; struct callback_head rcu; struct in6_addr 
sl_addr[0]; }; struct ipv6_ac_socklist { struct in6_addr acl_addr; int acl_ifindex; struct ipv6_ac_socklist *acl_next; }; struct ip6_flowlabel; struct ipv6_fl_socklist { struct ipv6_fl_socklist __attribute__((btf_type_tag("rcu"))) *next; struct ip6_flowlabel *fl; struct callback_head rcu; }; struct ip6_flowlabel { struct ip6_flowlabel __attribute__((btf_type_tag("rcu"))) *next; __be32 label; atomic_t users; struct in6_addr dst; struct ipv6_txoptions *opt; unsigned long linger; struct callback_head rcu; u8 share; union { struct pid *pid; kuid_t uid; } owner; unsigned long lastuse; unsigned long expires; struct net *fl_net; }; struct ipv6_opt_hdr; struct ipv6_rt_hdr; struct ipv6_txoptions { refcount_t refcnt; int tot_len; __u16 opt_flen; __u16 opt_nflen; struct ipv6_opt_hdr *hopopt; struct ipv6_opt_hdr *dst0opt; struct ipv6_rt_hdr *srcrt; struct ipv6_opt_hdr *dst1opt; struct callback_head rcu; }; struct ipv6_opt_hdr { __u8 nexthdr; __u8 hdrlen; }; struct ipv6_rt_hdr { __u8 nexthdr; __u8 hdrlen; __u8 type; __u8 segments_left; }; struct ip_options { __be32 faddr; __be32 nexthop; unsigned char optlen; unsigned char srr; unsigned char rr; unsigned char ts; unsigned char is_strictroute: 1; unsigned char srr_is_hit: 1; unsigned char is_changed: 1; unsigned char rr_needaddr: 1; unsigned char ts_needtime: 1; unsigned char ts_needaddr: 1; unsigned char router_alert; unsigned char cipso; unsigned char __pad2; unsigned char __data[0]; }; struct ip_options_rcu { struct callback_head rcu; struct ip_options opt; }; struct ip_mreqn { struct in_addr imr_multiaddr; struct in_addr imr_address; int imr_ifindex; }; struct ip_sf_socklist; struct ip_mc_socklist { struct ip_mc_socklist __attribute__((btf_type_tag("rcu"))) *next_rcu; struct ip_mreqn multi; unsigned int sfmode; struct ip_sf_socklist __attribute__((btf_type_tag("rcu"))) *sflist; struct callback_head rcu; }; struct sctp_ulpevent; struct sctp_pf { void (*event_msgname)(struct sctp_ulpevent *, char *, int *); void (*skb_msgname)(struct sk_buff *, char *, int *); int (*af_supported)(sa_family_t, struct sctp_sock *); int (*cmp_addr)(const union sctp_addr *, const union sctp_addr *, struct sctp_sock *); int (*bind_verify)(struct sctp_sock *, union sctp_addr *); int (*send_verify)(struct sctp_sock *, union sctp_addr *); int (*supported_addrs)(const struct sctp_sock *, __be16 *); struct sock * (*create_accept_sk)(struct sock *, struct sctp_association *, bool); int (*addr_to_user)(struct sctp_sock *, union sctp_addr *); void (*to_sk_saddr)(union sctp_addr *, struct sock *); void (*to_sk_daddr)(union sctp_addr *, struct sock *); void (*copy_ip_options)(struct sock *, struct sock *); struct sctp_af *af; }; struct sctp_ulpevent { struct sctp_association *asoc; struct sctp_chunk *chunk; unsigned int rmem_len; union { __u32 mid; __u16 ssn; }; union { __u32 ppid; __u32 fsn; }; __u32 tsn; __u32 cumtsn; __u16 stream; __u16 flags; __u16 msg_flags; } __attribute__((packed)); struct sctp_endpoint { struct sctp_ep_common base; struct hlist_node node; int hashent; struct list_head asocs; __u8 secret_key[32]; __u8 *digest; __u32 sndbuf_policy; __u32 rcvbuf_policy; struct crypto_shash **auth_hmacs; struct sctp_hmac_algo_param *auth_hmacs_list; struct sctp_chunks_param *auth_chunk_list; struct list_head endpoint_shared_keys; __u16 active_key_id; __u8 ecn_enable: 1; __u8 auth_enable: 1; __u8 intl_enable: 1; __u8 prsctp_enable: 1; __u8 asconf_enable: 1; __u8 reconf_enable: 1; __u8 strreset_enable; struct callback_head rcu; }; struct sctp_bind_bucket { unsigned short port; 
signed char fastreuse; signed char fastreuseport; kuid_t fastuid; struct hlist_node node; struct hlist_head owner; struct net *net; }; struct sctp_stream_priorities; struct sctp_stream_out_ext { __u64 abandoned_unsent[3]; __u64 abandoned_sent[3]; struct list_head outq; union { struct { struct list_head prio_list; struct sctp_stream_priorities *prio_head; }; struct { struct list_head rr_list; }; struct { struct list_head fc_list; __u32 fc_length; __u16 fc_weight; }; }; }; struct sctp_stream_priorities { struct list_head prio_sched; struct list_head active; struct sctp_stream_out_ext *next; __u16 prio; __u16 users; }; struct sctp_stream_interleave { __u16 data_chunk_len; __u16 ftsn_chunk_len; struct sctp_chunk * (*make_datafrag)(const struct sctp_association *, const struct sctp_sndrcvinfo *, int, __u8, gfp_t); void (*assign_number)(struct sctp_chunk *); bool (*validate_data)(struct sctp_chunk *); int (*ulpevent_data)(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); int (*enqueue_event)(struct sctp_ulpq *, struct sctp_ulpevent *); void (*renege_events)(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); void (*start_pd)(struct sctp_ulpq *, gfp_t); void (*abort_pd)(struct sctp_ulpq *, gfp_t); void (*generate_ftsn)(struct sctp_outq *, __u32); bool (*validate_ftsn)(struct sctp_chunk *); void (*report_ftsn)(struct sctp_ulpq *, __u32); void (*handle_ftsn)(struct sctp_ulpq *, struct sctp_chunk *); }; enum nf_hook_ops_type { NF_HOOK_OP_UNDEFINED = 0, NF_HOOK_OP_NF_TABLES = 1, NF_HOOK_OP_BPF = 2, }; struct nf_hook_ops { nf_hookfn *hook; struct net_device *dev; void *priv; u8 pf; enum nf_hook_ops_type hook_ops_type: 8; unsigned int hooknum; int priority; }; enum label_initialized { LABEL_INVALID = 0, LABEL_INITIALIZED = 1, LABEL_PENDING = 2, }; enum { POLICYDB_CAP_NETPEER = 0, POLICYDB_CAP_OPENPERM = 1, POLICYDB_CAP_EXTSOCKCLASS = 2, POLICYDB_CAP_ALWAYSNETWORK = 3, POLICYDB_CAP_CGROUPSECLABEL = 4, POLICYDB_CAP_NNP_NOSUID_TRANSITION = 5, POLICYDB_CAP_GENFS_SECLABEL_SYMLINKS = 6, POLICYDB_CAP_IOCTL_SKIP_CLOEXEC = 7, __POLICYDB_CAP_MAX = 8, }; struct sk_security_struct { u32 sid; u32 peer_sid; u16 sclass; enum { SCTP_ASSOC_UNSET = 0, SCTP_ASSOC_SET = 1, } sctp_assoc_state; }; enum { Opt_error = -1, Opt_context = 0, Opt_defcontext = 1, Opt_fscontext = 2, Opt_rootcontext = 3, Opt_seclabel = 4, }; enum { TCP_ESTABLISHED = 1, TCP_SYN_SENT = 2, TCP_SYN_RECV = 3, TCP_FIN_WAIT1 = 4, TCP_FIN_WAIT2 = 5, TCP_TIME_WAIT = 6, TCP_CLOSE = 7, TCP_CLOSE_WAIT = 8, TCP_LAST_ACK = 9, TCP_LISTEN = 10, TCP_CLOSING = 11, TCP_NEW_SYN_RECV = 12, TCP_MAX_STATES = 13, }; struct inode_security_struct { struct inode *inode; struct list_head list; u32 task_sid; u32 sid; u16 sclass; unsigned char initialized; spinlock_t lock; }; struct tty_file_private { struct tty_struct *tty; struct file *file; struct list_head list; }; struct socket_alloc { struct socket socket; struct inode vfs_inode; }; struct inet_skb_parm { int iif; struct ip_options opt; u16 flags; u16 frag_max_size; }; struct inet6_skb_parm { int iif; __be16 ra; __u16 dst0; __u16 srcrt; __u16 dst1; __u16 lastopt; __u16 nhoff; __u16 flags; __u16 dsthao; __u16 frag_max_size; __u16 srhoff; }; struct task_security_struct { u32 osid; u32 sid; u32 exec_sid; u32 create_sid; u32 keycreate_sid; u32 sockcreate_sid; }; struct superblock_security_struct { u32 sid; u32 def_sid; u32 mntpoint_sid; unsigned short behavior; unsigned short flags; struct mutex lock; struct list_head isec_head; spinlock_t isec_lock; }; struct file_security_struct { u32 sid; u32 fown_sid; u32 isid; u32 pseqno; }; 
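/*
 * Illustrative sketch, not part of the generated dump: because the records
 * in this header carry preserve_access_index, a BTF-enabled BPF program
 * (fentry, tp_btf, LSM) that includes it can dereference these fields
 * directly and libbpf relocates the member offsets at load time. The helper
 * below is a hypothetical example using only struct task_security_struct as
 * declared above; non-BTF program types would instead read the field via
 * bpf_core_read() from libbpf's bpf_core_read.h, which is assumed to be
 * included by the consuming program rather than by this header.
 */
static inline u32 task_sec_sid_example(const struct task_security_struct *tsec)
{
	/* CO-RE relocatable access: the offset of ->sid is fixed up at load time. */
	return tsec ? tsec->sid : 0;
}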
struct bpf_security_struct { u32 sid; }; struct ipc_security_struct { u16 sclass; u32 sid; }; struct msg_security_struct { u32 sid; }; typedef __u16 __sum16; struct iphdr { __u8 ihl: 4; __u8 version: 4; __u8 tos; __be16 tot_len; __be16 id; __be16 frag_off; __u8 ttl; __u8 protocol; __sum16 check; union { struct { __be32 saddr; __be32 daddr; }; struct { __be32 saddr; __be32 daddr; } addrs; }; }; struct tcphdr { __be16 source; __be16 dest; __be32 seq; __be32 ack_seq; __u16 res1: 4; __u16 doff: 4; __u16 fin: 1; __u16 syn: 1; __u16 rst: 1; __u16 psh: 1; __u16 ack: 1; __u16 urg: 1; __u16 ece: 1; __u16 cwr: 1; __be16 window; __sum16 check; __be16 urg_ptr; }; struct dccp_hdr { __be16 dccph_sport; __be16 dccph_dport; __u8 dccph_doff; __u8 dccph_cscov: 4; __u8 dccph_ccval: 4; __sum16 dccph_checksum; __u8 dccph_x: 1; __u8 dccph_type: 4; __u8 dccph_reserved: 3; __u8 dccph_seq2; __be16 dccph_seq; }; struct udphdr { __be16 source; __be16 dest; __be16 len; __sum16 check; }; struct ipv6hdr { __u8 priority: 4; __u8 version: 4; __u8 flow_lbl[3]; __be16 payload_len; __u8 nexthdr; __u8 hop_limit; union { struct { struct in6_addr saddr; struct in6_addr daddr; }; struct { struct in6_addr saddr; struct in6_addr daddr; } addrs; }; }; struct selinux_mnt_opts { u32 fscontext_sid; u32 context_sid; u32 rootcontext_sid; u32 defcontext_sid; }; struct tun_security_struct { u32 sid; }; struct key_security_struct { u32 sid; }; struct perf_event_security_struct { u32 sid; }; enum sel_inos { SEL_ROOT_INO = 2, SEL_LOAD = 3, SEL_ENFORCE = 4, SEL_CONTEXT = 5, SEL_ACCESS = 6, SEL_CREATE = 7, SEL_RELABEL = 8, SEL_USER = 9, SEL_POLICYVERS = 10, SEL_COMMIT_BOOLS = 11, SEL_MLS = 12, SEL_DISABLE = 13, SEL_MEMBER = 14, SEL_CHECKREQPROT = 15, SEL_COMPAT_NET = 16, SEL_REJECT_UNKNOWN = 17, SEL_DENY_UNKNOWN = 18, SEL_STATUS = 19, SEL_POLICY = 20, SEL_VALIDATE_TRANS = 21, SEL_INO_NEXT = 22, }; struct selinux_fs_info { struct dentry *bool_dir; unsigned int bool_num; char **bool_pending_names; int *bool_pending_values; struct dentry *class_dir; unsigned long last_class_ino; bool policy_opened; struct dentry *policycap_dir; unsigned long last_ino; struct super_block *sb; }; struct selinux_policy_convert_data; struct selinux_load_state { struct selinux_policy *policy; struct selinux_policy_convert_data *convert_data; }; struct policy_load_memory { size_t len; void *data; }; enum { SELNL_MSG_SETENFORCE = 16, SELNL_MSG_POLICYLOAD = 17, SELNL_MSG_MAX = 18, }; enum selinux_nlgroups { SELNLGRP_NONE = 0, SELNLGRP_AVC = 1, __SELNLGRP_MAX = 2, }; struct selnl_msg_setenforce { __s32 val; }; struct selnl_msg_policyload { __u32 seqno; }; struct nlmsg_perm { u16 nlmsg_type; u32 perm; }; enum { RTM_BASE = 16, RTM_NEWLINK = 16, RTM_DELLINK = 17, RTM_GETLINK = 18, RTM_SETLINK = 19, RTM_NEWADDR = 20, RTM_DELADDR = 21, RTM_GETADDR = 22, RTM_NEWROUTE = 24, RTM_DELROUTE = 25, RTM_GETROUTE = 26, RTM_NEWNEIGH = 28, RTM_DELNEIGH = 29, RTM_GETNEIGH = 30, RTM_NEWRULE = 32, RTM_DELRULE = 33, RTM_GETRULE = 34, RTM_NEWQDISC = 36, RTM_DELQDISC = 37, RTM_GETQDISC = 38, RTM_NEWTCLASS = 40, RTM_DELTCLASS = 41, RTM_GETTCLASS = 42, RTM_NEWTFILTER = 44, RTM_DELTFILTER = 45, RTM_GETTFILTER = 46, RTM_NEWACTION = 48, RTM_DELACTION = 49, RTM_GETACTION = 50, RTM_NEWPREFIX = 52, RTM_GETMULTICAST = 58, RTM_GETANYCAST = 62, RTM_NEWNEIGHTBL = 64, RTM_GETNEIGHTBL = 66, RTM_SETNEIGHTBL = 67, RTM_NEWNDUSEROPT = 68, RTM_NEWADDRLABEL = 72, RTM_DELADDRLABEL = 73, RTM_GETADDRLABEL = 74, RTM_GETDCB = 78, RTM_SETDCB = 79, RTM_NEWNETCONF = 80, RTM_DELNETCONF = 81, RTM_GETNETCONF = 82, 
RTM_NEWMDB = 84, RTM_DELMDB = 85, RTM_GETMDB = 86, RTM_NEWNSID = 88, RTM_DELNSID = 89, RTM_GETNSID = 90, RTM_NEWSTATS = 92, RTM_GETSTATS = 94, RTM_SETSTATS = 95, RTM_NEWCACHEREPORT = 96, RTM_NEWCHAIN = 100, RTM_DELCHAIN = 101, RTM_GETCHAIN = 102, RTM_NEWNEXTHOP = 104, RTM_DELNEXTHOP = 105, RTM_GETNEXTHOP = 106, RTM_NEWLINKPROP = 108, RTM_DELLINKPROP = 109, RTM_GETLINKPROP = 110, RTM_NEWVLAN = 112, RTM_DELVLAN = 113, RTM_GETVLAN = 114, RTM_NEWNEXTHOPBUCKET = 116, RTM_DELNEXTHOPBUCKET = 117, RTM_GETNEXTHOPBUCKET = 118, RTM_NEWTUNNEL = 120, RTM_DELTUNNEL = 121, RTM_GETTUNNEL = 122, __RTM_MAX = 123, }; struct netif_security_struct { struct net *ns; int ifindex; u32 sid; }; struct sel_netif { struct list_head list; struct netif_security_struct nsec; struct callback_head callback_head; }; struct sel_netnode_bkt { unsigned int size; struct list_head list; }; struct netnode_security_struct { union { __be32 ipv4; struct in6_addr ipv6; } addr; u32 sid; u16 family; }; struct sel_netnode { struct netnode_security_struct nsec; struct list_head list; struct callback_head rcu; }; struct sel_netport_bkt { int size; struct list_head list; }; struct netport_security_struct { u32 sid; u16 port; u8 protocol; }; struct sel_netport { struct netport_security_struct psec; struct list_head list; struct callback_head rcu; }; struct selinux_kernel_status { u32 version; u32 sequence; u32 enforcing; u32 policyload; u32 deny_unknown; }; struct ebitmap_node { struct ebitmap_node *next; unsigned long maps[6]; u32 startbit; }; struct policy_file { char *data; size_t len; }; struct hashtab_node { void *key; void *datum; struct hashtab_node *next; }; struct hashtab_key_params { u32 (*hash)(const void *); int (*cmp)(const void *, const void *); }; struct mls_level { u32 sens; struct ebitmap cat; }; struct mls_range { struct mls_level level[2]; }; struct context { u32 user; u32 role; u32 type; u32 len; struct mls_range range; char *str; }; struct sidtab_str_cache; struct sidtab_entry { u32 sid; u32 hash; struct context context; struct sidtab_str_cache __attribute__((btf_type_tag("rcu"))) *cache; struct hlist_node list; }; struct sidtab_str_cache { struct callback_head rcu_member; struct list_head lru_member; struct sidtab_entry *parent; u32 len; char str[0]; }; struct sidtab_node_inner; struct sidtab_node_leaf; union sidtab_entry_inner { struct sidtab_node_inner *ptr_inner; struct sidtab_node_leaf *ptr_leaf; }; struct sidtab_isid_entry { int set; struct sidtab_entry entry; }; struct sidtab_convert_params; struct sidtab { union sidtab_entry_inner roots[4]; u32 count; struct sidtab_convert_params *convert; bool frozen; spinlock_t lock; u32 cache_free_slots; struct list_head cache_lru_list; spinlock_t cache_lock; struct sidtab_isid_entry isids[27]; struct hlist_head context_to_sid[512]; }; struct sidtab_node_inner { union sidtab_entry_inner entries[512]; }; struct sidtab_node_leaf { struct sidtab_entry entries[39]; }; struct convert_context_args; struct sidtab_convert_params { struct convert_context_args *args; struct sidtab *target; }; struct convert_context_args { struct policydb *oldp; struct policydb *newp; }; struct common_datum; struct constraint_node; struct class_datum { u32 value; char *comkey; struct common_datum *comdatum; struct symtab permissions; struct constraint_node *constraints; struct constraint_node *validatetrans; char default_user; char default_role; char default_type; char default_range; }; struct common_datum { u32 value; struct symtab permissions; }; struct constraint_expr; struct constraint_node { u32 
permissions; struct constraint_expr *expr; struct constraint_node *next; }; struct type_set; struct constraint_expr { u32 expr_type; u32 attr; u32 op; struct ebitmap names; struct type_set *type_names; struct constraint_expr *next; }; struct type_set { struct ebitmap types; struct ebitmap negset; u32 flags; }; struct role_datum { u32 value; u32 bounds; struct ebitmap dominates; struct ebitmap types; }; struct user_datum { u32 value; u32 bounds; struct ebitmap roles; struct mls_range range; struct mls_level dfltlevel; }; struct type_datum { u32 value; u32 bounds; unsigned char primary; unsigned char attribute; }; struct avtab_key { u16 source_type; u16 target_type; u16 target_class; u16 specified; }; struct avtab_extended_perms; struct avtab_datum { union { u32 data; struct avtab_extended_perms *xperms; } u; }; struct avtab_node { struct avtab_key key; struct avtab_datum datum; struct avtab_node *next; }; struct avtab_extended_perms { u8 specified; u8 driver; struct extended_perms_data perms; }; struct cond_bool_datum { __u32 value; int state; }; struct role_allow { u32 role; u32 new_role; struct role_allow *next; }; struct ocontext { union { char *name; struct { u8 protocol; u16 low_port; u16 high_port; } port; struct { u32 addr; u32 mask; } node; struct { u32 addr[4]; u32 mask[4]; } node6; struct { u64 subnet_prefix; u16 low_pkey; u16 high_pkey; } ibpkey; struct { char *dev_name; u8 port; } ibendport; } u; union { u32 sclass; u32 behavior; } v; struct context context[2]; u32 sid[2]; struct ocontext *next; }; struct genfs { char *fstype; struct ocontext *head; struct genfs *next; }; struct policydb_compat_info { unsigned int version; unsigned int sym_num; unsigned int ocon_num; }; struct cond_expr_node; struct cond_expr { struct cond_expr_node *nodes; u32 len; }; struct cond_av_list { struct avtab_node **nodes; u32 len; }; struct cond_node { int cur_state; struct cond_expr expr; struct cond_av_list true_list; struct cond_av_list false_list; }; struct cond_expr_node { u32 expr_type; u32 boolean; }; struct filename_trans_key { u32 ttype; u16 tclass; const char *name; }; struct range_trans { u32 source_type; u32 target_type; u32 target_class; }; struct role_trans_key { u32 role; u32 type; u32 tclass; }; struct filename_trans_datum { struct ebitmap stypes; u32 otype; struct filename_trans_datum *next; }; struct level_datum { struct mls_level *level; unsigned char isalias; }; struct role_trans_datum { u32 new_role; }; struct perm_datum { u32 value; }; struct policy_data { struct policydb *p; void *fp; }; struct cat_datum { u32 value; unsigned char isalias; }; struct selinux_mapping { u16 value; u16 num_perms; u32 perms[32]; }; struct selinux_audit_rule { u32 au_seqno; struct context au_ctxt; }; struct selinux_policy_convert_data { struct convert_context_args args; struct sidtab_convert_params sidtab_params; }; struct cond_insertf_data { struct policydb *p; struct avtab_node **dst; struct cond_av_list *other; }; struct scm_stat { atomic_t nr_fds; }; struct unix_address; struct unix_sock { struct sock sk; struct unix_address *addr; struct path path; struct mutex iolock; struct mutex bindlock; struct sock *peer; struct list_head link; unsigned long inflight; spinlock_t lock; unsigned long gc_flags; struct socket_wq peer_wq; wait_queue_entry_t peer_wake; struct scm_stat scm_stat; struct sk_buff *oob_skb; long: 64; }; struct sockaddr_un { __kernel_sa_family_t sun_family; char sun_path[108]; }; struct unix_address { refcount_t refcnt; int len; struct sockaddr_un name[0]; }; enum integrity_status { 
INTEGRITY_PASS = 0, INTEGRITY_PASS_IMMUTABLE = 1, INTEGRITY_FAIL = 2, INTEGRITY_FAIL_IMMUTABLE = 3, INTEGRITY_NOLABEL = 4, INTEGRITY_NOXATTRS = 5, INTEGRITY_UNKNOWN = 6, }; struct ima_digest_data; struct integrity_iint_cache { struct rb_node rb_node; struct mutex mutex; struct inode *inode; u64 version; unsigned long flags; unsigned long measured_pcrs; unsigned long atomic_flags; unsigned long real_ino; dev_t real_dev; enum integrity_status ima_file_status: 4; enum integrity_status ima_mmap_status: 4; enum integrity_status ima_bprm_status: 4; enum integrity_status ima_read_status: 4; enum integrity_status ima_creds_status: 4; enum integrity_status evm_status: 4; struct ima_digest_data *ima_hash; }; struct ima_digest_data { u8 algo; u8 length; union { struct { u8 unused; u8 type; } sha1; struct { u8 type; u8 algo; } ng; u8 data[2]; } xattr; u8 digest[0]; }; enum { CRYPTO_MSG_ALG_REQUEST = 0, CRYPTO_MSG_ALG_REGISTER = 1, CRYPTO_MSG_ALG_LOADED = 2, }; struct crypto_larval { struct crypto_alg alg; struct crypto_alg *adult; struct completion completion; u32 mask; bool test_started; }; struct crypto_cipher { struct crypto_tfm base; }; struct crypto_comp { struct crypto_tfm base; }; struct crypto_queue { struct list_head list; struct list_head *backlog; unsigned int qlen; unsigned int max_qlen; }; struct crypto_engine { char name[30]; bool idling; bool busy; bool running; bool retry_support; struct list_head list; spinlock_t queue_lock; struct crypto_queue queue; struct device *dev; bool rt; int (*prepare_crypt_hardware)(struct crypto_engine *); int (*unprepare_crypt_hardware)(struct crypto_engine *); int (*do_batch_requests)(struct crypto_engine *); struct kthread_worker *kworker; struct kthread_work pump_requests; void *priv_data; struct crypto_async_request *cur_req; }; struct crypto_engine_op { int (*do_one_request)(struct crypto_engine *, void *); }; struct crypto_engine_alg { struct crypto_alg base; struct crypto_engine_op op; }; struct aead_engine_alg { struct aead_alg base; struct crypto_engine_op op; }; struct ahash_request; struct crypto_ahash; struct ahash_alg { int (*init)(struct ahash_request *); int (*update)(struct ahash_request *); int (*final)(struct ahash_request *); int (*finup)(struct ahash_request *); int (*digest)(struct ahash_request *); int (*export)(struct ahash_request *, void *); int (*import)(struct ahash_request *, const void *); int (*setkey)(struct crypto_ahash *, const u8 *, unsigned int); int (*init_tfm)(struct crypto_ahash *); void (*exit_tfm)(struct crypto_ahash *); int (*clone_tfm)(struct crypto_ahash *, struct crypto_ahash *); struct hash_alg_common halg; }; struct ahash_request { struct crypto_async_request base; unsigned int nbytes; struct scatterlist *src; u8 *result; void *priv; void *__ctx[0]; }; struct crypto_ahash { int (*init)(struct ahash_request *); int (*update)(struct ahash_request *); int (*final)(struct ahash_request *); int (*finup)(struct ahash_request *); int (*digest)(struct ahash_request *); int (*export)(struct ahash_request *, void *); int (*import)(struct ahash_request *, const void *); int (*setkey)(struct crypto_ahash *, const u8 *, unsigned int); unsigned int statesize; unsigned int reqsize; struct crypto_tfm base; }; struct ahash_engine_alg { struct ahash_alg base; struct crypto_engine_op op; }; struct akcipher_request; struct crypto_akcipher; struct akcipher_alg { int (*sign)(struct akcipher_request *); int (*verify)(struct akcipher_request *); int (*encrypt)(struct akcipher_request *); int (*decrypt)(struct akcipher_request *); int 
(*set_pub_key)(struct crypto_akcipher *, const void *, unsigned int); int (*set_priv_key)(struct crypto_akcipher *, const void *, unsigned int); unsigned int (*max_size)(struct crypto_akcipher *); int (*init)(struct crypto_akcipher *); void (*exit)(struct crypto_akcipher *); struct crypto_alg base; }; struct akcipher_request { struct crypto_async_request base; struct scatterlist *src; struct scatterlist *dst; unsigned int src_len; unsigned int dst_len; void *__ctx[0]; }; struct crypto_akcipher { unsigned int reqsize; struct crypto_tfm base; }; struct crypto_kpp; struct kpp_request; struct kpp_alg { int (*set_secret)(struct crypto_kpp *, const void *, unsigned int); int (*generate_public_key)(struct kpp_request *); int (*compute_shared_secret)(struct kpp_request *); unsigned int (*max_size)(struct crypto_kpp *); int (*init)(struct crypto_kpp *); void (*exit)(struct crypto_kpp *); struct crypto_alg base; }; struct crypto_kpp { unsigned int reqsize; struct crypto_tfm base; }; struct kpp_request { struct crypto_async_request base; struct scatterlist *src; struct scatterlist *dst; unsigned int src_len; unsigned int dst_len; void *__ctx[0]; }; struct skcipher_engine_alg { struct skcipher_alg base; struct crypto_engine_op op; }; struct akcipher_engine_alg { struct akcipher_alg base; struct crypto_engine_op op; }; struct kpp_engine_alg { struct kpp_alg base; struct crypto_engine_op op; }; enum { CRYPTOA_UNSPEC = 0, CRYPTOA_ALG = 1, CRYPTOA_TYPE = 2, __CRYPTOA_MAX = 3, }; struct rtattr { unsigned short rta_len; unsigned short rta_type; }; struct crypto_attr_type { u32 type; u32 mask; }; struct crypto_attr_alg { char name[128]; }; struct aead_instance { void (*free)(struct aead_instance *); union { struct { char head[64]; struct crypto_instance base; } s; struct aead_alg alg; }; }; struct crypto_istat_aead { atomic64_t encrypt_cnt; atomic64_t encrypt_tlen; atomic64_t decrypt_cnt; atomic64_t decrypt_tlen; atomic64_t err_cnt; }; struct crypto_aead_spawn { struct crypto_spawn base; }; struct crypto_rng; struct rng_alg { int (*generate)(struct crypto_rng *, const u8 *, unsigned int, u8 *, unsigned int); int (*seed)(struct crypto_rng *, const u8 *, unsigned int); void (*set_ent)(struct crypto_rng *, const u8 *, unsigned int); unsigned int seedsize; struct crypto_alg base; }; struct crypto_rng { struct crypto_tfm base; }; struct crypto_sync_skcipher; struct aead_geniv_ctx { spinlock_t lock; struct crypto_aead *child; struct crypto_sync_skcipher *sknull; u8 salt[0]; }; enum { SKCIPHER_WALK_PHYS = 1, SKCIPHER_WALK_SLOW = 2, SKCIPHER_WALK_COPY = 4, SKCIPHER_WALK_DIFF = 8, SKCIPHER_WALK_SLEEP = 16, }; struct skcipher_walk_buffer { struct list_head entry; struct scatter_walk dst; unsigned int len; u8 *data; u8 buffer[0]; }; struct crypto_sync_skcipher { struct crypto_skcipher base; }; struct skcipher_instance { void (*free)(struct skcipher_instance *); union { struct { char head[64]; struct crypto_instance base; } s; struct skcipher_alg alg; }; }; struct crypto_istat_cipher { atomic64_t encrypt_cnt; atomic64_t encrypt_tlen; atomic64_t decrypt_cnt; atomic64_t decrypt_tlen; atomic64_t err_cnt; }; struct crypto_cipher_spawn { struct crypto_spawn base; }; struct skcipher_ctx_simple { struct crypto_cipher *cipher; }; struct crypto_skcipher_spawn { struct crypto_spawn base; }; struct ahash_instance { void (*free)(struct ahash_instance *); union { struct { char head[96]; struct crypto_instance base; } s; struct ahash_alg alg; }; }; struct crypto_hash_walk { char *data; unsigned int offset; unsigned int alignmask; 
struct page *pg; unsigned int entrylen; unsigned int total; struct scatterlist *sg; unsigned int flags; }; struct crypto_ahash_spawn { struct crypto_spawn base; }; struct shash_instance { void (*free)(struct shash_instance *); union { struct { char head[120]; struct crypto_instance base; } s; struct shash_alg alg; }; }; struct crypto_istat_hash { atomic64_t hash_cnt; atomic64_t hash_tlen; atomic64_t err_cnt; }; struct crypto_shash_spawn { struct crypto_spawn base; }; struct akcipher_instance { void (*free)(struct akcipher_instance *); union { struct { char head[72]; struct crypto_instance base; } s; struct akcipher_alg alg; }; }; struct crypto_istat_akcipher { atomic64_t encrypt_cnt; atomic64_t encrypt_tlen; atomic64_t decrypt_cnt; atomic64_t decrypt_tlen; atomic64_t verify_cnt; atomic64_t sign_cnt; atomic64_t err_cnt; }; struct crypto_akcipher_sync_data { struct crypto_akcipher *tfm; const void *src; void *dst; unsigned int slen; unsigned int dlen; struct akcipher_request *req; struct crypto_wait cwait; struct scatterlist sg; u8 *buf; }; struct crypto_akcipher_spawn { struct crypto_spawn base; }; struct crypto_sig { struct crypto_tfm base; }; struct kpp_instance { void (*free)(struct kpp_instance *); union { struct { char head[48]; struct crypto_instance base; } s; struct kpp_alg alg; }; }; struct crypto_istat_kpp { atomic64_t setsecret_cnt; atomic64_t generate_public_key_cnt; atomic64_t compute_shared_secret_cnt; atomic64_t err_cnt; }; struct crypto_kpp_spawn { struct crypto_spawn base; }; typedef int (*asn1_action_t)(void *, size_t, unsigned char, const void *, size_t); struct asn1_decoder { const unsigned char *machine; size_t machlen; const asn1_action_t *actions; }; struct gcry_mpi; typedef struct gcry_mpi *MPI; struct rsa_mpi_key { MPI n; MPI e; MPI d; MPI p; MPI q; MPI dp; MPI dq; MPI qinv; }; typedef unsigned long mpi_limb_t; struct gcry_mpi { int alloced; int nlimbs; int nbits; int sign; unsigned int flags; mpi_limb_t *d; }; struct rsa_key { const u8 *n; const u8 *e; const u8 *d; const u8 *p; const u8 *q; const u8 *dp; const u8 *dq; const u8 *qinv; size_t n_sz; size_t e_sz; size_t d_sz; size_t p_sz; size_t q_sz; size_t dp_sz; size_t dq_sz; size_t qinv_sz; }; struct rsa_asn1_template { const char *name; const u8 *data; size_t size; }; struct pkcs1pad_inst_ctx { struct crypto_akcipher_spawn spawn; const struct rsa_asn1_template *digest_info; }; struct pkcs1pad_ctx { struct crypto_akcipher *child; unsigned int key_size; }; struct pkcs1pad_request { struct scatterlist in_sg[2]; struct scatterlist out_sg[1]; uint8_t *in_buf; uint8_t *out_buf; struct akcipher_request child_req; }; struct acomp_req; struct crypto_acomp { int (*compress)(struct acomp_req *); int (*decompress)(struct acomp_req *); void (*dst_free)(struct scatterlist *); unsigned int reqsize; struct crypto_tfm base; }; struct acomp_req { struct crypto_async_request base; struct scatterlist *src; struct scatterlist *dst; unsigned int slen; unsigned int dlen; u32 flags; void *__ctx[0]; }; struct comp_alg_common { struct crypto_alg base; }; struct acomp_alg { int (*compress)(struct acomp_req *); int (*decompress)(struct acomp_req *); void (*dst_free)(struct scatterlist *); int (*init)(struct crypto_acomp *); void (*exit)(struct crypto_acomp *); unsigned int reqsize; union { struct { struct crypto_alg base; }; struct comp_alg_common calg; }; }; struct crypto_istat_compress { atomic64_t compress_cnt; atomic64_t compress_tlen; atomic64_t decompress_cnt; atomic64_t decompress_tlen; atomic64_t err_cnt; }; struct scomp_scratch { 
spinlock_t lock; void *src; void *dst; }; struct crypto_scomp; struct scomp_alg { void * (*alloc_ctx)(struct crypto_scomp *); void (*free_ctx)(struct crypto_scomp *, void *); int (*compress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *); int (*decompress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *); union { struct { struct crypto_alg base; }; struct comp_alg_common calg; }; }; struct crypto_scomp { struct crypto_tfm base; }; struct cryptomgr_param { struct rtattr *tb[34]; struct { struct rtattr attr; struct crypto_attr_type data; } type; struct { struct rtattr attr; struct crypto_attr_alg data; } attrs[32]; char template[128]; struct crypto_larval *larval; u32 otype; u32 omask; }; struct cmac_desc_ctx { unsigned int len; u8 ctx[0]; }; struct cmac_tfm_ctx { struct crypto_cipher *child; u8 ctx[0]; }; struct hmac_ctx { struct crypto_shash *hash; }; typedef u32 u_int32_t; struct xcbc_tfm_ctx { struct crypto_cipher *child; u8 ctx[0]; }; struct xcbc_desc_ctx { unsigned int len; u8 ctx[0]; }; struct md5_state { u32 hash[4]; u32 block[16]; u64 byte_count; }; struct sha1_state; typedef void sha1_block_fn(struct sha1_state *, const u8 *, int); struct sha1_state { u32 state[5]; u64 count; u8 buffer[64]; }; struct sha3_state { u64 st[25]; unsigned int rsiz; unsigned int rsizw; unsigned int partial; u8 buf[144]; }; enum blake2b_lengths { BLAKE2B_BLOCK_SIZE = 128, BLAKE2B_HASH_SIZE = 64, BLAKE2B_KEY_SIZE = 64, BLAKE2B_160_HASH_SIZE = 20, BLAKE2B_256_HASH_SIZE = 32, BLAKE2B_384_HASH_SIZE = 48, BLAKE2B_512_HASH_SIZE = 64, }; enum blake2b_iv { BLAKE2B_IV0 = 7640891576956012808ULL, BLAKE2B_IV1 = 13503953896175478587ULL, BLAKE2B_IV2 = 4354685564936845355ULL, BLAKE2B_IV3 = 11912009170470909681ULL, BLAKE2B_IV4 = 5840696475078001361ULL, BLAKE2B_IV5 = 11170449401992604703ULL, BLAKE2B_IV6 = 2270897969802886507ULL, BLAKE2B_IV7 = 6620516959819538809ULL, }; struct blake2b_state { u64 h[8]; u64 t[2]; u64 f[2]; u8 buf[128]; unsigned int buflen; unsigned int outlen; }; typedef void (*blake2b_compress_t)(struct blake2b_state *, const u8 *, size_t, u32); struct blake2b_tfm_ctx { u8 key[64]; unsigned int keylen; }; struct crypto_cts_reqctx { struct scatterlist sg[2]; unsigned int offset; struct skcipher_request subreq; }; struct crypto_cts_ctx { struct crypto_skcipher *child; }; struct xts_instance_ctx { struct crypto_skcipher_spawn spawn; struct crypto_cipher_spawn tweak_spawn; }; typedef struct { __le64 b; __le64 a; } le128; struct xts_request_ctx { le128 t; struct scatterlist *tail; struct scatterlist sg[2]; struct skcipher_request subreq; }; struct xts_tfm_ctx { struct crypto_skcipher *child; struct crypto_cipher *tweak; }; struct crypto_rfc3686_ctx { struct crypto_skcipher *child; u8 nonce[4]; }; struct crypto_rfc3686_req_ctx { u8 iv[16]; struct skcipher_request subreq; }; struct hctr2_instance_ctx { struct crypto_cipher_spawn blockcipher_spawn; struct crypto_skcipher_spawn xctr_spawn; struct crypto_shash_spawn polyval_spawn; }; struct hctr2_tfm_ctx { struct crypto_cipher *blockcipher; struct crypto_skcipher *xctr; struct crypto_shash *polyval; u8 L[16]; int hashed_tweak_offset; }; struct hctr2_request_ctx { u8 first_block[16]; u8 xctr_iv[16]; struct scatterlist *bulk_part_dst; struct scatterlist *bulk_part_src; struct scatterlist sg_src[2]; struct scatterlist sg_dst[2]; union { struct shash_desc hash_desc; struct skcipher_request xctr_req; } u; }; struct sg_page_iter { struct scatterlist *sg; unsigned int sg_pgoffset; unsigned int __nents; int 
__pg_advance; }; struct sg_mapping_iter { struct page *page; void *addr; size_t length; size_t consumed; struct sg_page_iter piter; unsigned int __offset; unsigned int __remaining; unsigned int __flags; }; struct adiantum_instance_ctx { struct crypto_skcipher_spawn streamcipher_spawn; struct crypto_cipher_spawn blockcipher_spawn; struct crypto_shash_spawn hash_spawn; }; struct adiantum_request_ctx { union { u8 bytes[32]; __le32 words[8]; le128 bignum; } rbuf; bool enc; le128 header_hash; union { struct shash_desc hash_desc; struct skcipher_request streamcipher_req; } u; }; struct adiantum_tfm_ctx { struct crypto_skcipher *streamcipher; struct crypto_cipher *blockcipher; struct crypto_shash *hash; struct poly1305_core_key header_hash_key; }; struct nhpoly1305_state { struct poly1305_state poly_state; u8 buffer[16]; unsigned int buflen; unsigned int nh_remaining; __le64 nh_hash[4]; }; struct nhpoly1305_key { struct poly1305_core_key poly_key; u32 nh_key[268]; }; typedef void (*nh_t)(const u32 *, const u8 *, size_t, __le64 *); struct crypto_rfc4543_instance_ctx { struct crypto_aead_spawn aead; }; struct gcm_instance_ctx { struct crypto_skcipher_spawn ctr; struct crypto_ahash_spawn ghash; }; struct crypto_gcm_ghash_ctx { unsigned int cryptlen; struct scatterlist *src; int (*complete)(struct aead_request *, u32); }; struct crypto_gcm_req_priv_ctx { u8 iv[16]; u8 auth_tag[16]; u8 iauth_tag[16]; struct scatterlist src[3]; struct scatterlist dst[3]; struct scatterlist sg; struct crypto_gcm_ghash_ctx ghash_ctx; union { struct ahash_request ahreq; struct skcipher_request skreq; } u; }; struct crypto_gcm_ctx { struct crypto_skcipher *ctr; struct crypto_ahash *ghash; }; struct crypto_rfc4543_ctx { struct crypto_aead *child; struct crypto_sync_skcipher *null; u8 nonce[4]; }; struct crypto_rfc4106_ctx { struct crypto_aead *child; u8 nonce[4]; }; struct crypto_rfc4106_req_ctx { struct scatterlist src[3]; struct scatterlist dst[3]; struct aead_request subreq; }; struct crypto_rfc4543_req_ctx { struct aead_request subreq; }; struct ccm_instance_ctx { struct crypto_skcipher_spawn ctr; struct crypto_ahash_spawn mac; }; struct crypto_ccm_req_priv_ctx { u8 odata[16]; u8 idata[16]; u8 auth_tag[16]; u32 flags; struct scatterlist src[3]; struct scatterlist dst[3]; union { struct ahash_request ahreq; struct skcipher_request skreq; }; }; struct cbcmac_tfm_ctx { struct crypto_cipher *child; }; struct cbcmac_desc_ctx { unsigned int len; }; struct crypto_ccm_ctx { struct crypto_ahash *mac; struct crypto_skcipher *ctr; }; struct crypto_rfc4309_ctx { struct crypto_aead *child; u8 nonce[3]; }; struct crypto_rfc4309_req_ctx { struct scatterlist src[3]; struct scatterlist dst[3]; struct aead_request subreq; }; struct chachapoly_instance_ctx { struct crypto_skcipher_spawn chacha; struct crypto_ahash_spawn poly; unsigned int saltlen; }; struct chacha_req { u8 iv[16]; struct scatterlist src[1]; struct skcipher_request req; }; struct poly_req { u8 pad[16]; struct { __le64 assoclen; __le64 cryptlen; } tail; struct scatterlist src[1]; struct ahash_request req; }; struct chachapoly_req_ctx { struct scatterlist src[2]; struct scatterlist dst[2]; u8 key[32]; u8 tag[16]; unsigned int cryptlen; unsigned int assoclen; u32 flags; union { struct poly_req poly; struct chacha_req chacha; } u; }; struct chachapoly_ctx { struct crypto_skcipher *chacha; struct crypto_ahash *poly; unsigned int saltlen; u8 salt[0]; }; struct des_ctx { u32 expkey[32]; }; struct des3_ede_ctx { u32 expkey[96]; }; struct deflate_ctx { struct z_stream_s 
comp_stream; struct z_stream_s decomp_stream; }; struct chksum_desc_ctx { u32 crc; }; struct chksum_ctx { u32 key; }; enum { CRYPTO_AUTHENC_KEYA_UNSPEC = 0, CRYPTO_AUTHENC_KEYA_PARAM = 1, }; struct authenc_instance_ctx { struct crypto_ahash_spawn auth; struct crypto_skcipher_spawn enc; unsigned int reqoff; }; struct crypto_authenc_keys { const u8 *authkey; const u8 *enckey; unsigned int authkeylen; unsigned int enckeylen; }; struct crypto_authenc_key_param { __be32 enckeylen; }; struct crypto_authenc_ctx { struct crypto_ahash *auth; struct crypto_skcipher *enc; struct crypto_sync_skcipher *null; }; struct authenc_request_ctx { struct scatterlist src[2]; struct scatterlist dst[2]; char tail[0]; }; struct authenc_esn_instance_ctx { struct crypto_ahash_spawn auth; struct crypto_skcipher_spawn enc; }; struct crypto_authenc_esn_ctx { unsigned int reqoff; struct crypto_ahash *auth; struct crypto_skcipher *enc; struct crypto_sync_skcipher *null; }; struct authenc_esn_request_ctx { struct scatterlist src[2]; struct scatterlist dst[2]; char tail[0]; }; struct lzo_ctx { void *lzo_comp_mem; }; struct lzorle_ctx { void *lzorle_comp_mem; }; struct lz4_ctx { void *lz4_comp_mem; }; struct crypto_istat_rng { atomic64_t generate_cnt; atomic64_t generate_tlen; atomic64_t seed_cnt; atomic64_t err_cnt; }; struct prng_context { spinlock_t prng_lock; unsigned char rand_data[16]; unsigned char last_rand_data[16]; unsigned char DT[16]; unsigned char I[16]; unsigned char V[16]; u32 rand_data_valid; struct crypto_cipher *tfm; u32 flags; }; struct drbg_state; struct drbg_state_ops { int (*update)(struct drbg_state *, struct list_head *, int); int (*generate)(struct drbg_state *, unsigned char *, unsigned int, struct list_head *); int (*crypto_init)(struct drbg_state *); int (*crypto_fini)(struct drbg_state *); }; enum drbg_seed_state { DRBG_SEED_STATE_UNSEEDED = 0, DRBG_SEED_STATE_PARTIAL = 1, DRBG_SEED_STATE_FULL = 2, }; struct drbg_string { const unsigned char *buf; size_t len; struct list_head list; }; struct drbg_core; struct drbg_state { struct mutex drbg_mutex; unsigned char *V; unsigned char *Vbuf; unsigned char *C; unsigned char *Cbuf; size_t reseed_ctr; size_t reseed_threshold; unsigned char *scratchpad; unsigned char *scratchpadbuf; void *priv_data; struct crypto_skcipher *ctr_handle; struct skcipher_request *ctr_req; __u8 *outscratchpadbuf; __u8 *outscratchpad; struct crypto_wait ctr_wait; struct scatterlist sg_in; struct scatterlist sg_out; enum drbg_seed_state seeded; unsigned long last_seed_time; bool pr; bool fips_primed; unsigned char *prev; struct crypto_rng *jent; const struct drbg_state_ops *d_ops; const struct drbg_core *core; struct drbg_string test_data; }; typedef uint32_t drbg_flag_t; struct drbg_core { drbg_flag_t flags; __u8 statelen; __u8 blocklen_bytes; char cra_name[128]; char backend_cra_name[128]; }; enum drbg_prefixes { DRBG_PREFIX0 = 0, DRBG_PREFIX1 = 1, DRBG_PREFIX2 = 2, DRBG_PREFIX3 = 3, }; struct sdesc { struct shash_desc shash; char ctx[0]; }; typedef unsigned char u8___2; struct rand_data { void *hash_state; __u64 prev_time; __u64 last_delta; __s64 last_delta2; unsigned int osr; unsigned char *mem; unsigned int memlocation; unsigned int memblocks; unsigned int memblocksize; unsigned int memaccessloops; unsigned int rct_count; unsigned int apt_observations; unsigned int apt_count; unsigned int apt_base; unsigned int apt_base_set: 1; }; struct jitterentropy { spinlock_t jent_lock; struct rand_data *entropy_collector; struct crypto_shash *tfm; struct shash_desc *sdesc; }; struct 
gf128mul_4k { be128 t[256]; }; struct ghash_ctx { struct gf128mul_4k *gf128; }; struct ghash_desc_ctx___2 { u8 buffer[16]; u32 bytes; }; struct polyval_desc_ctx___2 { union { u8 buffer[16]; be128 buffer128; }; u32 bytes; }; struct polyval_tfm_ctx___2 { struct gf128mul_4k *gf128; }; typedef ZSTD_CCtx zstd_cctx; typedef ZSTD_DCtx zstd_dctx; struct zstd_ctx { zstd_cctx *cctx; zstd_dctx *dctx; void *cwksp; void *dwksp; }; struct ecc_point { u64 *x; u64 *y; u8 ndigits; }; struct ecc_curve { char *name; struct ecc_point g; u64 *p; u64 *n; u64 *a; u64 *b; }; typedef struct { u64 m_low; u64 m_high; } uint128_t; struct essiv_tfm_ctx { union { struct crypto_skcipher *skcipher; struct crypto_aead *aead; } u; struct crypto_cipher *essiv_cipher; struct crypto_shash *hash; int ivoffset; }; struct essiv_instance_ctx { union { struct crypto_skcipher_spawn skcipher_spawn; struct crypto_aead_spawn aead_spawn; } u; char essiv_cipher_name[128]; char shash_driver_name[128]; }; struct essiv_aead_request_ctx { struct scatterlist sg[4]; u8 *assoc; struct aead_request aead_req; }; struct ecdh { char *key; unsigned short key_size; }; struct ecdh_ctx { unsigned int curve_id; unsigned int ndigits; u64 private_key[8]; }; enum { CRYPTO_KPP_SECRET_TYPE_UNKNOWN = 0, CRYPTO_KPP_SECRET_TYPE_DH = 1, CRYPTO_KPP_SECRET_TYPE_ECDH = 2, }; struct kpp_secret { unsigned short type; unsigned short len; }; enum asymmetric_payload_bits { asym_crypto = 0, asym_subtype = 1, asym_key_ids = 2, asym_auth = 3, }; struct asymmetric_key_parser { struct list_head link; struct module *owner; const char *name; int (*parse)(struct key_preparsed_payload *); }; struct asymmetric_key_ids { void *id[3]; }; struct asymmetric_key_id { unsigned short len; unsigned char data[0]; }; struct public_key_signature; struct asymmetric_key_subtype { struct module *owner; const char *name; unsigned short name_len; void (*describe)(const struct key *, struct seq_file *); void (*destroy)(void *, void *); int (*query)(const struct kernel_pkey_params *, struct kernel_pkey_query *); int (*eds_op)(struct kernel_pkey_params *, const void *, void *); int (*verify_signature)(const struct key *, const struct public_key_signature *); }; struct public_key_signature { struct asymmetric_key_id *auth_ids[3]; u8 *s; u8 *digest; u32 s_size; u32 digest_size; const char *pkey_algo; const char *hash_algo; const char *encoding; }; struct public_key { void *key; u32 keylen; enum OID algo; void *params; u32 paramlen; bool key_is_private; const char *id_type; const char *pkey_algo; unsigned long key_eflags; }; enum asn1_tag { ASN1_EOC = 0, ASN1_BOOL = 1, ASN1_INT = 2, ASN1_BTS = 3, ASN1_OTS = 4, ASN1_NULL = 5, ASN1_OID = 6, ASN1_ODE = 7, ASN1_EXT = 8, ASN1_REAL = 9, ASN1_ENUM = 10, ASN1_EPDV = 11, ASN1_UTF8STR = 12, ASN1_RELOID = 13, ASN1_SEQ = 16, ASN1_SET = 17, ASN1_NUMSTR = 18, ASN1_PRNSTR = 19, ASN1_TEXSTR = 20, ASN1_VIDSTR = 21, ASN1_IA5STR = 22, ASN1_UNITIM = 23, ASN1_GENTIM = 24, ASN1_GRASTR = 25, ASN1_VISSTR = 26, ASN1_GENSTR = 27, ASN1_UNISTR = 28, ASN1_CHRSTR = 29, ASN1_BMPSTR = 30, ASN1_LONG_TAG = 31, }; struct x509_certificate { struct x509_certificate *next; struct x509_certificate *signer; struct public_key *pub; struct public_key_signature *sig; char *issuer; char *subject; struct asymmetric_key_id *id; struct asymmetric_key_id *skid; time64_t valid_from; time64_t valid_to; const void *tbs; unsigned int tbs_size; unsigned int raw_sig_size; const void *raw_sig; const void *raw_serial; unsigned int raw_serial_size; unsigned int raw_issuer_size; const void *raw_issuer; 
const void *raw_subject; unsigned int raw_subject_size; unsigned int raw_skid_size; const void *raw_skid; unsigned int index; bool seen; bool verified; bool self_signed; bool unsupported_sig; bool blacklisted; }; struct x509_parse_context { struct x509_certificate *cert; unsigned long data; const void *key; size_t key_size; const void *params; size_t params_size; enum OID key_algo; enum OID last_oid; enum OID sig_algo; u8 o_size; u8 cn_size; u8 email_size; u16 o_offset; u16 cn_offset; u16 email_offset; unsigned int raw_akid_size; const void *raw_akid; const void *akid_raw_issuer; unsigned int akid_raw_issuer_size; }; enum blacklist_hash_type { BLACKLIST_HASH_X509_TBS = 1, BLACKLIST_HASH_BINARY = 2, }; enum asn1_class { ASN1_UNIV = 0, ASN1_APPL = 1, ASN1_CONT = 2, ASN1_PRIV = 3, }; struct pkcs7_signed_info { struct pkcs7_signed_info *next; struct x509_certificate *signer; unsigned int index; bool unsupported_crypto; bool blacklisted; const void *msgdigest; unsigned int msgdigest_len; unsigned int authattrs_len; const void *authattrs; unsigned long aa_set; time64_t signing_time; struct public_key_signature *sig; }; struct pkcs7_parse_context { struct pkcs7_message *msg; struct pkcs7_signed_info *sinfo; struct pkcs7_signed_info **ppsinfo; struct x509_certificate *certs; struct x509_certificate **ppcerts; unsigned long data; enum OID last_oid; unsigned int x509_index; unsigned int sinfo_index; const void *raw_serial; unsigned int raw_serial_size; unsigned int raw_issuer_size; const void *raw_issuer; const void *raw_skid; unsigned int raw_skid_size; bool expect_skid; }; enum { DISK_EVENT_FLAG_POLL = 1, DISK_EVENT_FLAG_UEVENT = 2, DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE = 4, }; enum { DISK_EVENT_MEDIA_CHANGE = 1, DISK_EVENT_EJECT_REQUEST = 2, }; struct bdev_inode { struct block_device bdev; struct inode vfs_inode; }; enum { DIO_SHOULD_DIRTY = 1, DIO_IS_SYNC = 2, }; struct blkdev_dio { union { struct kiocb *iocb; struct task_struct *waiter; }; size_t size; atomic_t ref; unsigned int flags; long: 64; long: 64; long: 64; long: 64; long: 64; struct bio bio; long: 64; long: 64; long: 64; long: 64; }; enum rq_qos_id { RQ_QOS_WBT = 0, RQ_QOS_LATENCY = 1, RQ_QOS_COST = 2, }; struct rq_qos_ops; struct rq_qos { const struct rq_qos_ops *ops; struct gendisk *disk; enum rq_qos_id id; struct rq_qos *next; struct dentry *debugfs_dir; }; struct blk_mq_debugfs_attr; struct rq_qos_ops { void (*throttle)(struct rq_qos *, struct bio *); void (*track)(struct rq_qos *, struct request *, struct bio *); void (*merge)(struct rq_qos *, struct request *, struct bio *); void (*issue)(struct rq_qos *, struct request *); void (*requeue)(struct rq_qos *, struct request *); void (*done)(struct rq_qos *, struct request *); void (*done_bio)(struct rq_qos *, struct bio *); void (*cleanup)(struct rq_qos *, struct bio *); void (*queue_depth_changed)(struct rq_qos *); void (*exit)(struct rq_qos *); const struct blk_mq_debugfs_attr *debugfs_attrs; }; struct blk_mq_debugfs_attr { const char *name; umode_t mode; int (*show)(void *, struct seq_file *); ssize_t (*write)(void *, const char __attribute__((btf_type_tag("user"))) *, size_t, loff_t *); const struct seq_operations *seq_ops; }; struct blkg_iostat { u64 bytes[3]; u64 ios[3]; }; struct blkg_iostat_set { struct u64_stats_sync sync; struct blkcg_gq *blkg; struct llist_node lnode; int lqueued; struct blkg_iostat cur; struct blkg_iostat last; }; struct blkcg; struct blkg_policy_data; struct blkcg_gq { struct request_queue *q; struct list_head q_node; struct hlist_node blkcg_node; struct 
blkcg *blkcg; struct blkcg_gq *parent; struct percpu_ref refcnt; bool online; struct blkg_iostat_set __attribute__((btf_type_tag("percpu"))) *iostat_cpu; struct blkg_iostat_set iostat; struct blkg_policy_data *pd[6]; union { struct work_struct async_bio_work; struct work_struct free_work; }; atomic_t use_delay; atomic64_t delay_nsec; atomic64_t delay_start; u64 last_delay; int last_use; struct callback_head callback_head; u64 android_oem_data1; }; struct blkcg_policy_data; struct blkcg { struct cgroup_subsys_state css; spinlock_t lock; refcount_t online_pin; struct xarray blkg_tree; struct blkcg_gq __attribute__((btf_type_tag("rcu"))) *blkg_hint; struct hlist_head blkg_list; struct blkcg_policy_data *cpd[6]; struct list_head all_blkcgs_node; struct llist_head __attribute__((btf_type_tag("percpu"))) *lhead; struct list_head cgwb_list; u64 android_oem_data1; }; struct blkcg_policy_data { struct blkcg *blkcg; int plid; }; struct blkg_policy_data { struct blkcg_gq *blkg; int plid; bool online; }; struct biovec_slab { int nr_vecs; char *name; struct kmem_cache *slab; }; struct bio_slab { struct kmem_cache *slab; unsigned int slab_ref; unsigned int slab_size; char name[8]; }; struct elevator_type; struct elevator_queue { struct elevator_type *type; void *elevator_data; struct kobject kobj; struct mutex sysfs_lock; unsigned long flags; struct hlist_head hash[64]; }; enum elv_merge { ELEVATOR_NO_MERGE = 0, ELEVATOR_FRONT_MERGE = 1, ELEVATOR_BACK_MERGE = 2, ELEVATOR_DISCARD_MERGE = 3, }; typedef unsigned int blk_insert_t; struct blk_mq_alloc_data; struct elevator_mq_ops { int (*init_sched)(struct request_queue *, struct elevator_type *); void (*exit_sched)(struct elevator_queue *); int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int); void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); void (*depth_updated)(struct blk_mq_hw_ctx *); bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int); int (*request_merge)(struct request_queue *, struct request **, struct bio *); void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); void (*requests_merged)(struct request_queue *, struct request *, struct request *); void (*limit_depth)(blk_opf_t, struct blk_mq_alloc_data *); void (*prepare_request)(struct request *); void (*finish_request)(struct request *); void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, blk_insert_t); struct request * (*dispatch_request)(struct blk_mq_hw_ctx *); bool (*has_work)(struct blk_mq_hw_ctx *); void (*completed_request)(struct request *, u64); void (*requeue_request)(struct request *); struct request * (*former_request)(struct request_queue *, struct request *); struct request * (*next_request)(struct request_queue *, struct request *); void (*init_icq)(struct io_cq *); void (*exit_icq)(struct io_cq *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct elv_fs_entry; struct elevator_type { struct kmem_cache *icq_cache; struct elevator_mq_ops ops; size_t icq_size; size_t icq_align; struct elv_fs_entry *elevator_attrs; const char *elevator_name; const char *elevator_alias; const unsigned int elevator_features; struct module *elevator_owner; const struct blk_mq_debugfs_attr *queue_debugfs_attrs; const struct blk_mq_debugfs_attr *hctx_debugfs_attrs; char icq_cache_name[22]; struct list_head list; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct blk_mq_ctxs { 
struct kobject kobj; struct blk_mq_ctx __attribute__((btf_type_tag("percpu"))) *queue_ctx; }; typedef __u32 blk_mq_req_flags_t; struct blk_mq_alloc_data { struct request_queue *q; blk_mq_req_flags_t flags; unsigned int shallow_depth; blk_opf_t cmd_flags; req_flags_t rq_flags; unsigned int nr_tags; struct request **cached_rq; struct blk_mq_ctx *ctx; struct blk_mq_hw_ctx *hctx; }; struct elv_fs_entry { struct attribute attr; ssize_t (*show)(struct elevator_queue *, char *); ssize_t (*store)(struct elevator_queue *, const char *, size_t); }; enum { BLK_MQ_F_SHOULD_MERGE = 1, BLK_MQ_F_TAG_QUEUE_SHARED = 2, BLK_MQ_F_STACKING = 4, BLK_MQ_F_TAG_HCTX_SHARED = 8, BLK_MQ_F_BLOCKING = 32, BLK_MQ_F_NO_SCHED = 64, BLK_MQ_F_NO_SCHED_BY_DEFAULT = 128, BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, BLK_MQ_F_ALLOC_POLICY_BITS = 1, BLK_MQ_S_STOPPED = 0, BLK_MQ_S_TAG_ACTIVE = 1, BLK_MQ_S_SCHED_RESTART = 2, BLK_MQ_S_INACTIVE = 3, BLK_MQ_MAX_DEPTH = 10240, BLK_MQ_CPU_WORK_BATCH = 8, }; typedef void (*btf_trace_block_touch_buffer)(void *, struct buffer_head *); typedef void (*btf_trace_block_dirty_buffer)(void *, struct buffer_head *); typedef void (*btf_trace_block_rq_requeue)(void *, struct request *); typedef void (*btf_trace_block_rq_complete)(void *, struct request *, blk_status_t, unsigned int); typedef void (*btf_trace_block_rq_error)(void *, struct request *, blk_status_t, unsigned int); typedef void (*btf_trace_block_rq_insert)(void *, struct request *); typedef void (*btf_trace_block_rq_issue)(void *, struct request *); typedef void (*btf_trace_block_rq_merge)(void *, struct request *); typedef void (*btf_trace_block_io_start)(void *, struct request *); typedef void (*btf_trace_block_io_done)(void *, struct request *); typedef void (*btf_trace_block_bio_complete)(void *, struct request_queue *, struct bio *); typedef void (*btf_trace_block_bio_bounce)(void *, struct bio *); typedef void (*btf_trace_block_bio_backmerge)(void *, struct bio *); typedef void (*btf_trace_block_bio_frontmerge)(void *, struct bio *); typedef void (*btf_trace_block_bio_queue)(void *, struct bio *); typedef void (*btf_trace_block_getrq)(void *, struct bio *); typedef void (*btf_trace_block_plug)(void *, struct request_queue *); typedef void (*btf_trace_block_unplug)(void *, struct request_queue *, unsigned int, bool); typedef void (*btf_trace_block_split)(void *, struct bio *, unsigned int); typedef void (*btf_trace_block_bio_remap)(void *, struct bio *, dev_t, sector_t); typedef void (*btf_trace_block_rq_remap)(void *, struct request *, dev_t, sector_t); enum { BLK_MQ_REQ_NOWAIT = 1, BLK_MQ_REQ_RESERVED = 2, BLK_MQ_REQ_PM = 4, }; enum blkg_rwstat_type { BLKG_RWSTAT_READ = 0, BLKG_RWSTAT_WRITE = 1, BLKG_RWSTAT_SYNC = 2, BLKG_RWSTAT_ASYNC = 3, BLKG_RWSTAT_DISCARD = 4, BLKG_RWSTAT_NR = 5, BLKG_RWSTAT_TOTAL = 5, }; struct blk_plug_cb; typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); struct blk_plug_cb { struct list_head list; blk_plug_cb_fn callback; void *data; }; struct trace_event_raw_block_buffer { struct trace_entry ent; dev_t dev; sector_t sector; size_t size; char __data[0]; }; struct trace_event_raw_block_rq_requeue { struct trace_entry ent; dev_t dev; sector_t sector; unsigned int nr_sector; unsigned short ioprio; char rwbs[10]; u32 __data_loc_cmd; char __data[0]; }; struct trace_event_raw_block_rq_completion { struct trace_entry ent; dev_t dev; sector_t sector; unsigned int nr_sector; int error; unsigned short ioprio; char rwbs[10]; u32 __data_loc_cmd; char __data[0]; }; struct trace_event_raw_block_rq { struct 
trace_entry ent; dev_t dev; sector_t sector; unsigned int nr_sector; unsigned int bytes; unsigned short ioprio; char rwbs[10]; char comm[16]; u32 __data_loc_cmd; char __data[0]; }; struct trace_event_raw_block_bio_complete { struct trace_entry ent; dev_t dev; sector_t sector; unsigned int nr_sector; int error; char rwbs[10]; char __data[0]; }; struct trace_event_raw_block_bio { struct trace_entry ent; dev_t dev; sector_t sector; unsigned int nr_sector; char rwbs[10]; char comm[16]; char __data[0]; }; struct trace_event_raw_block_plug { struct trace_entry ent; char comm[16]; char __data[0]; }; struct trace_event_raw_block_unplug { struct trace_entry ent; int nr_rq; char comm[16]; char __data[0]; }; struct trace_event_raw_block_split { struct trace_entry ent; dev_t dev; sector_t sector; sector_t new_sector; char rwbs[10]; char comm[16]; char __data[0]; }; struct trace_event_raw_block_bio_remap { struct trace_entry ent; dev_t dev; sector_t sector; unsigned int nr_sector; dev_t old_dev; sector_t old_sector; char rwbs[10]; char __data[0]; }; struct trace_event_raw_block_rq_remap { struct trace_entry ent; dev_t dev; sector_t sector; unsigned int nr_sector; dev_t old_dev; sector_t old_sector; unsigned int nr_bios; char rwbs[10]; char __data[0]; }; struct throtl_service_queue { struct throtl_service_queue *parent_sq; struct list_head queued[2]; unsigned int nr_queued[2]; struct rb_root_cached pending_tree; unsigned int nr_pending; unsigned long first_pending_disptime; struct timer_list pending_timer; }; struct throtl_grp; struct throtl_qnode { struct list_head node; struct bio_list bios; struct throtl_grp *tg; }; struct blkg_rwstat { struct percpu_counter cpu_cnt[5]; atomic64_t aux_cnt[5]; }; struct throtl_grp { struct blkg_policy_data pd; struct rb_node rb_node; struct throtl_data *td; struct throtl_service_queue service_queue; struct throtl_qnode qnode_on_self[2]; struct throtl_qnode qnode_on_parent[2]; unsigned long disptime; unsigned int flags; bool has_rules_bps[2]; bool has_rules_iops[2]; uint64_t bps[4]; uint64_t bps_conf[4]; unsigned int iops[4]; unsigned int iops_conf[4]; uint64_t bytes_disp[2]; unsigned int io_disp[2]; unsigned long last_low_overflow_time[2]; uint64_t last_bytes_disp[2]; unsigned int last_io_disp[2]; long long carryover_bytes[2]; int carryover_ios[2]; unsigned long last_check_time; unsigned long latency_target; unsigned long latency_target_conf; unsigned long slice_start[2]; unsigned long slice_end[2]; unsigned long last_finish_time; unsigned long checked_last_finish_time; unsigned long avg_idletime; unsigned long idletime_threshold; unsigned long idletime_threshold_conf; unsigned int bio_cnt; unsigned int bad_bio_cnt; unsigned long bio_cnt_reset_time; struct blkg_rwstat stat_bytes; struct blkg_rwstat stat_ios; }; typedef struct blkcg_policy_data *blkcg_pol_alloc_cpd_fn(gfp_t); typedef void blkcg_pol_free_cpd_fn(struct blkcg_policy_data *); typedef struct blkg_policy_data *blkcg_pol_alloc_pd_fn(struct gendisk *, struct blkcg *, gfp_t); typedef void blkcg_pol_init_pd_fn(struct blkg_policy_data *); typedef void blkcg_pol_online_pd_fn(struct blkg_policy_data *); typedef void blkcg_pol_offline_pd_fn(struct blkg_policy_data *); typedef void blkcg_pol_free_pd_fn(struct blkg_policy_data *); typedef void blkcg_pol_reset_pd_stats_fn(struct blkg_policy_data *); typedef void blkcg_pol_stat_pd_fn(struct blkg_policy_data *, struct seq_file *); struct blkcg_policy { int plid; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; blkcg_pol_alloc_cpd_fn *cpd_alloc_fn; 
blkcg_pol_free_cpd_fn *cpd_free_fn; blkcg_pol_alloc_pd_fn *pd_alloc_fn; blkcg_pol_init_pd_fn *pd_init_fn; blkcg_pol_online_pd_fn *pd_online_fn; blkcg_pol_offline_pd_fn *pd_offline_fn; blkcg_pol_free_pd_fn *pd_free_fn; blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; blkcg_pol_stat_pd_fn *pd_stat_fn; }; struct trace_event_data_offsets_block_buffer {}; struct trace_event_data_offsets_block_rq_requeue { u32 cmd; }; struct trace_event_data_offsets_block_rq_completion { u32 cmd; }; struct trace_event_data_offsets_block_rq { u32 cmd; }; struct trace_event_data_offsets_block_bio_complete {}; struct trace_event_data_offsets_block_bio {}; struct trace_event_data_offsets_block_plug {}; struct trace_event_data_offsets_block_unplug {}; struct trace_event_data_offsets_block_split {}; struct trace_event_data_offsets_block_bio_remap {}; struct trace_event_data_offsets_block_rq_remap {}; struct queue_sysfs_entry { struct attribute attr; ssize_t (*show)(struct request_queue *, char *); ssize_t (*store)(struct request_queue *, const char *, size_t); }; enum { REQ_FSEQ_PREFLUSH = 1, REQ_FSEQ_DATA = 2, REQ_FSEQ_POSTFLUSH = 4, REQ_FSEQ_DONE = 8, REQ_FSEQ_ACTIONS = 7, FLUSH_PENDING_TIMEOUT = 1250, }; enum { BLK_MQ_NO_TAG = 4294967295, BLK_MQ_TAG_MIN = 1, BLK_MQ_TAG_MAX = 4294967294, }; enum hctx_type { HCTX_TYPE_DEFAULT = 0, HCTX_TYPE_READ = 1, HCTX_TYPE_POLL = 2, HCTX_MAX_TYPES = 3, }; enum { ICQ_EXITED = 4, ICQ_DESTROYED = 8, }; struct rq_map_data { struct page **pages; unsigned long offset; unsigned short page_order; unsigned short nr_entries; bool null_mapped; bool from_user; }; struct bio_map_data { bool is_our_pages: 1; bool is_null_mapped: 1; struct iov_iter iter; struct iovec iov[0]; }; enum bio_merge_status { BIO_MERGE_OK = 0, BIO_MERGE_NONE = 1, BIO_MERGE_FAILED = 2, }; struct req_iterator { struct bvec_iter iter; struct bio *bio; }; enum prep_dispatch { PREP_DISPATCH_OK = 0, PREP_DISPATCH_NO_TAG = 1, PREP_DISPATCH_NO_BUDGET = 2, }; struct blk_mq_qe_pair { struct list_head node; struct request_queue *q; struct elevator_type *type; }; typedef bool busy_tag_iter_fn(struct request *, void *); typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *); struct flush_busy_ctx_data { struct blk_mq_hw_ctx *hctx; struct list_head *list; }; struct dispatch_rq_data { struct blk_mq_hw_ctx *hctx; struct request *rq; }; struct blk_expired_data { bool has_timedout_rq; unsigned long next; unsigned long timeout_start; }; struct rq_iter_data { struct blk_mq_hw_ctx *hctx; bool has_rq; }; struct mq_inflight { struct block_device *part; unsigned int inflight[2]; }; struct blk_rq_wait { struct completion done; blk_status_t ret; }; enum { BLK_MQ_UNIQUE_TAG_BITS = 16, BLK_MQ_UNIQUE_TAG_MASK = 65535, }; struct sbq_wait { struct sbitmap_queue *sbq; struct wait_queue_entry wait; }; struct bt_iter_data { struct blk_mq_hw_ctx *hctx; struct request_queue *q; busy_tag_iter_fn *fn; void *data; bool reserved; }; struct bt_tags_iter_data { struct blk_mq_tags *tags; busy_tag_iter_fn *fn; void *data; unsigned int flags; }; struct blk_rq_stat; struct blk_stat_callback { struct list_head list; struct timer_list timer; struct blk_rq_stat __attribute__((btf_type_tag("percpu"))) *cpu_stat; int (*bucket_fn)(const struct request *); unsigned int buckets; struct blk_rq_stat *stat; void (*timer_fn)(struct blk_stat_callback *); void *data; struct callback_head rcu; }; struct blk_rq_stat { u64 mean; u64 min; u64 max; u32 nr_samples; u64 batch; }; struct blk_queue_stats { struct list_head callbacks; spinlock_t lock; int accounting; }; 
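/*
 * Illustrative sketch, not part of the generated BTF dump: the
 * btf_trace_block_* typedefs earlier in this dump mirror the argument
 * list that a BTF-enabled raw tracepoint ("tp_btf") program receives,
 * minus the leading void * probe cookie. Assuming libbpf's bpf_helpers.h
 * and bpf_tracing.h are available, a minimal consumer of
 * block_rq_complete could look like the following (program and variable
 * names are placeholders):
 *
 *	#include "vmlinux.h"
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_tracing.h>
 *
 *	char LICENSE[] SEC("license") = "GPL";
 *
 *	SEC("tp_btf/block_rq_complete")
 *	int BPF_PROG(on_block_rq_complete, struct request *rq,
 *		     blk_status_t error, unsigned int nr_bytes)
 *	{
 *		// Arguments follow btf_trace_block_rq_complete above.
 *		bpf_printk("rq complete: %u bytes, status %d",
 *			   nr_bytes, error);
 *		return 0;
 *	}
 */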
struct blk_mq_hw_ctx_sysfs_entry { struct attribute attr; ssize_t (*show)(struct blk_mq_hw_ctx *, char *); }; struct hd_geometry { unsigned char heads; unsigned char sectors; unsigned short cylinders; unsigned long start; }; struct pr_keys { u32 generation; u32 num_keys; u64 keys[0]; }; struct pr_held_reservation { u64 key; u32 generation; enum pr_type type; }; struct blkpg_partition { long long start; long long length; int pno; char devname[64]; char volname[64]; }; typedef u32 compat_caddr_t; struct blkpg_ioctl_arg { int op; int flags; int datalen; void __attribute__((btf_type_tag("user"))) *data; }; struct pr_preempt { __u64 old_key; __u64 new_key; __u32 type; __u32 flags; }; struct pr_clear { __u64 key; __u32 flags; __u32 __pad; }; struct pr_reservation { __u64 key; __u32 type; __u32 flags; }; struct pr_registration { __u64 old_key; __u64 new_key; __u32 flags; __u32 __pad; }; struct compat_hd_geometry { unsigned char heads; unsigned char sectors; unsigned short cylinders; u32 start; }; struct compat_blkpg_ioctl_arg { compat_int_t op; compat_int_t flags; compat_int_t datalen; compat_caddr_t data; }; struct badblocks { struct device *dev; int count; int unacked_exist; int shift; u64 *page; int changed; seqlock_t lock; sector_t sector; sector_t size; }; struct blk_major_name { struct blk_major_name *next; int major; char name[16]; void (*probe)(dev_t); }; enum { GENHD_FL_REMOVABLE = 1, GENHD_FL_HIDDEN = 2, GENHD_FL_NO_PART = 4, }; struct klist; struct klist_node; struct klist_iter { struct klist *i_klist; struct klist_node *i_cur; }; struct subsys_private; struct class_dev_iter { struct klist_iter ki; const struct device_type *type; struct subsys_private *sp; }; struct klist { spinlock_t k_lock; struct list_head k_list; void (*get)(struct klist_node *); void (*put)(struct klist_node *); }; struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; }; enum { IOPRIO_WHO_PROCESS = 1, IOPRIO_WHO_PGRP = 2, IOPRIO_WHO_USER = 3, }; struct parsed_partitions { struct gendisk *disk; char name[32]; struct { sector_t from; sector_t size; int flags; bool has_info; struct partition_meta_info info; } *parts; int next; int limit; bool access_beyond_eod; char *pp_buf; }; typedef struct { struct folio *v; } Sector; enum msdos_sys_ind { DOS_EXTENDED_PARTITION = 5, LINUX_EXTENDED_PARTITION = 133, WIN98_EXTENDED_PARTITION = 15, LINUX_DATA_PARTITION = 131, LINUX_LVM_PARTITION = 142, LINUX_RAID_PARTITION = 253, SOLARIS_X86_PARTITION = 130, NEW_SOLARIS_X86_PARTITION = 191, DM6_AUX1PARTITION = 81, DM6_AUX3PARTITION = 83, DM6_PARTITION = 84, EZD_PARTITION = 85, FREEBSD_PARTITION = 165, OPENBSD_PARTITION = 166, NETBSD_PARTITION = 169, BSDI_PARTITION = 183, MINIX_PARTITION = 129, UNIXWARE_PARTITION = 99, }; struct msdos_partition { u8 boot_ind; u8 head; u8 sector; u8 cyl; u8 sys_ind; u8 end_head; u8 end_sector; u8 end_cyl; __le32 start_sect; __le32 nr_sects; }; typedef struct { __u8 b[16]; } guid_t; typedef guid_t efi_guid_t; struct _gpt_header { __le64 signature; __le32 revision; __le32 header_size; __le32 header_crc32; __le32 reserved1; __le64 my_lba; __le64 alternate_lba; __le64 first_usable_lba; __le64 last_usable_lba; efi_guid_t disk_guid; __le64 partition_entry_lba; __le32 num_partition_entries; __le32 sizeof_partition_entry; __le32 partition_entry_array_crc32; } __attribute__((packed)); typedef struct _gpt_header gpt_header; struct _gpt_entry_attributes { u64 required_to_function: 1; u64 reserved: 47; u64 type_guid_specific: 16; }; typedef struct _gpt_entry_attributes gpt_entry_attributes; 
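/*
 * Illustrative sketch, not part of the generated BTF dump: gpt_header
 * above is the packed on-disk layout of the GPT header stored at LBA 1,
 * so it can be overlaid directly on a raw sector. Assuming the sector
 * has already been read into a buffer and the host is little-endian
 * (so the __le* fields compare directly), a minimal check against the
 * "EFI PART" signature (0x5452415020494645) might look like this; the
 * GPT_SIG macro and function name are placeholders:
 *
 *	#include <stdbool.h>
 *	#include <string.h>
 *
 *	#define GPT_SIG 0x5452415020494645ULL	// "EFI PART", little-endian
 *
 *	static bool gpt_signature_ok(const unsigned char *lba1)
 *	{
 *		gpt_header hdr;
 *
 *		// The struct is packed, so a byte copy reproduces the
 *		// on-disk field offsets exactly.
 *		memcpy(&hdr, lba1, sizeof(hdr));
 *		return hdr.signature == GPT_SIG;
 *	}
 */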
struct _gpt_entry { efi_guid_t partition_type_guid; efi_guid_t unique_partition_guid; __le64 starting_lba; __le64 ending_lba; gpt_entry_attributes attributes; __le16 partition_name[36]; }; typedef struct _gpt_entry gpt_entry; struct _gpt_mbr_record { u8 boot_indicator; u8 start_head; u8 start_sector; u8 start_track; u8 os_type; u8 end_head; u8 end_sector; u8 end_track; __le32 starting_lba; __le32 size_in_lba; }; typedef struct _gpt_mbr_record gpt_mbr_record; struct _legacy_mbr { u8 boot_code[440]; __le32 unique_mbr_signature; __le16 unknown; gpt_mbr_record partition_record[4]; __le16 signature; } __attribute__((packed)); typedef struct _legacy_mbr legacy_mbr; struct rq_wait; typedef bool acquire_inflight_cb_t(struct rq_wait *, void *); struct rq_qos_wait_data { struct wait_queue_entry wq; struct task_struct *task; struct rq_wait *rqw; acquire_inflight_cb_t *cb; void *private_data; bool got_token; }; struct rq_wait { wait_queue_head_t wait; atomic_t inflight; }; struct rq_depth { unsigned int max_depth; int scale_step; bool scaled_max; unsigned int queue_depth; unsigned int default_depth; }; typedef void cleanup_cb_t(struct rq_wait *, void *); struct disk_events { struct list_head node; struct gendisk *disk; spinlock_t lock; struct mutex block_mutex; int block; unsigned int pending; unsigned int clearing; long poll_msecs; struct delayed_work dwork; }; struct blk_ia_range_sysfs_entry { struct attribute attr; ssize_t (*show)(struct blk_independent_access_range *, char *); }; struct uuidcmp { const char *uuid; int len; }; struct sg_io_v4; typedef int bsg_sg_io_fn(struct request_queue *, struct sg_io_v4 *, bool, unsigned int); struct bsg_device { struct request_queue *queue; struct device device; struct cdev cdev; int max_queue; unsigned int timeout; unsigned int reserved_size; bsg_sg_io_fn *sg_io_fn; }; struct sg_io_v4 { __s32 guard; __u32 protocol; __u32 subprotocol; __u32 request_len; __u64 request; __u64 request_tag; __u32 request_attr; __u32 request_priority; __u32 request_extra; __u32 max_response_len; __u64 response; __u32 dout_iovec_count; __u32 dout_xfer_len; __u32 din_iovec_count; __u32 din_xfer_len; __u64 dout_xferp; __u64 din_xferp; __u32 timeout; __u32 flags; __u64 usr_ptr; __u32 spare_in; __u32 driver_status; __u32 transport_status; __u32 device_status; __u32 retry_delay; __u32 info; __u32 duration; __u32 response_len; __s32 din_resid; __s32 dout_resid; __u64 generated_tag; __u32 spare_out; __u32 padding; }; struct bsg_job; typedef int bsg_job_fn(struct bsg_job *); typedef enum blk_eh_timer_return bsg_timeout_fn(struct request *); struct bsg_set { struct blk_mq_tag_set tag_set; struct bsg_device *bd; bsg_job_fn *job_fn; bsg_timeout_fn *timeout_fn; }; struct bsg_buffer { unsigned int payload_len; int sg_cnt; struct scatterlist *sg_list; }; struct bsg_job { struct device *dev; struct kref kref; unsigned int timeout; void *request; void *reply; unsigned int request_len; unsigned int reply_len; struct bsg_buffer request_payload; struct bsg_buffer reply_payload; int result; unsigned int reply_payload_rcv_len; struct request *bidi_rq; struct bio *bidi_bio; void *dd_data; }; enum blkg_iostat_type { BLKG_IOSTAT_READ = 0, BLKG_IOSTAT_WRITE = 1, BLKG_IOSTAT_DISCARD = 2, BLKG_IOSTAT_NR = 3, }; struct blkg_conf_ctx { char *input; char *body; struct block_device *bdev; struct blkcg_gq *blkg; }; struct blkg_rwstat_sample { u64 cnt[5]; }; struct latency_bucket { unsigned long total_latency; int samples; }; struct avg_latency_bucket { unsigned long latency; bool valid; }; struct throtl_data { 
struct throtl_service_queue service_queue; struct request_queue *queue; unsigned int nr_queued[2]; unsigned int throtl_slice; struct work_struct dispatch_work; unsigned int limit_index; bool limit_valid[2]; unsigned long low_upgrade_time; unsigned long low_downgrade_time; unsigned int scale; struct latency_bucket tmp_buckets[18]; struct avg_latency_bucket avg_buckets[18]; struct latency_bucket __attribute__((btf_type_tag("percpu"))) *latency_buckets[2]; unsigned long last_calculate_time; unsigned long filtered_latency; bool track_bio_latency; }; enum tg_state_flags { THROTL_TG_PENDING = 1, THROTL_TG_WAS_EMPTY = 2, THROTL_TG_CANCELING = 4, }; enum { LIMIT_LOW = 0, LIMIT_MAX = 1, LIMIT_CNT = 2, }; enum prio_policy { POLICY_NO_CHANGE = 0, POLICY_PROMOTE_TO_RT = 1, POLICY_RESTRICT_TO_BE = 2, POLICY_ALL_TO_IDLE = 3, POLICY_NONE_TO_RT = 4, }; struct ioprio_blkcg { struct blkcg_policy_data cpd; enum prio_policy prio_policy; }; struct ioprio_blkg { struct blkg_policy_data pd; }; struct ioc_gq; struct ioc_now; typedef void (*btf_trace_iocost_iocg_activate)(void *, struct ioc_gq *, const char *, struct ioc_now *, u64, u64, u64); struct iocg_stat { u64 usage_us; u64 wait_us; u64 indebt_us; u64 indelay_us; }; struct ioc; struct iocg_pcpu_stat; struct ioc_gq { struct blkg_policy_data pd; struct ioc *ioc; u32 cfg_weight; u32 weight; u32 active; u32 inuse; u32 last_inuse; s64 saved_margin; sector_t cursor; atomic64_t vtime; atomic64_t done_vtime; u64 abs_vdebt; u64 delay; u64 delay_at; atomic64_t active_period; struct list_head active_list; u64 child_active_sum; u64 child_inuse_sum; u64 child_adjusted_sum; int hweight_gen; u32 hweight_active; u32 hweight_inuse; u32 hweight_donating; u32 hweight_after_donation; struct list_head walk_list; struct list_head surplus_list; struct wait_queue_head waitq; struct hrtimer waitq_timer; u64 activated_at; struct iocg_pcpu_stat __attribute__((btf_type_tag("percpu"))) *pcpu_stat; struct iocg_stat stat; struct iocg_stat last_stat; u64 last_stat_abs_vusage; u64 usage_delta_us; u64 wait_since; u64 indebt_since; u64 indelay_since; int level; struct ioc_gq *ancestors[0]; }; struct ioc_params { u32 qos[6]; u64 i_lcoefs[6]; u64 lcoefs[6]; u32 too_fast_vrate_pct; u32 too_slow_vrate_pct; }; struct ioc_margins { s64 min; s64 low; s64 target; }; enum ioc_running { IOC_IDLE = 0, IOC_RUNNING = 1, IOC_STOP = 2, }; struct ioc_pcpu_stat; struct ioc { struct rq_qos rqos; bool enabled; struct ioc_params params; struct ioc_margins margins; u32 period_us; u32 timer_slack_ns; u64 vrate_min; u64 vrate_max; spinlock_t lock; struct timer_list timer; struct list_head active_iocgs; struct ioc_pcpu_stat __attribute__((btf_type_tag("percpu"))) *pcpu_stat; enum ioc_running running; atomic64_t vtime_rate; u64 vtime_base_rate; s64 vtime_err; seqcount_spinlock_t period_seqcount; u64 period_at; u64 period_at_vtime; atomic64_t cur_period; int busy_level; bool weights_updated; atomic_t hweight_gen; u64 dfgv_period_at; u64 dfgv_period_rem; u64 dfgv_usage_us_sum; u64 autop_too_fast_at; u64 autop_too_slow_at; int autop_idx; bool user_qos_params: 1; bool user_cost_model: 1; }; struct ioc_missed { local_t nr_met; local_t nr_missed; u32 last_met; u32 last_missed; }; struct ioc_pcpu_stat { struct ioc_missed missed[2]; local64_t rq_wait_ns; u64 last_rq_wait_ns; }; struct iocg_pcpu_stat { local64_t abs_vusage; }; struct ioc_now { u64 now_ns; u64 now; u64 vnow; }; typedef void (*btf_trace_iocost_iocg_idle)(void *, struct ioc_gq *, const char *, struct ioc_now *, u64, u64, u64); typedef void 
(*btf_trace_iocost_inuse_shortage)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u32, u64, u64); typedef void (*btf_trace_iocost_inuse_transfer)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u32, u64, u64); typedef void (*btf_trace_iocost_inuse_adjust)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u32, u64, u64); typedef void (*btf_trace_iocost_ioc_vrate_adj)(void *, struct ioc *, u64, u32 *, u32, int, int); typedef void (*btf_trace_iocost_iocg_forgive_debt)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u64, u64, u64, u64); enum { MILLION = 1000000, MIN_PERIOD = 1000, MAX_PERIOD = 1000000, MARGIN_MIN_PCT = 10, MARGIN_LOW_PCT = 20, MARGIN_TARGET_PCT = 50, INUSE_ADJ_STEP_PCT = 25, TIMER_SLACK_PCT = 1, WEIGHT_ONE = 65536, }; enum { QOS_RPPM = 0, QOS_RLAT = 1, QOS_WPPM = 2, QOS_WLAT = 3, QOS_MIN = 4, QOS_MAX = 5, NR_QOS_PARAMS = 6, }; enum { QOS_ENABLE = 0, QOS_CTRL = 1, NR_QOS_CTRL_PARAMS = 2, }; enum { VTIME_PER_SEC_SHIFT = 37ULL, VTIME_PER_SEC = 137438953472ULL, VTIME_PER_USEC = 137438ULL, VTIME_PER_NSEC = 137ULL, VRATE_MIN_PPM = 10000ULL, VRATE_MAX_PPM = 100000000ULL, VRATE_MIN = 1374ULL, VRATE_CLAMP_ADJ_PCT = 4ULL, AUTOP_CYCLE_NSEC = 10000000000ULL, }; enum { AUTOP_INVALID = 0, AUTOP_HDD = 1, AUTOP_SSD_QD1 = 2, AUTOP_SSD_DFL = 3, AUTOP_SSD_FAST = 4, }; enum { RQ_WAIT_BUSY_PCT = 5, UNBUSY_THR_PCT = 75, MIN_DELAY_THR_PCT = 500, MAX_DELAY_THR_PCT = 25000, MIN_DELAY = 250, MAX_DELAY = 250000, DFGV_USAGE_PCT = 50, DFGV_PERIOD = 100000, MAX_LAGGING_PERIODS = 10, IOC_PAGE_SHIFT = 12, IOC_PAGE_SIZE = 4096, IOC_SECT_TO_PAGE_SHIFT = 3, LCOEF_RANDIO_PAGES = 4096, }; enum { I_LCOEF_RBPS = 0, I_LCOEF_RSEQIOPS = 1, I_LCOEF_RRANDIOPS = 2, I_LCOEF_WBPS = 3, I_LCOEF_WSEQIOPS = 4, I_LCOEF_WRANDIOPS = 5, NR_I_LCOEFS = 6, }; enum { LCOEF_RPAGE = 0, LCOEF_RSEQIO = 1, LCOEF_RRANDIO = 2, LCOEF_WPAGE = 3, LCOEF_WSEQIO = 4, LCOEF_WRANDIO = 5, NR_LCOEFS = 6, }; enum { COST_CTRL = 0, COST_MODEL = 1, NR_COST_CTRL_PARAMS = 2, }; struct trace_event_raw_iocost_iocg_state { struct trace_entry ent; u32 __data_loc_devname; u32 __data_loc_cgroup; u64 now; u64 vnow; u64 vrate; u64 last_period; u64 cur_period; u64 vtime; u32 weight; u32 inuse; u64 hweight_active; u64 hweight_inuse; char __data[0]; }; struct trace_event_raw_iocg_inuse_update { struct trace_entry ent; u32 __data_loc_devname; u32 __data_loc_cgroup; u64 now; u32 old_inuse; u32 new_inuse; u64 old_hweight_inuse; u64 new_hweight_inuse; char __data[0]; }; struct trace_event_raw_iocost_ioc_vrate_adj { struct trace_entry ent; u32 __data_loc_devname; u64 old_vrate; u64 new_vrate; int busy_level; u32 read_missed_ppm; u32 write_missed_ppm; u32 rq_wait_pct; int nr_lagging; int nr_shortages; char __data[0]; }; struct trace_event_raw_iocost_iocg_forgive_debt { struct trace_entry ent; u32 __data_loc_devname; u32 __data_loc_cgroup; u64 now; u64 vnow; u32 usage_pct; u64 old_debt; u64 new_debt; u64 old_delay; u64 new_delay; char __data[0]; }; struct ioc_cgrp { struct blkcg_policy_data cpd; unsigned int dfl_weight; }; struct iocg_wait { struct wait_queue_entry wait; struct bio *bio; u64 abs_cost; bool committed; }; struct trace_event_data_offsets_iocost_ioc_vrate_adj { u32 devname; }; struct trace_event_data_offsets_iocost_iocg_state { u32 devname; u32 cgroup; }; struct trace_event_data_offsets_iocg_inuse_update { u32 devname; u32 cgroup; }; struct trace_event_data_offsets_iocost_iocg_forgive_debt { u32 devname; u32 cgroup; }; struct iocg_wake_ctx { struct ioc_gq *iocg; u32 hw_inuse; s64 vbudget; }; enum dd_prio { 
DD_RT_PRIO = 0, DD_BE_PRIO = 1, DD_IDLE_PRIO = 2, DD_PRIO_MAX = 2, }; enum dd_data_dir { DD_READ = 0, DD_WRITE = 1, }; struct io_stats_per_prio { uint32_t inserted; uint32_t merged; uint32_t dispatched; atomic_t completed; }; struct dd_per_prio { struct list_head dispatch; struct rb_root sort_list[2]; struct list_head fifo_list[2]; sector_t latest_pos[2]; struct io_stats_per_prio stats; }; struct deadline_data { struct dd_per_prio per_prio[3]; enum dd_data_dir last_dir; unsigned int batching; unsigned int starved; int fifo_expire[2]; int fifo_batch; int writes_starved; int front_merges; u32 async_depth; int prio_aging_expire; spinlock_t lock; spinlock_t zone_lock; }; typedef void (*btf_trace_kyber_latency)(void *, dev_t, const char *, const char *, unsigned int, unsigned int, unsigned int, unsigned int); typedef void (*btf_trace_kyber_adjust)(void *, dev_t, const char *, unsigned int); typedef void (*btf_trace_kyber_throttled)(void *, dev_t, const char *); enum { KYBER_READ = 0, KYBER_WRITE = 1, KYBER_DISCARD = 2, KYBER_OTHER = 3, KYBER_NUM_DOMAINS = 4, }; enum { KYBER_TOTAL_LATENCY = 0, KYBER_IO_LATENCY = 1, }; enum { KYBER_LATENCY_SHIFT = 2, KYBER_GOOD_BUCKETS = 4, KYBER_LATENCY_BUCKETS = 8, }; enum { KYBER_ASYNC_PERCENT = 75, }; struct trace_event_raw_kyber_latency { struct trace_entry ent; dev_t dev; char domain[16]; char type[8]; u8 percentile; u8 numerator; u8 denominator; unsigned int samples; char __data[0]; }; struct trace_event_raw_kyber_adjust { struct trace_entry ent; dev_t dev; char domain[16]; unsigned int depth; char __data[0]; }; struct trace_event_raw_kyber_throttled { struct trace_entry ent; dev_t dev; char domain[16]; char __data[0]; }; struct kyber_cpu_latency { atomic_t buckets[48]; }; struct kyber_queue_data { struct request_queue *q; dev_t dev; struct sbitmap_queue domain_tokens[4]; unsigned int async_depth; struct kyber_cpu_latency __attribute__((btf_type_tag("percpu"))) *cpu_latency; struct timer_list timer; unsigned int latency_buckets[48]; unsigned long latency_timeout[3]; int domain_p99[3]; u64 latency_targets[3]; }; struct kyber_ctx_queue { spinlock_t lock; struct list_head rq_list[4]; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct kyber_hctx_data { spinlock_t lock; struct list_head rqs[4]; unsigned int cur_domain; unsigned int batching; struct kyber_ctx_queue *kcqs; struct sbitmap kcq_map[4]; struct sbq_wait domain_wait[4]; struct sbq_wait_state *domain_ws[4]; atomic_t wait_index[4]; }; struct flush_kcq_data { struct kyber_hctx_data *khd; unsigned int sched_domain; struct list_head *list; }; struct trace_event_data_offsets_kyber_latency {}; struct trace_event_data_offsets_kyber_adjust {}; struct trace_event_data_offsets_kyber_throttled {}; enum bfqq_state_flags { BFQQF_just_created = 0, BFQQF_busy = 1, BFQQF_wait_request = 2, BFQQF_non_blocking_wait_rq = 3, BFQQF_fifo_expire = 4, BFQQF_has_short_ttime = 5, BFQQF_sync = 6, BFQQF_IO_bound = 7, BFQQF_in_large_burst = 8, BFQQF_softrt_update = 9, BFQQF_coop = 10, BFQQF_split_coop = 11, }; enum bfqq_expiration { BFQQE_TOO_IDLE = 0, BFQQE_BUDGET_TIMEOUT = 1, BFQQE_BUDGET_EXHAUSTED = 2, BFQQE_NO_MORE_REQUESTS = 3, BFQQE_PREEMPTED = 4, }; struct bfq_weight_counter { unsigned int weight; unsigned int num_active; struct rb_node weights_node; }; struct bfq_sched_data; struct bfq_queue; struct bfq_entity { struct rb_node rb_node; bool on_st_or_in_serv; u64 start; u64 finish; struct rb_root *tree; u64 min_start; int service; int budget; int allocated; int dev_weight; int weight; int 
new_weight; int orig_weight; struct bfq_entity *parent; struct bfq_sched_data *my_sched_data; struct bfq_sched_data *sched_data; int prio_changed; bool in_groups_with_pending_reqs; struct bfq_queue *last_bfqq_created; }; struct bfq_ttime { u64 last_end_request; u64 ttime_total; unsigned long ttime_samples; u64 ttime_mean; }; struct bfq_data; struct bfq_io_cq; struct bfq_queue { int ref; int stable_ref; struct bfq_data *bfqd; unsigned short ioprio; unsigned short ioprio_class; unsigned short new_ioprio; unsigned short new_ioprio_class; u64 last_serv_time_ns; unsigned int inject_limit; unsigned long decrease_time_jif; struct bfq_queue *new_bfqq; struct rb_node pos_node; struct rb_root *pos_root; struct rb_root sort_list; struct request *next_rq; int queued[2]; int meta_pending; struct list_head fifo; struct bfq_entity entity; struct bfq_weight_counter *weight_counter; int max_budget; unsigned long budget_timeout; int dispatched; unsigned long flags; struct list_head bfqq_list; struct bfq_ttime ttime; u64 io_start_time; u64 tot_idle_time; u32 seek_history; struct hlist_node burst_list_node; sector_t last_request_pos; unsigned int requests_within_timer; pid_t pid; struct bfq_io_cq *bic; unsigned long wr_cur_max_time; unsigned long soft_rt_next_start; unsigned long last_wr_start_finish; unsigned int wr_coeff; unsigned long last_idle_bklogged; unsigned long service_from_backlogged; unsigned long service_from_wr; unsigned long wr_start_at_switch_to_srt; unsigned long split_time; unsigned long first_IO_time; unsigned long creation_time; struct bfq_queue *waker_bfqq; struct bfq_queue *tentative_waker_bfqq; unsigned int num_waker_detections; u64 waker_detection_started; struct hlist_node woken_list_node; struct hlist_head woken_list; unsigned int actuator_idx; }; struct bfq_group; struct bfq_data { struct request_queue *queue; struct list_head dispatch; struct bfq_group *root_group; struct rb_root_cached queue_weights_tree; unsigned int num_groups_with_pending_reqs; unsigned int busy_queues[3]; int wr_busy_queues; int queued; int tot_rq_in_driver; int rq_in_driver[8]; bool nonrot_with_queueing; int max_rq_in_driver; int hw_tag_samples; int hw_tag; int budgets_assigned; struct hrtimer idle_slice_timer; struct bfq_queue *in_service_queue; sector_t last_position; sector_t in_serv_last_pos; u64 last_completion; struct bfq_queue *last_completed_rq_bfqq; struct bfq_queue *last_bfqq_created; u64 last_empty_occupied_ns; bool wait_dispatch; struct request *waited_rq; bool rqs_injected; u64 first_dispatch; u64 last_dispatch; ktime_t last_budget_start; ktime_t last_idling_start; unsigned long last_idling_start_jiffies; int peak_rate_samples; u32 sequential_samples; u64 tot_sectors_dispatched; u32 last_rq_max_size; u64 delta_from_first; u32 peak_rate; int bfq_max_budget; struct list_head active_list[8]; struct list_head idle_list; u64 bfq_fifo_expire[2]; unsigned int bfq_back_penalty; unsigned int bfq_back_max; u32 bfq_slice_idle; int bfq_user_max_budget; unsigned int bfq_timeout; bool strict_guarantees; unsigned long last_ins_in_burst; unsigned long bfq_burst_interval; int burst_size; struct bfq_entity *burst_parent_entity; unsigned long bfq_large_burst_thresh; bool large_burst; struct hlist_head burst_list; bool low_latency; unsigned int bfq_wr_coeff; unsigned int bfq_wr_rt_max_time; unsigned int bfq_wr_min_idle_time; unsigned long bfq_wr_min_inter_arr_async; unsigned int bfq_wr_max_softrt_rate; u64 rate_dur_prod; struct bfq_queue oom_bfqq; spinlock_t lock; struct bfq_io_cq *bio_bic; struct bfq_queue 
*bio_bfqq; unsigned int word_depths[4]; unsigned int full_depth_shift; unsigned int num_actuators; sector_t sector[8]; sector_t nr_sectors[8]; struct blk_independent_access_range ia_ranges[8]; unsigned int actuator_load_threshold; }; struct bfq_service_tree { struct rb_root active; struct rb_root idle; struct bfq_entity *first_idle; struct bfq_entity *last_idle; u64 vtime; unsigned long wsum; }; struct bfq_sched_data { struct bfq_entity *in_service_entity; struct bfq_entity *next_in_service; struct bfq_service_tree service_tree[3]; unsigned long bfq_class_idle_last_service; }; struct bfqg_stats { struct blkg_rwstat bytes; struct blkg_rwstat ios; }; struct bfq_group { struct blkg_policy_data pd; char blkg_path[128]; refcount_t ref; struct bfq_entity entity; struct bfq_sched_data sched_data; struct bfq_data *bfqd; struct bfq_queue *async_bfqq[128]; struct bfq_queue *async_idle_bfqq[8]; struct bfq_entity *my_entity; int active_entities; int num_queues_with_pending_reqs; struct rb_root rq_pos_tree; struct bfqg_stats stats; }; struct bfq_iocq_bfqq_data { bool saved_has_short_ttime; bool saved_IO_bound; u64 saved_io_start_time; u64 saved_tot_idle_time; bool saved_in_large_burst; bool was_in_burst_list; unsigned int saved_weight; unsigned long saved_wr_coeff; unsigned long saved_last_wr_start_finish; unsigned long saved_service_from_wr; unsigned long saved_wr_start_at_switch_to_srt; unsigned int saved_wr_cur_max_time; struct bfq_ttime saved_ttime; u64 saved_last_serv_time_ns; unsigned int saved_inject_limit; unsigned long saved_decrease_time_jif; struct bfq_queue *stable_merge_bfqq; bool stably_merged; }; struct bfq_io_cq { struct io_cq icq; struct bfq_queue *bfqq[16]; int ioprio; uint64_t blkcg_serial_nr; struct bfq_iocq_bfqq_data bfqq_data[8]; unsigned int requests; }; struct bfq_group_data { struct blkcg_policy_data pd; unsigned int weight; }; enum blk_zone_report_flags { BLK_ZONE_REP_CAPACITY = 1, }; struct blk_zone_report { __u64 sector; __u32 nr_zones; __u32 flags; struct blk_zone zones[0]; }; struct zone_report_args { struct blk_zone __attribute__((btf_type_tag("user"))) *zones; }; struct blk_zone_range { __u64 sector; __u64 nr_sectors; }; struct blk_revalidate_zone_args { struct gendisk *disk; unsigned long *conv_zones_bitmap; unsigned long *seq_zones_wlock; unsigned int nr_zones; sector_t zone_sectors; sector_t sector; }; typedef void (*btf_trace_wbt_stat)(void *, struct backing_dev_info *, struct blk_rq_stat *); typedef void (*btf_trace_wbt_lat)(void *, struct backing_dev_info *, unsigned long); typedef void (*btf_trace_wbt_step)(void *, struct backing_dev_info *, const char *, int, unsigned long, unsigned int, unsigned int, unsigned int); typedef void (*btf_trace_wbt_timer)(void *, struct backing_dev_info *, unsigned int, int, unsigned int); enum { WBT_STATE_ON_DEFAULT = 1, WBT_STATE_ON_MANUAL = 2, WBT_STATE_OFF_DEFAULT = 3, WBT_STATE_OFF_MANUAL = 4, }; enum { WBT_RWQ_BG = 0, WBT_RWQ_KSWAPD = 1, WBT_RWQ_DISCARD = 2, WBT_NUM_RWQ = 3, }; enum { RWB_DEF_DEPTH = 16, RWB_WINDOW_NSEC = 100000000, RWB_MIN_WRITE_SAMPLES = 3, RWB_UNKNOWN_BUMP = 5, }; enum { LAT_OK = 1, LAT_UNKNOWN = 2, LAT_UNKNOWN_WRITES = 3, LAT_EXCEEDED = 4, }; enum wbt_flags { WBT_TRACKED = 1, WBT_READ = 2, WBT_KSWAPD = 4, WBT_DISCARD = 8, WBT_NR_BITS = 4, }; struct trace_event_raw_wbt_stat { struct trace_entry ent; char name[32]; s64 rmean; u64 rmin; u64 rmax; s64 rnr_samples; s64 rtime; s64 wmean; u64 wmin; u64 wmax; s64 wnr_samples; s64 wtime; char __data[0]; }; struct trace_event_raw_wbt_lat { struct trace_entry ent; 
char name[32]; unsigned long lat; char __data[0]; }; struct trace_event_raw_wbt_step { struct trace_entry ent; char name[32]; const char *msg; int step; unsigned long window; unsigned int bg; unsigned int normal; unsigned int max; char __data[0]; }; struct trace_event_raw_wbt_timer { struct trace_entry ent; char name[32]; unsigned int status; int step; unsigned int inflight; char __data[0]; }; struct rq_wb { unsigned int wb_background; unsigned int wb_normal; short enable_state; unsigned int unknown_cnt; u64 win_nsec; u64 cur_win_nsec; struct blk_stat_callback *cb; u64 sync_issue; void *sync_cookie; unsigned int wc; unsigned long last_issue; unsigned long last_comp; unsigned long min_lat_nsec; struct rq_qos rqos; struct rq_wait rq_wait[3]; struct rq_depth rq_depth; }; struct wbt_wait_data { struct rq_wb *rwb; enum wbt_flags wb_acct; blk_opf_t opf; }; struct trace_event_data_offsets_wbt_stat {}; struct trace_event_data_offsets_wbt_lat {}; struct trace_event_data_offsets_wbt_step {}; struct trace_event_data_offsets_wbt_timer {}; struct show_busy_params { struct seq_file *m; struct blk_mq_hw_ctx *hctx; }; struct blk_crypto_mode { const char *name; const char *cipher_str; unsigned int keysize; unsigned int security_strength; unsigned int ivsize; }; struct blk_crypto_ll_ops { int (*keyslot_program)(struct blk_crypto_profile *, const struct blk_crypto_key *, unsigned int); int (*keyslot_evict)(struct blk_crypto_profile *, const struct blk_crypto_key *, unsigned int); int (*derive_sw_secret)(struct blk_crypto_profile *, const u8 *, size_t, u8 *); }; struct blk_crypto_profile { struct blk_crypto_ll_ops ll_ops; unsigned int max_dun_bytes_supported; unsigned int key_types_supported; unsigned int modes_supported[5]; struct device *dev; unsigned int num_slots; struct rw_semaphore lock; struct lock_class_key lockdep_key; wait_queue_head_t idle_slots_wait_queue; struct list_head idle_slots; spinlock_t idle_slots_lock; struct hlist_head *slot_hashtable; unsigned int log_slot_ht_size; struct blk_crypto_keyslot *slots; }; struct blk_crypto_keyslot { atomic_t slot_refs; struct list_head idle_slot_node; struct hlist_node hash_node; const struct blk_crypto_key *key; struct blk_crypto_profile *profile; }; struct blk_crypto_attr { struct attribute attr; ssize_t (*show)(struct blk_crypto_profile *, struct blk_crypto_attr *, char *); }; struct blk_crypto_kobj { struct kobject kobj; struct blk_crypto_profile *profile; }; struct blk_crypto_fallback_keyslot { enum blk_crypto_mode_num crypto_mode; struct crypto_skcipher *tfms[5]; }; struct bio_fallback_crypt_ctx { struct bio_crypt_ctx crypt_ctx; struct bvec_iter crypt_iter; union { struct { struct work_struct work; struct bio *bio; }; struct { void *bi_private_orig; bio_end_io_t *bi_end_io_orig; }; }; }; union blk_crypto_iv { __le64 dun[4]; u8 bytes[32]; }; struct bd_holder_disk { struct list_head list; struct kobject *holder_dir; int refcnt; }; struct io_cache_entry { struct io_wq_work_node node; }; struct io_rsrc_put { u64 tag; union { void *rsrc; struct file *file; struct io_mapped_ubuf *buf; }; }; struct io_rsrc_node { union { struct io_cache_entry cache; struct io_ring_ctx *ctx; }; int refs; bool empty; u16 type; struct list_head node; struct io_rsrc_put item; }; struct io_mapped_ubuf { u64 ubuf; u64 ubuf_end; unsigned int nr_bvecs; unsigned long acct_pages; struct bio_vec bvec[0]; }; struct io_buffer { struct list_head list; __u64 addr; __u32 len; __u16 bid; __u16 bgid; }; struct io_uring_buf_ring; struct io_buffer_list { union { struct list_head buf_list; 
struct { struct page **buf_pages; struct io_uring_buf_ring *buf_ring; }; struct callback_head rcu; }; __u16 bgid; __u16 buf_nr_pages; __u16 nr_entries; __u16 head; __u16 mask; atomic_t refs; __u8 is_mapped; __u8 is_mmap; }; struct io_uring_buf { __u64 addr; __u32 len; __u16 bid; __u16 resv; }; struct io_uring_buf_ring { union { struct { __u64 resv1; __u32 resv2; __u16 resv3; __u16 tail; }; struct { struct {} __empty_bufs; struct io_uring_buf bufs[0]; }; }; }; struct io_poll { struct file *file; struct wait_queue_head *head; __poll_t events; int retries; struct wait_queue_entry wait; }; struct async_poll { union { struct io_poll poll; struct io_cache_entry cache; }; struct io_poll *double_poll; }; struct io_sq_data { refcount_t refs; atomic_t park_pending; struct mutex lock; struct list_head ctx_list; struct task_struct *thread; struct wait_queue_head wait; unsigned int sq_thread_idle; int sq_cpu; pid_t task_pid; pid_t task_tgid; unsigned long state; struct completion exited; }; struct io_rsrc_data { struct io_ring_ctx *ctx; u64 **tags; unsigned int nr; u16 rsrc_type; bool quiesce; }; typedef void (*btf_trace_io_uring_create)(void *, int, void *, u32, u32, u32); typedef void (*btf_trace_io_uring_register)(void *, void *, unsigned int, unsigned int, unsigned int, long); typedef void (*btf_trace_io_uring_file_get)(void *, struct io_kiocb *, int); typedef void (*btf_trace_io_uring_queue_async_work)(void *, struct io_kiocb *, int); typedef void (*btf_trace_io_uring_defer)(void *, struct io_kiocb *); typedef void (*btf_trace_io_uring_link)(void *, struct io_kiocb *, struct io_kiocb *); typedef void (*btf_trace_io_uring_cqring_wait)(void *, void *, int); typedef void (*btf_trace_io_uring_fail_link)(void *, struct io_kiocb *, struct io_kiocb *); typedef void (*btf_trace_io_uring_complete)(void *, void *, void *, u64, int, unsigned int, u64, u64); typedef void (*btf_trace_io_uring_submit_req)(void *, struct io_kiocb *); typedef void (*btf_trace_io_uring_poll_arm)(void *, struct io_kiocb *, int, int); typedef void (*btf_trace_io_uring_task_add)(void *, struct io_kiocb *, int); typedef void (*btf_trace_io_uring_req_failed)(void *, const struct io_uring_sqe *, struct io_kiocb *, int); typedef void (*btf_trace_io_uring_cqe_overflow)(void *, void *, unsigned long long, s32, u32, void *); typedef void (*btf_trace_io_uring_task_work_run)(void *, void *, unsigned int, unsigned int); typedef void (*btf_trace_io_uring_short_write)(void *, void *, u64, u64, u64); typedef void (*btf_trace_io_uring_local_work_run)(void *, void *, int, unsigned int); struct creds; enum { REQ_F_FIXED_FILE = 1, REQ_F_IO_DRAIN = 2, REQ_F_LINK = 4, REQ_F_HARDLINK = 8, REQ_F_FORCE_ASYNC = 16, REQ_F_BUFFER_SELECT = 32, REQ_F_CQE_SKIP = 64, REQ_F_FAIL = 256, REQ_F_INFLIGHT = 512, REQ_F_CUR_POS = 1024, REQ_F_NOWAIT = 2048, REQ_F_LINK_TIMEOUT = 4096, REQ_F_NEED_CLEANUP = 8192, REQ_F_POLLED = 16384, REQ_F_BUFFER_SELECTED = 32768, REQ_F_BUFFER_RING = 65536, REQ_F_REISSUE = 131072, REQ_F_SUPPORT_NOWAIT = 536870912, REQ_F_ISREG = 1073741824, REQ_F_CREDS = 262144, REQ_F_REFCOUNT = 524288, REQ_F_ARM_LTIMEOUT = 1048576, REQ_F_ASYNC_DATA = 2097152, REQ_F_SKIP_LINK_CQES = 4194304, REQ_F_SINGLE_POLL = 8388608, REQ_F_DOUBLE_POLL = 16777216, REQ_F_PARTIAL_IO = 33554432, REQ_F_APOLL_MULTISHOT = 67108864, REQ_F_CLEAR_POLLIN = 134217728, REQ_F_HASH_LOCKED = 268435456, }; enum { IO_WQ_WORK_CANCEL = 1, IO_WQ_WORK_HASHED = 2, IO_WQ_WORK_UNBOUND = 4, IO_WQ_WORK_CONCURRENT = 16, IO_WQ_HASH_SHIFT = 24, }; enum { IO_CHECK_CQ_OVERFLOW_BIT = 0, 
IO_CHECK_CQ_DROPPED_BIT = 1, }; enum io_uring_cmd_flags { IO_URING_F_COMPLETE_DEFER = 1, IO_URING_F_UNLOCKED = 2, IO_URING_F_MULTISHOT = 4, IO_URING_F_IOWQ = 8, IO_URING_F_NONBLOCK = -2147483648, IO_URING_F_SQE128 = 256, IO_URING_F_CQE32 = 512, IO_URING_F_IOPOLL = 1024, }; enum { IO_APOLL_OK = 0, IO_APOLL_ABORTED = 1, IO_APOLL_READY = 2, }; enum { IO_EVENTFD_OP_SIGNAL_BIT = 0, IO_EVENTFD_OP_FREE_BIT = 1, }; enum { IORING_CQE_BUFFER_SHIFT = 16, }; enum { IOU_F_TWQ_LAZY_WAKE = 1, }; enum { IOU_OK = 0, IOU_ISSUE_SKIP_COMPLETE = -529, IOU_REQUEUE = -3072, IOU_STOP_MULTISHOT = -125, }; enum { REQ_F_FIXED_FILE_BIT = 0, REQ_F_IO_DRAIN_BIT = 1, REQ_F_LINK_BIT = 2, REQ_F_HARDLINK_BIT = 3, REQ_F_FORCE_ASYNC_BIT = 4, REQ_F_BUFFER_SELECT_BIT = 5, REQ_F_CQE_SKIP_BIT = 6, REQ_F_FAIL_BIT = 8, REQ_F_INFLIGHT_BIT = 9, REQ_F_CUR_POS_BIT = 10, REQ_F_NOWAIT_BIT = 11, REQ_F_LINK_TIMEOUT_BIT = 12, REQ_F_NEED_CLEANUP_BIT = 13, REQ_F_POLLED_BIT = 14, REQ_F_BUFFER_SELECTED_BIT = 15, REQ_F_BUFFER_RING_BIT = 16, REQ_F_REISSUE_BIT = 17, REQ_F_CREDS_BIT = 18, REQ_F_REFCOUNT_BIT = 19, REQ_F_ARM_LTIMEOUT_BIT = 20, REQ_F_ASYNC_DATA_BIT = 21, REQ_F_SKIP_LINK_CQES_BIT = 22, REQ_F_SINGLE_POLL_BIT = 23, REQ_F_DOUBLE_POLL_BIT = 24, REQ_F_PARTIAL_IO_BIT = 25, REQ_F_APOLL_MULTISHOT_BIT = 26, REQ_F_CLEAR_POLLIN_BIT = 27, REQ_F_HASH_LOCKED_BIT = 28, REQ_F_SUPPORT_NOWAIT_BIT = 29, REQ_F_ISREG_BIT = 30, __REQ_F_LAST_BIT = 31, }; enum io_uring_op { IORING_OP_NOP = 0, IORING_OP_READV = 1, IORING_OP_WRITEV = 2, IORING_OP_FSYNC = 3, IORING_OP_READ_FIXED = 4, IORING_OP_WRITE_FIXED = 5, IORING_OP_POLL_ADD = 6, IORING_OP_POLL_REMOVE = 7, IORING_OP_SYNC_FILE_RANGE = 8, IORING_OP_SENDMSG = 9, IORING_OP_RECVMSG = 10, IORING_OP_TIMEOUT = 11, IORING_OP_TIMEOUT_REMOVE = 12, IORING_OP_ACCEPT = 13, IORING_OP_ASYNC_CANCEL = 14, IORING_OP_LINK_TIMEOUT = 15, IORING_OP_CONNECT = 16, IORING_OP_FALLOCATE = 17, IORING_OP_OPENAT = 18, IORING_OP_CLOSE = 19, IORING_OP_FILES_UPDATE = 20, IORING_OP_STATX = 21, IORING_OP_READ = 22, IORING_OP_WRITE = 23, IORING_OP_FADVISE = 24, IORING_OP_MADVISE = 25, IORING_OP_SEND = 26, IORING_OP_RECV = 27, IORING_OP_OPENAT2 = 28, IORING_OP_EPOLL_CTL = 29, IORING_OP_SPLICE = 30, IORING_OP_PROVIDE_BUFFERS = 31, IORING_OP_REMOVE_BUFFERS = 32, IORING_OP_TEE = 33, IORING_OP_SHUTDOWN = 34, IORING_OP_RENAMEAT = 35, IORING_OP_UNLINKAT = 36, IORING_OP_MKDIRAT = 37, IORING_OP_SYMLINKAT = 38, IORING_OP_LINKAT = 39, IORING_OP_MSG_RING = 40, IORING_OP_FSETXATTR = 41, IORING_OP_SETXATTR = 42, IORING_OP_FGETXATTR = 43, IORING_OP_GETXATTR = 44, IORING_OP_SOCKET = 45, IORING_OP_URING_CMD = 46, IORING_OP_SEND_ZC = 47, IORING_OP_SENDMSG_ZC = 48, IORING_OP_LAST = 49, }; enum { IOSQE_FIXED_FILE_BIT = 0, IOSQE_IO_DRAIN_BIT = 1, IOSQE_IO_LINK_BIT = 2, IOSQE_IO_HARDLINK_BIT = 3, IOSQE_ASYNC_BIT = 4, IOSQE_BUFFER_SELECT_BIT = 5, IOSQE_CQE_SKIP_SUCCESS_BIT = 6, }; enum io_wq_cancel { IO_WQ_CANCEL_OK = 0, IO_WQ_CANCEL_RUNNING = 1, IO_WQ_CANCEL_NOTFOUND = 2, }; enum { IORING_REGISTER_BUFFERS = 0, IORING_UNREGISTER_BUFFERS = 1, IORING_REGISTER_FILES = 2, IORING_UNREGISTER_FILES = 3, IORING_REGISTER_EVENTFD = 4, IORING_UNREGISTER_EVENTFD = 5, IORING_REGISTER_FILES_UPDATE = 6, IORING_REGISTER_EVENTFD_ASYNC = 7, IORING_REGISTER_PROBE = 8, IORING_REGISTER_PERSONALITY = 9, IORING_UNREGISTER_PERSONALITY = 10, IORING_REGISTER_RESTRICTIONS = 11, IORING_REGISTER_ENABLE_RINGS = 12, IORING_REGISTER_FILES2 = 13, IORING_REGISTER_FILES_UPDATE2 = 14, IORING_REGISTER_BUFFERS2 = 15, IORING_REGISTER_BUFFERS_UPDATE = 16, IORING_REGISTER_IOWQ_AFF = 17, 
IORING_UNREGISTER_IOWQ_AFF = 18, IORING_REGISTER_IOWQ_MAX_WORKERS = 19, IORING_REGISTER_RING_FDS = 20, IORING_UNREGISTER_RING_FDS = 21, IORING_REGISTER_PBUF_RING = 22, IORING_UNREGISTER_PBUF_RING = 23, IORING_REGISTER_SYNC_CANCEL = 24, IORING_REGISTER_FILE_ALLOC_RANGE = 25, IORING_REGISTER_LAST = 26, IORING_REGISTER_USE_REGISTERED_RING = 2147483648, }; enum { IORING_RSRC_FILE = 0, IORING_RSRC_BUFFER = 1, }; enum { IORING_RESTRICTION_REGISTER_OP = 0, IORING_RESTRICTION_SQE_OP = 1, IORING_RESTRICTION_SQE_FLAGS_ALLOWED = 2, IORING_RESTRICTION_SQE_FLAGS_REQUIRED = 3, IORING_RESTRICTION_LAST = 4, }; struct trace_event_raw_io_uring_create { struct trace_entry ent; int fd; void *ctx; u32 sq_entries; u32 cq_entries; u32 flags; char __data[0]; }; struct trace_event_raw_io_uring_register { struct trace_entry ent; void *ctx; unsigned int opcode; unsigned int nr_files; unsigned int nr_bufs; long ret; char __data[0]; }; struct trace_event_raw_io_uring_file_get { struct trace_entry ent; void *ctx; void *req; u64 user_data; int fd; char __data[0]; }; struct trace_event_raw_io_uring_queue_async_work { struct trace_entry ent; void *ctx; void *req; u64 user_data; u8 opcode; unsigned int flags; struct io_wq_work *work; int rw; u32 __data_loc_op_str; char __data[0]; }; struct trace_event_raw_io_uring_defer { struct trace_entry ent; void *ctx; void *req; unsigned long long data; u8 opcode; u32 __data_loc_op_str; char __data[0]; }; struct trace_event_raw_io_uring_link { struct trace_entry ent; void *ctx; void *req; void *target_req; char __data[0]; }; struct trace_event_raw_io_uring_cqring_wait { struct trace_entry ent; void *ctx; int min_events; char __data[0]; }; struct trace_event_raw_io_uring_fail_link { struct trace_entry ent; void *ctx; void *req; unsigned long long user_data; u8 opcode; void *link; u32 __data_loc_op_str; char __data[0]; }; struct trace_event_raw_io_uring_complete { struct trace_entry ent; void *ctx; void *req; u64 user_data; int res; unsigned int cflags; u64 extra1; u64 extra2; char __data[0]; }; struct trace_event_raw_io_uring_submit_req { struct trace_entry ent; void *ctx; void *req; unsigned long long user_data; u8 opcode; u32 flags; bool sq_thread; u32 __data_loc_op_str; char __data[0]; }; struct trace_event_raw_io_uring_poll_arm { struct trace_entry ent; void *ctx; void *req; unsigned long long user_data; u8 opcode; int mask; int events; u32 __data_loc_op_str; char __data[0]; }; struct trace_event_raw_io_uring_task_add { struct trace_entry ent; void *ctx; void *req; unsigned long long user_data; u8 opcode; int mask; u32 __data_loc_op_str; char __data[0]; }; struct trace_event_raw_io_uring_req_failed { struct trace_entry ent; void *ctx; void *req; unsigned long long user_data; u8 opcode; u8 flags; u8 ioprio; u64 off; u64 addr; u32 len; u32 op_flags; u16 buf_index; u16 personality; u32 file_index; u64 pad1; u64 addr3; int error; u32 __data_loc_op_str; char __data[0]; }; struct trace_event_raw_io_uring_cqe_overflow { struct trace_entry ent; void *ctx; unsigned long long user_data; s32 res; u32 cflags; void *ocqe; char __data[0]; }; struct trace_event_raw_io_uring_task_work_run { struct trace_entry ent; void *tctx; unsigned int count; unsigned int loops; char __data[0]; }; struct trace_event_raw_io_uring_short_write { struct trace_entry ent; void *ctx; u64 fpos; u64 wanted; u64 got; char __data[0]; }; struct trace_event_raw_io_uring_local_work_run { struct trace_entry ent; void *ctx; int count; unsigned int loops; char __data[0]; }; struct io_defer_entry { struct list_head list; struct 
io_kiocb *req; u32 seq; }; struct io_tctx_node { struct list_head ctx_node; struct task_struct *task; struct io_ring_ctx *ctx; }; struct io_overflow_cqe { struct list_head list; struct io_uring_cqe cqe; }; struct io_wait_queue { struct wait_queue_entry wq; struct io_ring_ctx *ctx; unsigned int cq_tail; unsigned int nr_timeouts; ktime_t timeout; }; struct io_tctx_exit { struct callback_head task_work; struct completion completion; struct io_ring_ctx *ctx; }; struct io_sqring_offsets { __u32 head; __u32 tail; __u32 ring_mask; __u32 ring_entries; __u32 flags; __u32 dropped; __u32 array; __u32 resv1; __u64 user_addr; }; struct io_cqring_offsets { __u32 head; __u32 tail; __u32 ring_mask; __u32 ring_entries; __u32 overflow; __u32 cqes; __u32 flags; __u32 resv1; __u64 user_addr; }; struct io_uring_params { __u32 sq_entries; __u32 cq_entries; __u32 flags; __u32 sq_thread_cpu; __u32 sq_thread_idle; __u32 features; __u32 wq_fd; __u32 resv[3]; struct io_sqring_offsets sq_off; struct io_cqring_offsets cq_off; }; struct trace_event_data_offsets_io_uring_queue_async_work { u32 op_str; }; struct trace_event_data_offsets_io_uring_defer { u32 op_str; }; struct trace_event_data_offsets_io_uring_fail_link { u32 op_str; }; struct trace_event_data_offsets_io_uring_submit_req { u32 op_str; }; struct trace_event_data_offsets_io_uring_poll_arm { u32 op_str; }; struct trace_event_data_offsets_io_uring_task_add { u32 op_str; }; struct trace_event_data_offsets_io_uring_req_failed { u32 op_str; }; struct io_cold_def { unsigned short async_size; const char *name; int (*prep_async)(struct io_kiocb *); void (*cleanup)(struct io_kiocb *); void (*fail)(struct io_kiocb *); }; struct io_issue_def { unsigned int needs_file: 1; unsigned int plug: 1; unsigned int hash_reg_file: 1; unsigned int unbound_nonreg_file: 1; unsigned int pollin: 1; unsigned int pollout: 1; unsigned int poll_exclusive: 1; unsigned int buffer_select: 1; unsigned int not_supported: 1; unsigned int audit_skip: 1; unsigned int ioprio: 1; unsigned int iopoll: 1; unsigned int iopoll_queue: 1; unsigned int manual_alloc: 1; int (*issue)(struct io_kiocb *, unsigned int); int (*prep)(struct io_kiocb *, const struct io_uring_sqe *); }; typedef bool work_cancel_fn(struct io_wq_work *, void *); struct io_uring_getevents_arg { __u64 sigmask; __u32 sigmask_sz; __u32 pad; __u64 ts; }; struct io_uring_file_index_range { __u32 off; __u32 len; __u64 resv; }; struct trace_event_data_offsets_io_uring_create {}; struct trace_event_data_offsets_io_uring_register {}; struct trace_event_data_offsets_io_uring_file_get {}; struct trace_event_data_offsets_io_uring_link {}; struct trace_event_data_offsets_io_uring_cqring_wait {}; struct trace_event_data_offsets_io_uring_complete {}; struct trace_event_data_offsets_io_uring_cqe_overflow {}; struct trace_event_data_offsets_io_uring_task_work_run {}; struct trace_event_data_offsets_io_uring_short_write {}; struct trace_event_data_offsets_io_uring_local_work_run {}; struct io_task_cancel { struct task_struct *task; bool all; }; struct io_uring_probe_op { __u8 op; __u8 resv; __u16 flags; __u32 resv2; }; struct io_uring_probe { __u8 last_op; __u8 ops_len; __u16 resv; __u32 resv2[3]; struct io_uring_probe_op ops[0]; }; struct io_uring_restriction { __u16 opcode; union { __u8 register_op; __u8 sqe_op; __u8 sqe_flags; }; __u8 resv; __u32 resv2[3]; }; struct io_xattr { struct file *file; struct xattr_ctx ctx; struct filename *filename; }; struct io_rename { struct file *file; int old_dfd; int new_dfd; struct filename *oldpath; struct 
filename *newpath; int flags; }; struct io_unlink { struct file *file; int dfd; int flags; struct filename *filename; }; struct io_mkdir { struct file *file; int dfd; umode_t mode; struct filename *filename; }; struct io_link { struct file *file; int old_dfd; int new_dfd; struct filename *oldpath; struct filename *newpath; int flags; }; struct io_splice { struct file *file_out; loff_t off_out; loff_t off_in; u64 len; int splice_fd_in; unsigned int flags; }; struct io_sync { struct file *file; loff_t len; loff_t off; int flags; int mode; }; struct io_madvise { struct file *file; u64 addr; u32 len; u32 advice; }; struct io_fadvise { struct file *file; u64 offset; u32 len; u32 advice; }; struct io_open { struct file *file; int dfd; u32 file_slot; struct filename *filename; struct open_how how; unsigned long nofile; }; struct io_close { struct file *file; int fd; u32 file_slot; }; enum { SOCKET_URING_OP_SIOCINQ = 0, SOCKET_URING_OP_SIOCOUTQ = 1, }; struct io_epoll { struct file *file; int epfd; int op; int fd; struct epoll_event event; }; struct io_statx { struct file *file; int dfd; unsigned int mask; unsigned int flags; struct filename *filename; struct statx __attribute__((btf_type_tag("user"))) *buffer; }; enum { SKBFL_ZEROCOPY_ENABLE = 1, SKBFL_SHARED_FRAG = 2, SKBFL_PURE_ZEROCOPY = 4, SKBFL_DONT_ORPHAN = 8, SKBFL_MANAGED_FRAG_REFS = 16, }; struct io_shutdown { struct file *file; int how; }; struct compat_msghdr; struct user_msghdr; struct io_sr_msg { struct file *file; union { struct compat_msghdr __attribute__((btf_type_tag("user"))) *umsg_compat; struct user_msghdr __attribute__((btf_type_tag("user"))) *umsg; void __attribute__((btf_type_tag("user"))) *buf; }; unsigned int len; unsigned int done_io; unsigned int msg_flags; unsigned int nr_multishot_loops; u16 flags; u16 addr_len; u16 buf_group; void __attribute__((btf_type_tag("user"))) *addr; void __attribute__((btf_type_tag("user"))) *msg_control; struct io_kiocb *notif; }; struct compat_msghdr { compat_uptr_t msg_name; compat_int_t msg_namelen; compat_uptr_t msg_iov; compat_size_t msg_iovlen; compat_uptr_t msg_control; compat_size_t msg_controllen; compat_uint_t msg_flags; }; struct user_msghdr { void __attribute__((btf_type_tag("user"))) *msg_name; int msg_namelen; struct iovec __attribute__((btf_type_tag("user"))) *msg_iov; __kernel_size_t msg_iovlen; void __attribute__((btf_type_tag("user"))) *msg_control; __kernel_size_t msg_controllen; unsigned int msg_flags; }; struct io_accept { struct file *file; struct sockaddr __attribute__((btf_type_tag("user"))) *addr; int __attribute__((btf_type_tag("user"))) *addr_len; int flags; u32 file_slot; unsigned long nofile; }; struct io_socket { struct file *file; int domain; int type; int protocol; int flags; u32 file_slot; unsigned long nofile; }; struct io_connect { struct file *file; struct sockaddr __attribute__((btf_type_tag("user"))) *addr; int addr_len; bool in_progress; bool seen_econnaborted; }; struct io_async_msghdr { union { struct iovec fast_iov[8]; struct { struct iovec fast_iov_one; __kernel_size_t controllen; int namelen; __kernel_size_t payloadlen; }; struct io_cache_entry cache; }; struct iovec *free_iov; struct sockaddr __attribute__((btf_type_tag("user"))) *uaddr; struct msghdr msg; struct __kernel_sockaddr_storage addr; }; typedef s32 compat_ssize_t; struct io_notif_data { struct file *file; struct ubuf_info uarg; unsigned long account_pages; bool zc_report; bool zc_used; bool zc_copied; }; struct io_uring_recvmsg_out { __u32 namelen; __u32 controllen; __u32 payloadlen; 
__u32 flags; }; struct io_recvmsg_multishot_hdr { struct io_uring_recvmsg_out msg; struct __kernel_sockaddr_storage addr; }; struct io_async_connect { struct __kernel_sockaddr_storage address; }; enum { IORING_MSG_DATA = 0, IORING_MSG_SEND_FD = 1, }; struct io_msg { struct file *file; struct file *src_file; struct callback_head tw; u64 user_data; u32 len; u32 cmd; u32 src_fd; union { u32 dst_fd; u32 cqe_flags; }; u32 flags; }; struct io_timeout { struct file *file; u32 off; u32 target_seq; u32 repeats; struct list_head list; struct io_kiocb *head; struct io_kiocb *prev; }; struct io_timeout_rem { struct file *file; u64 addr; struct timespec64 ts; u32 flags; bool ltimeout; }; struct io_timeout_data { struct io_kiocb *req; struct hrtimer timer; struct timespec64 ts; enum hrtimer_mode mode; u32 flags; }; struct io_cancel_data { struct io_ring_ctx *ctx; union { u64 data; struct file *file; }; u8 opcode; u32 flags; int seq; }; enum { IO_SQ_THREAD_SHOULD_STOP = 0, IO_SQ_THREAD_SHOULD_PARK = 1, }; typedef void io_wq_work_fn(struct io_wq_work *); typedef struct io_wq_work *free_work_fn(struct io_wq_work *); struct io_wq_data { struct io_wq_hash *hash; struct task_struct *task; io_wq_work_fn *do_work; free_work_fn *free_work; }; struct io_uring_rsrc_update { __u32 offset; __u32 resv; __u64 data; }; enum { IOU_POLL_DONE = 0, IOU_POLL_NO_ACTION = 1, IOU_POLL_REMOVE_POLL_USE_RES = 2, IOU_POLL_REISSUE = 3, IOU_POLL_REQUEUE = 4, }; struct io_poll_update { struct file *file; u64 old_user_data; u64 new_user_data; __poll_t events; bool update_events; bool update_user_data; }; struct io_poll_table { struct poll_table_struct pt; struct io_kiocb *req; int nr_entries; int error; bool owning; __poll_t result_mask; }; struct io_cancel { struct file *file; u64 addr; u32 flags; s32 fd; u8 opcode; }; struct io_uring_sync_cancel_reg { __u64 addr; __s32 fd; __u32 flags; struct __kernel_timespec timeout; __u8 opcode; __u8 pad[7]; __u64 pad2[3]; }; enum { IOU_PBUF_RING_MMAP = 1, }; struct io_provide_buf { struct file *file; __u64 addr; __u32 len; __u32 bgid; __u32 nbufs; __u16 bid; }; struct io_buf_free { struct hlist_node list; void *mem; size_t size; int inuse; }; struct io_uring_buf_reg { __u64 ring_addr; __u32 ring_entries; __u16 bgid; __u16 flags; __u64 resv[3]; }; struct io_rsrc_update { struct file *file; u64 arg; u32 nr_args; u32 offset; }; struct io_uring_rsrc_update2 { __u32 offset; __u32 resv; __u64 data; __u64 tags; __u32 nr; __u32 resv2; }; struct io_uring_rsrc_register { __u32 nr; __u32 flags; __u64 resv2; __u64 data; __u64 tags; }; struct io_rw { struct kiocb kiocb; u64 addr; u32 len; rwf_t flags; }; struct iov_iter_state { size_t iov_offset; size_t count; unsigned long nr_segs; }; struct io_rw_state { struct iov_iter iter; struct iov_iter_state iter_state; struct iovec fast_iov[8]; }; struct io_async_rw { struct io_rw_state s; const struct iovec *free_iovec; size_t bytes_done; struct wait_page_queue wpq; }; enum { IO_WQ_BIT_EXIT = 0, }; enum { IO_WORKER_F_UP = 1, IO_WORKER_F_RUNNING = 2, IO_WORKER_F_FREE = 4, IO_WORKER_F_BOUND = 8, }; enum { IO_ACCT_STALLED_BIT = 0, }; enum { IO_WQ_ACCT_BOUND = 0, IO_WQ_ACCT_UNBOUND = 1, IO_WQ_ACCT_NR = 2, }; struct io_wq_acct { unsigned int nr_workers; unsigned int max_workers; int index; atomic_t nr_running; raw_spinlock_t lock; struct io_wq_work_list work_list; unsigned long flags; }; struct io_wq { unsigned long state; free_work_fn *free_work; io_wq_work_fn *do_work; struct io_wq_hash *hash; atomic_t worker_refs; struct completion worker_done; struct hlist_node 
cpuhp_node; struct task_struct *task; struct io_wq_acct acct[2]; raw_spinlock_t lock; struct hlist_nulls_head free_list; struct list_head all_list; struct wait_queue_entry wait; struct io_wq_work *hash_tail[64]; cpumask_var_t cpu_mask; }; struct io_worker { refcount_t ref; unsigned int flags; struct hlist_nulls_node nulls_node; struct list_head all_list; struct task_struct *task; struct io_wq *wq; struct io_wq_work *cur_work; struct io_wq_work *next_work; raw_spinlock_t lock; struct completion ref_done; unsigned long create_state; struct callback_head create_work; int create_index; union { struct callback_head rcu; struct work_struct work; }; }; struct io_cb_cancel_data { work_cancel_fn *fn; void *data; int nr_running; int nr_pending; bool cancel_all; }; struct online_data { unsigned int cpu; bool online; }; struct wrapper { cmp_func_t cmp; swap_func_t swap; }; enum { MAX_OPT_ARGS = 3, }; enum { REG_OP_ISFREE = 0, REG_OP_ALLOC = 1, REG_OP_RELEASE = 2, }; struct region { unsigned int start; unsigned int off; unsigned int group_len; unsigned int end; unsigned int nbits; }; typedef void sg_free_fn(struct scatterlist *, unsigned int); struct sg_append_table { struct sg_table sgt; struct scatterlist *prv; unsigned int total_nents; }; typedef struct scatterlist *sg_alloc_fn(unsigned int, gfp_t); struct sg_dma_page_iter { struct sg_page_iter base; }; struct csum_state { __wsum csum; size_t off; }; union nested_table { union nested_table __attribute__((btf_type_tag("rcu"))) *table; struct rhash_lock_head __attribute__((btf_type_tag("rcu"))) *bucket; }; struct rhashtable_walker { struct list_head list; struct bucket_table *tbl; }; struct rhashtable_iter { struct rhashtable *ht; struct rhash_head *p; struct rhlist_head *list; struct rhashtable_walker walker; unsigned int slot; unsigned int skip; bool end_of_table; }; struct once_work { struct work_struct work; struct static_key_true *key; struct module *module; }; struct genradix_node { union { struct genradix_node *children[512]; u8 data[4096]; }; }; struct genradix_iter { size_t offset; size_t pos; }; enum string_size_units { STRING_UNITS_10 = 0, STRING_UNITS_2 = 1, }; struct strarray { char **array; size_t n; }; struct kunit_hooks_table { void (*fail_current_test)(const char *, int, const char *, ...); void * (*get_static_stub_address)(struct kunit *, void *); }; struct reciprocal_value_adv { u32 m; u8 sh; u8 exp; bool is_wide_m; }; struct gf128mul_64k { struct gf128mul_4k *t[16]; }; enum blake2s_lengths { BLAKE2S_BLOCK_SIZE = 64, BLAKE2S_HASH_SIZE = 32, BLAKE2S_KEY_SIZE = 32, BLAKE2S_128_HASH_SIZE = 16, BLAKE2S_160_HASH_SIZE = 20, BLAKE2S_224_HASH_SIZE = 28, BLAKE2S_256_HASH_SIZE = 32, }; struct blake2s_state { u32 h[8]; u32 t[2]; u32 f[2]; u8 buf[64]; unsigned int buflen; unsigned int outlen; }; enum blake2s_iv { BLAKE2S_IV0 = 1779033703, BLAKE2S_IV1 = 3144134277, BLAKE2S_IV2 = 1013904242, BLAKE2S_IV3 = 2773480762, BLAKE2S_IV4 = 1359893119, BLAKE2S_IV5 = 2600822924, BLAKE2S_IV6 = 528734635, BLAKE2S_IV7 = 1541459225, }; typedef mpi_limb_t *mpi_ptr_t; typedef int mpi_size_t; typedef mpi_limb_t UWtype; typedef unsigned int UHWtype; struct mpi_ec_ctx; struct field_table { const char *p; void (*addm)(MPI, MPI, MPI, struct mpi_ec_ctx *); void (*subm)(MPI, MPI, MPI, struct mpi_ec_ctx *); void (*mulm)(MPI, MPI, MPI, struct mpi_ec_ctx *); void (*mul2)(MPI, MPI, struct mpi_ec_ctx *); void (*pow2)(MPI, const MPI, struct mpi_ec_ctx *); }; struct barrett_ctx_s; typedef struct barrett_ctx_s *mpi_barrett_t; enum gcry_mpi_ec_models { MPI_EC_WEIERSTRASS = 0, 
MPI_EC_MONTGOMERY = 1, MPI_EC_EDWARDS = 2, }; enum ecc_dialects { ECC_DIALECT_STANDARD = 0, ECC_DIALECT_ED25519 = 1, ECC_DIALECT_SAFECURVE = 2, }; struct gcry_mpi_point; typedef struct gcry_mpi_point *MPI_POINT; struct mpi_ec_ctx { enum gcry_mpi_ec_models model; enum ecc_dialects dialect; int flags; unsigned int nbits; MPI p; MPI a; MPI b; MPI_POINT G; MPI n; unsigned int h; MPI_POINT Q; MPI d; const char *name; struct { struct { unsigned int a_is_pminus3: 1; unsigned int two_inv_p: 1; } valid; int a_is_pminus3; MPI two_inv_p; mpi_barrett_t p_barrett; MPI scratch[11]; } t; void (*addm)(MPI, MPI, MPI, struct mpi_ec_ctx *); void (*subm)(MPI, MPI, MPI, struct mpi_ec_ctx *); void (*mulm)(MPI, MPI, MPI, struct mpi_ec_ctx *); void (*pow2)(MPI, const MPI, struct mpi_ec_ctx *); void (*mul2)(MPI, MPI, struct mpi_ec_ctx *); }; struct gcry_mpi_point { MPI x; MPI y; MPI z; }; enum gcry_mpi_constants { MPI_C_ZERO = 0, MPI_C_ONE = 1, MPI_C_TWO = 2, MPI_C_THREE = 3, MPI_C_FOUR = 4, MPI_C_EIGHT = 5, }; enum gcry_mpi_format { GCRYMPI_FMT_NONE = 0, GCRYMPI_FMT_STD = 1, GCRYMPI_FMT_PGP = 2, GCRYMPI_FMT_SSH = 3, GCRYMPI_FMT_HEX = 4, GCRYMPI_FMT_USG = 5, GCRYMPI_FMT_OPAQUE = 8, }; struct barrett_ctx_s { MPI m; int m_copied; int k; MPI y; MPI r1; MPI r2; MPI r3; }; struct karatsuba_ctx { struct karatsuba_ctx *next; mpi_ptr_t tspace; mpi_size_t tspace_size; mpi_ptr_t tp; mpi_size_t tp_size; }; typedef long mpi_limb_signed_t; enum devm_ioremap_type { DEVM_IOREMAP = 0, DEVM_IOREMAP_UC = 1, DEVM_IOREMAP_WC = 2, DEVM_IOREMAP_NP = 3, }; enum { PCI_STD_RESOURCES = 0, PCI_STD_RESOURCE_END = 5, PCI_ROM_RESOURCE = 6, PCI_IOV_RESOURCES = 7, PCI_IOV_RESOURCE_END = 12, PCI_BRIDGE_RESOURCES = 13, PCI_BRIDGE_RESOURCE_END = 16, PCI_NUM_RESOURCES = 17, DEVICE_COUNT_RESOURCE = 17, }; struct pcim_iomap_devres { void *table[6]; }; struct arch_io_reserve_memtype_wc_devres { resource_size_t start; resource_size_t size; }; typedef void (*btf_trace_rwmmio_write)(void *, unsigned long, unsigned long, u64, u8, volatile void *); typedef void (*btf_trace_rwmmio_post_write)(void *, unsigned long, unsigned long, u64, u8, volatile void *); typedef void (*btf_trace_rwmmio_read)(void *, unsigned long, unsigned long, u8, volatile const void *); typedef void (*btf_trace_rwmmio_post_read)(void *, unsigned long, unsigned long, u64, u8, volatile const void *); struct trace_event_raw_rwmmio_rw_template { struct trace_entry ent; unsigned long caller; unsigned long caller0; unsigned long addr; u64 val; u8 width; char __data[0]; }; struct trace_event_raw_rwmmio_read { struct trace_entry ent; unsigned long caller; unsigned long caller0; unsigned long addr; u8 width; char __data[0]; }; struct trace_event_raw_rwmmio_post_read { struct trace_entry ent; unsigned long caller; unsigned long caller0; unsigned long addr; u64 val; u8 width; char __data[0]; }; struct trace_event_data_offsets_rwmmio_rw_template {}; struct trace_event_data_offsets_rwmmio_read {}; struct trace_event_data_offsets_rwmmio_post_read {}; enum assoc_array_walk_status { assoc_array_walk_tree_empty = 0, assoc_array_walk_found_terminal_node = 1, assoc_array_walk_found_wrong_shortcut = 2, }; struct assoc_array_walk_result { struct { struct assoc_array_node *node; int level; int slot; } terminal_node; struct { struct assoc_array_shortcut *shortcut; int level; int sc_level; unsigned long sc_segments; unsigned long dissimilarity; } wrong_shortcut; }; struct assoc_array_delete_collapse_context { struct assoc_array_node *node; const void *skip_leaf; int slot; }; struct linear_range { unsigned int 
min; unsigned int min_sel; unsigned int max_sel; unsigned int step; }; struct xxh32_state { uint32_t total_len_32; uint32_t large_len; uint32_t v1; uint32_t v2; uint32_t v3; uint32_t v4; uint32_t mem32[4]; uint32_t memsize; }; struct gen_pool_chunk { struct list_head next_chunk; atomic_long_t avail; phys_addr_t phys_addr; void *owner; unsigned long start_addr; unsigned long end_addr; unsigned long bits[0]; }; struct genpool_data_align { int align; }; struct genpool_data_fixed { unsigned long offset; }; typedef enum { HEAD = 0, FLAGS = 1, TIME = 2, OS = 3, EXLEN = 4, EXTRA = 5, NAME = 6, COMMENT = 7, HCRC = 8, DICTID = 9, DICT = 10, TYPE = 11, TYPEDO = 12, STORED = 13, COPY = 14, TABLE = 15, LENLENS = 16, CODELENS = 17, LEN = 18, LENEXT = 19, DIST = 20, DISTEXT = 21, MATCH = 22, LIT = 23, CHECK = 24, LENGTH = 25, DONE = 26, BAD = 27, MEM = 28, SYNC = 29, } inflate_mode; typedef struct { unsigned char op; unsigned char bits; unsigned short val; } code; struct inflate_state { inflate_mode mode; int last; int wrap; int havedict; int flags; unsigned int dmax; unsigned long check; unsigned long total; unsigned int wbits; unsigned int wsize; unsigned int whave; unsigned int write; unsigned char *window; unsigned long hold; unsigned int bits; unsigned int length; unsigned int offset; unsigned int extra; const code *lencode; const code *distcode; unsigned int lenbits; unsigned int distbits; unsigned int ncode; unsigned int nlen; unsigned int ndist; unsigned int have; code *next; unsigned short lens[320]; unsigned short work[288]; code codes[2048]; }; struct inflate_workspace { struct inflate_state inflate_state; unsigned char working_window[32768]; }; typedef enum { CODES = 0, LENS = 1, DISTS = 2, } codetype; typedef unsigned int uInt; typedef unsigned short ush; typedef enum { need_more = 0, block_done = 1, finish_started = 2, finish_done = 3, } block_state; struct deflate_state; typedef struct deflate_state deflate_state; typedef block_state (*compress_func)(deflate_state *, int); struct config_s { ush good_length; ush max_lazy; ush nice_length; ush max_chain; compress_func func; }; typedef struct config_s config; typedef unsigned long ulg; typedef ush Pos; typedef unsigned int IPos; struct ct_data_s { union { ush freq; ush code; } fc; union { ush dad; ush len; } dl; }; typedef struct ct_data_s ct_data; struct static_tree_desc_s; typedef struct static_tree_desc_s static_tree_desc; struct tree_desc_s { ct_data *dyn_tree; int max_code; static_tree_desc *stat_desc; }; typedef unsigned char uch; struct deflate_state { z_streamp strm; int status; Byte *pending_buf; ulg pending_buf_size; Byte *pending_out; int pending; int noheader; Byte data_type; Byte method; int last_flush; uInt w_size; uInt w_bits; uInt w_mask; Byte *window; ulg window_size; Pos *prev; Pos *head; uInt ins_h; uInt hash_size; uInt hash_bits; uInt hash_mask; uInt hash_shift; long block_start; uInt match_length; IPos prev_match; int match_available; uInt strstart; uInt match_start; uInt lookahead; uInt prev_length; uInt max_chain_length; uInt max_lazy_match; int level; int strategy; uInt good_match; int nice_match; struct ct_data_s dyn_ltree[573]; struct ct_data_s dyn_dtree[61]; struct ct_data_s bl_tree[39]; struct tree_desc_s l_desc; struct tree_desc_s d_desc; struct tree_desc_s bl_desc; ush bl_count[16]; int heap[573]; int heap_len; int heap_max; uch depth[573]; uch *l_buf; uInt lit_bufsize; uInt last_lit; ush *d_buf; ulg opt_len; ulg static_len; ulg compressed_len; uInt matches; int last_eob_len; ush bi_buf; int bi_valid; }; struct 
static_tree_desc_s { const ct_data *static_tree; const int *extra_bits; int extra_base; int elems; int max_length; }; struct deflate_workspace { deflate_state deflate_memory; Byte *window_memory; Pos *prev_memory; Pos *head_memory; char *overlay_memory; }; typedef struct deflate_workspace deflate_workspace; typedef struct tree_desc_s tree_desc; enum { RS_DECODE_LAMBDA = 0, RS_DECODE_SYN = 1, RS_DECODE_B = 2, RS_DECODE_T = 3, RS_DECODE_OMEGA = 4, RS_DECODE_ROOT = 5, RS_DECODE_REG = 6, RS_DECODE_LOC = 7, RS_DECODE_NUM_BUFFERS = 8, }; typedef struct { uint32_t hashTable[4096]; uint32_t currentOffset; uint32_t initCheck; const uint8_t *dictionary; uint8_t *bufferStart; uint32_t dictSize; } LZ4_stream_t_internal; typedef union { unsigned long long table[2052]; LZ4_stream_t_internal internal_donotuse; } LZ4_stream_t; typedef uintptr_t uptrval; typedef enum { noLimit = 0, limitedOutput = 1, } limitedOutput_directive; typedef enum { byPtr = 0, byU32 = 1, byU16 = 2, } tableType_t; typedef enum { noDict = 0, withPrefix64k = 1, usingExtDict = 2, } dict_directive; typedef enum { noDictIssue = 0, dictSmall = 1, } dictIssue_directive; typedef struct { unsigned int hashTable[32768]; unsigned short chainTable[65536]; const unsigned char *end; const unsigned char *base; const unsigned char *dictBase; unsigned int dictLimit; unsigned int lowLimit; unsigned int nextToUpdate; unsigned int compressionLevel; } LZ4HC_CCtx_internal; typedef union { size_t table[32774]; LZ4HC_CCtx_internal internal_donotuse; } LZ4_streamHC_t; typedef enum { endOnOutputSize = 0, endOnInputSize = 1, } endCondition_directive; typedef enum { decode_full_block = 0, partial_decode = 1, } earlyEnd_directive; typedef struct { const uint8_t *externalDict; size_t extDictSize; const uint8_t *prefixEnd; size_t prefixSize; } LZ4_streamDecode_t_internal; typedef union { unsigned long long table[4]; LZ4_streamDecode_t_internal internal_donotuse; } LZ4_streamDecode_t; typedef enum { ZSTD_reset_session_only = 1, ZSTD_reset_parameters = 2, ZSTD_reset_session_and_parameters = 3, } ZSTD_ResetDirective; typedef enum { ZSTD_c_compressionLevel = 100, ZSTD_c_windowLog = 101, ZSTD_c_hashLog = 102, ZSTD_c_chainLog = 103, ZSTD_c_searchLog = 104, ZSTD_c_minMatch = 105, ZSTD_c_targetLength = 106, ZSTD_c_strategy = 107, ZSTD_c_enableLongDistanceMatching = 160, ZSTD_c_ldmHashLog = 161, ZSTD_c_ldmMinMatch = 162, ZSTD_c_ldmBucketSizeLog = 163, ZSTD_c_ldmHashRateLog = 164, ZSTD_c_contentSizeFlag = 200, ZSTD_c_checksumFlag = 201, ZSTD_c_dictIDFlag = 202, ZSTD_c_nbWorkers = 400, ZSTD_c_jobSize = 401, ZSTD_c_overlapLog = 402, ZSTD_c_experimentalParam1 = 500, ZSTD_c_experimentalParam2 = 10, ZSTD_c_experimentalParam3 = 1000, ZSTD_c_experimentalParam4 = 1001, ZSTD_c_experimentalParam5 = 1002, ZSTD_c_experimentalParam6 = 1003, ZSTD_c_experimentalParam7 = 1004, ZSTD_c_experimentalParam8 = 1005, ZSTD_c_experimentalParam9 = 1006, ZSTD_c_experimentalParam10 = 1007, ZSTD_c_experimentalParam11 = 1008, ZSTD_c_experimentalParam12 = 1009, ZSTD_c_experimentalParam13 = 1010, ZSTD_c_experimentalParam14 = 1011, ZSTD_c_experimentalParam15 = 1012, } ZSTD_cParameter; typedef struct { int deltaFindState; U32 deltaNbBits; } FSE_symbolCompressionTransform; typedef struct { ptrdiff_t value; const void *stateTable; const void *symbolTT; unsigned int stateLog; } FSE_CState_t; typedef struct { size_t bitContainer; unsigned int bitPos; char *startPtr; char *ptr; char *endPtr; } BIT_CStream_t; typedef enum { trustInput = 0, checkMaxSymbolValue = 1, } HIST_checkInput_e; typedef s16 int16_t; 
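/*
 * Editor's note (illustrative sketch, not part of the generated dump): because
 * this header is emitted with the preserve_access_index pragma at the top, a
 * BPF program may dereference any of the kernel-internal structs declared here
 * (for example struct deadline_data above) and libbpf will relocate the field
 * offsets at load time via CO-RE. A minimal sketch follows, assuming libbpf's
 * bpf_helpers.h / bpf_tracing.h / bpf_core_read.h are available; the kprobe
 * target dd_dispatch_request, the program name, and the pointer chain
 * hctx->queue->elevator->elevator_data are example assumptions that depend on
 * the running kernel and its mq-deadline implementation:
 *
 *   #include "vmlinux.h"
 *   #include <bpf/bpf_helpers.h>
 *   #include <bpf/bpf_tracing.h>
 *   #include <bpf/bpf_core_read.h>
 *
 *   SEC("kprobe/dd_dispatch_request")
 *   int BPF_KPROBE(trace_dd_dispatch, struct blk_mq_hw_ctx *hctx)
 *   {
 *       // In mq-deadline, the elevator's private data is a deadline_data;
 *       // read its current batch counter through CO-RE-relocated accesses.
 *       struct deadline_data *dd;
 *       unsigned int batching;
 *
 *       dd = BPF_CORE_READ(hctx, queue, elevator, elevator_data);
 *       batching = BPF_CORE_READ(dd, batching);
 *       bpf_printk("mq-deadline batching=%u", batching);
 *       return 0;
 *   }
 *
 *   char LICENSE[] SEC("license") = "GPL";
 */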
typedef int16_t S16; typedef struct { FSE_CTable CTable[59]; U32 scratchBuffer[41]; unsigned int count[13]; S16 norm[13]; } HUF_CompressWeightsWksp; typedef struct { HUF_CompressWeightsWksp wksp; BYTE bitsToWeight[13]; BYTE huffWeight[255]; } HUF_WriteCTableWksp; struct nodeElt_s { U32 count; U16 parent; BYTE byte; BYTE nbBits; }; typedef struct nodeElt_s nodeElt; typedef nodeElt huffNodeTable[512]; typedef struct { U16 base; U16 curr; } rankPos; typedef struct { huffNodeTable huffNodeTbl; rankPos rankPosition[192]; } HUF_buildCTable_wksp_tables; typedef struct { unsigned int count[256]; HUF_CElt CTable[257]; union { HUF_buildCTable_wksp_tables buildCTable_wksp; HUF_WriteCTableWksp writeCTable_wksp; U32 hist_wksp[1024]; } wksps; } HUF_compress_tables_t; typedef struct { size_t bitContainer[2]; size_t bitPos[2]; BYTE *startPtr; BYTE *ptr; BYTE *endPtr; } HUF_CStream_t; typedef enum { HUF_singleStream = 0, HUF_fourStreams = 1, } HUF_nbStreams_e; typedef size_t (*ZSTD_blockCompressor)(ZSTD_matchState_t *, seqStore_t *, U32 *, const void *, size_t); struct seqDef_s { U32 offBase; U16 litLength; U16 mlBase; }; typedef uint8_t U8; enum { ZSTDbss_compress = 0, ZSTDbss_noCompress = 1, }; struct ZSTD_CDict_s { const void *dictContent; size_t dictContentSize; ZSTD_dictContentType_e dictContentType; U32 *entropyWorkspace; ZSTD_cwksp workspace; ZSTD_matchState_t matchState; ZSTD_compressedBlockState_t cBlockState; ZSTD_customMem customMem; U32 dictID; int compressionLevel; ZSTD_paramSwitch_e useRowMatchFinder; }; typedef struct { size_t error; int lowerBound; int upperBound; } ZSTD_bounds; typedef enum { ZSTD_dlm_byCopy = 0, ZSTD_dlm_byRef = 1, } ZSTD_dictLoadMethod_e; typedef enum { ZSTD_cpm_noAttachDict = 0, ZSTD_cpm_attachDict = 1, ZSTD_cpm_createCDict = 2, ZSTD_cpm_unknown = 3, } ZSTD_cParamMode_e; typedef enum { ZSTD_e_continue = 0, ZSTD_e_flush = 1, ZSTD_e_end = 2, } ZSTD_EndDirective; typedef struct { U32 LLtype; U32 Offtype; U32 MLtype; size_t size; size_t lastCountSize; } ZSTD_symbolEncodingTypeStats_t; struct repcodes_s { U32 rep[3]; }; typedef struct repcodes_s repcodes_t; typedef struct { U32 *splitLocations; size_t idx; } seqStoreSplits; typedef enum { ZSTD_dtlm_fast = 0, ZSTD_dtlm_full = 1, } ZSTD_dictTableLoadMethod_e; typedef struct { U32 idx; U32 posInSequence; size_t posInSrc; } ZSTD_sequencePosition; typedef size_t (*ZSTD_sequenceCopier)(ZSTD_CCtx *, ZSTD_sequencePosition *, const ZSTD_Sequence * const, size_t, const void *, size_t); typedef enum { ZSTD_defaultDisallowed = 0, ZSTD_defaultAllowed = 1, } ZSTD_defaultPolicy_e; typedef enum { ZSTD_noDict = 0, ZSTD_extDict = 1, ZSTD_dictMatchState = 2, ZSTD_dedicatedDictSearch = 3, } ZSTD_dictMode_e; typedef enum { ZSTD_no_overlap = 0, ZSTD_overlap_src_before_dst = 1, } ZSTD_overlap_e; typedef struct { unsigned long long ingested; unsigned long long consumed; unsigned long long produced; unsigned long long flushed; unsigned int currentJobID; unsigned int nbActiveWorkers; } ZSTD_frameProgression; typedef enum { ZSTDcrp_makeClean = 0, ZSTDcrp_leaveDirty = 1, } ZSTD_compResetPolicy_e; typedef enum { ZSTDirp_continue = 0, ZSTDirp_reset = 1, } ZSTD_indexResetPolicy_e; typedef enum { ZSTD_resetTarget_CDict = 0, ZSTD_resetTarget_CCtx = 1, } ZSTD_resetTarget_e; typedef struct { S16 norm[53]; U32 wksp[285]; } ZSTD_BuildCTableWksp; typedef struct { U32 litLength; U32 matchLength; } ZSTD_sequenceLength; typedef enum { search_hashChain = 0, search_binaryTree = 1, search_rowHash = 2, } searchMethod_e; typedef U64 ZSTD_VecMask; typedef struct { U64 
rolling; U64 stopMask; } ldmRollingHashState_t; typedef U32 (*ZSTD_getAllMatchesFn)(ZSTD_match_t *, ZSTD_matchState_t *, U32 *, const BYTE *, const BYTE *, const U32 *, const U32, const U32); typedef struct { rawSeqStore_t seqStore; U32 startPosInBlock; U32 endPosInBlock; U32 offset; } ZSTD_optLdm_t; typedef ZSTD_frameHeader zstd_frame_header; typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; typedef struct { BYTE nbBits; BYTE byte; } HUF_DEltX1; typedef struct { U32 rankVal[13]; U32 rankStart[13]; U32 statsWksp[218]; BYTE symbols[256]; BYTE huffWeight[256]; } HUF_ReadDTableX1_Workspace; typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2; typedef U32 rankValCol_t[13]; typedef struct { BYTE symbol; } sortedSymbol_t; typedef struct { rankValCol_t rankVal[12]; U32 rankStats[13]; U32 rankStart0[15]; sortedSymbol_t sortedSymbol[256]; BYTE weightList[256]; U32 calleeWksp[218]; } HUF_ReadDTableX2_Workspace; typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc; typedef struct { size_t bitContainer; unsigned int bitsConsumed; const char *ptr; const char *start; const char *limitPtr; } BIT_DStream_t; typedef enum { BIT_DStream_unfinished = 0, BIT_DStream_endOfBuffer = 1, BIT_DStream_completed = 2, BIT_DStream_overflow = 3, } BIT_DStream_status; struct ZSTD_DDict_s { void *dictBuffer; const void *dictContent; size_t dictSize; ZSTD_entropyDTables_t entropy; U32 dictID; U32 entropyPresent; ZSTD_customMem cMem; }; typedef struct { size_t compressedSize; unsigned long long decompressedBound; } ZSTD_frameSizeInfo; typedef struct { blockType_e blockType; U32 lastBlock; U32 origSize; } blockProperties_t; typedef enum { not_streaming = 0, is_streaming = 1, } streaming_operation; typedef enum { ZSTD_d_windowLogMax = 100, ZSTD_d_experimentalParam1 = 1000, ZSTD_d_experimentalParam2 = 1001, ZSTD_d_experimentalParam3 = 1002, ZSTD_d_experimentalParam4 = 1003, } ZSTD_dParameter; typedef enum { ZSTDnit_frameHeader = 0, ZSTDnit_blockHeader = 1, ZSTDnit_block = 2, ZSTDnit_lastBlock = 3, ZSTDnit_checksum = 4, ZSTDnit_skippableFrame = 5, } ZSTD_nextInputType_e; typedef enum { ZSTD_lo_isRegularOffset = 0, ZSTD_lo_isLongOffset = 1, } ZSTD_longOffset_e; typedef struct { U32 fastMode; U32 tableLog; } ZSTD_seqSymbol_header; typedef struct { size_t litLength; size_t matchLength; size_t offset; } seq_t; typedef struct { size_t state; const ZSTD_seqSymbol *table; } ZSTD_fseState; typedef struct { BIT_DStream_t DStream; ZSTD_fseState stateLL; ZSTD_fseState stateOffb; ZSTD_fseState stateML; size_t prevOffset[3]; } seqState_t; typedef ZSTD_ErrorCode ERR_enum; typedef unsigned int FSE_DTable; typedef struct { U16 tableLog; U16 fastMode; } FSE_DTableHeader; typedef struct { unsigned short newState; unsigned char symbol; unsigned char nbBits; } FSE_decode_t; typedef struct { short ncount[256]; FSE_DTable dtable[0]; } FSE_DecompressWksp; typedef struct { size_t state; const void *table; } FSE_DState_t; enum xz_ret { XZ_OK = 0, XZ_STREAM_END = 1, XZ_UNSUPPORTED_CHECK = 2, XZ_MEM_ERROR = 3, XZ_MEMLIMIT_ERROR = 4, XZ_FORMAT_ERROR = 5, XZ_OPTIONS_ERROR = 6, XZ_DATA_ERROR = 7, XZ_BUF_ERROR = 8, }; typedef uint64_t vli_type; struct xz_dec_hash { vli_type unpadded; vli_type uncompressed; uint32_t crc32; }; enum xz_check { XZ_CHECK_NONE = 0, XZ_CHECK_CRC32 = 1, XZ_CHECK_CRC64 = 4, XZ_CHECK_SHA256 = 10, }; enum xz_mode { XZ_SINGLE = 0, XZ_PREALLOC = 1, XZ_DYNALLOC = 2, }; struct xz_dec_lzma2; struct xz_dec_bcj; struct xz_dec { enum { SEQ_STREAM_HEADER = 0, 
SEQ_BLOCK_START = 1, SEQ_BLOCK_HEADER = 2, SEQ_BLOCK_UNCOMPRESS = 3, SEQ_BLOCK_PADDING = 4, SEQ_BLOCK_CHECK = 5, SEQ_INDEX = 6, SEQ_INDEX_PADDING = 7, SEQ_INDEX_CRC32 = 8, SEQ_STREAM_FOOTER = 9, } sequence; uint32_t pos; vli_type vli; size_t in_start; size_t out_start; uint32_t crc32; enum xz_check check_type; enum xz_mode mode; bool allow_buf_error; struct { vli_type compressed; vli_type uncompressed; uint32_t size; } block_header; struct { vli_type compressed; vli_type uncompressed; vli_type count; struct xz_dec_hash hash; } block; struct { enum { SEQ_INDEX_COUNT = 0, SEQ_INDEX_UNPADDED = 1, SEQ_INDEX_UNCOMPRESSED = 2, } sequence; vli_type size; vli_type count; struct xz_dec_hash hash; } index; struct { size_t pos; size_t size; uint8_t buf[1024]; } temp; struct xz_dec_lzma2 *lzma2; struct xz_dec_bcj *bcj; bool bcj_active; }; struct xz_buf { const uint8_t *in; size_t in_pos; size_t in_size; uint8_t *out; size_t out_pos; size_t out_size; }; enum lzma2_seq { SEQ_CONTROL = 0, SEQ_UNCOMPRESSED_1 = 1, SEQ_UNCOMPRESSED_2 = 2, SEQ_COMPRESSED_0 = 3, SEQ_COMPRESSED_1 = 4, SEQ_PROPERTIES = 5, SEQ_LZMA_PREPARE = 6, SEQ_LZMA_RUN = 7, SEQ_COPY = 8, }; enum lzma_state { STATE_LIT_LIT = 0, STATE_MATCH_LIT_LIT = 1, STATE_REP_LIT_LIT = 2, STATE_SHORTREP_LIT_LIT = 3, STATE_MATCH_LIT = 4, STATE_REP_LIT = 5, STATE_SHORTREP_LIT = 6, STATE_LIT_MATCH = 7, STATE_LIT_LONGREP = 8, STATE_LIT_SHORTREP = 9, STATE_NONLIT_MATCH = 10, STATE_NONLIT_REP = 11, }; struct rc_dec { uint32_t range; uint32_t code; uint32_t init_bytes_left; const uint8_t *in; size_t in_pos; size_t in_limit; }; struct dictionary { uint8_t *buf; size_t start; size_t pos; size_t full; size_t limit; size_t end; uint32_t size; uint32_t size_max; uint32_t allocated; enum xz_mode mode; }; struct lzma2_dec { enum lzma2_seq sequence; enum lzma2_seq next_sequence; uint32_t uncompressed; uint32_t compressed; bool need_dict_reset; bool need_props; }; struct lzma_len_dec { uint16_t choice; uint16_t choice2; uint16_t low[128]; uint16_t mid[128]; uint16_t high[256]; }; struct lzma_dec { uint32_t rep0; uint32_t rep1; uint32_t rep2; uint32_t rep3; enum lzma_state state; uint32_t len; uint32_t lc; uint32_t literal_pos_mask; uint32_t pos_mask; uint16_t is_match[192]; uint16_t is_rep[12]; uint16_t is_rep0[12]; uint16_t is_rep1[12]; uint16_t is_rep2[12]; uint16_t is_rep0_long[192]; uint16_t dist_slot[256]; uint16_t dist_special[114]; uint16_t dist_align[16]; struct lzma_len_dec match_len_dec; struct lzma_len_dec rep_len_dec; uint16_t literal[12288]; }; struct xz_dec_lzma2 { struct rc_dec rc; struct dictionary dict; struct lzma2_dec lzma2; struct lzma_dec lzma; struct { uint32_t size; uint8_t buf[63]; } temp; }; struct xz_dec_bcj { enum { BCJ_X86 = 4, BCJ_POWERPC = 5, BCJ_IA64 = 6, BCJ_ARM = 7, BCJ_ARMTHUMB = 8, BCJ_SPARC = 9, } type; enum xz_ret ret; bool single_call; uint32_t pos; uint32_t x86_prev_mask; uint8_t *out; size_t out_pos; size_t out_size; struct { size_t filtered; size_t size; uint8_t buf[16]; } temp; }; struct ts_config; struct ts_state; struct ts_ops { const char *name; struct ts_config * (*init)(const void *, unsigned int, gfp_t, int); unsigned int (*find)(struct ts_config *, struct ts_state *); void (*destroy)(struct ts_config *); void * (*get_pattern)(struct ts_config *); unsigned int (*get_pattern_len)(struct ts_config *); struct module *owner; struct list_head list; }; struct ts_config { struct ts_ops *ops; int flags; unsigned int (*get_next_block)(unsigned int, const u8 **, struct ts_config *, struct ts_state *); void (*finish)(struct ts_config 
*, struct ts_state *); }; struct ts_state { unsigned int offset; char cb[48]; }; struct ts_linear_state { unsigned int len; const void *data; }; struct ts_kmp { u8 *pattern; unsigned int pattern_len; unsigned int prefix_tbl[0]; }; struct ts_bm { u8 *pattern; unsigned int patlen; unsigned int bad_shift[256]; unsigned int good_shift[0]; }; enum { TS_FSM_SPECIFIC = 0, TS_FSM_WILDCARD = 1, TS_FSM_DIGIT = 2, TS_FSM_XDIGIT = 3, TS_FSM_PRINT = 4, TS_FSM_ALPHA = 5, TS_FSM_ALNUM = 6, TS_FSM_ASCII = 7, TS_FSM_CNTRL = 8, TS_FSM_GRAPH = 9, TS_FSM_LOWER = 10, TS_FSM_UPPER = 11, TS_FSM_PUNCT = 12, TS_FSM_SPACE = 13, __TS_FSM_TYPE_MAX = 14, }; enum { TS_FSM_SINGLE = 0, TS_FSM_PERHAPS = 1, TS_FSM_ANY = 2, TS_FSM_MULTI = 3, TS_FSM_HEAD_IGNORE = 4, __TS_FSM_RECUR_MAX = 5, }; struct ts_fsm_token { __u16 type; __u8 recur; __u8 value; }; struct ts_fsm { unsigned int ntokens; struct ts_fsm_token tokens[0]; }; struct ddebug_table { struct list_head link; struct list_head maps; const char *mod_name; unsigned int num_ddebugs; struct _ddebug *ddebugs; }; struct ddebug_class_param { union { unsigned long *bits; unsigned int *lvl; }; char flags[8]; const struct ddebug_class_map *map; }; struct ddebug_query { const char *filename; const char *module; const char *function; const char *format; const char *class_string; unsigned int first_lineno; unsigned int last_lineno; }; struct flag_settings { unsigned int flags; unsigned int mask; }; struct flagsbuf { char buf[8]; }; struct ddebug_iter { struct ddebug_table *table; int idx; }; enum nla_policy_validation { NLA_VALIDATE_NONE = 0, NLA_VALIDATE_RANGE = 1, NLA_VALIDATE_RANGE_WARN_TOO_LONG = 2, NLA_VALIDATE_MIN = 3, NLA_VALIDATE_MAX = 4, NLA_VALIDATE_MASK = 5, NLA_VALIDATE_RANGE_PTR = 6, NLA_VALIDATE_FUNCTION = 7, }; enum { NLA_UNSPEC = 0, NLA_U8 = 1, NLA_U16 = 2, NLA_U32 = 3, NLA_U64 = 4, NLA_STRING = 5, NLA_FLAG = 6, NLA_MSECS = 7, NLA_NESTED = 8, NLA_NESTED_ARRAY = 9, NLA_NUL_STRING = 10, NLA_BINARY = 11, NLA_S8 = 12, NLA_S16 = 13, NLA_S32 = 14, NLA_S64 = 15, NLA_BITFIELD32 = 16, NLA_REJECT = 17, NLA_BE16 = 18, NLA_BE32 = 19, __NLA_TYPE_MAX = 20, }; enum netlink_validation { NL_VALIDATE_LIBERAL = 0, NL_VALIDATE_TRAILING = 1, NL_VALIDATE_MAXTYPE = 2, NL_VALIDATE_UNSPEC = 4, NL_VALIDATE_STRICT_ATTRS = 8, NL_VALIDATE_NESTED = 16, }; struct nla_bitfield32 { __u32 value; __u32 selector; }; struct cpu_rmap { struct kref refcount; u16 size; void **obj; struct { u16 index; u16 dist; } near[0]; }; struct irq_glue { struct irq_affinity_notify notify; struct cpu_rmap *rmap; u16 index; }; struct sg_pool { size_t size; char *name; struct kmem_cache *slab; mempool_t *pool; }; union handle_parts { depot_stack_handle_t handle; struct { u32 pool_index: 16; u32 offset: 10; u32 valid: 1; u32 extra: 5; }; }; struct stack_record { struct stack_record *next; u32 hash; u32 size; union handle_parts handle; unsigned long entries[0]; }; enum asn1_opcode { ASN1_OP_MATCH = 0, ASN1_OP_MATCH_OR_SKIP = 1, ASN1_OP_MATCH_ACT = 2, ASN1_OP_MATCH_ACT_OR_SKIP = 3, ASN1_OP_MATCH_JUMP = 4, ASN1_OP_MATCH_JUMP_OR_SKIP = 5, ASN1_OP_MATCH_ANY = 8, ASN1_OP_MATCH_ANY_OR_SKIP = 9, ASN1_OP_MATCH_ANY_ACT = 10, ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 11, ASN1_OP_COND_MATCH_OR_SKIP = 17, ASN1_OP_COND_MATCH_ACT_OR_SKIP = 19, ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 21, ASN1_OP_COND_MATCH_ANY = 24, ASN1_OP_COND_MATCH_ANY_OR_SKIP = 25, ASN1_OP_COND_MATCH_ANY_ACT = 26, ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 27, ASN1_OP_COND_FAIL = 28, ASN1_OP_COMPLETE = 29, ASN1_OP_ACT = 30, ASN1_OP_MAYBE_ACT = 31, ASN1_OP_END_SEQ = 32, ASN1_OP_END_SET = 
33, ASN1_OP_END_SEQ_OF = 34, ASN1_OP_END_SET_OF = 35, ASN1_OP_END_SEQ_ACT = 36, ASN1_OP_END_SET_ACT = 37, ASN1_OP_END_SEQ_OF_ACT = 38, ASN1_OP_END_SET_OF_ACT = 39, ASN1_OP_RETURN = 40, ASN1_OP__NR = 41, }; enum asn1_method { ASN1_PRIM = 0, ASN1_CONS = 1, }; struct font_desc { int idx; const char *name; unsigned int width; unsigned int height; unsigned int charcount; const void *data; int pref; }; struct font_data { unsigned int extra[4]; const unsigned char data[0]; }; typedef u16 ucs2_char_t; enum ubsan_checks { ubsan_add_overflow = 0, ubsan_builtin_unreachable = 1, ubsan_cfi_check_fail = 2, ubsan_divrem_overflow = 3, ubsan_dynamic_type_cache_miss = 4, ubsan_float_cast_overflow = 5, ubsan_function_type_mismatch = 6, ubsan_implicit_conversion = 7, ubsan_invalid_builtin = 8, ubsan_invalid_objc_cast = 9, ubsan_load_invalid_value = 10, ubsan_missing_return = 11, ubsan_mul_overflow = 12, ubsan_negate_overflow = 13, ubsan_nullability_arg = 14, ubsan_nullability_return = 15, ubsan_nonnull_arg = 16, ubsan_nonnull_return = 17, ubsan_out_of_bounds = 18, ubsan_pointer_overflow = 19, ubsan_shift_out_of_bounds = 20, ubsan_sub_overflow = 21, ubsan_type_mismatch = 22, ubsan_alignment_assumption = 23, ubsan_vla_bound_not_positive = 24, }; struct node_groups { unsigned int id; union { unsigned int ngroups; unsigned int ncpus; }; }; typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *); struct tegra_ictlr_soc { unsigned int num_ictlrs; }; struct tegra_ictlr_info { void *base[6]; u32 cop_ier[6]; u32 cop_iep[6]; u32 cpu_ier[6]; u32 cpu_iep[6]; u32 ictlr_wake_mask[6]; }; struct sun6i_r_intc_variant { u32 first_mux_irq; u32 nr_mux_irqs; u32 mux_valid[4]; }; struct sunxi_sc_nmi_reg_offs { u32 ctrl; u32 pend; u32 enable; }; enum { SUNXI_SRC_TYPE_LEVEL_LOW = 0, SUNXI_SRC_TYPE_EDGE_FALLING = 1, SUNXI_SRC_TYPE_LEVEL_HIGH = 2, SUNXI_SRC_TYPE_EDGE_RISING = 3, }; union gic_base { void *common_base; void __attribute__((btf_type_tag("percpu"))) **percpu_base; }; struct gic_chip_data { union gic_base dist_base; union gic_base cpu_base; void *raw_dist_base; void *raw_cpu_base; u32 percpu_offset; u32 saved_spi_enable[32]; u32 saved_spi_active[32]; u32 saved_spi_conf[64]; u32 saved_spi_target[255]; u32 __attribute__((btf_type_tag("percpu"))) *saved_ppi_enable; u32 __attribute__((btf_type_tag("percpu"))) *saved_ppi_active; u32 __attribute__((btf_type_tag("percpu"))) *saved_ppi_conf; struct irq_domain *domain; unsigned int gic_irqs; }; struct gic_quirk { const char *desc; const char *compatible; const char *property; bool (*init)(void *); u32 iidr; u32 mask; }; struct gic_clk_data { unsigned int num_clocks; const char * const *clocks; }; struct clk_bulk_data { const char *id; struct clk *clk; }; struct gic_chip_data; struct gic_chip_pm { struct gic_chip_data *chip_data; const struct gic_clk_data *clk_data; struct clk_bulk_data *clks; }; struct v2m_data { struct list_head entry; struct fwnode_handle *fwnode; struct resource res; void *base; u32 spi_start; u32 nr_spis; u32 spi_offset; unsigned long *bm; u32 flags; }; struct rdists { struct { raw_spinlock_t rd_lock; void *rd_base; struct page *pend_page; phys_addr_t phys_base; u64 flags; cpumask_t *vpe_table_mask; void *vpe_l1_base; } *rdist; phys_addr_t prop_table_pa; void *prop_table_va; u64 flags; u32 gicd_typer; u32 gicd_typer2; int cpuhp_memreserve_state; bool has_vlpis; bool has_rvpeid; bool has_direct_lpi; bool has_vpend_valid_dirty; }; struct redist_region; struct partition_desc; struct gic_chip_data___2 { struct fwnode_handle *fwnode; phys_addr_t 
dist_phys_base; void *dist_base; struct redist_region *redist_regions; struct rdists rdists; struct irq_domain *domain; u64 redist_stride; u32 nr_redist_regions; u64 flags; bool has_rss; unsigned int ppi_nr; struct partition_desc **ppi_descs; }; struct redist_region { void *redist_base; phys_addr_t phys_base; bool single_redist; }; enum gic_intid_range { SGI_RANGE = 0, PPI_RANGE = 1, SPI_RANGE = 2, EPPI_RANGE = 3, ESPI_RANGE = 4, LPI_RANGE = 5, __INVALID_RANGE__ = 6, }; struct partition_affinity { cpumask_t mask; void *partition_id; }; struct mbi_range { u32 spi_start; u32 nr_spis; unsigned long *bm; }; struct event_lpi_map { unsigned long *lpi_map; u16 *col_map; irq_hw_number_t lpi_base; int nr_lpis; raw_spinlock_t vlpi_lock; struct its_vm *vm; struct its_vlpi_map *vlpi_maps; int nr_vlpis; }; struct its_node; struct its_device___2 { struct list_head entry; struct its_node *its; struct event_lpi_map event_map; void *itt; u32 nr_ites; u32 device_id; bool shared; }; struct its_baser { void *base; u64 val; u32 order; u32 psz; }; struct its_cmd_block; struct its_collection___2; struct its_node { raw_spinlock_t lock; struct mutex dev_alloc_lock; struct list_head entry; void *base; void *sgir_base; phys_addr_t phys_base; struct its_cmd_block *cmd_base; struct its_cmd_block *cmd_write; struct its_baser tables[8]; struct its_collection___2 *collections; struct fwnode_handle *fwnode_handle; u64 (*get_msi_base)(struct its_device___2 *); u64 typer; u64 cbaser_save; u32 ctlr_save; u32 mpidr; struct list_head its_device_list; u64 flags; unsigned long list_nr; int numa_node; unsigned int msi_domain_flags; u32 pre_its_base; int vlpi_redist_offset; }; struct its_cmd_block { union { u64 raw_cmd[4]; __le64 raw_cmd_le[4]; }; }; struct its_collection___2 { u64 target_address; u16 col_id; }; struct cpu_lpi_count { atomic_t managed; atomic_t unmanaged; }; enum its_vcpu_info_cmd_type { MAP_VLPI = 0, GET_VLPI = 1, PROP_UPDATE_VLPI = 2, PROP_UPDATE_AND_INV_VLPI = 3, SCHEDULE_VPE = 4, DESCHEDULE_VPE = 5, COMMIT_VPE = 6, INVALL_VPE = 7, PROP_UPDATE_VSGI = 8, }; struct lpi_range { struct list_head entry; u32 base_id; u32 span; }; struct its_cmd_desc { union { struct { struct its_device___2 *dev; u32 event_id; } its_inv_cmd; struct { struct its_device___2 *dev; u32 event_id; } its_clear_cmd; struct { struct its_device___2 *dev; u32 event_id; } its_int_cmd; struct { struct its_device___2 *dev; int valid; } its_mapd_cmd; struct { struct its_collection___2 *col; int valid; } its_mapc_cmd; struct { struct its_device___2 *dev; u32 phys_id; u32 event_id; } its_mapti_cmd; struct { struct its_device___2 *dev; struct its_collection___2 *col; u32 event_id; } its_movi_cmd; struct { struct its_device___2 *dev; u32 event_id; } its_discard_cmd; struct { struct its_collection___2 *col; } its_invall_cmd; struct { struct its_vpe *vpe; } its_vinvall_cmd; struct { struct its_vpe *vpe; struct its_collection___2 *col; bool valid; } its_vmapp_cmd; struct { struct its_vpe *vpe; struct its_device___2 *dev; u32 virt_id; u32 event_id; bool db_enabled; } its_vmapti_cmd; struct { struct its_vpe *vpe; struct its_device___2 *dev; u32 event_id; bool db_enabled; } its_vmovi_cmd; struct { struct its_vpe *vpe; struct its_collection___2 *col; u16 seq_num; u16 its_list; } its_vmovp_cmd; struct { struct its_vpe *vpe; } its_invdb_cmd; struct { struct its_vpe *vpe; u8 sgi; u8 priority; bool enable; bool group; bool clear; } its_vsgi_cmd; }; }; struct its_cmd_info { enum its_vcpu_info_cmd_type cmd_type; union { struct its_vlpi_map *map; u8 config; bool 
req_db; struct { bool g0en; bool g1en; }; struct { u8 priority; bool group; }; }; }; typedef struct its_collection___2 * (*its_cmd_builder_t)(struct its_node *, struct its_cmd_block *, struct its_cmd_desc *); typedef struct its_vpe * (*its_cmd_vbuilder_t)(struct its_node *, struct its_cmd_block *, struct its_cmd_desc *); struct partition_desc { int nr_parts; struct partition_affinity *parts; struct irq_domain *domain; struct irq_desc *chained_desc; unsigned long *bitmap; struct irq_domain_ops ops; }; struct mbigen_device { struct platform_device *pdev; void *base; }; typedef void (*irq_write_msi_msg_t)(struct msi_desc *, struct msi_msg *); struct cs_data { u32 enable_mask; u16 slow_cfg; u16 fast_cfg; }; struct ebi2_xmem_prop { const char *prop; u32 max; bool slowreg; u16 shift; }; struct of_dev_auxdata { char *compatible; resource_size_t phys_addr; char *name; void *platform_data; }; struct simple_pm_bus { struct clk_bulk_data *clks; int num_clks; }; enum phy_mode { PHY_MODE_INVALID = 0, PHY_MODE_USB_HOST = 1, PHY_MODE_USB_HOST_LS = 2, PHY_MODE_USB_HOST_FS = 3, PHY_MODE_USB_HOST_HS = 4, PHY_MODE_USB_HOST_SS = 5, PHY_MODE_USB_DEVICE = 6, PHY_MODE_USB_DEVICE_LS = 7, PHY_MODE_USB_DEVICE_FS = 8, PHY_MODE_USB_DEVICE_HS = 9, PHY_MODE_USB_DEVICE_SS = 10, PHY_MODE_USB_OTG = 11, PHY_MODE_UFS_HS_A = 12, PHY_MODE_UFS_HS_B = 13, PHY_MODE_PCIE = 14, PHY_MODE_ETHERNET = 15, PHY_MODE_MIPI_DPHY = 16, PHY_MODE_SATA = 17, PHY_MODE_LVDS = 18, PHY_MODE_DP = 19, }; enum phy_media { PHY_MEDIA_DEFAULT = 0, PHY_MEDIA_SR = 1, PHY_MEDIA_DAC = 2, }; enum device_link_state { DL_STATE_NONE = -1, DL_STATE_DORMANT = 0, DL_STATE_AVAILABLE = 1, DL_STATE_CONSUMER_PROBE = 2, DL_STATE_ACTIVE = 3, DL_STATE_SUPPLIER_UNBIND = 4, }; struct phy; struct phy_lookup { struct list_head node; const char *dev_id; const char *con_id; struct phy *phy; }; struct phy_attrs { u32 bus_width; u32 max_link_rate; enum phy_mode mode; }; struct phy_ops; struct regulator; struct phy { struct device dev; int id; const struct phy_ops *ops; struct mutex mutex; int init_count; int power_count; struct phy_attrs attrs; struct regulator *pwr; struct dentry *debugfs; }; union phy_configure_opts; struct phy_ops { int (*init)(struct phy *); int (*exit)(struct phy *); int (*power_on)(struct phy *); int (*power_off)(struct phy *); int (*set_mode)(struct phy *, enum phy_mode, int); int (*set_media)(struct phy *, enum phy_media); int (*set_speed)(struct phy *, int); int (*configure)(struct phy *, union phy_configure_opts *); int (*validate)(struct phy *, enum phy_mode, int, union phy_configure_opts *); int (*reset)(struct phy *); int (*calibrate)(struct phy *); void (*release)(struct phy *); struct module *owner; }; struct phy_configure_opts_mipi_dphy { unsigned int clk_miss; unsigned int clk_post; unsigned int clk_pre; unsigned int clk_prepare; unsigned int clk_settle; unsigned int clk_term_en; unsigned int clk_trail; unsigned int clk_zero; unsigned int d_term_en; unsigned int eot; unsigned int hs_exit; unsigned int hs_prepare; unsigned int hs_settle; unsigned int hs_skip; unsigned int hs_trail; unsigned int hs_zero; unsigned int init; unsigned int lpx; unsigned int ta_get; unsigned int ta_go; unsigned int ta_sure; unsigned int wakeup; unsigned long hs_clk_rate; unsigned long lp_clk_rate; unsigned char lanes; }; struct phy_configure_opts_dp { unsigned int link_rate; unsigned int lanes; unsigned int voltage[4]; unsigned int pre[4]; u8 ssc: 1; u8 set_rate: 1; u8 set_lanes: 1; u8 set_voltages: 1; }; struct phy_configure_opts_lvds { unsigned int 
bits_per_lane_and_dclk_cycle; unsigned long differential_clk_rate; unsigned int lanes; bool is_slave; }; union phy_configure_opts { struct phy_configure_opts_mipi_dphy mipi_dphy; struct phy_configure_opts_dp dp; struct phy_configure_opts_lvds lvds; }; struct phy_provider { struct device *dev; struct device_node *children; struct module *owner; struct list_head list; struct phy * (*of_xlate)(struct device *, struct of_phandle_args *); }; struct device_link { struct device *supplier; struct list_head s_node; struct device *consumer; struct list_head c_node; struct device link_dev; enum device_link_state status; u32 flags; refcount_t rpm_active; struct kref kref; struct work_struct rm_work; bool supplier_preactivated; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; enum pinctrl_map_type { PIN_MAP_TYPE_INVALID = 0, PIN_MAP_TYPE_DUMMY_STATE = 1, PIN_MAP_TYPE_MUX_GROUP = 2, PIN_MAP_TYPE_CONFIGS_PIN = 3, PIN_MAP_TYPE_CONFIGS_GROUP = 4, }; enum pin_config_param { PIN_CONFIG_BIAS_BUS_HOLD = 0, PIN_CONFIG_BIAS_DISABLE = 1, PIN_CONFIG_BIAS_HIGH_IMPEDANCE = 2, PIN_CONFIG_BIAS_PULL_DOWN = 3, PIN_CONFIG_BIAS_PULL_PIN_DEFAULT = 4, PIN_CONFIG_BIAS_PULL_UP = 5, PIN_CONFIG_DRIVE_OPEN_DRAIN = 6, PIN_CONFIG_DRIVE_OPEN_SOURCE = 7, PIN_CONFIG_DRIVE_PUSH_PULL = 8, PIN_CONFIG_DRIVE_STRENGTH = 9, PIN_CONFIG_DRIVE_STRENGTH_UA = 10, PIN_CONFIG_INPUT_DEBOUNCE = 11, PIN_CONFIG_INPUT_ENABLE = 12, PIN_CONFIG_INPUT_SCHMITT = 13, PIN_CONFIG_INPUT_SCHMITT_ENABLE = 14, PIN_CONFIG_MODE_LOW_POWER = 15, PIN_CONFIG_MODE_PWM = 16, PIN_CONFIG_OUTPUT = 17, PIN_CONFIG_OUTPUT_ENABLE = 18, PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS = 19, PIN_CONFIG_PERSIST_STATE = 20, PIN_CONFIG_POWER_SOURCE = 21, PIN_CONFIG_SKEW_DELAY = 22, PIN_CONFIG_SLEEP_HARDWARE_STATE = 23, PIN_CONFIG_SLEW_RATE = 24, PIN_CONFIG_END = 127, PIN_CONFIG_MAX = 255, }; struct pinctrl_desc; struct pinctrl; struct pinctrl_state; struct pinctrl_dev { struct list_head node; struct pinctrl_desc *desc; struct xarray pin_desc_tree; struct xarray pin_group_tree; unsigned int num_groups; struct xarray pin_function_tree; unsigned int num_functions; struct list_head gpio_ranges; struct device *dev; struct module *owner; void *driver_data; struct pinctrl *p; struct pinctrl_state *hog_default; struct pinctrl_state *hog_sleep; struct mutex mutex; struct dentry *device_root; }; struct pinctrl_pin_desc; struct pinctrl_ops; struct pinmux_ops; struct pinconf_ops; struct pinconf_generic_params; struct pin_config_item; struct pinctrl_desc { const char *name; const struct pinctrl_pin_desc *pins; unsigned int npins; const struct pinctrl_ops *pctlops; const struct pinmux_ops *pmxops; const struct pinconf_ops *confops; struct module *owner; unsigned int num_custom_params; const struct pinconf_generic_params *custom_params; const struct pin_config_item *custom_conf_items; bool link_consumers; }; struct pinctrl_pin_desc { unsigned int number; const char *name; void *drv_data; }; struct pinctrl_map; struct pinctrl_ops { int (*get_groups_count)(struct pinctrl_dev *); const char * (*get_group_name)(struct pinctrl_dev *, unsigned int); int (*get_group_pins)(struct pinctrl_dev *, unsigned int, const unsigned int **, unsigned int *); void (*pin_dbg_show)(struct pinctrl_dev *, struct seq_file *, unsigned int); int (*dt_node_to_map)(struct pinctrl_dev *, struct device_node *, struct pinctrl_map **, unsigned int *); void (*dt_free_map)(struct pinctrl_dev *, struct pinctrl_map *, unsigned int); }; struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state 
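/*
 * Illustrative consumer-side sketch for the generic PHY framework types above
 * (struct phy, struct phy_ops, enum phy_mode). This is not part of the
 * generated header; it only shows how a kernel driver would typically obtain
 * and bring up a PHY. The device pointer and con_id string ("usb-phy") are
 * hypothetical and error handling is abbreviated.
 *
 *   #include <linux/phy/phy.h>
 *
 *   static int example_enable_phy(struct device *dev)
 *   {
 *       struct phy *phy = devm_phy_get(dev, "usb-phy");   // hypothetical con_id
 *
 *       if (IS_ERR(phy))
 *           return PTR_ERR(phy);
 *       phy_init(phy);                            // dispatches to phy_ops::init
 *       phy_set_mode(phy, PHY_MODE_USB_HOST);     // dispatches to phy_ops::set_mode
 *       return phy_power_on(phy);                 // dispatches to phy_ops::power_on
 *   }
 */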
*init_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; }; struct pinctrl { struct list_head node; struct device *dev; struct list_head states; struct pinctrl_state *state; struct list_head dt_maps; struct kref users; }; struct pinctrl_state { struct list_head node; const char *name; struct list_head settings; }; struct pinctrl_map_mux { const char *group; const char *function; }; struct pinctrl_map_configs { const char *group_or_pin; unsigned long *configs; unsigned int num_configs; }; struct pinctrl_map { const char *dev_name; const char *name; enum pinctrl_map_type type; const char *ctrl_dev_name; union { struct pinctrl_map_mux mux; struct pinctrl_map_configs configs; } data; }; struct pinctrl_gpio_range; struct pinmux_ops { int (*request)(struct pinctrl_dev *, unsigned int); int (*free)(struct pinctrl_dev *, unsigned int); int (*get_functions_count)(struct pinctrl_dev *); const char * (*get_function_name)(struct pinctrl_dev *, unsigned int); int (*get_function_groups)(struct pinctrl_dev *, unsigned int, const char * const **, unsigned int *); int (*set_mux)(struct pinctrl_dev *, unsigned int, unsigned int); int (*gpio_request_enable)(struct pinctrl_dev *, struct pinctrl_gpio_range *, unsigned int); void (*gpio_disable_free)(struct pinctrl_dev *, struct pinctrl_gpio_range *, unsigned int); int (*gpio_set_direction)(struct pinctrl_dev *, struct pinctrl_gpio_range *, unsigned int, bool); bool strict; }; struct pinconf_ops { bool is_generic; int (*pin_config_get)(struct pinctrl_dev *, unsigned int, unsigned long *); int (*pin_config_set)(struct pinctrl_dev *, unsigned int, unsigned long *, unsigned int); int (*pin_config_group_get)(struct pinctrl_dev *, unsigned int, unsigned long *); int (*pin_config_group_set)(struct pinctrl_dev *, unsigned int, unsigned long *, unsigned int); void (*pin_config_dbg_show)(struct pinctrl_dev *, struct seq_file *, unsigned int); void (*pin_config_group_dbg_show)(struct pinctrl_dev *, struct seq_file *, unsigned int); void (*pin_config_config_dbg_show)(struct pinctrl_dev *, struct seq_file *, unsigned long); }; struct pinconf_generic_params { const char * const property; enum pin_config_param param; u32 default_value; }; struct pin_config_item { const enum pin_config_param param; const char * const display; const char * const format; bool has_arg; }; struct gpio_chip; struct pinctrl_gpio_range { struct list_head node; const char *name; unsigned int id; unsigned int base; unsigned int pin_base; unsigned int npins; const unsigned int *pins; struct gpio_chip *gc; }; union gpio_irq_fwspec; struct gpio_irq_chip { struct irq_chip *chip; struct irq_domain *domain; struct fwnode_handle *fwnode; struct irq_domain *parent_domain; int (*child_to_parent_hwirq)(struct gpio_chip *, unsigned int, unsigned int, unsigned int *, unsigned int *); int (*populate_parent_alloc_arg)(struct gpio_chip *, union gpio_irq_fwspec *, unsigned int, unsigned int); unsigned int (*child_offset_to_irq)(struct gpio_chip *, unsigned int); struct irq_domain_ops child_irq_domain_ops; irq_flow_handler_t handler; unsigned int default_type; struct lock_class_key *lock_key; struct lock_class_key *request_key; irq_flow_handler_t parent_handler; union { void *parent_handler_data; void **parent_handler_data_array; }; unsigned int num_parents; unsigned int *parents; unsigned int *map; bool threaded; bool per_parent_data; bool initialized; bool domain_is_allocated_externally; int (*init_hw)(struct gpio_chip *); void (*init_valid_mask)(struct gpio_chip *, unsigned long *, unsigned 
int); unsigned long *valid_mask; unsigned int first; void (*irq_enable)(struct irq_data *); void (*irq_disable)(struct irq_data *); void (*irq_unmask)(struct irq_data *); void (*irq_mask)(struct irq_data *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct gpio_device; struct gpio_chip { const char *label; struct gpio_device *gpiodev; struct device *parent; struct fwnode_handle *fwnode; struct module *owner; int (*request)(struct gpio_chip *, unsigned int); void (*free)(struct gpio_chip *, unsigned int); int (*get_direction)(struct gpio_chip *, unsigned int); int (*direction_input)(struct gpio_chip *, unsigned int); int (*direction_output)(struct gpio_chip *, unsigned int, int); int (*get)(struct gpio_chip *, unsigned int); int (*get_multiple)(struct gpio_chip *, unsigned long *, unsigned long *); void (*set)(struct gpio_chip *, unsigned int, int); void (*set_multiple)(struct gpio_chip *, unsigned long *, unsigned long *); int (*set_config)(struct gpio_chip *, unsigned int, unsigned long); int (*to_irq)(struct gpio_chip *, unsigned int); void (*dbg_show)(struct seq_file *, struct gpio_chip *); int (*init_valid_mask)(struct gpio_chip *, unsigned long *, unsigned int); int (*add_pin_ranges)(struct gpio_chip *); int (*en_hw_timestamp)(struct gpio_chip *, u32, unsigned long); int (*dis_hw_timestamp)(struct gpio_chip *, u32, unsigned long); int base; u16 ngpio; u16 offset; const char * const *names; bool can_sleep; unsigned long (*read_reg)(void *); void (*write_reg)(void *, unsigned long); bool be_bits; void *reg_dat; void *reg_set; void *reg_clr; void *reg_dir_out; void *reg_dir_in; bool bgpio_dir_unreadable; int bgpio_bits; raw_spinlock_t bgpio_lock; unsigned long bgpio_data; unsigned long bgpio_dir; struct gpio_irq_chip irq; unsigned long *valid_mask; unsigned int of_gpio_n_cells; int (*of_xlate)(struct gpio_chip *, const struct of_phandle_args *, u32 *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct gpio_desc; struct gpio_device { struct device dev; struct cdev chrdev; int id; struct device *mockdev; struct module *owner; struct gpio_chip *chip; struct gpio_desc *descs; int base; u16 ngpio; const char *label; void *data; struct list_head list; struct blocking_notifier_head line_state_notifier; struct blocking_notifier_head device_notifier; struct rw_semaphore sem; struct list_head pin_ranges; u64 android_kabi_reserved1; }; struct gpio_desc { struct gpio_device *gdev; unsigned long flags; const char *label; const char *name; unsigned int debounce_period_us; u64 android_kabi_reserved1; }; union gpio_irq_fwspec { struct irq_fwspec fwspec; msi_alloc_info_t msiinfo; }; struct pinctrl_maps { struct list_head node; const struct pinctrl_map *maps; unsigned int num_maps; }; struct pinctrl_setting_mux { unsigned int group; unsigned int func; }; struct pinctrl_setting_configs { unsigned int group_or_pin; unsigned long *configs; unsigned int num_configs; }; struct pinctrl_setting { struct list_head node; enum pinctrl_map_type type; struct pinctrl_dev *pctldev; const char *dev_name; union { struct pinctrl_setting_mux mux; struct pinctrl_setting_configs configs; } data; }; struct pin_desc { struct pinctrl_dev *pctldev; const char *name; bool dynamic_name; void *drv_data; unsigned int mux_usecount; const char *mux_owner; const struct pinctrl_setting_mux *mux_setting; const char *gpio_owner; }; struct pctldev; struct group_desc { const char *name; int *pins; int num_pins; void *data; }; struct function_desc { const char *name; const char * const *group_names; int 
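/*
 * Illustrative sketch for the pin-control consumer types above (struct pinctrl,
 * struct pinctrl_state, struct dev_pin_info): looking up and applying a named
 * pin state from a driver. Not part of the generated header; the state name
 * "default" corresponds to the usual default state, everything else is
 * abbreviated.
 *
 *   #include <linux/pinctrl/consumer.h>
 *
 *   static int example_apply_default_pins(struct device *dev)
 *   {
 *       struct pinctrl *p = devm_pinctrl_get(dev);
 *       struct pinctrl_state *s;
 *
 *       if (IS_ERR(p))
 *           return PTR_ERR(p);
 *       s = pinctrl_lookup_state(p, "default");
 *       if (IS_ERR(s))
 *           return PTR_ERR(s);
 *       return pinctrl_select_state(p, s);   // applies the settings list above
 *   }
 */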
num_group_names; void *data; }; struct pinctrl_dt_map { struct list_head node; struct pinctrl_dev *pctldev; struct pinctrl_map *map; unsigned int num_maps; }; struct pcs_conf_type { const char *name; enum pin_config_param param; }; struct pcs_soc_data { unsigned int flags; int irq; unsigned int irq_enable_mask; unsigned int irq_status_mask; void (*rearm)(); }; struct pcs_gpiofunc_range { unsigned int offset; unsigned int npins; unsigned int gpiofunc; struct list_head node; }; struct pcs_data { struct pinctrl_pin_desc *pa; int cur; }; struct pcs_device { struct resource *res; void *base; void *saved_vals; unsigned int size; struct device *dev; struct device_node *np; struct pinctrl_dev *pctl; unsigned int flags; struct property *missing_nr_pinctrl_cells; struct pcs_soc_data socdata; raw_spinlock_t lock; struct mutex mutex; unsigned int width; unsigned int fmask; unsigned int fshift; unsigned int foff; unsigned int fmax; bool bits_per_mux; unsigned int bits_per_pin; struct pcs_data pins; struct list_head gpiofuncs; struct list_head irqs; struct irq_chip chip; struct irq_domain *domain; struct pinctrl_desc desc; unsigned int (*read)(void *); void (*write)(unsigned int, void *); }; struct pcs_interrupt { void *reg; irq_hw_number_t hwirq; unsigned int irq; struct list_head node; }; struct pcs_func_vals { void *reg; unsigned int val; unsigned int mask; }; struct pcs_conf_vals; struct pcs_function { const char *name; struct pcs_func_vals *vals; unsigned int nvals; const char **pgnames; int npgnames; struct pcs_conf_vals *conf; int nconfs; struct list_head node; }; struct pcs_conf_vals { enum pin_config_param param; unsigned int val; unsigned int enable; unsigned int disable; unsigned int mask; }; struct pcs_pdata { int irq; void (*rearm)(); }; enum tegra_pinconf_param { TEGRA_PINCONF_PARAM_PULL = 0, TEGRA_PINCONF_PARAM_TRISTATE = 1, TEGRA_PINCONF_PARAM_ENABLE_INPUT = 2, TEGRA_PINCONF_PARAM_OPEN_DRAIN = 3, TEGRA_PINCONF_PARAM_LOCK = 4, TEGRA_PINCONF_PARAM_IORESET = 5, TEGRA_PINCONF_PARAM_RCV_SEL = 6, TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE = 7, TEGRA_PINCONF_PARAM_SCHMITT = 8, TEGRA_PINCONF_PARAM_LOW_POWER_MODE = 9, TEGRA_PINCONF_PARAM_DRIVE_DOWN_STRENGTH = 10, TEGRA_PINCONF_PARAM_DRIVE_UP_STRENGTH = 11, TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING = 12, TEGRA_PINCONF_PARAM_SLEW_RATE_RISING = 13, TEGRA_PINCONF_PARAM_DRIVE_TYPE = 14, }; struct cfg_param { const char *property; enum tegra_pinconf_param param; }; struct tegra_pinctrl_soc_data; struct tegra_function; struct tegra_pmx { struct device *dev; struct pinctrl_dev *pctl; const struct tegra_pinctrl_soc_data *soc; struct tegra_function *functions; const char **group_pins; struct pinctrl_gpio_range gpio_range; struct pinctrl_desc desc; int nbanks; void **regs; u32 *backup_regs; }; struct tegra_pingroup; struct tegra_pinctrl_soc_data { unsigned int ngpios; const char *gpio_compatible; const struct pinctrl_pin_desc *pins; unsigned int npins; const char * const *functions; unsigned int nfunctions; const struct tegra_pingroup *groups; unsigned int ngroups; bool hsm_in_mux; bool schmitt_in_mux; bool drvtype_in_mux; bool sfsel_in_mux; }; struct tegra_pingroup { const char *name; const unsigned int *pins; u8 npins; u8 funcs[4]; s32 mux_reg; s32 pupd_reg; s32 tri_reg; s32 drv_reg; u32 mux_bank: 2; u32 pupd_bank: 2; u32 tri_bank: 2; u32 drv_bank: 2; s32 mux_bit: 6; s32 pupd_bit: 6; s32 tri_bit: 6; s32 einput_bit: 6; s32 odrain_bit: 6; s32 lock_bit: 6; s32 ioreset_bit: 6; s32 rcv_sel_bit: 6; s32 hsm_bit: 6; long: 2; s32 sfsel_bit: 6; s32 schmitt_bit: 6; s32 
lpmd_bit: 6; s32 drvdn_bit: 6; s32 drvup_bit: 6; int: 2; s32 slwr_bit: 6; s32 slwf_bit: 6; s32 lpdr_bit: 6; s32 drvtype_bit: 6; s32 drvdn_width: 6; long: 2; s32 drvup_width: 6; s32 slwr_width: 6; s32 slwf_width: 6; u32 parked_bitmask; }; struct tegra_function { const char *name; const char **groups; unsigned int ngroups; }; struct tegra_xusb_padctl_function; struct tegra_xusb_padctl_lane; struct tegra_xusb_padctl_soc { const struct pinctrl_pin_desc *pins; unsigned int num_pins; const struct tegra_xusb_padctl_function *functions; unsigned int num_functions; const struct tegra_xusb_padctl_lane *lanes; unsigned int num_lanes; }; struct tegra_xusb_padctl_function { const char *name; const char * const *groups; unsigned int num_groups; }; struct tegra_xusb_padctl_lane { const char *name; unsigned int offset; unsigned int shift; unsigned int mask; unsigned int iddq; const unsigned int *funcs; unsigned int num_funcs; }; enum tegra_xusb_padctl_param { TEGRA_XUSB_PADCTL_IDDQ = 0, }; struct tegra_xusb_padctl_property { const char *name; enum tegra_xusb_padctl_param param; }; struct reset_control; struct tegra_xusb_padctl { struct device *dev; void *regs; struct mutex lock; struct reset_control *rst; const struct tegra_xusb_padctl_soc *soc; struct pinctrl_dev *pinctrl; struct pinctrl_desc desc; struct phy_provider *provider; struct phy *phys[2]; unsigned int enable; }; typedef void (*btf_trace_gpio_direction)(void *, unsigned int, int, int); typedef void (*btf_trace_gpio_value)(void *, unsigned int, int, int); enum gpio_lookup_flags { GPIO_ACTIVE_HIGH = 0, GPIO_ACTIVE_LOW = 1, GPIO_OPEN_DRAIN = 2, GPIO_OPEN_SOURCE = 4, GPIO_PERSISTENT = 0, GPIO_TRANSITORY = 8, GPIO_PULL_UP = 16, GPIO_PULL_DOWN = 32, GPIO_PULL_DISABLE = 64, GPIO_LOOKUP_FLAGS_DEFAULT = 0, }; enum gpiod_flags { GPIOD_ASIS = 0, GPIOD_IN = 1, GPIOD_OUT_LOW = 3, GPIOD_OUT_HIGH = 7, GPIOD_OUT_LOW_OPEN_DRAIN = 11, GPIOD_OUT_HIGH_OPEN_DRAIN = 15, }; enum { GPIOLINE_CHANGED_REQUESTED = 1, GPIOLINE_CHANGED_RELEASED = 2, GPIOLINE_CHANGED_CONFIG = 3, }; struct gpio_pin_range { struct list_head node; struct pinctrl_dev *pctldev; struct pinctrl_gpio_range range; }; struct trace_event_raw_gpio_direction { struct trace_entry ent; unsigned int gpio; int in; int err; char __data[0]; }; struct trace_event_raw_gpio_value { struct trace_entry ent; unsigned int gpio; int get; int value; char __data[0]; }; struct gpiod_hog { struct list_head list; const char *chip_label; u16 chip_hwnum; const char *line_name; unsigned long lflags; int dflags; }; struct gpiod_lookup { const char *key; u16 chip_hwnum; const char *con_id; unsigned int idx; unsigned long flags; }; struct gpiod_lookup_table { struct list_head list; const char *dev_id; struct gpiod_lookup table[0]; }; typedef struct { spinlock_t *lock; unsigned long flags; } class_spinlock_irqsave_t; struct gpio_array; struct gpio_descs { struct gpio_array *info; unsigned int ndescs; struct gpio_desc *desc[0]; }; struct gpio_array { struct gpio_desc **desc; unsigned int size; struct gpio_chip *chip; unsigned long *get_mask; unsigned long *set_mask; u64 android_kabi_reserved1; unsigned long invert_mask[0]; }; struct trace_event_data_offsets_gpio_direction {}; struct trace_event_data_offsets_gpio_value {}; struct gpio { unsigned int gpio; unsigned long flags; const char *label; }; enum of_gpio_flags { OF_GPIO_ACTIVE_LOW = 1, OF_GPIO_SINGLE_ENDED = 2, OF_GPIO_OPEN_DRAIN = 4, OF_GPIO_TRANSITORY = 8, OF_GPIO_PULL_UP = 16, OF_GPIO_PULL_DOWN = 32, OF_GPIO_PULL_DISABLE = 64, }; typedef struct gpio_desc * 
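/*
 * Illustrative BPF CO-RE sketch for the gpio trace types above
 * (btf_trace_gpio_value, struct trace_event_raw_gpio_value). A program like
 * this would live in a separate BPF object that includes this header; it is
 * not part of the header itself. The section name assumes the kernel's
 * gpio_value tracepoint (whose signature the btf_trace_gpio_value typedef
 * above reflects); the per-GPIO counter map is hypothetical.
 *
 *   #include "vmlinux.h"
 *   #include <bpf/bpf_helpers.h>
 *   #include <bpf/bpf_tracing.h>
 *
 *   struct {
 *       __uint(type, BPF_MAP_TYPE_HASH);
 *       __uint(max_entries, 1024);
 *       __type(key, u32);     // GPIO number
 *       __type(value, u64);   // number of value changes observed
 *   } gpio_hits SEC(".maps");
 *
 *   SEC("tp_btf/gpio_value")
 *   int BPF_PROG(on_gpio_value, unsigned int gpio, int get, int value)
 *   {
 *       u32 key = gpio;
 *       u64 one = 1, *cnt = bpf_map_lookup_elem(&gpio_hits, &key);
 *
 *       if (cnt)
 *           __sync_fetch_and_add(cnt, 1);
 *       else
 *           bpf_map_update_elem(&gpio_hits, &key, &one, BPF_ANY);
 *       return 0;
 *   }
 *
 *   char LICENSE[] SEC("license") = "GPL";
 */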
(*of_find_gpio_quirk)(struct device_node *, const char *, unsigned int, enum of_gpio_flags *); struct of_rename_gpio { const char *con_id; const char *legacy_id; const char *compatible; }; enum gpio_v2_line_flag { GPIO_V2_LINE_FLAG_USED = 1, GPIO_V2_LINE_FLAG_ACTIVE_LOW = 2, GPIO_V2_LINE_FLAG_INPUT = 4, GPIO_V2_LINE_FLAG_OUTPUT = 8, GPIO_V2_LINE_FLAG_EDGE_RISING = 16, GPIO_V2_LINE_FLAG_EDGE_FALLING = 32, GPIO_V2_LINE_FLAG_OPEN_DRAIN = 64, GPIO_V2_LINE_FLAG_OPEN_SOURCE = 128, GPIO_V2_LINE_FLAG_BIAS_PULL_UP = 256, GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN = 512, GPIO_V2_LINE_FLAG_BIAS_DISABLED = 1024, GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME = 2048, GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE = 4096, }; enum gpio_v2_line_changed_type { GPIO_V2_LINE_CHANGED_REQUESTED = 1, GPIO_V2_LINE_CHANGED_RELEASED = 2, GPIO_V2_LINE_CHANGED_CONFIG = 3, }; enum gpio_v2_line_attr_id { GPIO_V2_LINE_ATTR_ID_FLAGS = 1, GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES = 2, GPIO_V2_LINE_ATTR_ID_DEBOUNCE = 3, }; enum gpio_v2_line_event_id { GPIO_V2_LINE_EVENT_RISING_EDGE = 1, GPIO_V2_LINE_EVENT_FALLING_EDGE = 2, }; struct gpioevent_data { __u64 timestamp; __u32 id; }; struct lineevent_state { struct gpio_device *gdev; const char *label; struct gpio_desc *desc; u32 eflags; int irq; wait_queue_head_t wait; struct notifier_block device_unregistered_nb; struct { union { struct __kfifo kfifo; struct gpioevent_data *type; const struct gpioevent_data *const_type; char (*rectype)[0]; struct gpioevent_data *ptr; const struct gpioevent_data *ptr_const; }; struct gpioevent_data buf[16]; } events; u64 timestamp; }; struct linereq; struct line { struct gpio_desc *desc; struct linereq *req; unsigned int irq; u64 edflags; u64 timestamp_ns; u32 req_seqno; u32 line_seqno; struct delayed_work work; unsigned int sw_debounced; unsigned int level; }; struct gpio_v2_line_event { __u64 timestamp_ns; __u32 id; __u32 offset; __u32 seqno; __u32 line_seqno; __u32 padding[6]; }; struct linereq { struct gpio_device *gdev; const char *label; u32 num_lines; wait_queue_head_t wait; struct notifier_block device_unregistered_nb; u32 event_buffer_size; struct { union { struct __kfifo kfifo; struct gpio_v2_line_event *type; const struct gpio_v2_line_event *const_type; char (*rectype)[0]; struct gpio_v2_line_event *ptr; const struct gpio_v2_line_event *ptr_const; }; struct gpio_v2_line_event buf[0]; } events; atomic_t seqno; struct mutex config_mutex; struct line lines[0]; }; struct gpio_v2_line_attribute { __u32 id; __u32 padding; union { __u64 flags; __u64 values; __u32 debounce_period_us; }; }; struct gpio_v2_line_info { char name[32]; char consumer[32]; __u32 offset; __u32 num_attrs; __u64 flags; struct gpio_v2_line_attribute attrs[10]; __u32 padding[4]; }; struct gpio_v2_line_info_changed { struct gpio_v2_line_info info; __u64 timestamp_ns; __u32 event_type; __u32 padding[5]; }; struct gpio_chardev_data { struct gpio_device *gdev; wait_queue_head_t wait; struct { union { struct __kfifo kfifo; struct gpio_v2_line_info_changed *type; const struct gpio_v2_line_info_changed *const_type; char (*rectype)[0]; struct gpio_v2_line_info_changed *ptr; const struct gpio_v2_line_info_changed *ptr_const; }; struct gpio_v2_line_info_changed buf[32]; } events; struct notifier_block lineinfo_changed_nb; struct notifier_block device_unregistered_nb; unsigned long *watched_lines; atomic_t watch_abi_version; }; typedef ssize_t (*read_fn)(struct file *, char __attribute__((btf_type_tag("user"))) *, size_t, loff_t *); struct gpioline_info { __u32 line_offset; __u32 flags; char name[32]; char 
consumer[32]; }; struct gpioline_info_changed { struct gpioline_info info; __u64 timestamp; __u32 event_type; __u32 padding[5]; }; typedef __poll_t (*poll_fn)(struct file *, struct poll_table_struct *); typedef long (*ioctl_fn)(struct file *, unsigned int, unsigned long); struct gpio_v2_line_config_attribute { struct gpio_v2_line_attribute attr; __u64 mask; }; struct gpio_v2_line_config { __u64 flags; __u32 num_attrs; __u32 padding[5]; struct gpio_v2_line_config_attribute attrs[10]; }; struct gpio_v2_line_request { __u32 offsets[64]; char consumer[32]; struct gpio_v2_line_config config; __u32 num_lines; __u32 event_buffer_size; __u32 padding[5]; __s32 fd; }; struct gpiochip_info { char name[32]; char label[32]; __u32 lines; }; struct gpioevent_request { __u32 lineoffset; __u32 handleflags; __u32 eventflags; char consumer_label[32]; int fd; }; struct gpiohandle_request { __u32 lineoffsets[64]; __u32 flags; __u8 default_values[64]; char consumer_label[32]; __u32 lines; int fd; }; struct linehandle_state { struct gpio_device *gdev; const char *label; struct gpio_desc *descs[64]; u32 num_descs; }; struct gpiohandle_data { __u8 values[64]; }; struct gpiohandle_config { __u32 flags; __u8 default_values[64]; __u32 padding[4]; }; struct gpio_v2_line_values { __u64 bits; __u64 mask; }; struct software_node { const char *name; const struct software_node *parent; const struct property_entry *properties; }; struct bgpio_pdata { const char *label; int base; int ngpio; }; struct amba_device; struct amba_id; struct amba_driver { struct device_driver drv; int (*probe)(struct amba_device *, const struct amba_id *); void (*remove)(struct amba_device *); void (*shutdown)(struct amba_device *); const struct amba_id *id_table; bool driver_managed_dma; u64 android_kabi_reserved1; }; struct amba_cs_uci_id { unsigned int devarch; unsigned int devarch_mask; unsigned int devtype; void *data; }; struct amba_device { struct device dev; struct resource res; struct clk *pclk; struct device_dma_parameters dma_parms; unsigned int periphid; struct mutex periphid_lock; unsigned int cid; struct amba_cs_uci_id uci; unsigned int irq[9]; const char *driver_override; u64 android_kabi_reserved1; }; struct amba_id { unsigned int id; unsigned int mask; void *data; }; struct pl061_context_save_regs { u8 gpio_data; u8 gpio_dir; u8 gpio_is; u8 gpio_ibe; u8 gpio_iev; u8 gpio_ie; }; struct pl061 { raw_spinlock_t lock; void *base; struct gpio_chip gc; int parent_irq; struct pl061_context_save_regs csave_regs; }; struct pwm_device; struct pwm_state; typedef void (*btf_trace_pwm_apply)(void *, struct pwm_device *, const struct pwm_state *, int); enum pwm_polarity { PWM_POLARITY_NORMAL = 0, PWM_POLARITY_INVERSED = 1, }; struct pwm_args { u64 period; enum pwm_polarity polarity; }; struct pwm_state { u64 period; u64 duty_cycle; enum pwm_polarity polarity; bool enabled; bool usage_power; }; struct pwm_chip; struct pwm_device { const char *label; unsigned long flags; unsigned int hwpwm; unsigned int pwm; struct pwm_chip *chip; void *chip_data; struct pwm_args args; struct pwm_state state; struct pwm_state last; u64 android_kabi_reserved1; }; struct pwm_ops; struct pwm_chip { struct device *dev; const struct pwm_ops *ops; int base; unsigned int npwm; struct pwm_device * (*of_xlate)(struct pwm_chip *, const struct of_phandle_args *); unsigned int of_pwm_n_cells; struct list_head list; struct pwm_device *pwms; u64 android_kabi_reserved1; }; struct pwm_capture; struct pwm_ops { int (*request)(struct pwm_chip *, struct pwm_device *); void 
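/*
 * Illustrative user-space sketch for the GPIO character-device v2 structures
 * above (struct gpio_v2_line_request, struct gpio_v2_line_config,
 * struct gpio_v2_line_values). User space would use the equivalent uapi
 * definitions from <linux/gpio.h>, not this header; the chip path and line
 * offset are hypothetical and error handling is abbreviated.
 *
 *   #include <fcntl.h>
 *   #include <string.h>
 *   #include <sys/ioctl.h>
 *   #include <linux/gpio.h>
 *
 *   int example_read_line(void)
 *   {
 *       struct gpio_v2_line_request req;
 *       struct gpio_v2_line_values vals;
 *       int chip = open("/dev/gpiochip0", O_RDWR);    // hypothetical chip
 *
 *       memset(&req, 0, sizeof(req));
 *       req.num_lines = 1;
 *       req.offsets[0] = 4;                           // hypothetical line offset
 *       req.config.flags = GPIO_V2_LINE_FLAG_INPUT;
 *       strcpy(req.consumer, "example");
 *       ioctl(chip, GPIO_V2_GET_LINE_IOCTL, &req);    // on success fills req.fd
 *
 *       vals.mask = 1;                                // bit 0 = first requested line
 *       vals.bits = 0;
 *       ioctl(req.fd, GPIO_V2_LINE_GET_VALUES_IOCTL, &vals);
 *       return vals.bits & 1;
 *   }
 */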
(*free)(struct pwm_chip *, struct pwm_device *); int (*capture)(struct pwm_chip *, struct pwm_device *, struct pwm_capture *, unsigned long); int (*apply)(struct pwm_chip *, struct pwm_device *, const struct pwm_state *); int (*get_state)(struct pwm_chip *, struct pwm_device *, struct pwm_state *); struct module *owner; u64 android_kabi_reserved1; }; struct pwm_capture { unsigned int period; unsigned int duty_cycle; }; typedef void (*btf_trace_pwm_get)(void *, struct pwm_device *, const struct pwm_state *, int); enum { PWMF_REQUESTED = 0, PWMF_EXPORTED = 1, }; struct pwm_lookup { struct list_head list; const char *provider; unsigned int index; const char *dev_id; const char *con_id; unsigned int period; enum pwm_polarity polarity; const char *module; }; struct trace_event_raw_pwm { struct trace_entry ent; struct pwm_device *pwm; u64 period; u64 duty_cycle; enum pwm_polarity polarity; bool enabled; int err; char __data[0]; }; struct trace_event_data_offsets_pwm {}; struct pwm_export { struct device child; struct pwm_device *pwm; struct mutex lock; struct pwm_state suspend; }; enum { pci_channel_io_normal = 1, pci_channel_io_frozen = 2, pci_channel_io_perm_failure = 3, }; struct rcec_ea { u8 nextbusn; u8 lastbusn; u32 bitmap; }; struct pci_sriov { int pos; int nres; u32 cap; u16 ctrl; u16 total_VFs; u16 initial_VFs; u16 num_VFs; u16 offset; u16 stride; u16 vf_device; u32 pgsz; u8 link; u8 max_VF_buses; u16 driver_max_VFs; struct pci_dev *dev; struct pci_dev *self; u32 class; u8 hdr_type; u16 subsystem_vendor; u16 subsystem_device; resource_size_t barsz[6]; bool drivers_autoprobe; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; typedef u64 pci_bus_addr_t; struct pci_bus_region { pci_bus_addr_t start; pci_bus_addr_t end; }; enum pci_fixup_pass { pci_fixup_early = 0, pci_fixup_header = 1, pci_fixup_final = 2, pci_fixup_enable = 3, pci_fixup_resume = 4, pci_fixup_suspend = 5, pci_fixup_resume_early = 6, pci_fixup_suspend_late = 7, }; struct pci_bus_resource { struct list_head list; struct resource *res; unsigned int flags; }; enum pci_bar_type { pci_bar_unknown = 0, pci_bar_io = 1, pci_bar_mem32 = 2, pci_bar_mem64 = 3, }; enum pci_bus_speed { PCI_SPEED_33MHz = 0, PCI_SPEED_66MHz = 1, PCI_SPEED_66MHz_PCIX = 2, PCI_SPEED_100MHz_PCIX = 3, PCI_SPEED_133MHz_PCIX = 4, PCI_SPEED_66MHz_PCIX_ECC = 5, PCI_SPEED_100MHz_PCIX_ECC = 6, PCI_SPEED_133MHz_PCIX_ECC = 7, PCI_SPEED_66MHz_PCIX_266 = 9, PCI_SPEED_100MHz_PCIX_266 = 10, PCI_SPEED_133MHz_PCIX_266 = 11, AGP_UNKNOWN = 12, AGP_1X = 13, AGP_2X = 14, AGP_4X = 15, AGP_8X = 16, PCI_SPEED_66MHz_PCIX_533 = 17, PCI_SPEED_100MHz_PCIX_533 = 18, PCI_SPEED_133MHz_PCIX_533 = 19, PCIE_SPEED_2_5GT = 20, PCIE_SPEED_5_0GT = 21, PCIE_SPEED_8_0GT = 22, PCIE_SPEED_16_0GT = 23, PCIE_SPEED_32_0GT = 24, PCIE_SPEED_64_0GT = 25, PCI_SPEED_UNKNOWN = 255, }; enum pci_bus_flags { PCI_BUS_FLAGS_NO_MSI = 1, PCI_BUS_FLAGS_NO_MMRBC = 2, PCI_BUS_FLAGS_NO_AERSID = 4, PCI_BUS_FLAGS_NO_EXTCFG = 8, }; enum pcie_bus_config_types { PCIE_BUS_TUNE_OFF = 0, PCIE_BUS_DEFAULT = 1, PCIE_BUS_SAFE = 2, PCIE_BUS_PERFORMANCE = 3, PCIE_BUS_PEER2PEER = 4, }; enum { PCI_REASSIGN_ALL_RSRC = 1, PCI_REASSIGN_ALL_BUS = 2, PCI_PROBE_ONLY = 4, PCI_CAN_SKIP_ISA_ALIGN = 8, PCI_ENABLE_PROC_DOMAINS = 16, PCI_COMPAT_DOMAIN_0 = 32, PCI_SCAN_ALL_PCIE_DEVS = 64, }; enum pci_dev_flags { PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = 1, PCI_DEV_FLAGS_NO_D3 = 2, PCI_DEV_FLAGS_ASSIGNED = 4, PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = 8, PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = 32, 
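/*
 * Illustrative BPF CO-RE sketch for the PWM types above (struct pwm_state,
 * btf_trace_pwm_apply, struct trace_event_raw_pwm). Like the GPIO example, it
 * belongs in a separate BPF object that includes this header. The section name
 * assumes the kernel's pwm_apply tracepoint, whose argument list matches the
 * btf_trace_pwm_apply typedef above; bpf_printk is used only for illustration.
 *
 *   #include "vmlinux.h"
 *   #include <bpf/bpf_helpers.h>
 *   #include <bpf/bpf_tracing.h>
 *   #include <bpf/bpf_core_read.h>
 *
 *   SEC("tp_btf/pwm_apply")
 *   int BPF_PROG(on_pwm_apply, struct pwm_device *pwm,
 *                const struct pwm_state *state, int err)
 *   {
 *       u64 period = BPF_CORE_READ(state, period);
 *       u64 duty   = BPF_CORE_READ(state, duty_cycle);
 *
 *       if (period)
 *           bpf_printk("pwm duty %llu/%llu err=%d", duty, period, err);
 *       return 0;
 *   }
 *
 *   char LICENSE[] SEC("license") = "GPL";
 */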
PCI_DEV_FLAGS_NO_BUS_RESET = 64, PCI_DEV_FLAGS_NO_PM_RESET = 128, PCI_DEV_FLAGS_VPD_REF_F0 = 256, PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = 512, PCI_DEV_FLAGS_NO_FLR_RESET = 1024, PCI_DEV_FLAGS_NO_RELAXED_ORDERING = 2048, PCI_DEV_FLAGS_HAS_MSI_MASKING = 4096, }; struct hotplug_slot_ops; struct hotplug_slot { const struct hotplug_slot_ops *ops; struct list_head slot_list; struct pci_slot *pci_slot; struct module *owner; const char *mod_name; }; struct hotplug_slot_ops { int (*enable_slot)(struct hotplug_slot *); int (*disable_slot)(struct hotplug_slot *); int (*set_attention_status)(struct hotplug_slot *, u8); int (*hardware_test)(struct hotplug_slot *, u32); int (*get_power_status)(struct hotplug_slot *, u8 *); int (*get_attention_status)(struct hotplug_slot *, u8 *); int (*get_latch_status)(struct hotplug_slot *, u8 *); int (*get_adapter_status)(struct hotplug_slot *, u8 *); int (*reset_slot)(struct hotplug_slot *, bool); }; struct pci_host_bridge { struct device dev; struct pci_bus *bus; struct pci_ops *ops; struct pci_ops *child_ops; void *sysdata; int busnr; int domain_nr; struct list_head windows; struct list_head dma_ranges; u8 (*swizzle_irq)(struct pci_dev *, u8 *); int (*map_irq)(const struct pci_dev *, u8, u8); void (*release_fn)(struct pci_host_bridge *); void *release_data; unsigned int ignore_reset_delay: 1; unsigned int no_ext_tags: 1; unsigned int no_inc_mrrs: 1; unsigned int native_aer: 1; unsigned int native_pcie_hotplug: 1; unsigned int native_shpc_hotplug: 1; unsigned int native_pme: 1; unsigned int native_ltr: 1; unsigned int native_dpc: 1; unsigned int native_cxl_error: 1; unsigned int preserve_config: 1; unsigned int size_windows: 1; unsigned int msi_domain: 1; resource_size_t (*align_resource)(struct pci_dev *, const struct resource *, resource_size_t, resource_size_t, resource_size_t); u64 android_kabi_reserved1; u64 android_kabi_reserved2; long: 64; long: 64; unsigned long private[0]; }; struct pci_domain_busn_res { struct list_head list; struct resource res; int domain_nr; }; typedef int (*arch_set_vga_state_t)(struct pci_dev *, bool, unsigned int, u32); struct dmi_strmatch { unsigned char slot: 7; unsigned char exact_match: 1; char substr[79]; }; struct dmi_system_id { int (*callback)(const struct dmi_system_id *); const char *ident; struct dmi_strmatch matches[4]; void *driver_data; }; struct pci_reset_fn_method { int (*reset_fn)(struct pci_dev *, bool); char *name; }; struct bus_attribute { struct attribute attr; ssize_t (*show)(const struct bus_type *, char *); ssize_t (*store)(const struct bus_type *, const char *, size_t); }; enum pcie_reset_state { pcie_deassert_reset = 1, pcie_warm_reset = 2, pcie_hot_reset = 3, }; enum { LOGIC_PIO_INDIRECT = 0, LOGIC_PIO_CPU_MMIO = 1, }; enum pcie_link_width { PCIE_LNK_WIDTH_RESRV = 0, PCIE_LNK_X1 = 1, PCIE_LNK_X2 = 2, PCIE_LNK_X4 = 4, PCIE_LNK_X8 = 8, PCIE_LNK_X12 = 12, PCIE_LNK_X16 = 16, PCIE_LNK_X32 = 32, PCIE_LNK_WIDTH_UNKNOWN = 255, }; struct pci_cap_saved_data { u16 cap_nr; bool cap_extended; unsigned int size; u32 data[0]; }; struct pci_cap_saved_state { struct hlist_node next; struct pci_cap_saved_data cap; }; struct pci_pme_device { struct list_head list; struct pci_dev *dev; }; struct pci_devres { unsigned int enabled: 1; unsigned int pinned: 1; unsigned int orig_intx: 1; unsigned int restore_intx: 1; unsigned int mwi: 1; u32 region_mask; }; struct logic_pio_host_ops; struct logic_pio_hwaddr { struct list_head list; struct fwnode_handle *fwnode; resource_size_t hw_start; resource_size_t io_start; resource_size_t size; 
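/*
 * Illustrative BPF sketch for struct pci_host_bridge above: log the PCI domain
 * and root bus number when a host bridge is probed. Belongs in a separate BPF
 * object; the attach point (pci_host_probe) is an assumption and must exist
 * and be traceable on the target kernel.
 *
 *   #include "vmlinux.h"
 *   #include <bpf/bpf_helpers.h>
 *   #include <bpf/bpf_tracing.h>
 *   #include <bpf/bpf_core_read.h>
 *
 *   SEC("fentry/pci_host_probe")
 *   int BPF_PROG(on_host_probe, struct pci_host_bridge *bridge)
 *   {
 *       int domain = BPF_CORE_READ(bridge, domain_nr);
 *       int busnr  = BPF_CORE_READ(bridge, busnr);
 *
 *       bpf_printk("pci host bridge %04x:%02x probed", domain, busnr);
 *       return 0;
 *   }
 *
 *   char LICENSE[] SEC("license") = "GPL";
 */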
unsigned long flags; void *hostdata; const struct logic_pio_host_ops *ops; }; struct logic_pio_host_ops { u32 (*in)(void *, unsigned long, size_t); void (*out)(void *, unsigned long, u32, size_t); u32 (*ins)(void *, unsigned long, void *, size_t, unsigned int); void (*outs)(void *, unsigned long, const void *, size_t, unsigned int); }; struct pci_saved_state { u32 config_space[16]; struct pci_cap_saved_data cap[0]; }; struct driver_attribute { struct attribute attr; ssize_t (*show)(struct device_driver *, char *); ssize_t (*store)(struct device_driver *, const char *, size_t); }; enum pci_ers_result { PCI_ERS_RESULT_NONE = 1, PCI_ERS_RESULT_CAN_RECOVER = 2, PCI_ERS_RESULT_NEED_RESET = 3, PCI_ERS_RESULT_DISCONNECT = 4, PCI_ERS_RESULT_RECOVERED = 5, PCI_ERS_RESULT_NO_AER_DRIVER = 6, }; struct pci_dynid { struct list_head node; struct pci_device_id id; }; struct pcie_device { int irq; struct pci_dev *port; u32 service; void *priv_data; struct device device; }; struct pcie_port_service_driver { const char *name; int (*probe)(struct pcie_device *); void (*remove)(struct pcie_device *); int (*suspend)(struct pcie_device *); int (*resume_noirq)(struct pcie_device *); int (*resume)(struct pcie_device *); int (*runtime_suspend)(struct pcie_device *); int (*runtime_resume)(struct pcie_device *); int (*slot_reset)(struct pcie_device *); int port_type; u32 service; struct device_driver driver; }; struct drv_dev_and_id { struct pci_driver *drv; struct pci_dev *dev; const struct pci_device_id *id; }; enum pci_mmap_api { PCI_MMAP_SYSFS = 0, PCI_MMAP_PROCFS = 1, }; enum pci_mmap_state { pci_mmap_io = 0, pci_mmap_mem = 1, }; enum enable_type { undefined = -1, user_disabled = 0, auto_disabled = 1, user_enabled = 2, auto_enabled = 3, }; enum release_type { leaf_only = 0, whole_subtree = 1, }; struct pci_dev_resource { struct list_head list; struct resource *res; struct pci_dev *dev; resource_size_t start; resource_size_t end; resource_size_t add_size; resource_size_t min_align; unsigned long flags; }; enum support_mode { ALLOW_LEGACY = 0, DENY_LEGACY = 1, }; struct msix_entry { u32 vector; u16 entry; }; struct portdrv_service_data { struct pcie_port_service_driver *drv; struct device *dev; u32 service; }; typedef int (*pcie_callback_t)(struct pcie_device *); struct walk_rcec_data { struct pci_dev *rcec; int (*user_callback)(struct pci_dev *, void *); void *user_data; }; struct pcie_link_state { struct pci_dev *pdev; struct pci_dev *downstream; struct pcie_link_state *root; struct pcie_link_state *parent; struct list_head sibling; u32 aspm_support: 7; u32 aspm_enabled: 7; u32 aspm_capable: 7; u32 aspm_default: 7; int: 4; u32 aspm_disable: 7; u32 clkpm_capable: 1; u32 clkpm_enabled: 1; u32 clkpm_default: 1; u32 clkpm_disable: 1; }; struct aer_stats { u64 dev_cor_errs[16]; u64 dev_fatal_errs[27]; u64 dev_nonfatal_errs[27]; u64 dev_total_cor_errs; u64 dev_total_fatal_errs; u64 dev_total_nonfatal_errs; u64 rootport_total_cor_errs; u64 rootport_total_fatal_errs; u64 rootport_total_nonfatal_errs; }; struct aer_err_source { unsigned int status; unsigned int id; }; struct aer_header_log_regs { unsigned int dw0; unsigned int dw1; unsigned int dw2; unsigned int dw3; }; struct aer_err_info { struct pci_dev *dev[5]; int error_dev_num; unsigned int id: 16; unsigned int severity: 2; unsigned int __pad1: 5; unsigned int multi_error_valid: 1; unsigned int first_error: 5; unsigned int __pad2: 2; unsigned int tlp_header_valid: 1; unsigned int status; unsigned int mask; struct aer_header_log_regs tlp; }; struct aer_rpc { struct 
pci_dev *rpd; struct { union { struct __kfifo kfifo; struct aer_err_source *type; const struct aer_err_source *const_type; char (*rectype)[0]; struct aer_err_source *ptr; const struct aer_err_source *ptr_const; }; struct aer_err_source buf[128]; } aer_fifo; }; struct pcie_pme_service_data { spinlock_t lock; struct pcie_device *srv; struct work_struct work; bool noirq; }; struct pci_slot_attribute { struct attribute attr; ssize_t (*show)(struct pci_slot *, char *); ssize_t (*store)(struct pci_slot *, const char *, size_t); }; struct of_bus; struct of_pci_range_parser { struct device_node *node; struct of_bus *bus; const __be32 *range; const __be32 *end; int na; int ns; int pna; bool dma; }; struct of_pci_range { union { u64 pci_addr; u64 bus_addr; }; u64 cpu_addr; u64 size; u32 flags; }; struct pci_dev_reset_methods { u16 vendor; u16 device; int (*reset)(struct pci_dev *, bool); }; struct pci_dev_acs_enabled { u16 vendor; u16 device; int (*acs_enabled)(struct pci_dev *, u16); }; struct pci_dev_acs_ops { u16 vendor; u16 device; int (*enable_acs)(struct pci_dev *); int (*disable_acs_redir)(struct pci_dev *); }; enum dmi_field { DMI_NONE = 0, DMI_BIOS_VENDOR = 1, DMI_BIOS_VERSION = 2, DMI_BIOS_DATE = 3, DMI_BIOS_RELEASE = 4, DMI_EC_FIRMWARE_RELEASE = 5, DMI_SYS_VENDOR = 6, DMI_PRODUCT_NAME = 7, DMI_PRODUCT_VERSION = 8, DMI_PRODUCT_SERIAL = 9, DMI_PRODUCT_UUID = 10, DMI_PRODUCT_SKU = 11, DMI_PRODUCT_FAMILY = 12, DMI_BOARD_VENDOR = 13, DMI_BOARD_NAME = 14, DMI_BOARD_VERSION = 15, DMI_BOARD_SERIAL = 16, DMI_BOARD_ASSET_TAG = 17, DMI_CHASSIS_VENDOR = 18, DMI_CHASSIS_TYPE = 19, DMI_CHASSIS_VERSION = 20, DMI_CHASSIS_SERIAL = 21, DMI_CHASSIS_ASSET_TAG = 22, DMI_STRING_MAX = 23, DMI_OEM_STRING = 24, }; enum { NVME_REG_CAP = 0, NVME_REG_VS = 8, NVME_REG_INTMS = 12, NVME_REG_INTMC = 16, NVME_REG_CC = 20, NVME_REG_CSTS = 28, NVME_REG_NSSR = 32, NVME_REG_AQA = 36, NVME_REG_ASQ = 40, NVME_REG_ACQ = 48, NVME_REG_CMBLOC = 56, NVME_REG_CMBSZ = 60, NVME_REG_BPINFO = 64, NVME_REG_BPRSEL = 68, NVME_REG_BPMBL = 72, NVME_REG_CMBMSC = 80, NVME_REG_CRTO = 104, NVME_REG_PMRCAP = 3584, NVME_REG_PMRCTL = 3588, NVME_REG_PMRSTS = 3592, NVME_REG_PMREBS = 3596, NVME_REG_PMRSWTP = 3600, NVME_REG_DBS = 4096, }; enum { NVME_CC_ENABLE = 1, NVME_CC_EN_SHIFT = 0, NVME_CC_CSS_SHIFT = 4, NVME_CC_MPS_SHIFT = 7, NVME_CC_AMS_SHIFT = 11, NVME_CC_SHN_SHIFT = 14, NVME_CC_IOSQES_SHIFT = 16, NVME_CC_IOCQES_SHIFT = 20, NVME_CC_CSS_NVM = 0, NVME_CC_CSS_CSI = 96, NVME_CC_CSS_MASK = 112, NVME_CC_AMS_RR = 0, NVME_CC_AMS_WRRU = 2048, NVME_CC_AMS_VS = 14336, NVME_CC_SHN_NONE = 0, NVME_CC_SHN_NORMAL = 16384, NVME_CC_SHN_ABRUPT = 32768, NVME_CC_SHN_MASK = 49152, NVME_CC_IOSQES = 393216, NVME_CC_IOCQES = 4194304, NVME_CC_CRIME = 16777216, }; enum { NVME_CSTS_RDY = 1, NVME_CSTS_CFS = 2, NVME_CSTS_NSSRO = 16, NVME_CSTS_PP = 32, NVME_CSTS_SHST_NORMAL = 0, NVME_CSTS_SHST_OCCUR = 4, NVME_CSTS_SHST_CMPLT = 8, NVME_CSTS_SHST_MASK = 12, }; enum { SWITCHTEC_GAS_MRPC_OFFSET = 0, SWITCHTEC_GAS_TOP_CFG_OFFSET = 4096, SWITCHTEC_GAS_SW_EVENT_OFFSET = 6144, SWITCHTEC_GAS_SYS_INFO_OFFSET = 8192, SWITCHTEC_GAS_FLASH_INFO_OFFSET = 8704, SWITCHTEC_GAS_PART_CFG_OFFSET = 16384, SWITCHTEC_GAS_NTB_OFFSET = 65536, SWITCHTEC_GAS_PFF_CSR_OFFSET = 1261568, }; enum { SWITCHTEC_NTB_REG_INFO_OFFSET = 0, SWITCHTEC_NTB_REG_CTRL_OFFSET = 16384, SWITCHTEC_NTB_REG_DBMSG_OFFSET = 409600, }; struct pci_fixup { u16 vendor; u16 device; u32 class; unsigned int class_shift; int hook_offset; }; struct ntb_ctrl_regs { u32 partition_status; u32 partition_op; u32 partition_ctrl; u32 
bar_setup; u32 bar_error; u16 lut_table_entries; u16 lut_table_offset; u32 lut_error; u16 req_id_table_size; u16 req_id_table_offset; u32 req_id_error; u32 reserved1[7]; struct { u32 ctl; u32 win_size; u64 xlate_addr; } bar_entry[6]; struct { u32 win_size; u32 reserved[3]; } bar_ext_entry[6]; u32 reserved2[192]; u32 req_id_table[512]; u32 reserved3[256]; u64 lut_entry[512]; }; struct nt_partition_info { u32 xlink_enabled; u32 target_part_low; u32 target_part_high; u32 reserved; }; struct ntb_info_regs { u8 partition_count; u8 partition_id; u16 reserved1; u64 ep_map; u16 requester_id; u16 reserved2; u32 reserved3[4]; struct nt_partition_info ntp_info[48]; } __attribute__((packed)); struct pci_config_window; struct pci_ecam_ops { unsigned int bus_shift; struct pci_ops pci_ops; int (*init)(struct pci_config_window *); }; struct pci_config_window { struct resource res; struct resource busr; unsigned int bus_shift; void *priv; const struct pci_ecam_ops *ops; union { void *win; void **winp; }; struct device *parent; }; enum pci_interrupt_pin { PCI_INTERRUPT_UNKNOWN = 0, PCI_INTERRUPT_INTA = 1, PCI_INTERRUPT_INTB = 2, PCI_INTERRUPT_INTC = 3, PCI_INTERRUPT_INTD = 4, }; enum pci_barno { NO_BAR = -1, BAR_0 = 0, BAR_1 = 1, BAR_2 = 2, BAR_3 = 3, BAR_4 = 4, BAR_5 = 5, }; enum pci_epc_irq_type { PCI_EPC_IRQ_UNKNOWN = 0, PCI_EPC_IRQ_LEGACY = 1, PCI_EPC_IRQ_MSI = 2, PCI_EPC_IRQ_MSIX = 3, }; enum pci_epc_interface_type { UNKNOWN_INTERFACE = -1, PRIMARY_INTERFACE = 0, SECONDARY_INTERFACE = 1, }; struct pci_epc_ops; struct pci_epc_mem; struct pci_epc { struct device dev; struct list_head pci_epf; struct mutex list_lock; const struct pci_epc_ops *ops; struct pci_epc_mem **windows; struct pci_epc_mem *mem; unsigned int num_windows; u8 max_functions; u8 *max_vfs; struct config_group *group; struct mutex lock; unsigned long function_num_map; }; struct pci_epf_header; struct pci_epf_bar; struct pci_epc_features; struct pci_epc_ops { int (*write_header)(struct pci_epc *, u8, u8, struct pci_epf_header *); int (*set_bar)(struct pci_epc *, u8, u8, struct pci_epf_bar *); void (*clear_bar)(struct pci_epc *, u8, u8, struct pci_epf_bar *); int (*map_addr)(struct pci_epc *, u8, u8, phys_addr_t, u64, size_t); void (*unmap_addr)(struct pci_epc *, u8, u8, phys_addr_t); int (*set_msi)(struct pci_epc *, u8, u8, u8); int (*get_msi)(struct pci_epc *, u8, u8); int (*set_msix)(struct pci_epc *, u8, u8, u16, enum pci_barno, u32); int (*get_msix)(struct pci_epc *, u8, u8); int (*raise_irq)(struct pci_epc *, u8, u8, enum pci_epc_irq_type, u16); int (*map_msi_irq)(struct pci_epc *, u8, u8, phys_addr_t, u8, u32, u32 *, u32 *); int (*start)(struct pci_epc *); void (*stop)(struct pci_epc *); const struct pci_epc_features * (*get_features)(struct pci_epc *, u8, u8); struct module *owner; }; struct pci_epf_header { u16 vendorid; u16 deviceid; u8 revid; u8 progif_code; u8 subclass_code; u8 baseclass_code; u8 cache_line_size; u16 subsys_vendor_id; u16 subsys_id; enum pci_interrupt_pin interrupt_pin; }; struct pci_epf_bar { dma_addr_t phys_addr; void *addr; size_t size; enum pci_barno barno; int flags; }; struct pci_epc_features { unsigned int linkup_notifier: 1; unsigned int core_init_notifier: 1; unsigned int msi_capable: 1; unsigned int msix_capable: 1; u8 reserved_bar; u8 bar_fixed_64bit; u64 bar_fixed_size[6]; size_t align; }; struct pci_epc_mem_window { phys_addr_t phys_base; size_t size; size_t page_size; }; struct pci_epc_mem { struct pci_epc_mem_window window; unsigned long *bitmap; int pages; struct mutex lock; }; struct 
pci_epf_driver; struct pci_epf_device_id; struct pci_epc_event_ops; struct pci_epf { struct device dev; const char *name; struct pci_epf_header *header; struct pci_epf_bar bar[6]; u8 msi_interrupts; u16 msix_interrupts; u8 func_no; u8 vfunc_no; struct pci_epc *epc; struct pci_epf *epf_pf; struct pci_epf_driver *driver; const struct pci_epf_device_id *id; struct list_head list; struct mutex lock; struct pci_epc *sec_epc; struct list_head sec_epc_list; struct pci_epf_bar sec_epc_bar[6]; u8 sec_epc_func_no; struct config_group *group; unsigned int is_bound; unsigned int is_vf; unsigned long vfunction_num_map; struct list_head pci_vepf; const struct pci_epc_event_ops *event_ops; }; struct pci_epf_ops; struct pci_epf_driver { int (*probe)(struct pci_epf *, const struct pci_epf_device_id *); void (*remove)(struct pci_epf *); struct device_driver driver; struct pci_epf_ops *ops; struct module *owner; struct list_head epf_group; const struct pci_epf_device_id *id_table; }; struct pci_epf_device_id { char name[20]; kernel_ulong_t driver_data; }; struct pci_epf_ops { int (*bind)(struct pci_epf *); void (*unbind)(struct pci_epf *); struct config_group * (*add_cfs)(struct pci_epf *, struct config_group *); }; struct pci_epc_event_ops { int (*core_init)(struct pci_epf *); int (*link_up)(struct pci_epf *); int (*link_down)(struct pci_epf *); int (*bme)(struct pci_epf *); }; struct dw_edma_plat_ops { int (*irq_vector)(struct device *, unsigned int); u64 (*pci_address)(struct device *, phys_addr_t); }; enum dw_pcie_ltssm { DW_PCIE_LTSSM_DETECT_QUIET = 0, DW_PCIE_LTSSM_DETECT_ACT = 1, DW_PCIE_LTSSM_L0 = 17, DW_PCIE_LTSSM_L2_IDLE = 21, DW_PCIE_LTSSM_UNKNOWN = 4294967295, }; enum dw_edma_map_format { EDMA_MF_EDMA_LEGACY = 0, EDMA_MF_EDMA_UNROLL = 1, EDMA_MF_HDMA_COMPAT = 5, EDMA_MF_HDMA_NATIVE = 7, }; enum dw_pcie_app_clk { DW_PCIE_DBI_CLK = 0, DW_PCIE_MSTR_CLK = 1, DW_PCIE_SLV_CLK = 2, DW_PCIE_NUM_APP_CLKS = 3, }; enum dw_pcie_core_clk { DW_PCIE_PIPE_CLK = 0, DW_PCIE_CORE_CLK = 1, DW_PCIE_AUX_CLK = 2, DW_PCIE_REF_CLK = 3, DW_PCIE_NUM_CORE_CLKS = 4, }; enum dw_pcie_app_rst { DW_PCIE_DBI_RST = 0, DW_PCIE_MSTR_RST = 1, DW_PCIE_SLV_RST = 2, DW_PCIE_NUM_APP_RSTS = 3, }; enum dw_pcie_core_rst { DW_PCIE_NON_STICKY_RST = 0, DW_PCIE_STICKY_RST = 1, DW_PCIE_CORE_RST = 2, DW_PCIE_PIPE_RST = 3, DW_PCIE_PHY_RST = 4, DW_PCIE_HOT_RST = 5, DW_PCIE_PWR_RST = 6, DW_PCIE_NUM_CORE_RSTS = 7, }; enum dw_edma_chip_flags { DW_EDMA_CHIP_LOCAL = 1, }; struct dw_pcie_host_ops; struct dw_pcie_rp { bool has_msi_ctrl: 1; bool cfg0_io_shared: 1; u64 cfg0_base; void *va_cfg0_base; u32 cfg0_size; resource_size_t io_base; phys_addr_t io_bus_addr; u32 io_size; int irq; const struct dw_pcie_host_ops *ops; int msi_irq[8]; struct irq_domain *irq_domain; struct irq_domain *msi_domain; dma_addr_t msi_data; struct irq_chip *msi_irq_chip; u32 num_vectors; u32 irq_mask[8]; struct pci_host_bridge *bridge; raw_spinlock_t lock; unsigned long msi_irq_in_use[4]; }; struct dw_pcie_ep_ops; struct dw_pcie_ep { struct pci_epc *epc; struct list_head func_list; const struct dw_pcie_ep_ops *ops; phys_addr_t phys_base; size_t addr_size; size_t page_size; u8 bar_to_atu[6]; phys_addr_t *outbound_addr; unsigned long *ib_window_map; unsigned long *ob_window_map; void *msi_mem; phys_addr_t msi_mem_phys; struct pci_epf_bar *epf_bar[6]; }; struct dw_edma_region { u64 paddr; union { void *mem; void *io; } vaddr; size_t sz; }; struct dw_edma; struct dw_edma_chip { struct device *dev; int nr_irqs; const struct dw_edma_plat_ops *ops; u32 flags; void *reg_base; u16 
ll_wr_cnt; u16 ll_rd_cnt; struct dw_edma_region ll_region_wr[8]; struct dw_edma_region ll_region_rd[8]; struct dw_edma_region dt_region_wr[8]; struct dw_edma_region dt_region_rd[8]; enum dw_edma_map_format mf; struct dw_edma *dw; }; struct reset_control_bulk_data { const char *id; struct reset_control *rstc; }; struct dw_pcie_ops; struct dw_pcie { struct device *dev; void *dbi_base; void *dbi_base2; void *atu_base; size_t atu_size; u32 num_ib_windows; u32 num_ob_windows; u32 region_align; u64 region_limit; struct dw_pcie_rp pp; struct dw_pcie_ep ep; const struct dw_pcie_ops *ops; u32 version; u32 type; unsigned long caps; int num_lanes; int link_gen; u8 n_fts[2]; struct dw_edma_chip edma; struct clk_bulk_data app_clks[3]; struct clk_bulk_data core_clks[4]; struct reset_control_bulk_data app_rsts[3]; struct reset_control_bulk_data core_rsts[7]; struct gpio_desc *pe_rst; bool suspended; }; struct dw_pcie_host_ops { int (*host_init)(struct dw_pcie_rp *); void (*host_deinit)(struct dw_pcie_rp *); int (*msi_host_init)(struct dw_pcie_rp *); void (*pme_turn_off)(struct dw_pcie_rp *); }; struct dw_pcie_ep_ops { void (*ep_init)(struct dw_pcie_ep *); int (*raise_irq)(struct dw_pcie_ep *, u8, enum pci_epc_irq_type, u16); const struct pci_epc_features * (*get_features)(struct dw_pcie_ep *); unsigned int (*func_conf_select)(struct dw_pcie_ep *, u8); }; struct dw_pcie_ops { u64 (*cpu_addr_fixup)(struct dw_pcie *, u64); u32 (*read_dbi)(struct dw_pcie *, void *, u32, size_t); void (*write_dbi)(struct dw_pcie *, void *, u32, size_t, u32); void (*write_dbi2)(struct dw_pcie *, void *, u32, size_t, u32); int (*link_up)(struct dw_pcie *); enum dw_pcie_ltssm (*get_ltssm)(struct dw_pcie *); int (*start_link)(struct dw_pcie *); void (*stop_link)(struct dw_pcie *); }; struct dw_pcie_ep_func { struct list_head list; u8 func_no; u8 msi_cap; u8 msix_cap; }; struct pci_epf_msix_tbl { u64 msg_addr; u32 msg_data; u32 vector_ctrl; }; enum dw_pcie_device_mode { DW_PCIE_UNKNOWN_TYPE = 0, DW_PCIE_EP_TYPE = 1, DW_PCIE_LEG_EP_TYPE = 2, DW_PCIE_RC_TYPE = 3, }; struct dw_plat_pcie_of_data { enum dw_pcie_device_mode mode; }; struct dw_plat_pcie { struct dw_pcie *pci; enum dw_pcie_device_mode mode; }; struct qcom_pcie_ops; struct qcom_pcie_cfg { const struct qcom_pcie_ops *ops; }; struct qcom_pcie; struct qcom_pcie_ops { int (*get_resources)(struct qcom_pcie *); int (*init)(struct qcom_pcie *); int (*post_init)(struct qcom_pcie *); void (*deinit)(struct qcom_pcie *); void (*ltssm_enable)(struct qcom_pcie *); int (*config_sid)(struct qcom_pcie *); }; struct qcom_pcie_resources_1_0_0 { struct clk_bulk_data clks[4]; struct reset_control *core; struct regulator *vdda; }; struct regulator_bulk_data { const char *supply; int init_load_uA; struct regulator *consumer; int ret; }; struct qcom_pcie_resources_2_1_0 { struct clk_bulk_data clks[5]; struct reset_control_bulk_data resets[6]; int num_resets; struct regulator_bulk_data supplies[3]; }; struct qcom_pcie_resources_2_3_2 { struct clk_bulk_data clks[4]; struct regulator_bulk_data supplies[2]; }; struct qcom_pcie_resources_2_3_3 { struct clk_bulk_data clks[5]; struct reset_control_bulk_data rst[7]; }; struct qcom_pcie_resources_2_4_0 { struct clk_bulk_data clks[4]; int num_clks; struct reset_control_bulk_data resets[12]; int num_resets; }; struct qcom_pcie_resources_2_7_0 { struct clk_bulk_data clks[15]; int num_clks; struct regulator_bulk_data supplies[2]; struct reset_control *rst; }; struct qcom_pcie_resources_2_9_0 { struct clk_bulk_data clks[5]; struct reset_control *rst; }; union 
qcom_pcie_resources { struct qcom_pcie_resources_1_0_0 v1_0_0; struct qcom_pcie_resources_2_1_0 v2_1_0; struct qcom_pcie_resources_2_3_2 v2_3_2; struct qcom_pcie_resources_2_3_3 v2_3_3; struct qcom_pcie_resources_2_4_0 v2_4_0; struct qcom_pcie_resources_2_7_0 v2_7_0; struct qcom_pcie_resources_2_9_0 v2_9_0; }; struct icc_path; struct qcom_pcie { struct dw_pcie *pci; void *parf; void *elbi; void *mhi; union qcom_pcie_resources res; struct phy *phy; struct gpio_desc *reset; struct icc_path *icc_mem; const struct qcom_pcie_cfg *cfg; struct dentry *debugfs; bool suspended; }; typedef void (*regmap_lock)(void *); typedef void (*regmap_unlock)(void *); enum regcache_type { REGCACHE_NONE = 0, REGCACHE_RBTREE = 1, REGCACHE_FLAT = 2, REGCACHE_MAPLE = 3, }; enum regmap_endian { REGMAP_ENDIAN_DEFAULT = 0, REGMAP_ENDIAN_BIG = 1, REGMAP_ENDIAN_LITTLE = 2, REGMAP_ENDIAN_NATIVE = 3, }; struct regmap_access_table; struct reg_default; struct regmap_range_cfg; struct regmap_config { const char *name; int reg_bits; int reg_stride; int reg_shift; unsigned int reg_base; int pad_bits; int val_bits; bool (*writeable_reg)(struct device *, unsigned int); bool (*readable_reg)(struct device *, unsigned int); bool (*volatile_reg)(struct device *, unsigned int); bool (*precious_reg)(struct device *, unsigned int); bool (*writeable_noinc_reg)(struct device *, unsigned int); bool (*readable_noinc_reg)(struct device *, unsigned int); bool disable_locking; regmap_lock lock; regmap_unlock unlock; void *lock_arg; int (*reg_read)(void *, unsigned int, unsigned int *); int (*reg_write)(void *, unsigned int, unsigned int); int (*reg_update_bits)(void *, unsigned int, unsigned int, unsigned int); int (*read)(void *, const void *, size_t, void *, size_t); int (*write)(void *, const void *, size_t); size_t max_raw_read; size_t max_raw_write; bool fast_io; bool io_port; unsigned int max_register; const struct regmap_access_table *wr_table; const struct regmap_access_table *rd_table; const struct regmap_access_table *volatile_table; const struct regmap_access_table *precious_table; const struct regmap_access_table *wr_noinc_table; const struct regmap_access_table *rd_noinc_table; const struct reg_default *reg_defaults; unsigned int num_reg_defaults; enum regcache_type cache_type; const void *reg_defaults_raw; unsigned int num_reg_defaults_raw; unsigned long read_flag_mask; unsigned long write_flag_mask; bool zero_flag_mask; bool use_single_read; bool use_single_write; bool use_relaxed_mmio; bool can_multi_write; enum regmap_endian reg_format_endian; enum regmap_endian val_format_endian; const struct regmap_range_cfg *ranges; unsigned int num_ranges; bool use_hwlock; bool use_raw_spinlock; unsigned int hwlock_id; unsigned int hwlock_mode; bool can_sleep; u64 android_kabi_reserved1; }; struct regmap_range; struct regmap_access_table { const struct regmap_range *yes_ranges; unsigned int n_yes_ranges; const struct regmap_range *no_ranges; unsigned int n_no_ranges; }; struct regmap_range { unsigned int range_min; unsigned int range_max; }; struct reg_default { unsigned int reg; unsigned int def; }; struct regmap_range_cfg { const char *name; unsigned int range_min; unsigned int range_max; unsigned int selector_reg; unsigned int selector_mask; int selector_shift; unsigned int window_start; unsigned int window_len; u64 android_kabi_reserved1; }; enum pcie_kirin_phy_type { PCIE_KIRIN_INTERNAL_PHY = 0, PCIE_KIRIN_EXTERNAL_PHY = 1, }; struct kirin_pcie_data { enum pcie_kirin_phy_type phy_type; }; struct regmap; struct kirin_pcie { enum 
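/*
 * Illustrative sketch for struct regmap_config above: a minimal MMIO regmap
 * description as a driver would fill it in. Not part of this header; the
 * register geometry (32-bit registers, stride 4, last register 0x100) is
 * hypothetical.
 *
 *   #include <linux/regmap.h>
 *
 *   static const struct regmap_config example_regmap_cfg = {
 *       .reg_bits     = 32,              // register address width
 *       .reg_stride   = 4,               // registers are word aligned
 *       .val_bits     = 32,              // register value width
 *       .max_register = 0x100,           // hypothetical last register
 *       .cache_type   = REGCACHE_NONE,   // no register cache
 *   };
 *
 *   // Typically paired with devm_regmap_init_mmio(dev, base, &example_regmap_cfg).
 */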
pcie_kirin_phy_type type; struct dw_pcie *pci; struct regmap *apb; struct phy *phy; void *phy_priv; int gpio_id_dwc_perst; int num_slots; int gpio_id_reset[3]; const char *reset_names[3]; int n_gpio_clkreq; int gpio_id_clkreq[3]; const char *clkreq_names[3]; }; struct hi3660_pcie_phy { struct device *dev; void *base; struct regmap *crgctrl; struct regmap *sysctrl; struct clk *apb_sys_clk; struct clk *apb_phy_clk; struct clk *phy_ref_clk; struct clk *aclk; struct clk *aux_clk; }; enum hdmi_infoframe_type { HDMI_INFOFRAME_TYPE_VENDOR = 129, HDMI_INFOFRAME_TYPE_AVI = 130, HDMI_INFOFRAME_TYPE_SPD = 131, HDMI_INFOFRAME_TYPE_AUDIO = 132, HDMI_INFOFRAME_TYPE_DRM = 135, }; enum hdmi_colorspace { HDMI_COLORSPACE_RGB = 0, HDMI_COLORSPACE_YUV422 = 1, HDMI_COLORSPACE_YUV444 = 2, HDMI_COLORSPACE_YUV420 = 3, HDMI_COLORSPACE_RESERVED4 = 4, HDMI_COLORSPACE_RESERVED5 = 5, HDMI_COLORSPACE_RESERVED6 = 6, HDMI_COLORSPACE_IDO_DEFINED = 7, }; enum hdmi_scan_mode { HDMI_SCAN_MODE_NONE = 0, HDMI_SCAN_MODE_OVERSCAN = 1, HDMI_SCAN_MODE_UNDERSCAN = 2, HDMI_SCAN_MODE_RESERVED = 3, }; enum hdmi_colorimetry { HDMI_COLORIMETRY_NONE = 0, HDMI_COLORIMETRY_ITU_601 = 1, HDMI_COLORIMETRY_ITU_709 = 2, HDMI_COLORIMETRY_EXTENDED = 3, }; enum hdmi_picture_aspect { HDMI_PICTURE_ASPECT_NONE = 0, HDMI_PICTURE_ASPECT_4_3 = 1, HDMI_PICTURE_ASPECT_16_9 = 2, HDMI_PICTURE_ASPECT_64_27 = 3, HDMI_PICTURE_ASPECT_256_135 = 4, HDMI_PICTURE_ASPECT_RESERVED = 5, }; enum hdmi_active_aspect { HDMI_ACTIVE_ASPECT_16_9_TOP = 2, HDMI_ACTIVE_ASPECT_14_9_TOP = 3, HDMI_ACTIVE_ASPECT_16_9_CENTER = 4, HDMI_ACTIVE_ASPECT_PICTURE = 8, HDMI_ACTIVE_ASPECT_4_3 = 9, HDMI_ACTIVE_ASPECT_16_9 = 10, HDMI_ACTIVE_ASPECT_14_9 = 11, HDMI_ACTIVE_ASPECT_4_3_SP_14_9 = 13, HDMI_ACTIVE_ASPECT_16_9_SP_14_9 = 14, HDMI_ACTIVE_ASPECT_16_9_SP_4_3 = 15, }; enum hdmi_extended_colorimetry { HDMI_EXTENDED_COLORIMETRY_XV_YCC_601 = 0, HDMI_EXTENDED_COLORIMETRY_XV_YCC_709 = 1, HDMI_EXTENDED_COLORIMETRY_S_YCC_601 = 2, HDMI_EXTENDED_COLORIMETRY_OPYCC_601 = 3, HDMI_EXTENDED_COLORIMETRY_OPRGB = 4, HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM = 5, HDMI_EXTENDED_COLORIMETRY_BT2020 = 6, HDMI_EXTENDED_COLORIMETRY_RESERVED = 7, }; enum hdmi_quantization_range { HDMI_QUANTIZATION_RANGE_DEFAULT = 0, HDMI_QUANTIZATION_RANGE_LIMITED = 1, HDMI_QUANTIZATION_RANGE_FULL = 2, HDMI_QUANTIZATION_RANGE_RESERVED = 3, }; enum hdmi_nups { HDMI_NUPS_UNKNOWN = 0, HDMI_NUPS_HORIZONTAL = 1, HDMI_NUPS_VERTICAL = 2, HDMI_NUPS_BOTH = 3, }; enum hdmi_ycc_quantization_range { HDMI_YCC_QUANTIZATION_RANGE_LIMITED = 0, HDMI_YCC_QUANTIZATION_RANGE_FULL = 1, }; enum hdmi_content_type { HDMI_CONTENT_TYPE_GRAPHICS = 0, HDMI_CONTENT_TYPE_PHOTO = 1, HDMI_CONTENT_TYPE_CINEMA = 2, HDMI_CONTENT_TYPE_GAME = 3, }; enum hdmi_spd_sdi { HDMI_SPD_SDI_UNKNOWN = 0, HDMI_SPD_SDI_DSTB = 1, HDMI_SPD_SDI_DVDP = 2, HDMI_SPD_SDI_DVHS = 3, HDMI_SPD_SDI_HDDVR = 4, HDMI_SPD_SDI_DVC = 5, HDMI_SPD_SDI_DSC = 6, HDMI_SPD_SDI_VCD = 7, HDMI_SPD_SDI_GAME = 8, HDMI_SPD_SDI_PC = 9, HDMI_SPD_SDI_BD = 10, HDMI_SPD_SDI_SACD = 11, HDMI_SPD_SDI_HDDVD = 12, HDMI_SPD_SDI_PMP = 13, }; enum hdmi_audio_coding_type { HDMI_AUDIO_CODING_TYPE_STREAM = 0, HDMI_AUDIO_CODING_TYPE_PCM = 1, HDMI_AUDIO_CODING_TYPE_AC3 = 2, HDMI_AUDIO_CODING_TYPE_MPEG1 = 3, HDMI_AUDIO_CODING_TYPE_MP3 = 4, HDMI_AUDIO_CODING_TYPE_MPEG2 = 5, HDMI_AUDIO_CODING_TYPE_AAC_LC = 6, HDMI_AUDIO_CODING_TYPE_DTS = 7, HDMI_AUDIO_CODING_TYPE_ATRAC = 8, HDMI_AUDIO_CODING_TYPE_DSD = 9, HDMI_AUDIO_CODING_TYPE_EAC3 = 10, HDMI_AUDIO_CODING_TYPE_DTS_HD = 11, HDMI_AUDIO_CODING_TYPE_MLP = 12, 
HDMI_AUDIO_CODING_TYPE_DST = 13, HDMI_AUDIO_CODING_TYPE_WMA_PRO = 14, HDMI_AUDIO_CODING_TYPE_CXT = 15, }; enum hdmi_audio_sample_size { HDMI_AUDIO_SAMPLE_SIZE_STREAM = 0, HDMI_AUDIO_SAMPLE_SIZE_16 = 1, HDMI_AUDIO_SAMPLE_SIZE_20 = 2, HDMI_AUDIO_SAMPLE_SIZE_24 = 3, }; enum hdmi_audio_sample_frequency { HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM = 0, HDMI_AUDIO_SAMPLE_FREQUENCY_32000 = 1, HDMI_AUDIO_SAMPLE_FREQUENCY_44100 = 2, HDMI_AUDIO_SAMPLE_FREQUENCY_48000 = 3, HDMI_AUDIO_SAMPLE_FREQUENCY_88200 = 4, HDMI_AUDIO_SAMPLE_FREQUENCY_96000 = 5, HDMI_AUDIO_SAMPLE_FREQUENCY_176400 = 6, HDMI_AUDIO_SAMPLE_FREQUENCY_192000 = 7, }; enum hdmi_audio_coding_type_ext { HDMI_AUDIO_CODING_TYPE_EXT_CT = 0, HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC = 1, HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2 = 2, HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND = 3, HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC = 4, HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_V2 = 5, HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC = 6, HDMI_AUDIO_CODING_TYPE_EXT_DRA = 7, HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_SURROUND = 8, HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC_SURROUND = 10, }; enum hdmi_3d_structure { HDMI_3D_STRUCTURE_INVALID = -1, HDMI_3D_STRUCTURE_FRAME_PACKING = 0, HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE = 1, HDMI_3D_STRUCTURE_LINE_ALTERNATIVE = 2, HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL = 3, HDMI_3D_STRUCTURE_L_DEPTH = 4, HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH = 5, HDMI_3D_STRUCTURE_TOP_AND_BOTTOM = 6, HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF = 8, }; enum hdmi_eotf { HDMI_EOTF_TRADITIONAL_GAMMA_SDR = 0, HDMI_EOTF_TRADITIONAL_GAMMA_HDR = 1, HDMI_EOTF_SMPTE_ST2084 = 2, HDMI_EOTF_BT_2100_HLG = 3, }; enum hdmi_metadata_type { HDMI_STATIC_METADATA_TYPE1 = 0, }; struct hdmi_any_infoframe { enum hdmi_infoframe_type type; unsigned char version; unsigned char length; }; struct hdmi_avi_infoframe { enum hdmi_infoframe_type type; unsigned char version; unsigned char length; bool itc; unsigned char pixel_repeat; enum hdmi_colorspace colorspace; enum hdmi_scan_mode scan_mode; enum hdmi_colorimetry colorimetry; enum hdmi_picture_aspect picture_aspect; enum hdmi_active_aspect active_aspect; enum hdmi_extended_colorimetry extended_colorimetry; enum hdmi_quantization_range quantization_range; enum hdmi_nups nups; unsigned char video_code; enum hdmi_ycc_quantization_range ycc_quantization_range; enum hdmi_content_type content_type; unsigned short top_bar; unsigned short bottom_bar; unsigned short left_bar; unsigned short right_bar; }; struct hdmi_spd_infoframe { enum hdmi_infoframe_type type; unsigned char version; unsigned char length; char vendor[8]; char product[16]; enum hdmi_spd_sdi sdi; }; struct hdmi_audio_infoframe { enum hdmi_infoframe_type type; unsigned char version; unsigned char length; unsigned char channels; enum hdmi_audio_coding_type coding_type; enum hdmi_audio_sample_size sample_size; enum hdmi_audio_sample_frequency sample_frequency; enum hdmi_audio_coding_type_ext coding_type_ext; unsigned char channel_allocation; unsigned char level_shift_value; bool downmix_inhibit; }; struct hdmi_vendor_infoframe { enum hdmi_infoframe_type type; unsigned char version; unsigned char length; unsigned int oui; u8 vic; enum hdmi_3d_structure s3d_struct; unsigned int s3d_ext_data; }; struct hdmi_drm_infoframe { enum hdmi_infoframe_type type; unsigned char version; unsigned char length; enum hdmi_eotf eotf; enum hdmi_metadata_type metadata_type; struct { u16 x; u16 y; } display_primaries[3]; struct { u16 x; u16 y; } white_point; u16 max_display_mastering_luminance; u16 min_display_mastering_luminance; u16 
max_cll; u16 max_fall; }; union hdmi_vendor_any_infoframe { struct { enum hdmi_infoframe_type type; unsigned char version; unsigned char length; unsigned int oui; } any; struct hdmi_vendor_infoframe hdmi; }; struct dp_sdp_header { u8 HB0; u8 HB1; u8 HB2; u8 HB3; }; struct dp_sdp { struct dp_sdp_header sdp_header; u8 db[32]; }; union hdmi_infoframe { struct hdmi_any_infoframe any; struct hdmi_avi_infoframe avi; struct hdmi_spd_infoframe spd; union hdmi_vendor_any_infoframe vendor; struct hdmi_audio_infoframe audio; struct hdmi_drm_infoframe drm; }; enum backlight_type { BACKLIGHT_RAW = 1, BACKLIGHT_PLATFORM = 2, BACKLIGHT_FIRMWARE = 3, BACKLIGHT_TYPE_MAX = 4, }; enum backlight_scale { BACKLIGHT_SCALE_UNKNOWN = 0, BACKLIGHT_SCALE_LINEAR = 1, BACKLIGHT_SCALE_NON_LINEAR = 2, }; enum backlight_update_reason { BACKLIGHT_UPDATE_HOTKEY = 0, BACKLIGHT_UPDATE_SYSFS = 1, }; enum backlight_notification { BACKLIGHT_REGISTERED = 0, BACKLIGHT_UNREGISTERED = 1, }; struct backlight_properties { int brightness; int max_brightness; int power; int fb_blank; enum backlight_type type; unsigned int state; enum backlight_scale scale; }; struct backlight_ops; struct backlight_device { struct backlight_properties props; struct mutex update_lock; struct mutex ops_lock; const struct backlight_ops *ops; struct notifier_block fb_notif; struct list_head entry; struct device dev; bool fb_bl_on[32]; int use_count; }; struct fb_info; struct backlight_ops { unsigned int options; int (*update_status)(struct backlight_device *); int (*get_brightness)(struct backlight_device *); int (*check_fb)(struct backlight_device *, struct fb_info *); }; struct fb_bitfield { __u32 offset; __u32 length; __u32 msb_right; }; struct fb_var_screeninfo { __u32 xres; __u32 yres; __u32 xres_virtual; __u32 yres_virtual; __u32 xoffset; __u32 yoffset; __u32 bits_per_pixel; __u32 grayscale; struct fb_bitfield red; struct fb_bitfield green; struct fb_bitfield blue; struct fb_bitfield transp; __u32 nonstd; __u32 activate; __u32 height; __u32 width; __u32 accel_flags; __u32 pixclock; __u32 left_margin; __u32 right_margin; __u32 upper_margin; __u32 lower_margin; __u32 hsync_len; __u32 vsync_len; __u32 sync; __u32 vmode; __u32 rotate; __u32 colorspace; __u32 reserved[4]; }; struct fb_fix_screeninfo { char id[16]; unsigned long smem_start; __u32 smem_len; __u32 type; __u32 type_aux; __u32 visual; __u16 xpanstep; __u16 ypanstep; __u16 ywrapstep; __u32 line_length; unsigned long mmio_start; __u32 mmio_len; __u32 accel; __u16 capabilities; __u16 reserved[2]; }; struct fb_chroma { __u32 redx; __u32 greenx; __u32 bluex; __u32 whitex; __u32 redy; __u32 greeny; __u32 bluey; __u32 whitey; }; struct fb_videomode; struct fb_monspecs { struct fb_chroma chroma; struct fb_videomode *modedb; __u8 manufacturer[4]; __u8 monitor[14]; __u8 serial_no[14]; __u8 ascii[14]; __u32 modedb_len; __u32 model; __u32 serial; __u32 year; __u32 week; __u32 hfmin; __u32 hfmax; __u32 dclkmin; __u32 dclkmax; __u16 input; __u16 dpms; __u16 signal; __u16 vfmin; __u16 vfmax; __u16 gamma; __u16 gtf: 1; __u16 misc; __u8 version; __u8 revision; __u8 max_x; __u8 max_y; }; struct fb_pixmap { u8 *addr; u32 size; u32 offset; u32 buf_align; u32 scan_align; u32 access_align; u32 flags; u32 blit_x; u32 blit_y; void (*writeio)(struct fb_info *, void *, void *, unsigned int); void (*readio)(struct fb_info *, void *, void *, unsigned int); }; struct fb_cmap { __u32 start; __u32 len; __u16 *red; __u16 *green; __u16 *blue; __u16 *transp; }; struct fb_ops; struct fb_info { refcount_t count; int node; int 
flags; int fbcon_rotate_hint; struct mutex lock; struct mutex mm_lock; struct fb_var_screeninfo var; struct fb_fix_screeninfo fix; struct fb_monspecs monspecs; struct fb_pixmap pixmap; struct fb_pixmap sprite; struct fb_cmap cmap; struct list_head modelist; struct fb_videomode *mode; const struct fb_ops *fbops; struct device *device; struct device *dev; int class_flag; union { char *screen_base; char *screen_buffer; }; unsigned long screen_size; void *pseudo_palette; u32 state; void *fbcon_par; void *par; bool skip_vt_switch; }; struct fb_videomode { const char *name; u32 refresh; u32 xres; u32 yres; u32 pixclock; u32 left_margin; u32 right_margin; u32 upper_margin; u32 lower_margin; u32 hsync_len; u32 vsync_len; u32 sync; u32 vmode; u32 flag; }; struct fb_fillrect; struct fb_copyarea; struct fb_image; struct fb_cursor; struct fb_blit_caps; struct fb_ops { struct module *owner; int (*fb_open)(struct fb_info *, int); int (*fb_release)(struct fb_info *, int); ssize_t (*fb_read)(struct fb_info *, char __attribute__((btf_type_tag("user"))) *, size_t, loff_t *); ssize_t (*fb_write)(struct fb_info *, const char __attribute__((btf_type_tag("user"))) *, size_t, loff_t *); int (*fb_check_var)(struct fb_var_screeninfo *, struct fb_info *); int (*fb_set_par)(struct fb_info *); int (*fb_setcolreg)(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, struct fb_info *); int (*fb_setcmap)(struct fb_cmap *, struct fb_info *); int (*fb_blank)(int, struct fb_info *); int (*fb_pan_display)(struct fb_var_screeninfo *, struct fb_info *); void (*fb_fillrect)(struct fb_info *, const struct fb_fillrect *); void (*fb_copyarea)(struct fb_info *, const struct fb_copyarea *); void (*fb_imageblit)(struct fb_info *, const struct fb_image *); int (*fb_cursor)(struct fb_info *, struct fb_cursor *); int (*fb_sync)(struct fb_info *); int (*fb_ioctl)(struct fb_info *, unsigned int, unsigned long); int (*fb_compat_ioctl)(struct fb_info *, unsigned int, unsigned long); int (*fb_mmap)(struct fb_info *, struct vm_area_struct *); void (*fb_get_caps)(struct fb_info *, struct fb_blit_caps *, struct fb_var_screeninfo *); void (*fb_destroy)(struct fb_info *); int (*fb_debug_enter)(struct fb_info *); int (*fb_debug_leave)(struct fb_info *); }; struct fb_fillrect { __u32 dx; __u32 dy; __u32 width; __u32 height; __u32 color; __u32 rop; }; struct fb_copyarea { __u32 dx; __u32 dy; __u32 width; __u32 height; __u32 sx; __u32 sy; }; struct fb_image { __u32 dx; __u32 dy; __u32 width; __u32 height; __u32 fg_color; __u32 bg_color; __u8 depth; const char *data; struct fb_cmap cmap; }; struct fbcurpos { __u16 x; __u16 y; }; struct fb_cursor { __u16 set; __u16 enable; __u16 rop; const char *mask; struct fbcurpos hot; struct fb_image image; }; struct fb_blit_caps { u32 x; u32 y; u32 len; u32 flags; }; enum display_flags { DISPLAY_FLAGS_HSYNC_LOW = 1, DISPLAY_FLAGS_HSYNC_HIGH = 2, DISPLAY_FLAGS_VSYNC_LOW = 4, DISPLAY_FLAGS_VSYNC_HIGH = 8, DISPLAY_FLAGS_DE_LOW = 16, DISPLAY_FLAGS_DE_HIGH = 32, DISPLAY_FLAGS_PIXDATA_POSEDGE = 64, DISPLAY_FLAGS_PIXDATA_NEGEDGE = 128, DISPLAY_FLAGS_INTERLACED = 256, DISPLAY_FLAGS_DOUBLESCAN = 512, DISPLAY_FLAGS_DOUBLECLK = 1024, DISPLAY_FLAGS_SYNC_POSEDGE = 2048, DISPLAY_FLAGS_SYNC_NEGEDGE = 4096, }; struct display_timing; struct display_timings { unsigned int num_timings; unsigned int native_mode; struct display_timing **timings; }; struct timing_entry { u32 min; u32 typ; u32 max; }; struct display_timing { struct timing_entry pixelclock; struct timing_entry hactive; struct timing_entry hfront_porch; 
struct timing_entry hback_porch; struct timing_entry hsync_len; struct timing_entry vactive; struct timing_entry vfront_porch; struct timing_entry vback_porch; struct timing_entry vsync_len; enum display_flags flags; }; struct videomode { unsigned long pixelclock; u32 hactive; u32 hfront_porch; u32 hback_porch; u32 hsync_len; u32 vactive; u32 vfront_porch; u32 vback_porch; u32 vsync_len; enum display_flags flags; }; struct pm_domain_data { struct list_head list_node; struct device *dev; }; struct devm_clk_state { struct clk *clk; void (*exit)(struct clk *); }; struct clk_bulk_devres { struct clk_bulk_data *clks; int num_clks; }; struct clk_hw; struct clk_lookup { struct list_head node; const char *dev_id; const char *con_id; struct clk *clk; struct clk_hw *clk_hw; }; struct clk_core; struct clk_init_data; struct clk_hw { struct clk_core *core; struct clk *clk; const struct clk_init_data *init; }; struct clk_ops; struct clk_parent_data; struct clk_init_data { const char *name; const struct clk_ops *ops; const char * const *parent_names; const struct clk_parent_data *parent_data; const struct clk_hw **parent_hws; u8 num_parents; unsigned long flags; }; struct clk_rate_request; struct clk_duty; struct clk_ops { int (*prepare)(struct clk_hw *); void (*unprepare)(struct clk_hw *); int (*is_prepared)(struct clk_hw *); void (*unprepare_unused)(struct clk_hw *); int (*enable)(struct clk_hw *); void (*disable)(struct clk_hw *); int (*is_enabled)(struct clk_hw *); void (*disable_unused)(struct clk_hw *); int (*save_context)(struct clk_hw *); void (*restore_context)(struct clk_hw *); unsigned long (*recalc_rate)(struct clk_hw *, unsigned long); long (*round_rate)(struct clk_hw *, unsigned long, unsigned long *); int (*determine_rate)(struct clk_hw *, struct clk_rate_request *); int (*set_parent)(struct clk_hw *, u8); u8 (*get_parent)(struct clk_hw *); int (*set_rate)(struct clk_hw *, unsigned long, unsigned long); int (*set_rate_and_parent)(struct clk_hw *, unsigned long, unsigned long, u8); unsigned long (*recalc_accuracy)(struct clk_hw *, unsigned long); int (*get_phase)(struct clk_hw *); int (*set_phase)(struct clk_hw *, int); int (*get_duty_cycle)(struct clk_hw *, struct clk_duty *); int (*set_duty_cycle)(struct clk_hw *, struct clk_duty *); int (*init)(struct clk_hw *); void (*terminate)(struct clk_hw *); void (*debug_init)(struct clk_hw *, struct dentry *); int (*pre_rate_change)(struct clk_hw *, unsigned long, unsigned long); int (*post_rate_change)(struct clk_hw *, unsigned long, unsigned long); }; struct clk_rate_request { struct clk_core *core; unsigned long rate; unsigned long min_rate; unsigned long max_rate; unsigned long best_parent_rate; struct clk_hw *best_parent_hw; }; struct clk_duty { unsigned int num; unsigned int den; }; struct clk_parent_data { const struct clk_hw *hw; const char *fw_name; const char *name; int index; }; struct clk_lookup_alloc { struct clk_lookup cl; char dev_id[20]; char con_id[16]; }; typedef void (*btf_trace_clk_enable)(void *, struct clk_core *); struct clk_parent_map; struct clk_core { const char *name; const struct clk_ops *ops; struct clk_hw *hw; struct module *owner; struct device *dev; struct hlist_node rpm_node; struct device_node *of_node; struct clk_core *parent; struct clk_parent_map *parents; u8 num_parents; u8 new_parent_index; unsigned long rate; unsigned long req_rate; unsigned long new_rate; struct clk_core *new_parent; struct clk_core *new_child; unsigned long flags; bool orphan; bool rpm_enabled; bool need_sync; bool boot_enabled; unsigned 
int enable_count; unsigned int prepare_count; unsigned int protect_count; unsigned long min_rate; unsigned long max_rate; unsigned long accuracy; int phase; struct clk_duty duty; struct hlist_head children; struct hlist_node child_node; struct hlist_head clks; unsigned int notifier_count; struct dentry *dentry; struct hlist_node debug_node; struct kref ref; }; struct clk { struct clk_core *core; struct device *dev; const char *dev_id; const char *con_id; unsigned long min_rate; unsigned long max_rate; unsigned int exclusive_count; struct hlist_node clks_node; }; struct clk_parent_map { const struct clk_hw *hw; struct clk_core *core; const char *fw_name; const char *name; int index; }; typedef void (*btf_trace_clk_enable_complete)(void *, struct clk_core *); typedef void (*btf_trace_clk_disable)(void *, struct clk_core *); typedef void (*btf_trace_clk_disable_complete)(void *, struct clk_core *); typedef void (*btf_trace_clk_prepare)(void *, struct clk_core *); typedef void (*btf_trace_clk_prepare_complete)(void *, struct clk_core *); typedef void (*btf_trace_clk_unprepare)(void *, struct clk_core *); typedef void (*btf_trace_clk_unprepare_complete)(void *, struct clk_core *); typedef void (*btf_trace_clk_set_rate)(void *, struct clk_core *, unsigned long); typedef void (*btf_trace_clk_set_rate_complete)(void *, struct clk_core *, unsigned long); typedef void (*btf_trace_clk_set_min_rate)(void *, struct clk_core *, unsigned long); typedef void (*btf_trace_clk_set_max_rate)(void *, struct clk_core *, unsigned long); typedef void (*btf_trace_clk_set_rate_range)(void *, struct clk_core *, unsigned long, unsigned long); typedef void (*btf_trace_clk_set_parent)(void *, struct clk_core *, struct clk_core *); typedef void (*btf_trace_clk_set_parent_complete)(void *, struct clk_core *, struct clk_core *); typedef void (*btf_trace_clk_set_phase)(void *, struct clk_core *, int); typedef void (*btf_trace_clk_set_phase_complete)(void *, struct clk_core *, int); typedef void (*btf_trace_clk_set_duty_cycle)(void *, struct clk_core *, struct clk_duty *); typedef void (*btf_trace_clk_set_duty_cycle_complete)(void *, struct clk_core *, struct clk_duty *); typedef void (*btf_trace_clk_rate_request_start)(void *, struct clk_rate_request *); typedef void (*btf_trace_clk_rate_request_done)(void *, struct clk_rate_request *); struct clk_notifier { struct clk *clk; struct srcu_notifier_head notifier_head; struct list_head node; }; struct of_clk_provider { struct list_head link; struct device_node *node; struct clk * (*get)(struct of_phandle_args *, void *); struct clk_hw * (*get_hw)(struct of_phandle_args *, void *); void *data; }; struct clock_provider { void (*clk_init_cb)(struct device_node *); struct device_node *np; struct list_head node; }; struct trace_event_raw_clk { struct trace_entry ent; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_clk_rate { struct trace_entry ent; u32 __data_loc_name; unsigned long rate; char __data[0]; }; struct trace_event_raw_clk_rate_range { struct trace_entry ent; u32 __data_loc_name; unsigned long min; unsigned long max; char __data[0]; }; struct trace_event_raw_clk_parent { struct trace_entry ent; u32 __data_loc_name; u32 __data_loc_pname; char __data[0]; }; struct trace_event_raw_clk_phase { struct trace_entry ent; u32 __data_loc_name; int phase; char __data[0]; }; struct trace_event_raw_clk_duty_cycle { struct trace_entry ent; u32 __data_loc_name; unsigned int num; unsigned int den; char __data[0]; }; struct trace_event_raw_clk_rate_request { struct 
trace_entry ent; u32 __data_loc_name; u32 __data_loc_pname; unsigned long min; unsigned long max; unsigned long prate; char __data[0]; }; struct trace_event_data_offsets_clk { u32 name; }; struct trace_event_data_offsets_clk_rate { u32 name; }; struct trace_event_data_offsets_clk_rate_range { u32 name; }; struct trace_event_data_offsets_clk_phase { u32 name; }; struct trace_event_data_offsets_clk_duty_cycle { u32 name; }; struct clk_notifier_data { struct clk *clk; unsigned long old_rate; unsigned long new_rate; }; struct trace_event_data_offsets_clk_parent { u32 name; u32 pname; }; struct trace_event_data_offsets_clk_rate_request { u32 name; u32 pname; }; struct clk_notifier_devres { struct clk *clk; struct notifier_block *nb; }; struct clk_onecell_data { struct clk **clks; unsigned int clk_num; }; struct clk_hw_onecell_data { unsigned int num; struct clk_hw *hws[0]; }; struct clk_div_table; struct clk_divider { struct clk_hw hw; void *reg; u8 shift; u8 width; u8 flags; const struct clk_div_table *table; spinlock_t *lock; }; struct clk_div_table { unsigned int val; unsigned int div; }; struct clk_fixed_factor { struct clk_hw hw; unsigned int mult; unsigned int div; }; struct clk_fixed_rate { struct clk_hw hw; unsigned long fixed_rate; unsigned long fixed_accuracy; unsigned long flags; }; struct clk_gate { struct clk_hw hw; void *reg; u8 bit_idx; u8 flags; spinlock_t *lock; }; struct clk_multiplier { struct clk_hw hw; void *reg; u8 shift; u8 width; u8 flags; spinlock_t *lock; }; struct clk_mux { struct clk_hw hw; void *reg; const u32 *table; u32 mask; u8 shift; u8 flags; spinlock_t *lock; }; struct clk_composite { struct clk_hw hw; struct clk_ops ops; struct clk_hw *mux_hw; struct clk_hw *rate_hw; struct clk_hw *gate_hw; const struct clk_ops *mux_ops; const struct clk_ops *rate_ops; const struct clk_ops *gate_ops; }; struct clk_fractional_divider { struct clk_hw hw; void *reg; u8 mshift; u8 mwidth; u8 nshift; u8 nwidth; u8 flags; void (*approximation)(struct clk_hw *, unsigned long, unsigned long *, unsigned long *, unsigned long *); spinlock_t *lock; }; struct u32_fract { __u32 numerator; __u32 denominator; }; struct clk_gpio { struct clk_hw hw; struct gpio_desc *gpiod; }; struct scpi_dvfs_info; struct scpi_ops; struct scpi_clk { u32 id; struct clk_hw hw; struct scpi_dvfs_info *info; struct scpi_ops *scpi_ops; }; struct scpi_opp; struct scpi_dvfs_info { unsigned int count; unsigned int latency; struct scpi_opp *opps; }; struct scpi_opp { u32 freq; u32 m_volt; }; struct scpi_sensor_info; struct scpi_ops { u32 (*get_version)(); int (*clk_get_range)(u16, unsigned long *, unsigned long *); unsigned long (*clk_get_val)(u16); int (*clk_set_val)(u16, unsigned long); int (*dvfs_get_idx)(u8); int (*dvfs_set_idx)(u8, u8); struct scpi_dvfs_info * (*dvfs_get_info)(u8); int (*device_domain_id)(struct device *); int (*get_transition_latency)(struct device *); int (*add_opps_to_device)(struct device *); int (*sensor_get_capability)(u16 *); int (*sensor_get_info)(u16, struct scpi_sensor_info *); int (*sensor_get_value)(u16, u64 *); int (*device_get_power_state)(u16); int (*device_set_power_state)(u16, u8); }; struct scpi_sensor_info { u16 sensor_id; u8 class; u8 trigger_type; char name[20]; }; struct scpi_clk_data { struct scpi_clk **clk; unsigned int clk_num; }; struct hisi_phase_clock { unsigned int id; const char *name; const char *parent_names; unsigned long flags; unsigned long offset; u8 shift; u8 width; u32 *phase_degrees; u32 *phase_regvals; u8 phase_num; }; struct hisi_clock_data { struct 
clk_onecell_data clk_data; void *base; }; struct hisi_fixed_rate_clock { unsigned int id; char *name; const char *parent_name; unsigned long flags; unsigned long fixed_rate; }; struct hisi_fixed_factor_clock { unsigned int id; char *name; const char *parent_name; unsigned long mult; unsigned long div; unsigned long flags; }; struct hisi_mux_clock { unsigned int id; const char *name; const char * const *parent_names; u8 num_parents; unsigned long flags; unsigned long offset; u8 shift; u8 width; u8 mux_flags; const u32 *table; const char *alias; }; struct hisi_divider_clock { unsigned int id; const char *name; const char *parent_name; unsigned long flags; unsigned long offset; u8 shift; u8 width; u8 div_flags; struct clk_div_table *table; const char *alias; }; struct hisi_gate_clock { unsigned int id; const char *name; const char *parent_name; unsigned long flags; unsigned long offset; u8 bit_idx; u8 gate_flags; const char *alias; }; struct hi6220_divider_clock { unsigned int id; const char *name; const char *parent_name; unsigned long flags; unsigned long offset; u8 shift; u8 width; u32 mask_bit; const char *alias; }; struct clkgate_separated { struct clk_hw hw; void *enable; u8 bit_idx; u8 flags; spinlock_t *lock; }; struct hi6220_clk_divider { struct clk_hw hw; void *reg; u8 shift; u8 width; u32 mask; const struct clk_div_table *table; spinlock_t *lock; }; struct clk_hisi_phase { struct clk_hw hw; void *reg; u32 *phase_degrees; u32 *phase_regvals; u8 phase_num; u32 mask; u8 shift; u8 flags; spinlock_t *lock; }; struct hisi_crg_funcs { struct hisi_clock_data * (*register_clks)(struct platform_device *); void (*unregister_clks)(struct platform_device *); }; struct hisi_reset_controller; struct hisi_crg_dev { struct hisi_clock_data *clk_data; struct hisi_reset_controller *rstc; const struct hisi_crg_funcs *funcs; }; struct hi3519_crg_data { struct hisi_clock_data *clk_data; struct hisi_reset_controller *rstc; }; struct hi3559av100_pll_clock { u32 id; const char *name; const char *parent_name; const u32 ctrl_reg1; const u8 frac_shift; const u8 frac_width; const u8 postdiv1_shift; const u8 postdiv1_width; const u8 postdiv2_shift; const u8 postdiv2_width; const u32 ctrl_reg2; const u8 fbdiv_shift; const u8 fbdiv_width; const u8 refdiv_shift; const u8 refdiv_width; }; struct hi3559av100_clk_pll { struct clk_hw hw; u32 id; void *ctrl_reg1; u8 frac_shift; u8 frac_width; u8 postdiv1_shift; u8 postdiv1_width; u8 postdiv2_shift; u8 postdiv2_width; void *ctrl_reg2; u8 fbdiv_shift; u8 fbdiv_width; u8 refdiv_shift; u8 refdiv_width; }; struct reset_controller_dev; struct reset_control_ops { int (*reset)(struct reset_controller_dev *, unsigned long); int (*assert)(struct reset_controller_dev *, unsigned long); int (*deassert)(struct reset_controller_dev *, unsigned long); int (*status)(struct reset_controller_dev *, unsigned long); }; struct reset_controller_dev { const struct reset_control_ops *ops; struct module *owner; struct list_head list; struct list_head reset_control_head; struct device *dev; struct device_node *of_node; int of_reset_n_cells; int (*of_xlate)(struct reset_controller_dev *, const struct of_phandle_args *); unsigned int nr_resets; }; struct hisi_reset_controller { spinlock_t lock; void *membase; struct reset_controller_dev rcdev; }; struct mbox_client { struct device *dev; bool tx_block; unsigned long tx_tout; bool knows_txdone; void (*rx_callback)(struct mbox_client *, void *); void (*tx_prepare)(struct mbox_client *, void *); void (*tx_done)(struct mbox_client *, void *, int); }; 
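/*
 * Illustrative sketch, not part of the BTF-generated definitions above: a
 * minimal, hypothetical helper showing how a mailbox consumer typically fills
 * in struct mbox_client (defined immediately above) before requesting a
 * channel. The function name, callback parameter, and chosen values are
 * placeholders for illustration only, not real kernel symbols or defaults.
 */
static inline void example_mbox_client_init(struct mbox_client *cl,
					    struct device *dev,
					    void (*rx_cb)(struct mbox_client *, void *))
{
	cl->dev = dev;            /* device that will own the mailbox channel */
	cl->rx_callback = rx_cb;  /* called for each message received on the channel */
	cl->tx_block = 1;         /* block the sender until the transfer completes */
	cl->tx_tout = 500;        /* transmit timeout, in milliseconds */
	cl->knows_txdone = 0;     /* rely on the controller to signal tx completion */
	cl->tx_prepare = 0;       /* no per-message preparation step */
	cl->tx_done = 0;          /* no explicit tx-done notification needed */
}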
struct mbox_chan; struct hi6220_stub_clk { u32 id; struct device *dev; struct clk_hw hw; struct regmap *dfs_map; struct mbox_client cl; struct mbox_chan *mbox; }; struct hi6220_mbox_msg { unsigned char type; unsigned char cmd; unsigned char obj; unsigned char src; unsigned char para[4]; }; union hi6220_mbox_data { unsigned int data[8]; struct hi6220_mbox_msg msg; }; struct hi3660_stub_clk_chan { struct mbox_client cl; struct mbox_chan *mbox; }; struct hi3660_stub_clk { unsigned int id; struct clk_hw hw; unsigned int cmd; unsigned int msg[8]; unsigned int rate; }; struct tegra_cpu_car_ops { void (*wait_for_reset)(u32); void (*put_in_reset)(u32); void (*out_of_reset)(u32); void (*enable_clock)(u32); void (*disable_clock)(u32); bool (*rail_off_ready)(); void (*suspend)(); void (*resume)(); }; typedef void (*tegra_clk_apply_init_table_func)(); struct tegra_clk_periph_regs { u32 enb_reg; u32 enb_set_reg; u32 enb_clr_reg; u32 rst_reg; u32 rst_set_reg; u32 rst_clr_reg; }; struct tegra_clk_duplicate { int clk_id; struct clk_lookup lookup; }; struct tegra_clk_init_table { unsigned int clk_id; unsigned int parent_id; unsigned long rate; int state; }; struct tegra_devclk { int dt_id; char *dev_id; char *con_id; }; struct tegra_clk { int dt_id; bool present; }; struct tegra_clk_sync_source { struct clk_hw hw; unsigned long rate; unsigned long max_rate; }; struct tegra_clk_device { struct notifier_block clk_nb; struct device *dev; struct clk_hw *hw; struct mutex lock; }; struct tegra_core_opp_params { bool init_state; }; enum dfll_ctrl_mode { DFLL_UNINITIALIZED = 0, DFLL_DISABLED = 1, DFLL_OPEN_LOOP = 2, DFLL_CLOSED_LOOP = 3, }; enum dfll_tune_range { DFLL_TUNE_UNINITIALIZED = 0, DFLL_TUNE_LOW = 1, }; enum tegra_dfll_pmu_if { TEGRA_DFLL_PMU_I2C = 0, TEGRA_DFLL_PMU_PWM = 1, }; struct i2c_adapter; struct i2c_client { unsigned short flags; unsigned short addr; char name[20]; struct i2c_adapter *adapter; struct device dev; int init_irq; int irq; struct list_head detected; void *devres_group_id; }; struct i2c_algorithm; struct i2c_lock_operations; struct i2c_bus_recovery_info; struct i2c_adapter_quirks; struct i2c_adapter { struct module *owner; unsigned int class; const struct i2c_algorithm *algo; void *algo_data; const struct i2c_lock_operations *lock_ops; struct rt_mutex bus_lock; struct rt_mutex mux_lock; int timeout; int retries; struct device dev; unsigned long locked_flags; int nr; char name[48]; struct completion dev_released; struct mutex userspace_clients_lock; struct list_head userspace_clients; struct i2c_bus_recovery_info *bus_recovery_info; const struct i2c_adapter_quirks *quirks; struct irq_domain *host_notify_domain; struct regulator *bus_regulator; }; struct i2c_msg; union i2c_smbus_data; struct i2c_algorithm { int (*master_xfer)(struct i2c_adapter *, struct i2c_msg *, int); int (*master_xfer_atomic)(struct i2c_adapter *, struct i2c_msg *, int); int (*smbus_xfer)(struct i2c_adapter *, u16, unsigned short, char, u8, int, union i2c_smbus_data *); int (*smbus_xfer_atomic)(struct i2c_adapter *, u16, unsigned short, char, u8, int, union i2c_smbus_data *); u32 (*functionality)(struct i2c_adapter *); }; struct i2c_msg { __u16 addr; __u16 flags; __u16 len; __u8 *buf; }; union i2c_smbus_data { __u8 byte; __u16 word; __u8 block[34]; }; struct i2c_lock_operations { void (*lock_bus)(struct i2c_adapter *, unsigned int); int (*trylock_bus)(struct i2c_adapter *, unsigned int); void (*unlock_bus)(struct i2c_adapter *, unsigned int); }; struct i2c_bus_recovery_info { int (*recover_bus)(struct i2c_adapter 
*); int (*get_scl)(struct i2c_adapter *); void (*set_scl)(struct i2c_adapter *, int); int (*get_sda)(struct i2c_adapter *); void (*set_sda)(struct i2c_adapter *, int); int (*get_bus_free)(struct i2c_adapter *); void (*prepare_recovery)(struct i2c_adapter *); void (*unprepare_recovery)(struct i2c_adapter *); struct gpio_desc *scl_gpiod; struct gpio_desc *sda_gpiod; struct pinctrl *pinctrl; struct pinctrl_state *pins_default; struct pinctrl_state *pins_gpio; }; struct i2c_adapter_quirks { u64 flags; int max_num_msgs; u16 max_write_len; u16 max_read_len; u16 max_comb_1st_msg_len; u16 max_comb_2nd_msg_len; }; struct dfll_rate_req { unsigned long rate; unsigned long dvco_target_rate; int lut_index; u8 mult_bits; u8 scale_bits; }; struct tegra_dfll_soc_data; struct tegra_dfll { struct device *dev; struct tegra_dfll_soc_data *soc; void *base; void *i2c_base; void *i2c_controller_base; void *lut_base; struct regulator *vdd_reg; struct clk *soc_clk; struct clk *ref_clk; struct clk *i2c_clk; struct clk *dfll_clk; struct reset_control *dfll_rst; struct reset_control *dvco_rst; unsigned long ref_rate; unsigned long i2c_clk_rate; unsigned long dvco_rate_min; enum dfll_ctrl_mode mode; enum dfll_tune_range tune_range; struct dentry *debugfs_dir; struct clk_hw dfll_clk_hw; const char *output_clock_name; struct dfll_rate_req last_req; unsigned long last_unrounded_rate; u32 droop_ctrl; u32 sample_rate; u32 force_mode; u32 cf; u32 ci; u32 cg; bool cg_scale; u32 i2c_fs_rate; u32 i2c_reg; u32 i2c_slave_addr; unsigned int lut[33]; unsigned long lut_uv[33]; int lut_size; u8 lut_bottom; u8 lut_min; u8 lut_max; u8 lut_safe; enum tegra_dfll_pmu_if pmu_if; unsigned long pwm_rate; struct pinctrl *pwm_pin; struct pinctrl_state *pwm_enable_state; struct pinctrl_state *pwm_disable_state; u32 reg_init_uV; }; struct rail_alignment { int offset_uv; int step_uv; }; struct cvb_table; struct tegra_dfll_soc_data { struct device *dev; unsigned long max_freq; const struct cvb_table *cvb; struct rail_alignment alignment; void (*init_clock_trimmers)(); void (*set_clock_trimmers_high)(); void (*set_clock_trimmers_low)(); }; struct cvb_coefficients { int c0; int c1; int c2; }; struct cvb_table_freq_entry { unsigned long freq; struct cvb_coefficients coefficients; }; struct cvb_cpu_dfll_data { u32 tune0_low; u32 tune0_high; u32 tune1; unsigned int tune_high_min_millivolts; }; struct cvb_table { int speedo_id; int process_id; int min_millivolts; int max_millivolts; int speedo_scale; int voltage_scale; struct cvb_table_freq_entry entries[40]; struct cvb_cpu_dfll_data cpu_dfll_data; }; struct tegra_clk_frac_div { struct clk_hw hw; void *reg; u8 flags; u8 shift; u8 width; u8 frac_width; spinlock_t *lock; }; struct tegra_clk_periph_gate { u32 magic; struct clk_hw hw; void *clk_base; u8 flags; int clk_num; int *enable_refcnt; const struct tegra_clk_periph_regs *regs; }; struct tegra_clk_periph { u32 magic; struct clk_hw hw; struct clk_mux mux; struct tegra_clk_frac_div divider; struct tegra_clk_periph_gate gate; const struct clk_ops *mux_ops; const struct clk_ops *div_ops; const struct clk_ops *gate_ops; }; struct tegra_periph_init_data { const char *name; int clk_id; union { const char * const *parent_names; const char *parent_name; } p; int num_parents; struct tegra_clk_periph periph; u32 offset; const char *con_id; const char *dev_id; unsigned long flags; }; struct tegra_clk_periph_fixed { struct clk_hw hw; void *base; const struct tegra_clk_periph_regs *regs; unsigned int mul; unsigned int div; unsigned int num; }; struct div_nmp { u8 
divn_shift; u8 divn_width; u8 divm_shift; u8 divm_width; u8 divp_shift; u8 divp_width; u8 override_divn_shift; u8 override_divm_shift; u8 override_divp_shift; }; struct utmi_clk_param { u32 osc_frequency; u8 enable_delay_count; u8 stable_count; u8 active_delay_count; u8 xtal_freq_count; }; struct tegra_clk_pll_params; struct tegra_clk_pll { struct clk_hw hw; void *clk_base; void *pmc; spinlock_t *lock; struct tegra_clk_pll_params *params; }; struct pdiv_map; struct tegra_clk_pll_freq_table; struct tegra_clk_pll_params { unsigned long input_min; unsigned long input_max; unsigned long cf_min; unsigned long cf_max; unsigned long vco_min; unsigned long vco_max; u32 base_reg; u32 misc_reg; u32 lock_reg; u32 lock_mask; u32 lock_enable_bit_idx; u32 iddq_reg; u32 iddq_bit_idx; u32 reset_reg; u32 reset_bit_idx; u32 sdm_din_reg; u32 sdm_din_mask; u32 sdm_ctrl_reg; u32 sdm_ctrl_en_mask; u32 ssc_ctrl_reg; u32 ssc_ctrl_en_mask; u32 aux_reg; u32 dyn_ramp_reg; u32 ext_misc_reg[6]; u32 pmc_divnm_reg; u32 pmc_divp_reg; u32 flags; int stepa_shift; int stepb_shift; int lock_delay; int max_p; bool defaults_set; const struct pdiv_map *pdiv_tohw; struct div_nmp *div_nmp; struct tegra_clk_pll_freq_table *freq_table; unsigned long fixed_rate; u16 mdiv_default; u32 (*round_p_to_pdiv)(u32, u32 *); void (*set_gain)(struct tegra_clk_pll_freq_table *); int (*calc_rate)(struct clk_hw *, struct tegra_clk_pll_freq_table *, unsigned long, unsigned long); unsigned long (*adjust_vco)(struct tegra_clk_pll_params *, unsigned long); void (*set_defaults)(struct tegra_clk_pll *); int (*dyn_ramp)(struct tegra_clk_pll *, struct tegra_clk_pll_freq_table *); int (*pre_rate_change)(); void (*post_rate_change)(); }; struct pdiv_map { u8 pdiv; u8 hw_val; }; struct tegra_clk_pll_freq_table { unsigned long input_rate; unsigned long output_rate; u32 n; u32 m; u8 p; u8 cpcon; u16 sdm_data; }; struct tegra_clk_pll_out { struct clk_hw hw; void *reg; u8 enb_bit_idx; u8 rst_bit_idx; spinlock_t *lock; u8 flags; }; struct tegra_sdmmc_mux { struct clk_hw hw; void *reg; spinlock_t *lock; const struct clk_ops *gate_ops; struct tegra_clk_periph_gate gate; u8 div_flags; }; struct tegra_clk_super_mux { struct clk_hw hw; void *reg; struct tegra_clk_frac_div frac_div; const struct clk_ops *div_ops; u8 width; u8 flags; u8 div2_index; u8 pllx_index; spinlock_t *lock; }; struct tegra_sync_source_initdata { char *name; unsigned long rate; unsigned long max_rate; int clk_id; }; struct tegra_audio_clk_initdata { char *gate_name; char *mux_name; u32 offset; int gate_clk_id; int mux_clk_id; }; struct tegra_audio2x_clk_initdata { char *parent; char *gate_name; char *name_2x; char *div_name; int clk_id; int clk_num; u8 div_offset; }; enum clk_id { tegra_clk_actmon = 0, tegra_clk_adx = 1, tegra_clk_adx1 = 2, tegra_clk_afi = 3, tegra_clk_amx = 4, tegra_clk_amx1 = 5, tegra_clk_apb2ape = 6, tegra_clk_ahbdma = 7, tegra_clk_apbdma = 8, tegra_clk_apbif = 9, tegra_clk_ape = 10, tegra_clk_audio0 = 11, tegra_clk_audio0_2x = 12, tegra_clk_audio0_mux = 13, tegra_clk_audio1 = 14, tegra_clk_audio1_2x = 15, tegra_clk_audio1_mux = 16, tegra_clk_audio2 = 17, tegra_clk_audio2_2x = 18, tegra_clk_audio2_mux = 19, tegra_clk_audio3 = 20, tegra_clk_audio3_2x = 21, tegra_clk_audio3_mux = 22, tegra_clk_audio4 = 23, tegra_clk_audio4_2x = 24, tegra_clk_audio4_mux = 25, tegra_clk_bsea = 26, tegra_clk_bsev = 27, tegra_clk_cclk_g = 28, tegra_clk_cclk_lp = 29, tegra_clk_cilab = 30, tegra_clk_cilcd = 31, tegra_clk_cile = 32, tegra_clk_clk_32k = 33, tegra_clk_clk72Mhz = 34, tegra_clk_clk72Mhz_8 
= 35, tegra_clk_clk_m = 36, tegra_clk_osc = 37, tegra_clk_osc_div2 = 38, tegra_clk_osc_div4 = 39, tegra_clk_cml0 = 40, tegra_clk_cml1 = 41, tegra_clk_csi = 42, tegra_clk_csite = 43, tegra_clk_csite_8 = 44, tegra_clk_csus = 45, tegra_clk_cve = 46, tegra_clk_dam0 = 47, tegra_clk_dam1 = 48, tegra_clk_dam2 = 49, tegra_clk_d_audio = 50, tegra_clk_dbgapb = 51, tegra_clk_dds = 52, tegra_clk_dfll_ref = 53, tegra_clk_dfll_soc = 54, tegra_clk_disp1 = 55, tegra_clk_disp1_8 = 56, tegra_clk_disp2 = 57, tegra_clk_disp2_8 = 58, tegra_clk_dp2 = 59, tegra_clk_dpaux = 60, tegra_clk_dpaux1 = 61, tegra_clk_dsialp = 62, tegra_clk_dsia_mux = 63, tegra_clk_dsiblp = 64, tegra_clk_dsib_mux = 65, tegra_clk_dtv = 66, tegra_clk_emc = 67, tegra_clk_entropy = 68, tegra_clk_entropy_8 = 69, tegra_clk_epp = 70, tegra_clk_epp_8 = 71, tegra_clk_extern1 = 72, tegra_clk_extern2 = 73, tegra_clk_extern3 = 74, tegra_clk_fuse = 75, tegra_clk_fuse_burn = 76, tegra_clk_gpu = 77, tegra_clk_gr2d = 78, tegra_clk_gr2d_8 = 79, tegra_clk_gr3d = 80, tegra_clk_gr3d_8 = 81, tegra_clk_hclk = 82, tegra_clk_hda = 83, tegra_clk_hda_8 = 84, tegra_clk_hda2codec_2x = 85, tegra_clk_hda2codec_2x_8 = 86, tegra_clk_hda2hdmi = 87, tegra_clk_hdmi = 88, tegra_clk_hdmi_audio = 89, tegra_clk_host1x = 90, tegra_clk_host1x_8 = 91, tegra_clk_host1x_9 = 92, tegra_clk_hsic_trk = 93, tegra_clk_i2c1 = 94, tegra_clk_i2c2 = 95, tegra_clk_i2c3 = 96, tegra_clk_i2c4 = 97, tegra_clk_i2c5 = 98, tegra_clk_i2c6 = 99, tegra_clk_i2cslow = 100, tegra_clk_i2s0 = 101, tegra_clk_i2s0_sync = 102, tegra_clk_i2s1 = 103, tegra_clk_i2s1_sync = 104, tegra_clk_i2s2 = 105, tegra_clk_i2s2_sync = 106, tegra_clk_i2s3 = 107, tegra_clk_i2s3_sync = 108, tegra_clk_i2s4 = 109, tegra_clk_i2s4_sync = 110, tegra_clk_isp = 111, tegra_clk_isp_8 = 112, tegra_clk_isp_9 = 113, tegra_clk_ispb = 114, tegra_clk_kbc = 115, tegra_clk_kfuse = 116, tegra_clk_la = 117, tegra_clk_maud = 118, tegra_clk_mipi = 119, tegra_clk_mipibif = 120, tegra_clk_mipi_cal = 121, tegra_clk_mpe = 122, tegra_clk_mselect = 123, tegra_clk_msenc = 124, tegra_clk_ndflash = 125, tegra_clk_ndflash_8 = 126, tegra_clk_ndspeed = 127, tegra_clk_ndspeed_8 = 128, tegra_clk_nor = 129, tegra_clk_nvdec = 130, tegra_clk_nvenc = 131, tegra_clk_nvjpg = 132, tegra_clk_owr = 133, tegra_clk_owr_8 = 134, tegra_clk_pcie = 135, tegra_clk_pclk = 136, tegra_clk_pll_a = 137, tegra_clk_pll_a_out0 = 138, tegra_clk_pll_a1 = 139, tegra_clk_pll_c = 140, tegra_clk_pll_c2 = 141, tegra_clk_pll_c3 = 142, tegra_clk_pll_c4 = 143, tegra_clk_pll_c4_out0 = 144, tegra_clk_pll_c4_out1 = 145, tegra_clk_pll_c4_out2 = 146, tegra_clk_pll_c4_out3 = 147, tegra_clk_pll_c_out1 = 148, tegra_clk_pll_d = 149, tegra_clk_pll_d2 = 150, tegra_clk_pll_d2_out0 = 151, tegra_clk_pll_d_out0 = 152, tegra_clk_pll_dp = 153, tegra_clk_pll_e_out0 = 154, tegra_clk_pll_g_ref = 155, tegra_clk_pll_m = 156, tegra_clk_pll_m_out1 = 157, tegra_clk_pll_mb = 158, tegra_clk_pll_p = 159, tegra_clk_pll_p_out1 = 160, tegra_clk_pll_p_out2 = 161, tegra_clk_pll_p_out2_int = 162, tegra_clk_pll_p_out3 = 163, tegra_clk_pll_p_out4 = 164, tegra_clk_pll_p_out4_cpu = 165, tegra_clk_pll_p_out5 = 166, tegra_clk_pll_p_out_hsio = 167, tegra_clk_pll_p_out_xusb = 168, tegra_clk_pll_p_out_cpu = 169, tegra_clk_pll_p_out_adsp = 170, tegra_clk_pll_ref = 171, tegra_clk_pll_re_out = 172, tegra_clk_pll_re_vco = 173, tegra_clk_pll_u = 174, tegra_clk_pll_u_out = 175, tegra_clk_pll_u_out1 = 176, tegra_clk_pll_u_out2 = 177, tegra_clk_pll_u_12m = 178, tegra_clk_pll_u_480m = 179, tegra_clk_pll_u_48m = 180, tegra_clk_pll_u_60m = 181, 
tegra_clk_pll_x = 182, tegra_clk_pll_x_out0 = 183, tegra_clk_pwm = 184, tegra_clk_qspi = 185, tegra_clk_rtc = 186, tegra_clk_sata = 187, tegra_clk_sata_8 = 188, tegra_clk_sata_cold = 189, tegra_clk_sata_oob = 190, tegra_clk_sata_oob_8 = 191, tegra_clk_sbc1 = 192, tegra_clk_sbc1_8 = 193, tegra_clk_sbc1_9 = 194, tegra_clk_sbc2 = 195, tegra_clk_sbc2_8 = 196, tegra_clk_sbc2_9 = 197, tegra_clk_sbc3 = 198, tegra_clk_sbc3_8 = 199, tegra_clk_sbc3_9 = 200, tegra_clk_sbc4 = 201, tegra_clk_sbc4_8 = 202, tegra_clk_sbc4_9 = 203, tegra_clk_sbc5 = 204, tegra_clk_sbc5_8 = 205, tegra_clk_sbc6 = 206, tegra_clk_sbc6_8 = 207, tegra_clk_sclk = 208, tegra_clk_sdmmc_legacy = 209, tegra_clk_sdmmc1 = 210, tegra_clk_sdmmc1_8 = 211, tegra_clk_sdmmc1_9 = 212, tegra_clk_sdmmc2 = 213, tegra_clk_sdmmc2_8 = 214, tegra_clk_sdmmc3 = 215, tegra_clk_sdmmc3_8 = 216, tegra_clk_sdmmc3_9 = 217, tegra_clk_sdmmc4 = 218, tegra_clk_sdmmc4_8 = 219, tegra_clk_se = 220, tegra_clk_se_10 = 221, tegra_clk_soc_therm = 222, tegra_clk_soc_therm_8 = 223, tegra_clk_sor0 = 224, tegra_clk_sor0_out = 225, tegra_clk_sor1 = 226, tegra_clk_sor1_out = 227, tegra_clk_spdif = 228, tegra_clk_spdif_2x = 229, tegra_clk_spdif_in = 230, tegra_clk_spdif_in_8 = 231, tegra_clk_spdif_in_sync = 232, tegra_clk_spdif_mux = 233, tegra_clk_spdif_out = 234, tegra_clk_timer = 235, tegra_clk_trace = 236, tegra_clk_tsec = 237, tegra_clk_tsec_8 = 238, tegra_clk_tsecb = 239, tegra_clk_tsensor = 240, tegra_clk_tvdac = 241, tegra_clk_tvo = 242, tegra_clk_uarta = 243, tegra_clk_uarta_8 = 244, tegra_clk_uartb = 245, tegra_clk_uartb_8 = 246, tegra_clk_uartc = 247, tegra_clk_uartc_8 = 248, tegra_clk_uartd = 249, tegra_clk_uartd_8 = 250, tegra_clk_uarte = 251, tegra_clk_uarte_8 = 252, tegra_clk_uartape = 253, tegra_clk_usb2 = 254, tegra_clk_usb2_hsic_trk = 255, tegra_clk_usb2_trk = 256, tegra_clk_usb3 = 257, tegra_clk_usbd = 258, tegra_clk_vcp = 259, tegra_clk_vde = 260, tegra_clk_vde_8 = 261, tegra_clk_vfir = 262, tegra_clk_vi = 263, tegra_clk_vi_8 = 264, tegra_clk_vi_9 = 265, tegra_clk_vi_10 = 266, tegra_clk_vi_i2c = 267, tegra_clk_vic03 = 268, tegra_clk_vic03_8 = 269, tegra_clk_vim2_clk = 270, tegra_clk_vimclk_sync = 271, tegra_clk_vi_sensor = 272, tegra_clk_vi_sensor_8 = 273, tegra_clk_vi_sensor_9 = 274, tegra_clk_vi_sensor2 = 275, tegra_clk_vi_sensor2_8 = 276, tegra_clk_xusb_dev = 277, tegra_clk_xusb_dev_src = 278, tegra_clk_xusb_dev_src_8 = 279, tegra_clk_xusb_falcon_src = 280, tegra_clk_xusb_falcon_src_8 = 281, tegra_clk_xusb_fs_src = 282, tegra_clk_xusb_gate = 283, tegra_clk_xusb_host = 284, tegra_clk_xusb_host_src = 285, tegra_clk_xusb_host_src_8 = 286, tegra_clk_xusb_hs_src = 287, tegra_clk_xusb_hs_src_4 = 288, tegra_clk_xusb_ss = 289, tegra_clk_xusb_ss_src = 290, tegra_clk_xusb_ss_src_8 = 291, tegra_clk_xusb_ss_div2 = 292, tegra_clk_xusb_ssp_src = 293, tegra_clk_sclk_mux = 294, tegra_clk_sor_safe = 295, tegra_clk_cec = 296, tegra_clk_ispa = 297, tegra_clk_dmic1 = 298, tegra_clk_dmic2 = 299, tegra_clk_dmic3 = 300, tegra_clk_dmic1_sync_clk = 301, tegra_clk_dmic2_sync_clk = 302, tegra_clk_dmic3_sync_clk = 303, tegra_clk_dmic1_sync_clk_mux = 304, tegra_clk_dmic2_sync_clk_mux = 305, tegra_clk_dmic3_sync_clk_mux = 306, tegra_clk_iqc1 = 307, tegra_clk_iqc2 = 308, tegra_clk_pll_a_out_adsp = 309, tegra_clk_pll_a_out0_out_adsp = 310, tegra_clk_adsp = 311, tegra_clk_adsp_neon = 312, tegra_clk_max = 313, }; struct tegra_audio_clk_info { char *name; struct tegra_clk_pll_params *pll_params; int clk_id; char *parent; }; struct pll_out_data { char *div_name; char *pll_out_name; u32 
offset; int clk_id; u8 div_shift; u8 div_flags; u8 rst_shift; spinlock_t *lock; }; enum tegra_super_gen { gen4 = 4, gen5 = 5, }; struct tegra_super_gen_info { enum tegra_super_gen gen; const char **sclk_parents; const char **cclk_g_parents; const char **cclk_lp_parents; int num_sclk_parents; int num_cclk_g_parents; int num_cclk_lp_parents; }; enum { DOWN___2 = 0, UP___2 = 1, }; struct dev_pm_opp_data { unsigned int level; unsigned long freq; unsigned long u_volt; }; enum gpd_status { GENPD_STATE_ON = 0, GENPD_STATE_OFF = 1, }; enum { CMD_CLK_GET_RATE = 1, CMD_CLK_SET_RATE = 2, CMD_CLK_ROUND_RATE = 3, CMD_CLK_GET_PARENT = 4, CMD_CLK_SET_PARENT = 5, CMD_CLK_IS_ENABLED = 6, CMD_CLK_ENABLE = 7, CMD_CLK_DISABLE = 8, CMD_CLK_PROPERTIES = 9, CMD_CLK_POSSIBLE_PARENTS = 10, CMD_CLK_NUM_POSSIBLE_PARENTS = 11, CMD_CLK_GET_POSSIBLE_PARENT = 12, CMD_CLK_RESET_REFCOUNTS = 13, CMD_CLK_GET_ALL_INFO = 14, CMD_CLK_GET_MAX_CLK_ID = 15, CMD_CLK_GET_FMAX_AT_VMIN = 16, CMD_CLK_MAX = 17, }; struct tegra_bpmp; struct tegra_bpmp_clk { struct clk_hw hw; struct tegra_bpmp *bpmp; unsigned int id; unsigned int num_parents; unsigned int *parents; }; struct generic_pm_domain; typedef struct generic_pm_domain * (*genpd_xlate_t)(struct of_phandle_args *, void *); struct genpd_onecell_data { struct generic_pm_domain **domains; unsigned int num_domains; genpd_xlate_t xlate; }; struct tegra_bpmp_soc; struct tegra_bpmp_channel; struct tegra_bpmp { const struct tegra_bpmp_soc *soc; struct device *dev; void *priv; struct { struct mbox_client client; struct mbox_chan *channel; } mbox; spinlock_t atomic_tx_lock; struct tegra_bpmp_channel *tx_channel; struct tegra_bpmp_channel *rx_channel; struct tegra_bpmp_channel *threaded_channels; struct { unsigned long *allocated; unsigned long *busy; unsigned int count; struct semaphore lock; } threaded; struct list_head mrqs; spinlock_t lock; struct tegra_bpmp_clk **clocks; unsigned int num_clocks; struct reset_controller_dev rstc; struct genpd_onecell_data genpd; struct dentry *debugfs_mirror; bool suspended; }; struct tegra_bpmp_ops; struct tegra_bpmp_soc { struct { struct { unsigned int offset; unsigned int count; unsigned int timeout; } cpu_tx; struct { unsigned int offset; unsigned int count; unsigned int timeout; } thread; struct { unsigned int offset; unsigned int count; unsigned int timeout; } cpu_rx; } channels; const struct tegra_bpmp_ops *ops; unsigned int num_resets; }; struct tegra_bpmp_ops { int (*init)(struct tegra_bpmp *); void (*deinit)(struct tegra_bpmp *); bool (*is_response_ready)(struct tegra_bpmp_channel *); bool (*is_request_ready)(struct tegra_bpmp_channel *); int (*ack_response)(struct tegra_bpmp_channel *); int (*ack_request)(struct tegra_bpmp_channel *); bool (*is_response_channel_free)(struct tegra_bpmp_channel *); bool (*is_request_channel_free)(struct tegra_bpmp_channel *); int (*post_response)(struct tegra_bpmp_channel *); int (*post_request)(struct tegra_bpmp_channel *); int (*ring_doorbell)(struct tegra_bpmp *); int (*resume)(struct tegra_bpmp *); }; struct iosys_map { union { void *vaddr_iomem; void *vaddr; }; bool is_iomem; }; struct tegra_ivc; struct tegra_bpmp_channel { struct tegra_bpmp *bpmp; struct iosys_map ib; struct iosys_map ob; struct completion completion; struct tegra_ivc *ivc; unsigned int index; }; struct gpd_dev_ops { int (*start)(struct device *); int (*stop)(struct device *); }; struct dev_power_governor; struct genpd_governor_data; struct opp_table; struct dev_pm_opp; struct genpd_power_state; struct genpd_lock_ops; struct 
generic_pm_domain { struct device dev; struct dev_pm_domain domain; struct list_head gpd_list_node; struct list_head parent_links; struct list_head child_links; struct list_head dev_list; struct dev_power_governor *gov; struct genpd_governor_data *gd; struct work_struct power_off_work; struct fwnode_handle *provider; bool has_provider; const char *name; atomic_t sd_count; enum gpd_status status; unsigned int device_count; unsigned int suspended_count; unsigned int prepared_count; unsigned int performance_state; cpumask_var_t cpus; bool synced_poweroff; int (*power_off)(struct generic_pm_domain *); int (*power_on)(struct generic_pm_domain *); struct raw_notifier_head power_notifiers; struct opp_table *opp_table; unsigned int (*opp_to_performance_state)(struct generic_pm_domain *, struct dev_pm_opp *); int (*set_performance_state)(struct generic_pm_domain *, unsigned int); struct gpd_dev_ops dev_ops; int (*attach_dev)(struct generic_pm_domain *, struct device *); void (*detach_dev)(struct generic_pm_domain *, struct device *); unsigned int flags; struct genpd_power_state *states; void (*free_states)(struct genpd_power_state *, unsigned int); unsigned int state_count; unsigned int state_idx; u64 on_time; u64 accounting_time; const struct genpd_lock_ops *lock_ops; union { struct mutex mlock; struct { spinlock_t slock; unsigned long lock_flags; }; }; }; struct dev_power_governor { bool (*power_down_ok)(struct dev_pm_domain *); bool (*suspend_ok)(struct device *); }; struct genpd_governor_data { s64 max_off_time_ns; bool max_off_time_changed; ktime_t next_wakeup; ktime_t next_hrtimer; bool cached_power_down_ok; bool cached_power_down_state_idx; }; struct genpd_power_state { s64 power_off_latency_ns; s64 power_on_latency_ns; s64 residency_ns; u64 usage; u64 rejected; struct fwnode_handle *fwnode; u64 idle_time; void *data; }; struct genpd_lock_ops { void (*lock)(struct generic_pm_domain *); void (*lock_nested)(struct generic_pm_domain *, int); int (*lock_interruptible)(struct generic_pm_domain *); void (*unlock)(struct generic_pm_domain *); }; struct tegra_bpmp_clk_message { unsigned int cmd; unsigned int id; struct { const void *data; size_t size; } tx; struct { void *data; size_t size; int ret; } rx; }; struct tegra_bpmp_message { unsigned int mrq; struct { const void *data; size_t size; } tx; struct { void *data; size_t size; int ret; } rx; unsigned long flags; }; struct cmd_clk_get_rate_request {}; struct cmd_clk_set_rate_request { int32_t unused; int64_t rate; } __attribute__((packed)); struct cmd_clk_round_rate_request { int32_t unused; int64_t rate; } __attribute__((packed)); struct cmd_clk_get_parent_request {}; struct cmd_clk_set_parent_request { uint32_t parent_id; }; struct cmd_clk_enable_request {}; struct cmd_clk_disable_request {}; struct cmd_clk_is_enabled_request {}; struct cmd_clk_properties_request {}; struct cmd_clk_possible_parents_request {}; struct cmd_clk_num_possible_parents_request {}; struct cmd_clk_get_possible_parent_request { uint8_t parent_idx; }; struct cmd_clk_get_all_info_request {}; struct cmd_clk_get_max_clk_id_request {}; struct cmd_clk_get_fmax_at_vmin_request {}; struct mrq_clk_request { uint32_t cmd_and_id; union { struct cmd_clk_get_rate_request clk_get_rate; struct cmd_clk_set_rate_request clk_set_rate; struct cmd_clk_round_rate_request clk_round_rate; struct cmd_clk_get_parent_request clk_get_parent; struct cmd_clk_set_parent_request clk_set_parent; struct cmd_clk_enable_request clk_enable; struct cmd_clk_disable_request clk_disable; struct 
cmd_clk_is_enabled_request clk_is_enabled; struct cmd_clk_properties_request clk_properties; struct cmd_clk_possible_parents_request clk_possible_parents; struct cmd_clk_num_possible_parents_request clk_num_possible_parents; struct cmd_clk_get_possible_parent_request clk_get_possible_parent; struct cmd_clk_get_all_info_request clk_get_all_info; struct cmd_clk_get_max_clk_id_request clk_get_max_clk_id; struct cmd_clk_get_fmax_at_vmin_request clk_get_fmax_at_vmin; }; }; struct cmd_clk_get_max_clk_id_response { uint32_t max_id; }; struct tegra_bpmp_clk_info { unsigned int id; char name[40]; unsigned int parents[16]; unsigned int num_parents; unsigned long flags; }; struct cmd_clk_get_all_info_response { uint32_t flags; uint32_t parent; uint32_t parents[16]; uint8_t num_parents; uint8_t name[40]; } __attribute__((packed)); struct cmd_clk_get_rate_response { int64_t rate; }; struct cmd_clk_get_parent_response { uint32_t parent_id; }; struct cmd_clk_is_enabled_response { int32_t state; }; struct cmd_clk_round_rate_response { int64_t rate; }; struct cmd_clk_set_parent_response { uint32_t parent_id; }; struct cmd_clk_set_rate_response { int64_t rate; }; struct dma_chan; struct dma_chan_tbl_ent { struct dma_chan *chan; }; typedef s32 dma_cookie_t; struct dma_device; struct dma_chan_dev; struct dma_chan_percpu; struct dma_router; struct dma_chan { struct dma_device *device; struct device *slave; dma_cookie_t cookie; dma_cookie_t completed_cookie; int chan_id; struct dma_chan_dev *dev; const char *name; char *dbg_client_name; struct list_head device_node; struct dma_chan_percpu __attribute__((btf_type_tag("percpu"))) *local; int client_count; int table_count; struct dma_router *router; void *route_data; void *private; }; typedef bool (*dma_filter_fn)(struct dma_chan *, void *); struct dma_slave_map; struct dma_filter { dma_filter_fn fn; int mapcnt; const struct dma_slave_map *map; }; typedef struct { unsigned long bits[1]; } dma_cap_mask_t; enum dma_desc_metadata_mode { DESC_METADATA_NONE = 0, DESC_METADATA_CLIENT = 1, DESC_METADATA_ENGINE = 2, }; enum dmaengine_alignment { DMAENGINE_ALIGN_1_BYTE = 0, DMAENGINE_ALIGN_2_BYTES = 1, DMAENGINE_ALIGN_4_BYTES = 2, DMAENGINE_ALIGN_8_BYTES = 3, DMAENGINE_ALIGN_16_BYTES = 4, DMAENGINE_ALIGN_32_BYTES = 5, DMAENGINE_ALIGN_64_BYTES = 6, DMAENGINE_ALIGN_128_BYTES = 7, DMAENGINE_ALIGN_256_BYTES = 8, }; enum dma_residue_granularity { DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0, DMA_RESIDUE_GRANULARITY_SEGMENT = 1, DMA_RESIDUE_GRANULARITY_BURST = 2, }; enum sum_check_flags { SUM_CHECK_P_RESULT = 1, SUM_CHECK_Q_RESULT = 2, }; enum dma_transfer_direction { DMA_MEM_TO_MEM = 0, DMA_MEM_TO_DEV = 1, DMA_DEV_TO_MEM = 2, DMA_DEV_TO_DEV = 3, DMA_TRANS_NONE = 4, }; enum dma_status { DMA_COMPLETE = 0, DMA_IN_PROGRESS = 1, DMA_PAUSED = 2, DMA_ERROR = 3, DMA_OUT_OF_ORDER = 4, }; struct dma_async_tx_descriptor; struct dma_interleaved_template; struct dma_slave_caps; struct dma_slave_config; struct dma_tx_state; struct dma_device { struct kref ref; unsigned int chancnt; unsigned int privatecnt; struct list_head channels; struct list_head global_node; struct dma_filter filter; dma_cap_mask_t cap_mask; enum dma_desc_metadata_mode desc_metadata_modes; unsigned short max_xor; unsigned short max_pq; enum dmaengine_alignment copy_align; enum dmaengine_alignment xor_align; enum dmaengine_alignment pq_align; enum dmaengine_alignment fill_align; int dev_id; struct device *dev; struct module *owner; struct ida chan_ida; u32 src_addr_widths; u32 dst_addr_widths; u32 directions; u32 min_burst; u32 
max_burst; u32 max_sg_burst; bool descriptor_reuse; enum dma_residue_granularity residue_granularity; int (*device_alloc_chan_resources)(struct dma_chan *); int (*device_router_config)(struct dma_chan *); void (*device_free_chan_resources)(struct dma_chan *); struct dma_async_tx_descriptor * (*device_prep_dma_memcpy)(struct dma_chan *, dma_addr_t, dma_addr_t, size_t, unsigned long); struct dma_async_tx_descriptor * (*device_prep_dma_xor)(struct dma_chan *, dma_addr_t, dma_addr_t *, unsigned int, size_t, unsigned long); struct dma_async_tx_descriptor * (*device_prep_dma_xor_val)(struct dma_chan *, dma_addr_t *, unsigned int, size_t, enum sum_check_flags *, unsigned long); struct dma_async_tx_descriptor * (*device_prep_dma_pq)(struct dma_chan *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t, unsigned long); struct dma_async_tx_descriptor * (*device_prep_dma_pq_val)(struct dma_chan *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t, enum sum_check_flags *, unsigned long); struct dma_async_tx_descriptor * (*device_prep_dma_memset)(struct dma_chan *, dma_addr_t, int, size_t, unsigned long); struct dma_async_tx_descriptor * (*device_prep_dma_memset_sg)(struct dma_chan *, struct scatterlist *, unsigned int, int, unsigned long); struct dma_async_tx_descriptor * (*device_prep_dma_interrupt)(struct dma_chan *, unsigned long); struct dma_async_tx_descriptor * (*device_prep_slave_sg)(struct dma_chan *, struct scatterlist *, unsigned int, enum dma_transfer_direction, unsigned long, void *); struct dma_async_tx_descriptor * (*device_prep_dma_cyclic)(struct dma_chan *, dma_addr_t, size_t, size_t, enum dma_transfer_direction, unsigned long); struct dma_async_tx_descriptor * (*device_prep_interleaved_dma)(struct dma_chan *, struct dma_interleaved_template *, unsigned long); struct dma_async_tx_descriptor * (*device_prep_dma_imm_data)(struct dma_chan *, dma_addr_t, u64, unsigned long); void (*device_caps)(struct dma_chan *, struct dma_slave_caps *); int (*device_config)(struct dma_chan *, struct dma_slave_config *); int (*device_pause)(struct dma_chan *); int (*device_resume)(struct dma_chan *); int (*device_terminate_all)(struct dma_chan *); void (*device_synchronize)(struct dma_chan *); enum dma_status (*device_tx_status)(struct dma_chan *, dma_cookie_t, struct dma_tx_state *); void (*device_issue_pending)(struct dma_chan *); void (*device_release)(struct dma_device *); void (*dbg_summary_show)(struct seq_file *, struct dma_device *); struct dentry *dbg_dev_root; }; struct dma_slave_map { const char *devname; const char *slave; void *param; }; enum dma_ctrl_flags { DMA_PREP_INTERRUPT = 1, DMA_CTRL_ACK = 2, DMA_PREP_PQ_DISABLE_P = 4, DMA_PREP_PQ_DISABLE_Q = 8, DMA_PREP_CONTINUE = 16, DMA_PREP_FENCE = 32, DMA_CTRL_REUSE = 64, DMA_PREP_CMD = 128, DMA_PREP_REPEAT = 256, DMA_PREP_LOAD_EOT = 512, }; typedef void (*dma_async_tx_callback)(void *); struct dmaengine_result; typedef void (*dma_async_tx_callback_result)(void *, const struct dmaengine_result *); struct dmaengine_unmap_data; struct dma_descriptor_metadata_ops; struct dma_async_tx_descriptor { dma_cookie_t cookie; enum dma_ctrl_flags flags; dma_addr_t phys; struct dma_chan *chan; dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *); int (*desc_free)(struct dma_async_tx_descriptor *); dma_async_tx_callback callback; dma_async_tx_callback_result callback_result; void *callback_param; struct dmaengine_unmap_data *unmap; enum dma_desc_metadata_mode desc_metadata_mode; struct 
dma_descriptor_metadata_ops *metadata_ops; struct dma_async_tx_descriptor *next; struct dma_async_tx_descriptor *parent; spinlock_t lock; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; enum dmaengine_tx_result { DMA_TRANS_NOERROR = 0, DMA_TRANS_READ_FAILED = 1, DMA_TRANS_WRITE_FAILED = 2, DMA_TRANS_ABORTED = 3, }; struct dmaengine_result { enum dmaengine_tx_result result; u32 residue; }; struct dmaengine_unmap_data { u8 map_cnt; u8 to_cnt; u8 from_cnt; u8 bidi_cnt; struct device *dev; struct kref kref; size_t len; dma_addr_t addr[0]; }; struct dma_descriptor_metadata_ops { int (*attach)(struct dma_async_tx_descriptor *, void *, size_t); void * (*get_ptr)(struct dma_async_tx_descriptor *, size_t *, size_t *); int (*set_len)(struct dma_async_tx_descriptor *, size_t); }; struct data_chunk { size_t size; size_t icg; size_t dst_icg; size_t src_icg; }; struct dma_interleaved_template { dma_addr_t src_start; dma_addr_t dst_start; enum dma_transfer_direction dir; bool src_inc; bool dst_inc; bool src_sgl; bool dst_sgl; size_t numf; size_t frame_size; struct data_chunk sgl[0]; }; struct dma_slave_caps { u32 src_addr_widths; u32 dst_addr_widths; u32 directions; u32 min_burst; u32 max_burst; u32 max_sg_burst; bool cmd_pause; bool cmd_resume; bool cmd_terminate; enum dma_residue_granularity residue_granularity; bool descriptor_reuse; }; enum dma_slave_buswidth { DMA_SLAVE_BUSWIDTH_UNDEFINED = 0, DMA_SLAVE_BUSWIDTH_1_BYTE = 1, DMA_SLAVE_BUSWIDTH_2_BYTES = 2, DMA_SLAVE_BUSWIDTH_3_BYTES = 3, DMA_SLAVE_BUSWIDTH_4_BYTES = 4, DMA_SLAVE_BUSWIDTH_8_BYTES = 8, DMA_SLAVE_BUSWIDTH_16_BYTES = 16, DMA_SLAVE_BUSWIDTH_32_BYTES = 32, DMA_SLAVE_BUSWIDTH_64_BYTES = 64, DMA_SLAVE_BUSWIDTH_128_BYTES = 128, }; struct dma_slave_config { enum dma_transfer_direction direction; phys_addr_t src_addr; phys_addr_t dst_addr; enum dma_slave_buswidth src_addr_width; enum dma_slave_buswidth dst_addr_width; u32 src_maxburst; u32 dst_maxburst; u32 src_port_window_size; u32 dst_port_window_size; bool device_fc; void *peripheral_config; size_t peripheral_size; }; struct dma_tx_state { dma_cookie_t last; dma_cookie_t used; u32 residue; u32 in_flight_bytes; }; struct dma_chan_dev { struct dma_chan *chan; struct device device; int dev_id; bool chan_dma_dev; }; struct dma_chan_percpu { unsigned long memcpy_count; unsigned long bytes_transferred; }; struct dma_router { struct device *dev; void (*route_free)(struct device *, void *); }; struct dmaengine_unmap_pool { struct kmem_cache *cache; const char *name; mempool_t *pool; size_t size; }; enum dma_transaction_type { DMA_MEMCPY = 0, DMA_XOR = 1, DMA_PQ = 2, DMA_XOR_VAL = 3, DMA_PQ_VAL = 4, DMA_MEMSET = 5, DMA_MEMSET_SG = 6, DMA_INTERRUPT = 7, DMA_PRIVATE = 8, DMA_ASYNC_TX = 9, DMA_SLAVE = 10, DMA_CYCLIC = 11, DMA_INTERLEAVE = 12, DMA_COMPLETION_NO_ORDER = 13, DMA_REPEAT = 14, DMA_LOAD_EOT = 15, DMA_TX_TYPE_END = 16, }; struct virt_dma_desc { struct dma_async_tx_descriptor tx; struct dmaengine_result tx_result; struct list_head node; }; struct virt_dma_chan { struct dma_chan chan; struct tasklet_struct task; void (*desc_free)(struct virt_dma_desc *); spinlock_t lock; struct list_head desc_allocated; struct list_head desc_submitted; struct list_head desc_issued; struct list_head desc_completed; struct list_head desc_terminated; struct virt_dma_desc *cyclic; }; struct dmaengine_desc_callback { dma_async_tx_callback callback; dma_async_tx_callback_result callback_result; void *callback_param; }; struct of_dma { struct list_head 
of_dma_controllers; struct device_node *of_node; struct dma_chan * (*of_dma_xlate)(struct of_phandle_args *, struct of_dma *); void * (*of_dma_route_allocate)(struct of_phandle_args *, struct of_dma *); struct dma_router *dma_router; void *of_dma_data; }; struct of_dma_filter_info { dma_cap_mask_t dma_cap; dma_filter_fn filter_fn; }; struct geni_se_desc { unsigned int num_clks; const char * const *clks; }; enum geni_se_xfer_mode { GENI_SE_INVALID = 0, GENI_SE_FIFO = 1, GENI_SE_DMA = 2, GENI_GPI_DMA = 3, }; enum geni_se_protocol_type { GENI_SE_NONE = 0, GENI_SE_SPI = 1, GENI_SE_UART = 2, GENI_SE_I2C = 3, GENI_SE_I3C = 4, GENI_SE_SPI_SLAVE = 5, }; struct geni_icc_path { struct icc_path *path; unsigned int avg_bw; }; struct geni_wrapper; struct geni_se { void *base; struct device *dev; struct geni_wrapper *wrapper; struct clk *clk; unsigned int num_clk_levels; unsigned long *clk_perf_tbl; struct geni_icc_path icc_paths[3]; }; struct geni_wrapper { struct device *dev; void *base; struct clk_bulk_data clks[2]; unsigned int num_clks; }; struct qcom_smem_state_ops { int (*update_bits)(void *, u32, u32); }; struct qcom_smem_state { struct kref refcount; bool orphan; struct list_head list; struct device_node *of_node; void *priv; struct qcom_smem_state_ops ops; }; enum bus_notifier_event { BUS_NOTIFY_ADD_DEVICE = 0, BUS_NOTIFY_DEL_DEVICE = 1, BUS_NOTIFY_REMOVED_DEVICE = 2, BUS_NOTIFY_BIND_DRIVER = 3, BUS_NOTIFY_BOUND_DRIVER = 4, BUS_NOTIFY_UNBIND_DRIVER = 5, BUS_NOTIFY_UNBOUND_DRIVER = 6, BUS_NOTIFY_DRIVER_NOT_BOUND = 7, }; struct sunxi_sram_func; struct sunxi_sram_data { char *name; u8 reg; u8 offset; u8 width; struct sunxi_sram_func *func; struct list_head list; }; struct sunxi_sram_desc { struct sunxi_sram_data data; bool claimed; }; struct sunxi_sram_func { char *func; u8 val; u32 reg_val; }; struct sunxi_sramc_variant { int num_emac_clocks; bool has_ldo_ctrl; }; enum tegra_revision { TEGRA_REVISION_UNKNOWN = 0, TEGRA_REVISION_A01 = 1, TEGRA_REVISION_A02 = 2, TEGRA_REVISION_A03 = 3, TEGRA_REVISION_A03p = 4, TEGRA_REVISION_A04 = 5, TEGRA_REVISION_MAX = 6, }; enum tegra_platform { TEGRA_PLATFORM_SILICON = 0, TEGRA_PLATFORM_QT = 1, TEGRA_PLATFORM_SYSTEM_FPGA = 2, TEGRA_PLATFORM_UNIT_FPGA = 3, TEGRA_PLATFORM_ASIM_QT = 4, TEGRA_PLATFORM_ASIM_LINSIM = 5, TEGRA_PLATFORM_DSIM_ASIM_LINSIM = 6, TEGRA_PLATFORM_VERIFICATION_SIMULATION = 7, TEGRA_PLATFORM_VDK = 8, TEGRA_PLATFORM_VSP = 9, TEGRA_PLATFORM_MAX = 10, }; struct tegra_sku_info { int sku_id; int cpu_process_id; int cpu_speedo_id; int cpu_speedo_value; int cpu_iddq_value; int soc_process_id; int soc_speedo_id; int soc_speedo_value; int gpu_process_id; int gpu_speedo_id; int gpu_speedo_value; enum tegra_revision revision; enum tegra_platform platform; }; struct tegra_fuse_soc; struct nvmem_device; struct nvmem_cell_lookup; struct tegra_fuse { struct device *dev; void *base; phys_addr_t phys; struct clk *clk; struct reset_control *rst; u32 (*read_early)(struct tegra_fuse *, unsigned int); u32 (*read)(struct tegra_fuse *, unsigned int); const struct tegra_fuse_soc *soc; struct { struct mutex lock; struct completion wait; struct dma_chan *chan; struct dma_slave_config config; dma_addr_t phys; u32 *virt; } apbdma; struct nvmem_device *nvmem; struct nvmem_cell_lookup *lookups; }; struct tegra_fuse_info; struct nvmem_cell_info; struct nvmem_keepout; struct tegra_fuse_soc { void (*init)(struct tegra_fuse *); void (*speedo_init)(struct tegra_sku_info *); int (*probe)(struct tegra_fuse *); const struct tegra_fuse_info *info; const struct nvmem_cell_lookup 
*lookups; unsigned int num_lookups; const struct nvmem_cell_info *cells; unsigned int num_cells; const struct nvmem_keepout *keepouts; unsigned int num_keepouts; const struct attribute_group *soc_attr_group; bool clk_suspend_on; }; struct tegra_fuse_info { u32 (*read)(struct tegra_fuse *, unsigned int); unsigned int size; unsigned int spare; }; struct nvmem_cell_lookup { const char *nvmem_name; const char *cell_name; const char *dev_id; const char *con_id; struct list_head node; }; typedef int (*nvmem_cell_post_process_t)(void *, const char *, int, unsigned int, void *, size_t); struct nvmem_cell_info { const char *name; unsigned int offset; size_t raw_len; unsigned int bytes; unsigned int bit_offset; unsigned int nbits; struct device_node *np; nvmem_cell_post_process_t read_post_process; void *priv; }; struct nvmem_keepout { unsigned int start; unsigned int end; unsigned char value; }; enum nvmem_type { NVMEM_TYPE_UNKNOWN = 0, NVMEM_TYPE_EEPROM = 1, NVMEM_TYPE_OTP = 2, NVMEM_TYPE_BATTERY_BACKED = 3, NVMEM_TYPE_FRAM = 4, }; struct soc_device_attribute { const char *machine; const char *family; const char *revision; const char *serial_number; const char *soc_id; const void *data; const struct attribute_group *custom_attr_group; }; typedef int (*nvmem_reg_read_t)(void *, unsigned int, void *, size_t); typedef int (*nvmem_reg_write_t)(void *, unsigned int, void *, size_t); struct nvmem_layout; struct nvmem_config { struct device *dev; const char *name; int id; struct module *owner; const struct nvmem_cell_info *cells; int ncells; bool add_legacy_fixed_of_cells; const struct nvmem_keepout *keepout; unsigned int nkeepout; enum nvmem_type type; bool read_only; bool root_only; bool ignore_wp; struct nvmem_layout *layout; struct device_node *of_node; bool no_of_node; nvmem_reg_read_t reg_read; nvmem_reg_write_t reg_write; int size; int word_size; int stride; void *priv; bool compat; struct device *base_dev; }; struct nvmem_layout { const char *name; const struct of_device_id *of_match_table; int (*add_cells)(struct device *, struct nvmem_device *, struct nvmem_layout *); void (*fixup_cell_info)(struct nvmem_device *, struct nvmem_layout *, struct nvmem_cell_info *); struct module *owner; struct list_head node; }; struct tegra_cbb_ops; struct tegra_cbb { struct device *dev; const struct tegra_cbb_ops *ops; struct list_head node; }; struct tegra_cbb_ops { int (*debugfs_show)(struct tegra_cbb *, struct seq_file *, void *); int (*interrupt_enable)(struct tegra_cbb *); void (*error_enable)(struct tegra_cbb *); void (*fault_enable)(struct tegra_cbb *); void (*stall_enable)(struct tegra_cbb *); void (*error_clear)(struct tegra_cbb *); u32 (*get_status)(struct tegra_cbb *); }; struct tegra234_cbb_fabric; struct tegra234_cbb_acpi_uid { const char *hid; const char *uid; const struct tegra234_cbb_fabric *fabric; }; struct tegra_cbb_error; struct tegra234_slave_lookup; struct tegra234_cbb_fabric { const char *name; phys_addr_t off_mask_erd; phys_addr_t firewall_base; unsigned int firewall_ctl; unsigned int firewall_wr_ctl; const char * const *master_id; unsigned int notifier_offset; const struct tegra_cbb_error *errors; const int max_errors; const struct tegra234_slave_lookup *slave_map; const int max_slaves; }; struct tegra_cbb_error { const char *code; const char *source; const char *desc; }; struct tegra234_slave_lookup { const char *name; unsigned int offset; }; enum tegra234_cbb_fabric_ids { CBB_FAB_ID = 0, SCE_FAB_ID = 1, RCE_FAB_ID = 2, DCE_FAB_ID = 3, AON_FAB_ID = 4, PSC_FAB_ID = 5, BPMP_FAB_ID = 6, 
FSI_FAB_ID = 7, MAX_FAB_ID = 8, }; struct tegra234_cbb { struct tegra_cbb base; const struct tegra234_cbb_fabric *fabric; struct resource *res; void *regs; int num_intr; int sec_irq; void *mon; unsigned int type; u32 mask; u64 access; u32 mn_attr0; u32 mn_attr1; u32 mn_attr2; u32 mn_user_bits; }; typedef int (*config_clks_t)(struct device *, struct opp_table *, struct dev_pm_opp *, void *, bool); typedef int (*config_regulators_t)(struct device *, struct dev_pm_opp *, struct dev_pm_opp *, struct regulator **, unsigned int); struct dev_pm_opp_config { const char * const *clk_names; config_clks_t config_clks; const char *prop_name; config_regulators_t config_regulators; const unsigned int *supported_hw; unsigned int supported_hw_count; const char * const *regulator_names; const char * const *genpd_names; struct device ***virt_devs; }; enum tegra_suspend_mode { TEGRA_SUSPEND_NONE = 0, TEGRA_SUSPEND_LP2 = 1, TEGRA_SUSPEND_LP1 = 2, TEGRA_SUSPEND_LP0 = 3, TEGRA_MAX_SUSPEND_MODE = 4, TEGRA_SUSPEND_NOT_READY = 5, }; struct tegra_pmc_soc; struct tegra_pmc { struct device *dev; void *base; void *wake; void *aotag; void *scratch; struct clk *clk; const struct tegra_pmc_soc *soc; bool tz_only; unsigned long rate; enum tegra_suspend_mode suspend_mode; u32 cpu_good_time; u32 cpu_off_time; u32 core_osc_time; u32 core_pmu_time; u32 core_off_time; bool corereq_high; bool sysclkreq_high; bool combined_req; bool cpu_pwr_good_en; u32 lp0_vec_phys; u32 lp0_vec_size; unsigned long powergates_available[1]; struct mutex powergates_lock; struct pinctrl_dev *pctl_dev; struct irq_domain *domain; struct irq_chip irq; struct notifier_block clk_nb; bool core_domain_state_synced; bool core_domain_registered; unsigned long *wake_type_level_map; unsigned long *wake_type_dual_edge_map; unsigned long *wake_sw_status_map; unsigned long *wake_cntrl_level_map; struct syscore_ops syscore; }; struct tegra_io_pad_soc; struct tegra_pmc_regs; struct tegra_wake_event; struct pmc_clk_init_data; struct tegra_pmc_soc { unsigned int num_powergates; const char * const *powergates; unsigned int num_cpu_powergates; const u8 *cpu_powergates; bool has_tsense_reset; bool has_gpu_clamps; bool needs_mbist_war; bool has_impl_33v_pwr; bool maybe_tz_only; const struct tegra_io_pad_soc *io_pads; unsigned int num_io_pads; const struct pinctrl_pin_desc *pin_descs; unsigned int num_pin_descs; const struct tegra_pmc_regs *regs; void (*init)(struct tegra_pmc *); void (*setup_irq_polarity)(struct tegra_pmc *, struct device_node *, bool); void (*set_wake_filters)(struct tegra_pmc *); int (*irq_set_wake)(struct irq_data *, unsigned int); int (*irq_set_type)(struct irq_data *, unsigned int); int (*powergate_set)(struct tegra_pmc *, unsigned int, bool); const char * const *reset_sources; unsigned int num_reset_sources; const char * const *reset_levels; unsigned int num_reset_levels; const struct tegra_wake_event *wake_events; unsigned int num_wake_events; unsigned int max_wake_events; unsigned int max_wake_vectors; const struct pmc_clk_init_data *pmc_clks_data; unsigned int num_pmc_clks; bool has_blink_output; bool has_usb_sleepwalk; bool supports_core_domain; }; enum tegra_io_pad { TEGRA_IO_PAD_AUDIO = 0, TEGRA_IO_PAD_AUDIO_HV = 1, TEGRA_IO_PAD_BB = 2, TEGRA_IO_PAD_CAM = 3, TEGRA_IO_PAD_COMP = 4, TEGRA_IO_PAD_CONN = 5, TEGRA_IO_PAD_CSIA = 6, TEGRA_IO_PAD_CSIB = 7, TEGRA_IO_PAD_CSIC = 8, TEGRA_IO_PAD_CSID = 9, TEGRA_IO_PAD_CSIE = 10, TEGRA_IO_PAD_CSIF = 11, TEGRA_IO_PAD_CSIG = 12, TEGRA_IO_PAD_CSIH = 13, TEGRA_IO_PAD_DAP3 = 14, TEGRA_IO_PAD_DAP5 = 15, 
TEGRA_IO_PAD_DBG = 16, TEGRA_IO_PAD_DEBUG_NONAO = 17, TEGRA_IO_PAD_DMIC = 18, TEGRA_IO_PAD_DMIC_HV = 19, TEGRA_IO_PAD_DP = 20, TEGRA_IO_PAD_DSI = 21, TEGRA_IO_PAD_DSIB = 22, TEGRA_IO_PAD_DSIC = 23, TEGRA_IO_PAD_DSID = 24, TEGRA_IO_PAD_EDP = 25, TEGRA_IO_PAD_EMMC = 26, TEGRA_IO_PAD_EMMC2 = 27, TEGRA_IO_PAD_EQOS = 28, TEGRA_IO_PAD_GPIO = 29, TEGRA_IO_PAD_GP_PWM2 = 30, TEGRA_IO_PAD_GP_PWM3 = 31, TEGRA_IO_PAD_HDMI = 32, TEGRA_IO_PAD_HDMI_DP0 = 33, TEGRA_IO_PAD_HDMI_DP1 = 34, TEGRA_IO_PAD_HDMI_DP2 = 35, TEGRA_IO_PAD_HDMI_DP3 = 36, TEGRA_IO_PAD_HSIC = 37, TEGRA_IO_PAD_HV = 38, TEGRA_IO_PAD_LVDS = 39, TEGRA_IO_PAD_MIPI_BIAS = 40, TEGRA_IO_PAD_NAND = 41, TEGRA_IO_PAD_PEX_BIAS = 42, TEGRA_IO_PAD_PEX_CLK_BIAS = 43, TEGRA_IO_PAD_PEX_CLK1 = 44, TEGRA_IO_PAD_PEX_CLK2 = 45, TEGRA_IO_PAD_PEX_CLK3 = 46, TEGRA_IO_PAD_PEX_CLK_2_BIAS = 47, TEGRA_IO_PAD_PEX_CLK_2 = 48, TEGRA_IO_PAD_PEX_CNTRL = 49, TEGRA_IO_PAD_PEX_CTL2 = 50, TEGRA_IO_PAD_PEX_L0_RST = 51, TEGRA_IO_PAD_PEX_L1_RST = 52, TEGRA_IO_PAD_PEX_L5_RST = 53, TEGRA_IO_PAD_PWR_CTL = 54, TEGRA_IO_PAD_SDMMC1 = 55, TEGRA_IO_PAD_SDMMC1_HV = 56, TEGRA_IO_PAD_SDMMC2 = 57, TEGRA_IO_PAD_SDMMC2_HV = 58, TEGRA_IO_PAD_SDMMC3 = 59, TEGRA_IO_PAD_SDMMC3_HV = 60, TEGRA_IO_PAD_SDMMC4 = 61, TEGRA_IO_PAD_SOC_GPIO10 = 62, TEGRA_IO_PAD_SOC_GPIO12 = 63, TEGRA_IO_PAD_SOC_GPIO13 = 64, TEGRA_IO_PAD_SOC_GPIO53 = 65, TEGRA_IO_PAD_SPI = 66, TEGRA_IO_PAD_SPI_HV = 67, TEGRA_IO_PAD_SYS_DDC = 68, TEGRA_IO_PAD_UART = 69, TEGRA_IO_PAD_UART4 = 70, TEGRA_IO_PAD_UART5 = 71, TEGRA_IO_PAD_UFS = 72, TEGRA_IO_PAD_USB0 = 73, TEGRA_IO_PAD_USB1 = 74, TEGRA_IO_PAD_USB2 = 75, TEGRA_IO_PAD_USB3 = 76, TEGRA_IO_PAD_USB_BIAS = 77, TEGRA_IO_PAD_AO_HV = 78, }; struct tegra_io_pad_soc { enum tegra_io_pad id; unsigned int dpd; unsigned int request; unsigned int status; unsigned int voltage; const char *name; }; struct tegra_pmc_regs { unsigned int scratch0; unsigned int rst_status; unsigned int rst_source_shift; unsigned int rst_source_mask; unsigned int rst_level_shift; unsigned int rst_level_mask; }; struct tegra_wake_event { const char *name; unsigned int id; unsigned int irq; struct { unsigned int instance; unsigned int pin; } gpio; }; struct pmc_clk_init_data { char *name; const char * const *parents; int num_parents; int clk_id; u8 mux_shift; u8 force_en_shift; }; struct tegra_powergate { struct generic_pm_domain genpd; struct tegra_pmc *pmc; unsigned int id; struct clk **clks; unsigned int num_clks; unsigned long *clk_rates; struct reset_control *reset; }; struct pmc_clk { struct clk_hw hw; unsigned long offs; u32 mux_shift; u32 force_en_shift; }; struct pmc_clk_gate { struct clk_hw hw; unsigned long offs; u32 shift; }; struct arm_smccc_quirk { int id; union { unsigned long a6; } state; }; typedef int (*regmap_hw_write)(void *, const void *, size_t); typedef int (*regmap_hw_gather_write)(void *, const void *, size_t, const void *, size_t); struct regmap_async; typedef int (*regmap_hw_async_write)(void *, const void *, size_t, const void *, size_t, struct regmap_async *); typedef int (*regmap_hw_reg_write)(void *, unsigned int, unsigned int); typedef int (*regmap_hw_reg_noinc_write)(void *, unsigned int, const void *, size_t); typedef int (*regmap_hw_reg_update_bits)(void *, unsigned int, unsigned int, unsigned int); typedef int (*regmap_hw_read)(void *, const void *, size_t, void *, size_t); typedef int (*regmap_hw_reg_read)(void *, unsigned int, unsigned int *); typedef int (*regmap_hw_reg_noinc_read)(void *, unsigned int, void *, size_t); typedef void (*regmap_hw_free_context)(void *); typedef 
struct regmap_async * (*regmap_hw_async_alloc)(); struct regmap_bus { bool fast_io; bool free_on_exit; regmap_hw_write write; regmap_hw_gather_write gather_write; regmap_hw_async_write async_write; regmap_hw_reg_write reg_write; regmap_hw_reg_noinc_write reg_noinc_write; regmap_hw_reg_update_bits reg_update_bits; regmap_hw_read read; regmap_hw_reg_read reg_read; regmap_hw_reg_noinc_read reg_noinc_read; regmap_hw_free_context free_context; regmap_hw_async_alloc async_alloc; u8 read_flag_mask; enum regmap_endian reg_format_endian_default; enum regmap_endian val_format_endian_default; size_t max_raw_read; size_t max_raw_write; u64 android_kabi_reserved1; }; struct scmi_device; struct scmi_device_id; struct scmi_driver { const char *name; int (*probe)(struct scmi_device *); void (*remove)(struct scmi_device *); const struct scmi_device_id *id_table; struct device_driver driver; }; struct scmi_handle; struct scmi_device { u32 id; u8 protocol_id; const char *name; struct device dev; struct scmi_handle *handle; u64 android_kabi_reserved1; }; struct scmi_revision_info; struct scmi_protocol_handle; struct scmi_notify_ops; struct scmi_handle { struct device *dev; struct scmi_revision_info *version; int (*devm_protocol_acquire)(struct scmi_device *, u8); const void * (*devm_protocol_get)(struct scmi_device *, u8, struct scmi_protocol_handle **); void (*devm_protocol_put)(struct scmi_device *, u8); bool (*is_transport_atomic)(const struct scmi_handle *, unsigned int *); const struct scmi_notify_ops *notify_ops; u64 android_kabi_reserved1; }; struct scmi_revision_info { u16 major_ver; u16 minor_ver; u8 num_protocols; u8 num_agents; u32 impl_ver; char vendor_id[16]; char sub_vendor_id[16]; }; struct scmi_notify_ops { int (*devm_event_notifier_register)(struct scmi_device *, u8, u8, const u32 *, struct notifier_block *); int (*devm_event_notifier_unregister)(struct scmi_device *, u8, u8, const u32 *, struct notifier_block *); int (*event_notifier_register)(const struct scmi_handle *, u8, u8, const u32 *, struct notifier_block *); int (*event_notifier_unregister)(const struct scmi_handle *, u8, u8, const u32 *, struct notifier_block *); }; struct scmi_device_id { u8 protocol_id; const char *name; }; enum scmi_power_scale { SCMI_POWER_BOGOWATTS = 0, SCMI_POWER_MILLIWATTS = 1, SCMI_POWER_MICROWATTS = 2, }; enum scmi_std_protocol { SCMI_PROTOCOL_BASE = 16, SCMI_PROTOCOL_POWER = 17, SCMI_PROTOCOL_SYSTEM = 18, SCMI_PROTOCOL_PERF = 19, SCMI_PROTOCOL_CLOCK = 20, SCMI_PROTOCOL_SENSOR = 21, SCMI_PROTOCOL_RESET = 22, SCMI_PROTOCOL_VOLTAGE = 23, SCMI_PROTOCOL_POWERCAP = 24, SCMI_PROTOCOL_PINCTRL = 25, }; struct scmi_perf_proto_ops; struct scmi_perf_domain_info; struct scmi_perf_domain { struct generic_pm_domain genpd; const struct scmi_perf_proto_ops *perf_ops; const struct scmi_protocol_handle *ph; const struct scmi_perf_domain_info *info; u32 domain_id; }; struct scmi_perf_proto_ops { int (*num_domains_get)(const struct scmi_protocol_handle *); const struct scmi_perf_domain_info * (*info_get)(const struct scmi_protocol_handle *, u32); int (*limits_set)(const struct scmi_protocol_handle *, u32, u32, u32); int (*limits_get)(const struct scmi_protocol_handle *, u32, u32 *, u32 *); int (*level_set)(const struct scmi_protocol_handle *, u32, u32, bool); int (*level_get)(const struct scmi_protocol_handle *, u32, u32 *, bool); int (*transition_latency_get)(const struct scmi_protocol_handle *, u32); int (*device_opps_add)(const struct scmi_protocol_handle *, struct device *, u32); int (*freq_set)(const struct 
scmi_protocol_handle *, u32, unsigned long, bool); int (*freq_get)(const struct scmi_protocol_handle *, u32, unsigned long *, bool); int (*est_power_get)(const struct scmi_protocol_handle *, u32, unsigned long *, unsigned long *); bool (*fast_switch_possible)(const struct scmi_protocol_handle *, u32); enum scmi_power_scale (*power_scale_get)(const struct scmi_protocol_handle *); u64 android_kabi_reserved1; }; struct scmi_xfer_ops; struct scmi_proto_helpers_ops; struct scmi_protocol_handle { struct device *dev; const struct scmi_xfer_ops *xops; const struct scmi_proto_helpers_ops *hops; int (*set_priv)(const struct scmi_protocol_handle *, void *, u32); void * (*get_priv)(const struct scmi_protocol_handle *); }; struct scmi_perf_domain_info { char name[64]; bool set_perf; }; enum mrq_pg_cmd { CMD_PG_QUERY_ABI = 0, CMD_PG_SET_STATE = 1, CMD_PG_GET_STATE = 2, CMD_PG_GET_NAME = 3, CMD_PG_GET_MAX_ID = 4, }; enum pg_states { PG_STATE_OFF = 0, PG_STATE_ON = 1, PG_STATE_RUNNING = 2, }; struct tegra_powergate___2 { struct generic_pm_domain genpd; struct tegra_bpmp *bpmp; unsigned int id; }; struct cmd_pg_query_abi_request { uint32_t type; }; struct cmd_pg_set_state_request { uint32_t state; }; struct mrq_pg_request { uint32_t cmd; uint32_t id; union { struct cmd_pg_query_abi_request query_abi; struct cmd_pg_set_state_request set_state; }; }; struct cmd_pg_get_state_response { uint32_t state; }; struct cmd_pg_get_name_response { uint8_t name[40]; }; struct cmd_pg_get_max_id_response { uint32_t max_id; }; struct mrq_pg_response { union { struct cmd_pg_get_state_response get_state; struct cmd_pg_get_name_response get_name; struct cmd_pg_get_max_id_response get_max_id; }; }; struct tegra_powergate_info { unsigned int id; char *name; }; struct vring_desc; typedef struct vring_desc vring_desc_t; struct vring_avail; typedef struct vring_avail vring_avail_t; struct vring_used; typedef struct vring_used vring_used_t; struct vring { unsigned int num; vring_desc_t *desc; vring_avail_t *avail; vring_used_t *used; }; struct vring_desc_state_split; struct vring_desc_extra; struct vring_virtqueue_split { struct vring vring; u16 avail_flags_shadow; u16 avail_idx_shadow; struct vring_desc_state_split *desc_state; struct vring_desc_extra *desc_extra; dma_addr_t queue_dma_addr; size_t queue_size_in_bytes; u32 vring_align; bool may_reduce_num; }; struct vring_packed_desc; struct vring_packed_desc_event; struct vring_desc_state_packed; struct vring_virtqueue_packed { struct { unsigned int num; struct vring_packed_desc *desc; struct vring_packed_desc_event *driver; struct vring_packed_desc_event *device; } vring; bool avail_wrap_counter; u16 avail_used_flags; u16 next_avail_idx; u16 event_flags_shadow; struct vring_desc_state_packed *desc_state; struct vring_desc_extra *desc_extra; dma_addr_t ring_dma_addr; dma_addr_t driver_event_dma_addr; dma_addr_t device_event_dma_addr; size_t ring_size_in_bytes; size_t event_size_in_bytes; }; struct vring_virtqueue { struct virtqueue vq; bool packed_ring; bool use_dma_api; bool weak_barriers; bool broken; bool indirect; bool event; bool premapped; bool do_unmap; unsigned int free_head; unsigned int num_added; u16 last_used_idx; bool event_triggered; union { struct vring_virtqueue_split split; struct vring_virtqueue_packed packed; }; bool (*notify)(struct virtqueue *); bool we_own_ring; struct device *dma_dev; }; typedef __u64 __virtio64; typedef __u32 __virtio32; typedef __u16 __virtio16; struct vring_desc { __virtio64 addr; __virtio32 len; __virtio16 flags; __virtio16 next; }; 
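/*
 * Illustrative helper, not generated from BTF: a minimal sketch showing how one
 * split-ring descriptor (struct vring_desc above) could be filled before being
 * chained.  The helper name, the caller-supplied values and the literal flag
 * bit (bit 0, VRING_DESC_F_NEXT in the virtio specification) are assumptions
 * made for this example only; real code would additionally convert the fields
 * to the virtio little-endian representation.
 */
static inline void vring_desc_fill_example(struct vring_desc *d, __virtio64 addr,
					   __virtio32 len, bool chain, __virtio16 next)
{
	d->addr = addr;             /* bus/guest-physical address of the buffer */
	d->len = len;               /* buffer length in bytes */
	d->flags = chain ? 1 : 0;   /* bit 0 set: another descriptor follows */
	d->next = chain ? next : 0; /* index of the chained descriptor, if any */
}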
struct vring_avail { __virtio16 flags; __virtio16 idx; __virtio16 ring[0]; }; struct vring_used_elem { __virtio32 id; __virtio32 len; }; typedef struct vring_used_elem vring_used_elem_t; struct vring_used { __virtio16 flags; __virtio16 idx; vring_used_elem_t ring[0]; }; struct vring_desc_state_split { void *data; struct vring_desc *indir_desc; }; struct vring_desc_extra { dma_addr_t addr; u32 len; u16 flags; u16 next; }; struct vring_packed_desc { __le64 addr; __le32 len; __le16 id; __le16 flags; }; struct vring_packed_desc_event { __le16 off_wrap; __le16 flags; }; struct vring_desc_state_packed { void *data; struct vring_packed_desc *indir_desc; u16 num; u16 last; }; typedef void (*btf_trace_regulator_enable)(void *, const char *); typedef void (*btf_trace_regulator_enable_delay)(void *, const char *); typedef void (*btf_trace_regulator_enable_complete)(void *, const char *); typedef void (*btf_trace_regulator_disable)(void *, const char *); typedef void (*btf_trace_regulator_disable_complete)(void *, const char *); typedef void (*btf_trace_regulator_bypass_enable)(void *, const char *); typedef void (*btf_trace_regulator_bypass_enable_complete)(void *, const char *); typedef void (*btf_trace_regulator_bypass_disable)(void *, const char *); typedef void (*btf_trace_regulator_bypass_disable_complete)(void *, const char *); typedef void (*btf_trace_regulator_set_voltage)(void *, const char *, int, int); typedef void (*btf_trace_regulator_set_voltage_complete)(void *, const char *, unsigned int); struct ww_class { atomic_long_t stamp; struct lock_class_key acquire_key; struct lock_class_key mutex_key; const char *acquire_name; const char *mutex_name; unsigned int is_wait_die; }; struct regulator_dev; struct regulator_coupler { struct list_head list; int (*attach_regulator)(struct regulator_coupler *, struct regulator_dev *); int (*detach_regulator)(struct regulator_coupler *, struct regulator_dev *); int (*balance_voltage)(struct regulator_coupler *, struct regulator_dev *, suspend_state_t); }; struct coupling_desc { struct regulator_dev **coupled_rdevs; struct regulator_coupler *coupler; int n_resolved; int n_coupled; }; struct regulator_desc; struct regulation_constraints; struct regulator_enable_gpio; struct regulator_dev { const struct regulator_desc *desc; int exclusive; u32 use_count; u32 open_count; u32 bypass_count; struct list_head list; struct list_head consumer_list; struct coupling_desc coupling_desc; struct blocking_notifier_head notifier; struct ww_mutex mutex; struct task_struct *mutex_owner; int ref_cnt; struct module *owner; struct device dev; struct regulation_constraints *constraints; struct regulator *supply; const char *supply_name; struct regmap *regmap; struct delayed_work disable_work; void *reg_data; struct dentry *debugfs; struct regulator_enable_gpio *ena_pin; unsigned int ena_gpio_state: 1; unsigned int is_switch: 1; ktime_t last_off; int cached_err; bool use_cached_err; spinlock_t err_lock; u64 android_kabi_reserved1; }; enum regulator_type { REGULATOR_VOLTAGE = 0, REGULATOR_CURRENT = 1, }; struct regulator_config; struct regulator_ops; struct regulator_desc { const char *name; const char *supply_name; const char *of_match; bool of_match_full_name; const char *regulators_node; int (*of_parse_cb)(struct device_node *, const struct regulator_desc *, struct regulator_config *); int id; unsigned int continuous_voltage_range: 1; unsigned int n_voltages; unsigned int n_current_limits; const struct regulator_ops *ops; int irq; enum regulator_type type; struct module 
*owner; unsigned int min_uV; unsigned int uV_step; unsigned int linear_min_sel; int fixed_uV; unsigned int ramp_delay; int min_dropout_uV; const struct linear_range *linear_ranges; const unsigned int *linear_range_selectors_bitfield; int n_linear_ranges; const unsigned int *volt_table; const unsigned int *curr_table; unsigned int vsel_range_reg; unsigned int vsel_range_mask; unsigned int vsel_reg; unsigned int vsel_mask; unsigned int vsel_step; unsigned int csel_reg; unsigned int csel_mask; unsigned int apply_reg; unsigned int apply_bit; unsigned int enable_reg; unsigned int enable_mask; unsigned int enable_val; unsigned int disable_val; bool enable_is_inverted; unsigned int bypass_reg; unsigned int bypass_mask; unsigned int bypass_val_on; unsigned int bypass_val_off; unsigned int active_discharge_on; unsigned int active_discharge_off; unsigned int active_discharge_mask; unsigned int active_discharge_reg; unsigned int soft_start_reg; unsigned int soft_start_mask; unsigned int soft_start_val_on; unsigned int pull_down_reg; unsigned int pull_down_mask; unsigned int pull_down_val_on; unsigned int ramp_reg; unsigned int ramp_mask; const unsigned int *ramp_delay_table; unsigned int n_ramp_values; unsigned int enable_time; unsigned int off_on_delay; unsigned int poll_enabled_time; unsigned int (*of_map_mode)(unsigned int); u64 android_kabi_reserved1; }; struct regulator_init_data; struct regulator_config { struct device *dev; const struct regulator_init_data *init_data; void *driver_data; struct device_node *of_node; struct regmap *regmap; struct gpio_desc *ena_gpiod; }; struct regulator_state { int uV; int min_uV; int max_uV; unsigned int mode; int enabled; bool changeable; }; struct notification_limit { int prot; int err; int warn; }; struct regulation_constraints { const char *name; int min_uV; int max_uV; int uV_offset; int min_uA; int max_uA; int ilim_uA; int system_load; u32 *max_spread; int max_uV_step; unsigned int valid_modes_mask; unsigned int valid_ops_mask; int input_uV; struct regulator_state state_disk; struct regulator_state state_mem; struct regulator_state state_standby; struct notification_limit over_curr_limits; struct notification_limit over_voltage_limits; struct notification_limit under_voltage_limits; struct notification_limit temp_limits; suspend_state_t initial_state; unsigned int initial_mode; unsigned int ramp_delay; unsigned int settling_time; unsigned int settling_time_up; unsigned int settling_time_down; unsigned int enable_time; unsigned int active_discharge; unsigned int always_on: 1; unsigned int boot_on: 1; unsigned int apply_uV: 1; unsigned int ramp_disable: 1; unsigned int soft_start: 1; unsigned int pull_down: 1; unsigned int over_current_protection: 1; unsigned int over_current_detection: 1; unsigned int over_voltage_detection: 1; unsigned int under_voltage_detection: 1; unsigned int over_temp_detection: 1; }; struct regulator_consumer_supply; struct regulator_init_data { const char *supply_regulator; struct regulation_constraints constraints; int num_consumer_supplies; struct regulator_consumer_supply *consumer_supplies; int (*regulator_init)(void *); void *driver_data; }; struct regulator_consumer_supply { const char *dev_name; const char *supply; }; struct regulator_ops { int (*list_voltage)(struct regulator_dev *, unsigned int); int (*set_voltage)(struct regulator_dev *, int, int, unsigned int *); int (*map_voltage)(struct regulator_dev *, int, int); int (*set_voltage_sel)(struct regulator_dev *, unsigned int); int (*get_voltage)(struct regulator_dev *); 
int (*get_voltage_sel)(struct regulator_dev *); int (*set_current_limit)(struct regulator_dev *, int, int); int (*get_current_limit)(struct regulator_dev *); int (*set_input_current_limit)(struct regulator_dev *, int); int (*set_over_current_protection)(struct regulator_dev *, int, int, bool); int (*set_over_voltage_protection)(struct regulator_dev *, int, int, bool); int (*set_under_voltage_protection)(struct regulator_dev *, int, int, bool); int (*set_thermal_protection)(struct regulator_dev *, int, int, bool); int (*set_active_discharge)(struct regulator_dev *, bool); int (*enable)(struct regulator_dev *); int (*disable)(struct regulator_dev *); int (*is_enabled)(struct regulator_dev *); int (*set_mode)(struct regulator_dev *, unsigned int); unsigned int (*get_mode)(struct regulator_dev *); int (*get_error_flags)(struct regulator_dev *, unsigned int *); int (*enable_time)(struct regulator_dev *); int (*set_ramp_delay)(struct regulator_dev *, int); int (*set_voltage_time)(struct regulator_dev *, int, int); int (*set_voltage_time_sel)(struct regulator_dev *, unsigned int, unsigned int); int (*set_soft_start)(struct regulator_dev *); int (*get_status)(struct regulator_dev *); unsigned int (*get_optimum_mode)(struct regulator_dev *, int, int, int); int (*set_load)(struct regulator_dev *, int); int (*set_bypass)(struct regulator_dev *, bool); int (*get_bypass)(struct regulator_dev *, bool *); int (*set_suspend_voltage)(struct regulator_dev *, int); int (*set_suspend_enable)(struct regulator_dev *); int (*set_suspend_disable)(struct regulator_dev *); int (*set_suspend_mode)(struct regulator_dev *, unsigned int); int (*resume)(struct regulator_dev *); int (*set_pull_down)(struct regulator_dev *); u64 android_kabi_reserved1; }; struct regulator_voltage { int min_uV; int max_uV; }; struct regulator { struct device *dev; struct list_head list; unsigned int always_on: 1; unsigned int bypass: 1; unsigned int device_link: 1; int uA_load; unsigned int enable_count; unsigned int deferred_disables; struct regulator_voltage voltage[5]; const char *supply_name; struct device_attribute dev_attr; struct regulator_dev *rdev; struct dentry *debugfs; }; struct regulator_enable_gpio { struct list_head list; struct gpio_desc *gpiod; u32 enable_count; u32 request_count; }; enum regulator_get_type { NORMAL_GET = 0, EXCLUSIVE_GET = 1, OPTIONAL_GET = 2, MAX_GET_TYPE = 3, }; enum regulator_status { REGULATOR_STATUS_OFF = 0, REGULATOR_STATUS_ON = 1, REGULATOR_STATUS_ERROR = 2, REGULATOR_STATUS_FAST = 3, REGULATOR_STATUS_NORMAL = 4, REGULATOR_STATUS_IDLE = 5, REGULATOR_STATUS_STANDBY = 6, REGULATOR_STATUS_BYPASS = 7, REGULATOR_STATUS_UNDEFINED = 8, }; enum regulator_detection_severity { REGULATOR_SEVERITY_PROT = 0, REGULATOR_SEVERITY_ERR = 1, REGULATOR_SEVERITY_WARN = 2, }; enum regulator_active_discharge { REGULATOR_ACTIVE_DISCHARGE_DEFAULT = 0, REGULATOR_ACTIVE_DISCHARGE_DISABLE = 1, REGULATOR_ACTIVE_DISCHARGE_ENABLE = 2, }; struct trace_event_raw_regulator_basic { struct trace_entry ent; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_regulator_range { struct trace_entry ent; u32 __data_loc_name; int min; int max; char __data[0]; }; struct trace_event_raw_regulator_value { struct trace_entry ent; u32 __data_loc_name; unsigned int val; char __data[0]; }; struct regulator_map { struct list_head list; const char *dev_name; const char *supply; struct regulator_dev *regulator; }; struct regulator_supply_alias { struct list_head list; struct device *src_dev; const char *src_supply; struct device 
*alias_dev; const char *alias_supply; }; struct trace_event_data_offsets_regulator_basic { u32 name; }; struct trace_event_data_offsets_regulator_range { u32 name; }; struct trace_event_data_offsets_regulator_value { u32 name; }; struct pre_voltage_change_data { unsigned long old_uV; unsigned long min_uV; unsigned long max_uV; }; struct summary_lock_data { struct ww_acquire_ctx *ww_ctx; struct regulator_dev **new_contended_rdev; struct regulator_dev **old_contended_rdev; }; struct summary_data { struct seq_file *s; struct regulator_dev *parent; int level; }; struct fixed_voltage_config { const char *supply_name; const char *input_supply; int microvolts; unsigned int startup_delay; unsigned int off_on_delay; unsigned int enabled_at_boot: 1; struct regulator_init_data *init_data; }; struct fixed_regulator_data { struct fixed_voltage_config cfg; struct regulator_init_data init_data; struct platform_device pdev; }; struct regulator_bulk_devres { struct regulator_bulk_data *consumers; int num_consumers; }; struct regulator_supply_alias_match { struct device *dev; const char *id; }; struct regulator_irq_data; struct regulator_irq_desc { const char *name; int fatal_cnt; int reread_ms; int irq_off_ms; bool skip_off; bool high_prio; void *data; int (*die)(struct regulator_irq_data *); int (*map_event)(int, struct regulator_irq_data *, unsigned long *); int (*renable)(struct regulator_irq_data *); }; struct regulator_err_state; struct regulator_irq_data { struct regulator_err_state *states; int num_states; void *data; long opaque; }; struct regulator_err_state { struct regulator_dev *rdev; unsigned long notifs; unsigned long errors; int possible_errs; }; struct regulator_notifier_match { struct regulator *regulator; struct notifier_block *nb; }; enum { REGULATOR_ERROR_CLEARED = 0, REGULATOR_FAILED_RETRY = 1, REGULATOR_ERROR_ON = 2, }; struct regulator_irq { struct regulator_irq_data rdata; struct regulator_irq_desc desc; int irq; int retry_cnt; struct delayed_work isr_work; }; struct of_regulator_match { const char *name; void *driver_data; struct regulator_init_data *init_data; struct device_node *of_node; const struct regulator_desc *desc; }; struct devm_of_regulator_matches { struct of_regulator_match *matches; unsigned int num_matches; }; struct fixed_dev_type { bool has_enable_clock; bool has_performance_state; }; struct fixed_voltage_data { struct regulator_desc desc; struct regulator_dev *dev; struct clk *enable_clock; unsigned int enable_counter; int performance_state; }; struct reset_control { struct reset_controller_dev *rcdev; struct list_head list; unsigned int id; struct kref refcnt; bool acquired; bool shared; bool array; atomic_t deassert_count; atomic_t triggered_count; }; struct reset_control_array { struct reset_control base; unsigned int num_rstcs; struct reset_control *rstc[0]; }; struct reset_control_lookup { struct list_head list; const char *provider; unsigned int index; const char *dev_id; const char *con_id; }; struct reset_control_bulk_devres { int num_rstcs; struct reset_control_bulk_data *rstcs; }; enum hi6220_reset_ctrl_type { PERIPHERAL = 0, MEDIA = 1, AO = 2, }; struct hi6220_reset_data { struct reset_controller_dev rc_dev; struct regmap *regmap; }; struct hi3660_reset_controller { struct reset_controller_dev rst; struct regmap *map; }; enum mrq_reset_commands { CMD_RESET_ASSERT = 1, CMD_RESET_DEASSERT = 2, CMD_RESET_MODULE = 3, CMD_RESET_GET_MAX_ID = 4, CMD_RESET_MAX = 5, }; struct mrq_reset_request { uint32_t cmd; uint32_t reset_id; }; struct scmi_reset_proto_ops { 
int (*num_domains_get)(const struct scmi_protocol_handle *); const char * (*name_get)(const struct scmi_protocol_handle *, u32); int (*latency_get)(const struct scmi_protocol_handle *, u32); int (*reset)(const struct scmi_protocol_handle *, u32); int (*assert)(const struct scmi_protocol_handle *, u32); int (*deassert)(const struct scmi_protocol_handle *, u32); u64 android_kabi_reserved1; }; struct scmi_reset_data { struct reset_controller_dev rcdev; const struct scmi_protocol_handle *ph; }; struct reset_simple_devdata { u32 reg_offset; u32 nr_resets; bool active_low; bool status_active_low; }; struct reset_simple_data { spinlock_t lock; void *membase; struct reset_controller_dev rcdev; bool active_low; bool status_active_low; unsigned int reset_us; }; struct serial_icounter_struct { int cts; int dsr; int rng; int dcd; int rx; int tx; int frame; int overrun; int parity; int brk; int buf_overrun; int reserved[9]; }; struct serial_struct { int type; int line; unsigned int port; int irq; int flags; int xmit_fifo_size; int custom_divisor; int baud_base; unsigned short close_delay; char io_type; char reserved_char[1]; int hub6; unsigned short closing_wait; unsigned short closing_wait2; unsigned char *iomem_base; unsigned short iomem_reg_shift; unsigned int port_high; unsigned long iomap_base; }; struct serial_struct32 { compat_int_t type; compat_int_t line; compat_uint_t port; compat_int_t irq; compat_int_t flags; compat_int_t xmit_fifo_size; compat_int_t custom_divisor; compat_int_t baud_base; unsigned short close_delay; char io_type; char reserved_char; compat_int_t hub6; unsigned short closing_wait; unsigned short closing_wait2; compat_uint_t iomem_base; unsigned short iomem_reg_shift; unsigned int port_high; compat_int_t reserved; }; enum { ERASE = 0, WERASE = 1, KILL = 2, }; struct n_tty_data { size_t read_head; size_t commit_head; size_t canon_head; size_t echo_head; size_t echo_commit; size_t echo_mark; unsigned long char_map[4]; unsigned long overrun_time; unsigned int num_overrun; bool no_room; unsigned char lnext: 1; unsigned char erasing: 1; unsigned char raw: 1; unsigned char real_raw: 1; unsigned char icanon: 1; unsigned char push: 1; u8 read_buf[4096]; unsigned long read_flags[64]; u8 echo_buf[4096]; size_t read_tail; size_t line_start; size_t lookahead_count; unsigned int column; unsigned int canon_column; size_t echo_tail; struct mutex atomic_read_lock; struct mutex output_lock; }; struct termios { tcflag_t c_iflag; tcflag_t c_oflag; tcflag_t c_cflag; tcflag_t c_lflag; cc_t c_line; cc_t c_cc[19]; }; struct termios2 { tcflag_t c_iflag; tcflag_t c_oflag; tcflag_t c_cflag; tcflag_t c_lflag; cc_t c_line; cc_t c_cc[19]; speed_t c_ispeed; speed_t c_ospeed; }; struct termio { unsigned short c_iflag; unsigned short c_oflag; unsigned short c_cflag; unsigned short c_lflag; unsigned char c_line; unsigned char c_cc[8]; }; struct ldsem_waiter { struct list_head list; struct task_struct *task; }; struct tty_audit_buf { struct mutex mutex; dev_t dev; bool icanon; size_t valid; u8 *data; }; struct input_handle; struct input_value; struct input_dev; struct input_device_id; struct input_handler { void *private; void (*event)(struct input_handle *, unsigned int, unsigned int, int); void (*events)(struct input_handle *, const struct input_value *, unsigned int); bool (*filter)(struct input_handle *, unsigned int, unsigned int, int); bool (*match)(struct input_handler *, struct input_dev *); int (*connect)(struct input_handler *, struct input_dev *, const struct input_device_id *); void 
(*disconnect)(struct input_handle *); void (*start)(struct input_handle *); bool legacy_minors; int minor; const char *name; const struct input_device_id *id_table; struct list_head h_list; struct list_head node; u64 android_kabi_reserved1; }; struct input_handle { void *private; int open; const char *name; struct input_dev *dev; struct input_handler *handler; struct list_head d_node; struct list_head h_node; u64 android_kabi_reserved1; }; struct input_id { __u16 bustype; __u16 vendor; __u16 product; __u16 version; }; struct input_keymap_entry; struct ff_device; struct input_dev_poller; struct input_mt; struct input_absinfo; struct input_dev { const char *name; const char *phys; const char *uniq; struct input_id id; unsigned long propbit[1]; unsigned long evbit[1]; unsigned long keybit[12]; unsigned long relbit[1]; unsigned long absbit[1]; unsigned long mscbit[1]; unsigned long ledbit[1]; unsigned long sndbit[1]; unsigned long ffbit[2]; unsigned long swbit[1]; unsigned int hint_events_per_packet; unsigned int keycodemax; unsigned int keycodesize; void *keycode; int (*setkeycode)(struct input_dev *, const struct input_keymap_entry *, unsigned int *); int (*getkeycode)(struct input_dev *, struct input_keymap_entry *); struct ff_device *ff; struct input_dev_poller *poller; unsigned int repeat_key; struct timer_list timer; int rep[2]; struct input_mt *mt; struct input_absinfo *absinfo; unsigned long key[12]; unsigned long led[1]; unsigned long snd[1]; unsigned long sw[1]; int (*open)(struct input_dev *); void (*close)(struct input_dev *); int (*flush)(struct input_dev *, struct file *); int (*event)(struct input_dev *, unsigned int, unsigned int, int); struct input_handle __attribute__((btf_type_tag("rcu"))) *grab; spinlock_t event_lock; struct mutex mutex; unsigned int users; bool going_away; struct device dev; struct list_head h_list; struct list_head node; unsigned int num_vals; unsigned int max_vals; struct input_value *vals; bool devres_managed; ktime_t timestamp[3]; bool inhibited; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct input_keymap_entry { __u8 flags; __u8 len; __u16 index; __u32 keycode; __u8 scancode[32]; }; struct ff_effect; struct ff_device { int (*upload)(struct input_dev *, struct ff_effect *, struct ff_effect *); int (*erase)(struct input_dev *, int); int (*playback)(struct input_dev *, int, int); void (*set_gain)(struct input_dev *, u16); void (*set_autocenter)(struct input_dev *, u16); void (*destroy)(struct ff_device *); void *private; unsigned long ffbit[2]; struct mutex mutex; int max_effects; struct ff_effect *effects; u64 android_kabi_reserved1; struct file *effect_owners[0]; }; struct ff_envelope { __u16 attack_length; __u16 attack_level; __u16 fade_length; __u16 fade_level; }; struct ff_constant_effect { __s16 level; struct ff_envelope envelope; }; struct ff_ramp_effect { __s16 start_level; __s16 end_level; struct ff_envelope envelope; }; struct ff_periodic_effect { __u16 waveform; __u16 period; __s16 magnitude; __s16 offset; __u16 phase; struct ff_envelope envelope; __u32 custom_len; __s16 __attribute__((btf_type_tag("user"))) *custom_data; }; struct ff_condition_effect { __u16 right_saturation; __u16 left_saturation; __s16 right_coeff; __s16 left_coeff; __u16 deadband; __s16 center; }; struct ff_rumble_effect { __u16 strong_magnitude; __u16 weak_magnitude; }; struct ff_trigger { __u16 button; __u16 interval; }; struct ff_replay { __u16 length; __u16 delay; }; struct ff_effect { __u16 
type; __s16 id; __u16 direction; struct ff_trigger trigger; struct ff_replay replay; union { struct ff_constant_effect constant; struct ff_ramp_effect ramp; struct ff_periodic_effect periodic; struct ff_condition_effect condition[2]; struct ff_rumble_effect rumble; } u; }; struct input_absinfo { __s32 value; __s32 minimum; __s32 maximum; __s32 fuzz; __s32 flat; __s32 resolution; }; struct input_value { __u16 type; __u16 code; __s32 value; }; struct input_device_id { kernel_ulong_t flags; __u16 bustype; __u16 vendor; __u16 product; __u16 version; kernel_ulong_t evbit[1]; kernel_ulong_t keybit[12]; kernel_ulong_t relbit[1]; kernel_ulong_t absbit[1]; kernel_ulong_t mscbit[1]; kernel_ulong_t ledbit[1]; kernel_ulong_t sndbit[1]; kernel_ulong_t ffbit[2]; kernel_ulong_t swbit[1]; kernel_ulong_t propbit[1]; kernel_ulong_t driver_info; }; struct sysrq_state { struct input_handle handle; struct work_struct reinject_work; unsigned long key_down[12]; unsigned int alt; unsigned int alt_use; unsigned int shift; unsigned int shift_use; bool active; bool need_reinject; bool reinjecting; bool reset_canceled; bool reset_requested; unsigned long reset_keybit[12]; int reset_seq_len; int reset_seq_cnt; int reset_seq_version; struct timer_list keyreset_timer; }; struct earlycon_device; struct earlycon_id { char name[15]; char name_term; char compatible[128]; int (*setup)(struct earlycon_device *, const char *); }; struct uart_icount { __u32 cts; __u32 dsr; __u32 rng; __u32 dcd; __u32 rx; __u32 tx; __u32 frame; __u32 overrun; __u32 parity; __u32 brk; __u32 buf_overrun; }; typedef u64 upf_t; typedef unsigned int upstat_t; struct serial_rs485 { __u32 flags; __u32 delay_rts_before_send; __u32 delay_rts_after_send; union { __u32 padding[5]; struct { __u8 addr_recv; __u8 addr_dest; __u8 padding0[2]; __u32 padding1[4]; }; }; }; struct serial_iso7816 { __u32 flags; __u32 tg; __u32 sc_fi; __u32 sc_di; __u32 clk; __u32 reserved[5]; }; struct uart_state; struct uart_ops; struct serial_port_device; struct uart_port { spinlock_t lock; unsigned long iobase; unsigned char *membase; unsigned int (*serial_in)(struct uart_port *, int); void (*serial_out)(struct uart_port *, int, int); void (*set_termios)(struct uart_port *, struct ktermios *, const struct ktermios *); void (*set_ldisc)(struct uart_port *, struct ktermios *); unsigned int (*get_mctrl)(struct uart_port *); void (*set_mctrl)(struct uart_port *, unsigned int); unsigned int (*get_divisor)(struct uart_port *, unsigned int, unsigned int *); void (*set_divisor)(struct uart_port *, unsigned int, unsigned int, unsigned int); int (*startup)(struct uart_port *); void (*shutdown)(struct uart_port *); void (*throttle)(struct uart_port *); void (*unthrottle)(struct uart_port *); int (*handle_irq)(struct uart_port *); void (*pm)(struct uart_port *, unsigned int, unsigned int); void (*handle_break)(struct uart_port *); int (*rs485_config)(struct uart_port *, struct ktermios *, struct serial_rs485 *); int (*iso7816_config)(struct uart_port *, struct serial_iso7816 *); unsigned int ctrl_id; unsigned int port_id; unsigned int irq; unsigned long irqflags; unsigned int uartclk; unsigned int fifosize; unsigned char x_char; unsigned char regshift; unsigned char iotype; unsigned char quirks; unsigned int read_status_mask; unsigned int ignore_status_mask; struct uart_state *state; struct uart_icount icount; struct console *cons; upf_t flags; upstat_t status; bool hw_stopped; unsigned int mctrl; unsigned int frame_time; unsigned int type; const struct uart_ops *ops; unsigned int 
custom_divisor; unsigned int line; unsigned int minor; resource_size_t mapbase; resource_size_t mapsize; struct device *dev; struct serial_port_device *port_dev; unsigned long sysrq; u8 sysrq_ch; unsigned char has_sysrq; unsigned char sysrq_seq; unsigned char hub6; unsigned char suspended; unsigned char console_reinit; const char *name; struct attribute_group *attr_group; const struct attribute_group **tty_groups; struct serial_rs485 rs485; struct serial_rs485 rs485_supported; struct gpio_desc *rs485_term_gpio; struct gpio_desc *rs485_rx_during_tx_gpio; struct serial_iso7816 iso7816; void *private_data; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct earlycon_device { struct console *con; struct uart_port port; char options[32]; unsigned int baud; }; enum uart_pm_state { UART_PM_STATE_ON = 0, UART_PM_STATE_OFF = 3, UART_PM_STATE_UNDEFINED = 4, }; struct circ_buf { char *buf; int head; int tail; }; struct uart_state { struct tty_port port; enum uart_pm_state pm_state; struct circ_buf xmit; atomic_t refcount; wait_queue_head_t remove_wait; struct uart_port *uart_port; }; struct uart_ops { unsigned int (*tx_empty)(struct uart_port *); void (*set_mctrl)(struct uart_port *, unsigned int); unsigned int (*get_mctrl)(struct uart_port *); void (*stop_tx)(struct uart_port *); void (*start_tx)(struct uart_port *); void (*throttle)(struct uart_port *); void (*unthrottle)(struct uart_port *); void (*send_xchar)(struct uart_port *, char); void (*stop_rx)(struct uart_port *); void (*start_rx)(struct uart_port *); void (*enable_ms)(struct uart_port *); void (*break_ctl)(struct uart_port *, int); int (*startup)(struct uart_port *); void (*shutdown)(struct uart_port *); void (*flush_buffer)(struct uart_port *); void (*set_termios)(struct uart_port *, struct ktermios *, const struct ktermios *); void (*set_ldisc)(struct uart_port *, struct ktermios *); void (*pm)(struct uart_port *, unsigned int, unsigned int); const char * (*type)(struct uart_port *); void (*release_port)(struct uart_port *); int (*request_port)(struct uart_port *); void (*config_port)(struct uart_port *, int); int (*verify_port)(struct uart_port *, struct serial_struct *); int (*ioctl)(struct uart_port *, unsigned int, unsigned long); u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct hvc_struct; struct hv_ops { int (*get_chars)(uint32_t, char *, int); int (*put_chars)(uint32_t, const char *, int); int (*flush)(uint32_t, bool); int (*notifier_add)(struct hvc_struct *, int); void (*notifier_del)(struct hvc_struct *, int); void (*notifier_hangup)(struct hvc_struct *, int); int (*tiocmget)(struct hvc_struct *); int (*tiocmset)(struct hvc_struct *, unsigned int, unsigned int); void (*dtr_rts)(struct hvc_struct *, bool); }; struct hvc_struct { struct tty_port port; spinlock_t lock; int index; int do_wakeup; char *outbuf; int outbuf_size; int n_outbuf; uint32_t vtermno; const struct hv_ops *ops; int irq_requested; int data; struct winsize ws; struct work_struct tty_resize; struct list_head next; unsigned long flags; }; struct serial_port_device { struct device dev; struct uart_port *port; unsigned int tx_enabled: 1; }; struct serial_ctrl_device { struct device dev; struct ida port_ida; }; struct uart_driver { struct module *owner; const char *driver_name; const char *dev_name; int major; int minor; int nr; struct console *cons; struct uart_state *state; struct tty_driver *tty_driver; u64 android_kabi_reserved1; }; struct uart_match { struct uart_port *port; struct uart_driver *driver; }; struct mctrl_gpios; 
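/*
 * Illustrative initializer, not generated from BTF: a minimal sketch of how a
 * serial-core driver ties the types above together.  One struct uart_driver
 * describes the driver as a whole, each hardware port gets a struct uart_port,
 * and the per-port struct uart_state array, console pointer and tty_driver are
 * normally set up when the driver is registered with the serial core.  The
 * names "example_uart_driver" and "ttyEX" and the port count are assumptions
 * made for this example only.
 */
static struct uart_driver example_uart_driver = {
	.driver_name = "example_uart", /* internal name, used in kernel logs */
	.dev_name = "ttyEX",           /* device node prefix: /dev/ttyEX0, ... */
	.major = 0,                    /* 0: let the TTY layer choose a major */
	.minor = 0,
	.nr = 4,                       /* number of ports this driver can manage */
	/* .state, .cons and .tty_driver are filled in at registration time */
};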
struct uart_8250_dma; struct uart_8250_ops; struct uart_8250_em485; struct uart_8250_port { struct uart_port port; struct timer_list timer; struct list_head list; u32 capabilities; u16 bugs; unsigned int tx_loadsz; unsigned char acr; unsigned char fcr; unsigned char ier; unsigned char lcr; unsigned char mcr; unsigned char cur_iotype; unsigned int rpm_tx_active; unsigned char canary; unsigned char probe; struct mctrl_gpios *gpios; u16 lsr_saved_flags; u16 lsr_save_mask; unsigned char msr_saved_flags; struct uart_8250_dma *dma; const struct uart_8250_ops *ops; u32 (*dl_read)(struct uart_8250_port *); void (*dl_write)(struct uart_8250_port *, u32); struct uart_8250_em485 *em485; void (*rs485_start_tx)(struct uart_8250_port *); void (*rs485_stop_tx)(struct uart_8250_port *); struct delayed_work overrun_backoff; u32 overrun_backoff_time_ms; }; struct uart_8250_dma { int (*tx_dma)(struct uart_8250_port *); int (*rx_dma)(struct uart_8250_port *); void (*prepare_tx_dma)(struct uart_8250_port *); void (*prepare_rx_dma)(struct uart_8250_port *); dma_filter_fn fn; void *rx_param; void *tx_param; struct dma_slave_config rxconf; struct dma_slave_config txconf; struct dma_chan *rxchan; struct dma_chan *txchan; phys_addr_t rx_dma_addr; phys_addr_t tx_dma_addr; dma_addr_t rx_addr; dma_addr_t tx_addr; dma_cookie_t rx_cookie; dma_cookie_t tx_cookie; void *rx_buf; size_t rx_size; size_t tx_size; unsigned char tx_running; unsigned char tx_err; unsigned char rx_running; }; struct uart_8250_ops { int (*setup_irq)(struct uart_8250_port *); void (*release_irq)(struct uart_8250_port *); void (*setup_timer)(struct uart_8250_port *); }; struct uart_8250_em485 { struct hrtimer start_tx_timer; struct hrtimer stop_tx_timer; struct hrtimer *active_timer; struct uart_8250_port *port; unsigned int tx_stopped: 1; }; struct old_serial_port { unsigned int uart; unsigned int baud_base; unsigned int port; unsigned int irq; upf_t flags; unsigned char io_type; unsigned char *iomem_base; unsigned short iomem_reg_shift; }; enum { PLAT8250_DEV_LEGACY = -1, PLAT8250_DEV_PLATFORM = 0, PLAT8250_DEV_PLATFORM1 = 1, PLAT8250_DEV_PLATFORM2 = 2, PLAT8250_DEV_FOURPORT = 3, PLAT8250_DEV_ACCENT = 4, PLAT8250_DEV_BOCA = 5, PLAT8250_DEV_EXAR_ST16C554 = 6, PLAT8250_DEV_HUB6 = 7, PLAT8250_DEV_AU1X00 = 8, PLAT8250_DEV_SM501 = 9, }; struct irq_info { struct hlist_node node; int irq; spinlock_t lock; struct list_head *head; }; struct plat_serial8250_port { unsigned long iobase; void *membase; resource_size_t mapbase; resource_size_t mapsize; unsigned int uartclk; unsigned int irq; unsigned long irqflags; void *private_data; unsigned char regshift; unsigned char iotype; unsigned char hub6; unsigned char has_sysrq; unsigned int type; upf_t flags; u16 bugs; unsigned int (*serial_in)(struct uart_port *, int); void (*serial_out)(struct uart_port *, int, int); u32 (*dl_read)(struct uart_8250_port *); void (*dl_write)(struct uart_8250_port *, u32); void (*set_termios)(struct uart_port *, struct ktermios *, const struct ktermios *); void (*set_ldisc)(struct uart_port *, struct ktermios *); unsigned int (*get_mctrl)(struct uart_port *); int (*handle_irq)(struct uart_port *); void (*pm)(struct uart_port *, unsigned int, unsigned int); void (*handle_break)(struct uart_port *); }; struct serial8250_config { const char *name; unsigned short fifo_size; unsigned short tx_loadsz; unsigned char fcr; unsigned char rxtrig_bytes[4]; unsigned int flags; }; struct dw8250_port_data { int line; struct uart_8250_dma dma; u8 dlf_size; bool hw_rs485_support; }; struct 
dw8250_platform_data; struct dw8250_data { struct dw8250_port_data data; const struct dw8250_platform_data *pdata; int msr_mask_on; int msr_mask_off; struct clk *clk; struct clk *pclk; struct notifier_block clk_notifier; struct work_struct clk_work; struct reset_control *rst; unsigned int skip_autocfg: 1; unsigned int uart_16550_compatible: 1; }; struct dw8250_platform_data { u8 usr_reg; u32 cpr_val; unsigned int quirks; }; struct serial_private; struct pciserial_board; struct pci_serial_quirk { u32 vendor; u32 device; u32 subvendor; u32 subdevice; int (*probe)(struct pci_dev *); int (*init)(struct pci_dev *); int (*setup)(struct serial_private *, const struct pciserial_board *, struct uart_8250_port *, int); void (*exit)(struct pci_dev *); }; struct serial_private { struct pci_dev *dev; unsigned int nr; struct pci_serial_quirk *quirk; const struct pciserial_board *board; int line[0]; }; struct pciserial_board { unsigned int flags; unsigned int num_ports; unsigned int base_baud; unsigned int uart_offset; unsigned int reg_shift; unsigned int first_offset; }; struct timedia_struct { int num; const unsigned short *ids; }; enum pci_board_num_t { pbn_default = 0, pbn_b0_1_115200 = 1, pbn_b0_2_115200 = 2, pbn_b0_4_115200 = 3, pbn_b0_5_115200 = 4, pbn_b0_8_115200 = 5, pbn_b0_1_921600 = 6, pbn_b0_2_921600 = 7, pbn_b0_4_921600 = 8, pbn_b0_2_1130000 = 9, pbn_b0_4_1152000 = 10, pbn_b0_4_1250000 = 11, pbn_b0_2_1843200 = 12, pbn_b0_4_1843200 = 13, pbn_b0_1_15625000 = 14, pbn_b0_bt_1_115200 = 15, pbn_b0_bt_2_115200 = 16, pbn_b0_bt_4_115200 = 17, pbn_b0_bt_8_115200 = 18, pbn_b0_bt_1_460800 = 19, pbn_b0_bt_2_460800 = 20, pbn_b0_bt_4_460800 = 21, pbn_b0_bt_1_921600 = 22, pbn_b0_bt_2_921600 = 23, pbn_b0_bt_4_921600 = 24, pbn_b0_bt_8_921600 = 25, pbn_b1_1_115200 = 26, pbn_b1_2_115200 = 27, pbn_b1_4_115200 = 28, pbn_b1_8_115200 = 29, pbn_b1_16_115200 = 30, pbn_b1_1_921600 = 31, pbn_b1_2_921600 = 32, pbn_b1_4_921600 = 33, pbn_b1_8_921600 = 34, pbn_b1_2_1250000 = 35, pbn_b1_bt_1_115200 = 36, pbn_b1_bt_2_115200 = 37, pbn_b1_bt_4_115200 = 38, pbn_b1_bt_2_921600 = 39, pbn_b1_1_1382400 = 40, pbn_b1_2_1382400 = 41, pbn_b1_4_1382400 = 42, pbn_b1_8_1382400 = 43, pbn_b2_1_115200 = 44, pbn_b2_2_115200 = 45, pbn_b2_4_115200 = 46, pbn_b2_8_115200 = 47, pbn_b2_1_460800 = 48, pbn_b2_4_460800 = 49, pbn_b2_8_460800 = 50, pbn_b2_16_460800 = 51, pbn_b2_1_921600 = 52, pbn_b2_4_921600 = 53, pbn_b2_8_921600 = 54, pbn_b2_8_1152000 = 55, pbn_b2_bt_1_115200 = 56, pbn_b2_bt_2_115200 = 57, pbn_b2_bt_4_115200 = 58, pbn_b2_bt_2_921600 = 59, pbn_b2_bt_4_921600 = 60, pbn_b3_2_115200 = 61, pbn_b3_4_115200 = 62, pbn_b3_8_115200 = 63, pbn_b4_bt_2_921600 = 64, pbn_b4_bt_4_921600 = 65, pbn_b4_bt_8_921600 = 66, pbn_panacom = 67, pbn_panacom2 = 68, pbn_panacom4 = 69, pbn_plx_romulus = 70, pbn_oxsemi = 71, pbn_oxsemi_1_15625000 = 72, pbn_oxsemi_2_15625000 = 73, pbn_oxsemi_4_15625000 = 74, pbn_oxsemi_8_15625000 = 75, pbn_intel_i960 = 76, pbn_sgi_ioc3 = 77, pbn_computone_4 = 78, pbn_computone_6 = 79, pbn_computone_8 = 80, pbn_sbsxrsio = 81, pbn_pasemi_1682M = 82, pbn_ni8430_2 = 83, pbn_ni8430_4 = 84, pbn_ni8430_8 = 85, pbn_ni8430_16 = 86, pbn_ADDIDATA_PCIe_1_3906250 = 87, pbn_ADDIDATA_PCIe_2_3906250 = 88, pbn_ADDIDATA_PCIe_4_3906250 = 89, pbn_ADDIDATA_PCIe_8_3906250 = 90, pbn_ce4100_1_115200 = 91, pbn_omegapci = 92, pbn_NETMOS9900_2s_115200 = 93, pbn_brcm_trumanage = 94, pbn_fintek_4 = 95, pbn_fintek_8 = 96, pbn_fintek_12 = 97, pbn_fintek_F81504A = 98, pbn_fintek_F81508A = 99, pbn_fintek_F81512A = 100, pbn_wch382_2 = 101, pbn_wch384_4 = 102, 
pbn_wch384_8 = 103, pbn_sunix_pci_1s = 104, pbn_sunix_pci_2s = 105, pbn_sunix_pci_4s = 106, pbn_sunix_pci_8s = 107, pbn_sunix_pci_16s = 108, pbn_titan_1_4000000 = 109, pbn_titan_2_4000000 = 110, pbn_titan_4_4000000 = 111, pbn_titan_8_4000000 = 112, pbn_moxa8250_2p = 113, pbn_moxa8250_4p = 114, pbn_moxa8250_8p = 115, }; struct f815xxa_data { spinlock_t lock; int idx; }; struct pericom8250 { void *virt; unsigned int nr; int line[0]; }; struct of_serial_info { struct clk *clk; struct reset_control *rst; int type; int line; }; struct pl011_dmabuf { dma_addr_t dma; size_t len; char *buf; }; struct pl011_dmarx_data { struct dma_chan *chan; struct completion complete; bool use_buf_b; struct pl011_dmabuf dbuf_a; struct pl011_dmabuf dbuf_b; dma_cookie_t cookie; bool running; struct timer_list timer; unsigned int last_residue; unsigned long last_jiffies; bool auto_poll_rate; unsigned int poll_rate; unsigned int poll_timeout; }; struct pl011_dmatx_data { struct dma_chan *chan; dma_addr_t dma; size_t len; char *buf; bool queued; }; struct vendor_data; struct uart_amba_port { struct uart_port port; const u16 *reg_offset; struct clk *clk; const struct vendor_data *vendor; unsigned int dmacr; unsigned int im; unsigned int old_status; unsigned int fifosize; unsigned int fixed_baud; char type[12]; bool rs485_tx_started; unsigned int rs485_tx_drain_interval; bool using_tx_dma; bool using_rx_dma; struct pl011_dmarx_data dmarx; struct pl011_dmatx_data dmatx; bool dma_probed; }; struct vendor_data { const u16 *reg_offset; unsigned int ifls; unsigned int fr_busy; unsigned int fr_dsr; unsigned int fr_cts; unsigned int fr_ri; unsigned int inv_fr; bool access_32b; bool oversampling; bool dma_threshold; bool cts_event_workaround; bool always_enabled; bool fixed_options; unsigned int (*get_fifosize)(struct amba_device *); }; enum { REG_DR = 0, REG_ST_DMAWM = 1, REG_ST_TIMEOUT = 2, REG_FR = 3, REG_LCRH_RX = 4, REG_LCRH_TX = 5, REG_IBRD = 6, REG_FBRD = 7, REG_CR = 8, REG_IFLS = 9, REG_IMSC = 10, REG_RIS = 11, REG_MIS = 12, REG_ICR = 13, REG_DMACR = 14, REG_ST_XFCR = 15, REG_ST_XON1 = 16, REG_ST_XON2 = 17, REG_ST_XOFF1 = 18, REG_ST_XOFF2 = 19, REG_ST_ITCR = 20, REG_ST_ITIP = 21, REG_ST_ABCR = 22, REG_ST_ABIMSC = 23, REG_ARRAY_SIZE = 24, }; struct amba_pl011_data { bool (*dma_filter)(struct dma_chan *, void *); void *dma_rx_param; void *dma_tx_param; bool dma_rx_poll_enable; unsigned int dma_rx_poll_rate; unsigned int dma_rx_poll_timeout; void (*init)(); void (*exit)(); }; struct s3c24xx_uart_info; struct s3c24xx_serial_drv_data; struct s3c2410_uartcfg; struct s3c24xx_uart_dma; struct s3c24xx_uart_port { unsigned char rx_claimed; unsigned char tx_claimed; unsigned char rx_enabled; unsigned char tx_enabled; unsigned int pm_level; unsigned long baudclk_rate; unsigned int min_dma_size; unsigned int rx_irq; unsigned int tx_irq; unsigned int tx_in_progress; unsigned int tx_mode; unsigned int rx_mode; const struct s3c24xx_uart_info *info; struct clk *clk; struct clk *baudclk; struct uart_port port; const struct s3c24xx_serial_drv_data *drv_data; const struct s3c2410_uartcfg *cfg; struct s3c24xx_uart_dma *dma; }; enum s3c24xx_port_type { TYPE_S3C24XX = 0, TYPE_S3C6400 = 1, TYPE_APPLE_S5L = 2, }; struct s3c24xx_uart_info { const char *name; enum s3c24xx_port_type type; unsigned int port_type; unsigned int fifosize; unsigned long rx_fifomask; unsigned long rx_fifoshift; unsigned long rx_fifofull; unsigned long tx_fifomask; unsigned long tx_fifoshift; unsigned long tx_fifofull; unsigned int def_clk_sel; unsigned long num_clks; 
unsigned long clksel_mask; unsigned long clksel_shift; unsigned long ucon_mask; unsigned int has_divslot: 1; }; struct s3c2410_uartcfg { unsigned char hwport; unsigned char unused; unsigned short flags; upf_t uart_flags; unsigned int clk_sel; unsigned int has_fracval; unsigned long ucon; unsigned long ulcon; unsigned long ufcon; }; struct s3c24xx_serial_drv_data { const struct s3c24xx_uart_info info; const struct s3c2410_uartcfg def_cfg; const unsigned int fifosize[12]; }; struct s3c24xx_uart_dma { unsigned int rx_chan_id; unsigned int tx_chan_id; struct dma_slave_config rx_conf; struct dma_slave_config tx_conf; struct dma_chan *rx_chan; struct dma_chan *tx_chan; dma_addr_t rx_addr; dma_addr_t tx_addr; dma_cookie_t rx_cookie; dma_cookie_t tx_cookie; char *rx_buf; dma_addr_t tx_transfer_addr; size_t rx_size; size_t tx_size; struct dma_async_tx_descriptor *tx_desc; struct dma_async_tx_descriptor *rx_desc; int tx_bytes_requested; int rx_bytes_requested; }; struct samsung_early_console_data { u32 txfull_mask; u32 rxfifo_mask; }; struct qcom_geni_private_data { struct uart_driver *drv; u32 poll_cached_bytes; unsigned int poll_cached_bytes_cnt; u32 write_cached_bytes; unsigned int write_cached_bytes_cnt; }; struct qcom_geni_device_data; struct qcom_geni_serial_port { struct uart_port uport; struct geni_se se; const char *name; u32 tx_fifo_depth; u32 tx_fifo_width; u32 rx_fifo_depth; dma_addr_t tx_dma_addr; dma_addr_t rx_dma_addr; bool setup; unsigned int baud; unsigned long clk_rate; void *rx_buf; u32 loopback; bool brk; unsigned int tx_remaining; int wakeup_irq; bool rx_tx_swap; bool cts_rts_swap; struct qcom_geni_private_data private_data; const struct qcom_geni_device_data *dev_data; }; struct qcom_geni_device_data { bool console; enum geni_se_xfer_mode mode; }; enum geni_icc_path_index { GENI_TO_CORE = 0, CPU_TO_GENI = 1, GENI_TO_DDR = 2, }; struct tegra_tcu { struct uart_driver driver; struct console console; struct uart_port port; struct mbox_client tx_client; struct mbox_client rx_client; struct mbox_chan *tx; struct mbox_chan *rx; }; struct sprd_uart_dma { struct dma_chan *chn; unsigned char *virt; dma_addr_t phys_addr; dma_cookie_t cookie; u32 trans_len; bool enable; }; struct sprd_uart_port { struct uart_port port; char name[16]; struct clk *clk; struct sprd_uart_dma tx_dma; struct sprd_uart_dma rx_dma; dma_addr_t pos; unsigned char *rx_buf_tail; }; enum sprd_dma_chn_mode { SPRD_DMA_CHN_MODE_NONE = 0, SPRD_DMA_SRC_CHN0 = 1, SPRD_DMA_SRC_CHN1 = 2, SPRD_DMA_DST_CHN0 = 3, SPRD_DMA_DST_CHN1 = 4, }; enum sprd_dma_trg_mode { SPRD_DMA_NO_TRG = 0, SPRD_DMA_FRAG_DONE_TRG = 1, SPRD_DMA_BLOCK_DONE_TRG = 2, SPRD_DMA_TRANS_DONE_TRG = 3, SPRD_DMA_LIST_DONE_TRG = 4, }; enum sprd_dma_req_mode { SPRD_DMA_FRAG_REQ = 0, SPRD_DMA_BLK_REQ = 1, SPRD_DMA_TRANS_REQ = 2, SPRD_DMA_LIST_REQ = 3, }; enum sprd_dma_int_type { SPRD_DMA_NO_INT = 0, SPRD_DMA_FRAG_INT = 1, SPRD_DMA_BLK_INT = 2, SPRD_DMA_BLK_FRAG_INT = 3, SPRD_DMA_TRANS_INT = 4, SPRD_DMA_TRANS_FRAG_INT = 5, SPRD_DMA_TRANS_BLK_INT = 6, SPRD_DMA_LIST_INT = 7, SPRD_DMA_CFGERR_INT = 8, }; enum mctrl_gpio_idx { UART_GPIO_CTS = 0, UART_GPIO_DSR = 1, UART_GPIO_DCD = 2, UART_GPIO_RNG = 3, UART_GPIO_RI = 3, UART_GPIO_RTS = 4, UART_GPIO_DTR = 5, UART_GPIO_MAX = 6, }; struct mctrl_gpios { struct uart_port *port; struct gpio_desc *gpio[6]; int irq[6]; unsigned int mctrl_prev; bool mctrl_on; }; enum serdev_parity { SERDEV_PARITY_NONE = 0, SERDEV_PARITY_EVEN = 1, SERDEV_PARITY_ODD = 2, }; struct serdev_controller; struct serdev_device_ops; struct serdev_device { 
struct device dev; int nr; struct serdev_controller *ctrl; const struct serdev_device_ops *ops; struct completion write_comp; struct mutex write_lock; }; struct serdev_controller_ops; struct serdev_controller { struct device dev; unsigned int nr; struct serdev_device *serdev; const struct serdev_controller_ops *ops; }; struct serdev_controller_ops { int (*write_buf)(struct serdev_controller *, const unsigned char *, size_t); void (*write_flush)(struct serdev_controller *); int (*write_room)(struct serdev_controller *); int (*open)(struct serdev_controller *); void (*close)(struct serdev_controller *); void (*set_flow_control)(struct serdev_controller *, bool); int (*set_parity)(struct serdev_controller *, enum serdev_parity); unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int); void (*wait_until_sent)(struct serdev_controller *, long); int (*get_tiocm)(struct serdev_controller *); int (*set_tiocm)(struct serdev_controller *, unsigned int, unsigned int); int (*break_ctl)(struct serdev_controller *, unsigned int); }; struct serdev_device_ops { int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t); void (*write_wakeup)(struct serdev_device *); }; struct serdev_device_driver { struct device_driver driver; int (*probe)(struct serdev_device *); void (*remove)(struct serdev_device *); }; struct serport { struct tty_port *port; struct tty_struct *tty; struct tty_driver *tty_drv; int tty_idx; unsigned long flags; }; struct memdev { const char *name; const struct file_operations *fops; fmode_t fmode; umode_t mode; }; struct timer_rand_state { unsigned long last_time; long last_delta; long last_delta2; }; enum { CRNG_EMPTY = 0, CRNG_EARLY = 1, CRNG_READY = 2, }; struct batch_u8 { u8 entropy[96]; local_lock_t lock; unsigned long generation; unsigned int position; }; struct batch_u16 { u16 entropy[48]; local_lock_t lock; unsigned long generation; unsigned int position; }; struct batch_u32 { u32 entropy[24]; local_lock_t lock; unsigned long generation; unsigned int position; }; struct batch_u64 { u64 entropy[12]; local_lock_t lock; unsigned long generation; unsigned int position; }; struct crng { u8 key[32]; unsigned long generation; local_lock_t lock; }; struct fast_pool { unsigned long pool[4]; unsigned long last; unsigned int count; struct timer_list mix; }; enum { MIX_INFLIGHT = 2147483648, }; enum { POOL_BITS = 256, POOL_READY_BITS = 256, POOL_EARLY_BITS = 128, }; enum { CRNG_RESEED_START_INTERVAL = 250, CRNG_RESEED_INTERVAL = 15000, }; enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = 16, }; struct entropy_timer_state { unsigned long entropy; struct timer_list timer; atomic_t samples; unsigned int samples_per_bit; }; struct hwrng { const char *name; int (*init)(struct hwrng *); void (*cleanup)(struct hwrng *); int (*data_present)(struct hwrng *, int); int (*data_read)(struct hwrng *, u32 *); int (*read)(struct hwrng *, void *, size_t, bool); unsigned long priv; unsigned short quality; struct list_head list; struct kref ref; struct completion cleanup_done; struct completion dying; }; struct hisi_rng { void *base; struct hwrng rng; }; struct histb_rng_priv { struct hwrng rng; void *base; }; struct cn10k_rng { void *reg_base; struct hwrng ops; struct pci_dev *pdev; bool extended_trng_regs; }; struct iommu_group { struct kobject kobj; struct kobject *devices_kobj; struct list_head devices; struct xarray pasid_array; struct mutex mutex; void *iommu_data; void (*iommu_data_release)(void *); char *name; int id; struct iommu_domain *default_domain; struct 
iommu_domain *blocking_domain; struct iommu_domain *domain; struct list_head entry; unsigned int owner_cnt; void *owner; }; struct iommu_group_attribute { struct attribute attr; ssize_t (*show)(struct iommu_group *, char *); ssize_t (*store)(struct iommu_group *, const char *, size_t); }; enum iommu_fault_type { IOMMU_FAULT_DMA_UNRECOV = 1, IOMMU_FAULT_PAGE_REQ = 2, }; enum fsl_mc_pool_type { FSL_MC_POOL_DPMCP = 0, FSL_MC_POOL_DPBP = 1, FSL_MC_POOL_DPCON = 2, FSL_MC_POOL_IRQ = 3, FSL_MC_NUM_POOL_TYPES = 4, }; enum iommu_resv_type { IOMMU_RESV_DIRECT = 0, IOMMU_RESV_DIRECT_RELAXABLE = 1, IOMMU_RESV_RESERVED = 2, IOMMU_RESV_MSI = 3, IOMMU_RESV_SW_MSI = 4, }; enum { IOMMU_SET_DOMAIN_MUST_SUCCEED = 1, }; struct group_device { struct list_head list; struct device *dev; char *name; }; struct fsl_mc_obj_desc { char type[16]; int id; u16 vendor; u16 ver_major; u16 ver_minor; u8 irq_count; u8 region_count; u32 state; char label[16]; u16 flags; }; struct fsl_mc_io; struct fsl_mc_device_irq; struct fsl_mc_resource; struct fsl_mc_device { struct device dev; u64 dma_mask; u16 flags; u32 icid; u16 mc_handle; struct fsl_mc_io *mc_io; struct fsl_mc_obj_desc obj_desc; struct resource *regions; struct fsl_mc_device_irq **irqs; struct fsl_mc_resource *resource; struct device_link *consumer_link; const char *driver_override; }; struct fsl_mc_io { struct device *dev; u16 flags; u32 portal_size; phys_addr_t portal_phys_addr; void *portal_virt_addr; struct fsl_mc_device *dpmcp_dev; union { struct mutex mutex; raw_spinlock_t spinlock; }; }; struct fsl_mc_resource_pool; struct fsl_mc_resource { enum fsl_mc_pool_type type; s32 id; void *data; struct fsl_mc_resource_pool *parent_pool; struct list_head node; }; struct fsl_mc_device_irq { unsigned int virq; struct fsl_mc_device *mc_dev; u8 dev_irq_index; struct fsl_mc_resource resource; }; struct iommu_resv_region { struct list_head list; phys_addr_t start; size_t length; int prot; enum iommu_resv_type type; void (*free)(struct device *, struct iommu_resv_region *); }; struct group_for_pci_data { struct pci_dev *pdev; struct iommu_group *group; }; typedef void (*btf_trace_add_device_to_group)(void *, int, struct device *); typedef void (*btf_trace_remove_device_from_group)(void *, int, struct device *); typedef void (*btf_trace_attach_device_to_domain)(void *, struct device *); typedef void (*btf_trace_map)(void *, unsigned long, phys_addr_t, size_t); typedef void (*btf_trace_unmap)(void *, unsigned long, size_t, size_t); typedef void (*btf_trace_io_page_fault)(void *, struct device *, unsigned long, int); struct trace_event_raw_iommu_group_event { struct trace_entry ent; int gid; u32 __data_loc_device; char __data[0]; }; struct trace_event_raw_iommu_device_event { struct trace_entry ent; u32 __data_loc_device; char __data[0]; }; struct trace_event_raw_map { struct trace_entry ent; u64 iova; u64 paddr; size_t size; char __data[0]; }; struct trace_event_raw_unmap { struct trace_entry ent; u64 iova; size_t size; size_t unmapped_size; char __data[0]; }; struct trace_event_raw_iommu_error { struct trace_entry ent; u32 __data_loc_device; u32 __data_loc_driver; u64 iova; int flags; char __data[0]; }; struct trace_event_data_offsets_iommu_group_event { u32 device; }; struct trace_event_data_offsets_iommu_device_event { u32 device; }; struct trace_event_data_offsets_map {}; struct trace_event_data_offsets_unmap {}; struct trace_event_data_offsets_iommu_error { u32 device; u32 driver; }; struct iova { struct rb_node node; unsigned long pfn_hi; unsigned long pfn_lo; }; struct 
iova_rcache; struct iova_domain { spinlock_t iova_rbtree_lock; struct rb_root rbroot; struct rb_node *cached_node; struct rb_node *cached32_node; unsigned long granule; unsigned long start_pfn; unsigned long dma_32bit_pfn; unsigned long max32_alloc_size; struct iova anchor; struct iova_rcache *rcaches; struct hlist_node cpuhp_dead; u64 android_vendor_data1; }; enum iommu_dma_cookie_type { IOMMU_DMA_IOVA_COOKIE = 0, IOMMU_DMA_MSI_COOKIE = 1, }; struct iova_fq; struct iommu_dma_cookie { enum iommu_dma_cookie_type type; union { struct { struct iova_domain iovad; struct iova_fq __attribute__((btf_type_tag("percpu"))) *fq; atomic64_t fq_flush_start_cnt; atomic64_t fq_flush_finish_cnt; struct timer_list fq_timer; atomic_t fq_timer_on; }; dma_addr_t msi_iova; }; struct list_head msi_page_list; struct iommu_domain *fq_domain; struct mutex mutex; }; struct iova_fq_entry { unsigned long iova_pfn; unsigned long pages; struct list_head freelist; u64 counter; }; struct iova_fq { struct iova_fq_entry entries[256]; unsigned int head; unsigned int tail; spinlock_t lock; }; struct iommu_dma_msi_page { struct list_head list; dma_addr_t iova; phys_addr_t phys; }; struct io_pgtable; struct io_pgtable_cfg; struct io_pgtable_init_fns { struct io_pgtable * (*alloc)(struct io_pgtable_cfg *, void *); void (*free)(struct io_pgtable *); int (*configure)(struct io_pgtable_cfg *, size_t *); }; enum io_pgtable_fmt { ARM_32_LPAE_S1 = 0, ARM_32_LPAE_S2 = 1, ARM_64_LPAE_S1 = 2, ARM_64_LPAE_S2 = 3, ARM_V7S = 4, ARM_MALI_LPAE = 5, AMD_IOMMU_V1 = 6, AMD_IOMMU_V2 = 7, APPLE_DART = 8, APPLE_DART2 = 9, IO_PGTABLE_NUM_FMTS = 10, }; struct iommu_flush_ops; struct io_pgtable_cfg { enum io_pgtable_fmt fmt; unsigned long quirks; unsigned long pgsize_bitmap; unsigned int ias; unsigned int oas; bool coherent_walk; const struct iommu_flush_ops *tlb; struct device *iommu_dev; union { struct { u64 ttbr; struct { u32 ips: 3; u32 tg: 2; u32 sh: 2; u32 orgn: 2; u32 irgn: 2; u32 tsz: 6; } tcr; u64 mair; } arm_lpae_s1_cfg; struct { u64 vttbr; struct { u32 ps: 3; u32 tg: 2; u32 sh: 2; u32 orgn: 2; u32 irgn: 2; u32 sl: 2; u32 tsz: 6; } vtcr; } arm_lpae_s2_cfg; struct { u32 ttbr; u32 tcr; u32 nmrr; u32 prrr; } arm_v7s_cfg; struct { u64 transtab; u64 memattr; } arm_mali_lpae_cfg; struct { u64 ttbr[4]; u32 n_ttbrs; } apple_dart_cfg; }; }; struct io_pgtable_walker; struct io_pgtable_ops { int (*map_pages)(struct io_pgtable_ops *, unsigned long, phys_addr_t, size_t, size_t, int, gfp_t, size_t *); size_t (*unmap_pages)(struct io_pgtable_ops *, unsigned long, size_t, size_t, struct iommu_iotlb_gather *); size_t (*unmap_pages_walk)(struct io_pgtable_ops *, unsigned long, size_t, size_t, struct iommu_iotlb_gather *, struct io_pgtable_walker *); phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *, unsigned long); }; struct io_pgtable { enum io_pgtable_fmt fmt; void *cookie; struct io_pgtable_cfg cfg; struct io_pgtable_ops ops; }; struct iommu_flush_ops { void (*tlb_flush_all)(void *); void (*tlb_flush_walk)(unsigned long, size_t, size_t, void *); void (*tlb_add_page)(struct iommu_iotlb_gather *, unsigned long, size_t, void *); }; struct io_pgtable_ctxt; struct io_pgtable_walker { void (*cb)(struct io_pgtable_ctxt *); void * const arg; }; struct io_pgtable_ctxt { void *arg; u64 addr; size_t size; }; typedef u32 arm_v7s_iopte; struct arm_v7s_io_pgtable { struct io_pgtable iop; arm_v7s_iopte *pgd; struct kmem_cache *l2_tables; spinlock_t split_lock; }; struct arm_lpae_io_pgtable { struct io_pgtable iop; int pgd_bits; int start_level; int bits_per_level; 
void *pgd; bool idmapped; }; typedef u64 arm_lpae_iopte; struct iova_magazine; struct iova_cpu_rcache { spinlock_t lock; struct iova_magazine *loaded; struct iova_magazine *prev; }; struct iova_magazine { unsigned long size; unsigned long pfns[127]; }; struct iova_rcache { spinlock_t lock; unsigned long depot_size; struct iova_magazine *depot[32]; struct iova_cpu_rcache __attribute__((btf_type_tag("percpu"))) *cpu_rcaches; }; struct of_phandle_iterator { const char *cells_name; int cell_count; const struct device_node *parent; const __be32 *list_end; const __be32 *phandle_end; const __be32 *cur; uint32_t cur_count; phandle phandle; struct device_node *node; }; struct of_pci_iommu_alias_info { struct device *dev; struct device_node *np; }; enum switch_power_state { DRM_SWITCH_POWER_ON = 0, DRM_SWITCH_POWER_OFF = 1, DRM_SWITCH_POWER_CHANGING = 2, DRM_SWITCH_POWER_DYNAMIC_OFF = 3, }; struct drm_modeset_lock { struct ww_mutex mutex; struct list_head head; }; struct drm_modeset_acquire_ctx; struct drm_mode_config_funcs; struct drm_property; struct drm_atomic_state; struct drm_mode_config_helper_funcs; struct drm_mode_config { struct mutex mutex; struct drm_modeset_lock connection_mutex; struct drm_modeset_acquire_ctx *acquire_ctx; struct mutex idr_mutex; struct idr object_idr; struct idr tile_idr; struct mutex fb_lock; int num_fb; struct list_head fb_list; spinlock_t connector_list_lock; int num_connector; struct ida connector_ida; struct list_head connector_list; struct llist_head connector_free_list; struct work_struct connector_free_work; int num_encoder; struct list_head encoder_list; int num_total_plane; struct list_head plane_list; int num_crtc; struct list_head crtc_list; struct list_head property_list; struct list_head privobj_list; int min_width; int min_height; int max_width; int max_height; const struct drm_mode_config_funcs *funcs; bool poll_enabled; bool poll_running; bool delayed_event; struct delayed_work output_poll_work; struct mutex blob_lock; struct list_head property_blob_list; struct drm_property *edid_property; struct drm_property *dpms_property; struct drm_property *path_property; struct drm_property *tile_property; struct drm_property *link_status_property; struct drm_property *plane_type_property; struct drm_property *prop_src_x; struct drm_property *prop_src_y; struct drm_property *prop_src_w; struct drm_property *prop_src_h; struct drm_property *prop_crtc_x; struct drm_property *prop_crtc_y; struct drm_property *prop_crtc_w; struct drm_property *prop_crtc_h; struct drm_property *prop_fb_id; struct drm_property *prop_in_fence_fd; struct drm_property *prop_out_fence_ptr; struct drm_property *prop_crtc_id; struct drm_property *prop_fb_damage_clips; struct drm_property *prop_active; struct drm_property *prop_mode_id; struct drm_property *prop_vrr_enabled; struct drm_property *dvi_i_subconnector_property; struct drm_property *dvi_i_select_subconnector_property; struct drm_property *dp_subconnector_property; struct drm_property *tv_subconnector_property; struct drm_property *tv_select_subconnector_property; struct drm_property *legacy_tv_mode_property; struct drm_property *tv_mode_property; struct drm_property *tv_left_margin_property; struct drm_property *tv_right_margin_property; struct drm_property *tv_top_margin_property; struct drm_property *tv_bottom_margin_property; struct drm_property *tv_brightness_property; struct drm_property *tv_contrast_property; struct drm_property *tv_flicker_reduction_property; struct drm_property *tv_overscan_property; struct drm_property 
*tv_saturation_property; struct drm_property *tv_hue_property; struct drm_property *scaling_mode_property; struct drm_property *aspect_ratio_property; struct drm_property *content_type_property; struct drm_property *degamma_lut_property; struct drm_property *degamma_lut_size_property; struct drm_property *ctm_property; struct drm_property *gamma_lut_property; struct drm_property *gamma_lut_size_property; struct drm_property *suggested_x_property; struct drm_property *suggested_y_property; struct drm_property *non_desktop_property; struct drm_property *panel_orientation_property; struct drm_property *writeback_fb_id_property; struct drm_property *writeback_pixel_formats_property; struct drm_property *writeback_out_fence_ptr_property; struct drm_property *hdr_output_metadata_property; struct drm_property *content_protection_property; struct drm_property *hdcp_content_type_property; uint32_t preferred_depth; uint32_t prefer_shadow; bool quirk_addfb_prefer_xbgr_30bpp; bool quirk_addfb_prefer_host_byte_order; bool async_page_flip; bool fb_modifiers_not_supported; bool normalize_zpos; struct drm_property *modifiers_property; uint32_t cursor_width; uint32_t cursor_height; struct drm_atomic_state *suspend_state; const struct drm_mode_config_helper_funcs *helper_private; }; struct drm_vram_mm; struct drm_driver; struct drm_minor; struct drm_master; struct drm_vblank_crtc; struct drm_vma_offset_manager; struct drm_fb_helper; struct drm_device { int if_version; struct kref ref; struct device *dev; struct { struct list_head resources; void *final_kfree; spinlock_t lock; } managed; const struct drm_driver *driver; void *dev_private; struct drm_minor *primary; struct drm_minor *render; struct drm_minor *accel; bool registered; struct drm_master *master; u32 driver_features; bool unplugged; struct inode *anon_inode; char *unique; struct mutex struct_mutex; struct mutex master_mutex; atomic_t open_count; struct mutex filelist_mutex; struct list_head filelist; struct list_head filelist_internal; struct mutex clientlist_mutex; struct list_head clientlist; bool vblank_disable_immediate; struct drm_vblank_crtc *vblank; spinlock_t vblank_time_lock; spinlock_t vbl_lock; u32 max_vblank_count; struct list_head vblank_event_list; spinlock_t event_lock; unsigned int num_crtcs; struct drm_mode_config mode_config; struct mutex object_name_lock; struct idr object_name_idr; struct drm_vma_offset_manager *vma_offset_manager; struct drm_vram_mm *vram_mm; enum switch_power_state switch_power_state; struct drm_fb_helper *fb_helper; struct mutex debugfs_mutex; struct list_head debugfs_list; }; struct drm_file; struct drm_gem_object; struct dma_buf; struct dma_buf_attachment; struct drm_mode_create_dumb; struct drm_printer; struct drm_ioctl_desc; struct drm_driver { int (*load)(struct drm_device *, unsigned long); int (*open)(struct drm_device *, struct drm_file *); void (*postclose)(struct drm_device *, struct drm_file *); void (*lastclose)(struct drm_device *); void (*unload)(struct drm_device *); void (*release)(struct drm_device *); void (*master_set)(struct drm_device *, struct drm_file *, bool); void (*master_drop)(struct drm_device *, struct drm_file *); void (*debugfs_init)(struct drm_minor *); struct drm_gem_object * (*gem_create_object)(struct drm_device *, size_t); int (*prime_handle_to_fd)(struct drm_device *, struct drm_file *, uint32_t, uint32_t, int *); int (*prime_fd_to_handle)(struct drm_device *, struct drm_file *, int, uint32_t *); struct drm_gem_object * (*gem_prime_import)(struct drm_device *, struct 
dma_buf *); struct drm_gem_object * (*gem_prime_import_sg_table)(struct drm_device *, struct dma_buf_attachment *, struct sg_table *); int (*dumb_create)(struct drm_file *, struct drm_device *, struct drm_mode_create_dumb *); int (*dumb_map_offset)(struct drm_file *, struct drm_device *, uint32_t, uint64_t *); void (*show_fdinfo)(struct drm_printer *, struct drm_file *); int major; int minor; int patchlevel; char *name; char *desc; char *date; u32 driver_features; const struct drm_ioctl_desc *ioctls; int num_ioctls; const struct file_operations *fops; }; struct drm_mode_create_dumb { __u32 height; __u32 width; __u32 bpp; __u32 flags; __u32 handle; __u32 pitch; __u64 size; }; struct drm_printer { void (*printfn)(struct drm_printer *, struct va_format *); void (*puts)(struct drm_printer *, const char *); void *arg; const char *prefix; }; enum drm_ioctl_flags { DRM_AUTH = 1, DRM_MASTER = 2, DRM_ROOT_ONLY = 4, DRM_UNLOCKED = 16, DRM_RENDER_ALLOW = 32, }; typedef int drm_ioctl_t(struct drm_device *, void *, struct drm_file *); struct drm_ioctl_desc { unsigned int cmd; enum drm_ioctl_flags flags; drm_ioctl_t *func; const char *name; }; struct drm_master { struct kref refcount; struct drm_device *dev; char *unique; int unique_len; struct idr magic_map; void *driver_priv; struct drm_master *lessor; int lessee_id; struct list_head lessee_list; struct list_head lessees; struct idr leases; struct idr lessee_idr; }; struct drm_modeset_acquire_ctx { struct ww_acquire_ctx ww_ctx; struct drm_modeset_lock *contended; depot_stack_handle_t stack_depot; struct list_head locked; bool trylock_only; bool interruptible; }; enum drm_mode_status { MODE_OK = 0, MODE_HSYNC = 1, MODE_VSYNC = 2, MODE_H_ILLEGAL = 3, MODE_V_ILLEGAL = 4, MODE_BAD_WIDTH = 5, MODE_NOMODE = 6, MODE_NO_INTERLACE = 7, MODE_NO_DBLESCAN = 8, MODE_NO_VSCAN = 9, MODE_MEM = 10, MODE_VIRTUAL_X = 11, MODE_VIRTUAL_Y = 12, MODE_MEM_VIRT = 13, MODE_NOCLOCK = 14, MODE_CLOCK_HIGH = 15, MODE_CLOCK_LOW = 16, MODE_CLOCK_RANGE = 17, MODE_BAD_HVALUE = 18, MODE_BAD_VVALUE = 19, MODE_BAD_VSCAN = 20, MODE_HSYNC_NARROW = 21, MODE_HSYNC_WIDE = 22, MODE_HBLANK_NARROW = 23, MODE_HBLANK_WIDE = 24, MODE_VSYNC_NARROW = 25, MODE_VSYNC_WIDE = 26, MODE_VBLANK_NARROW = 27, MODE_VBLANK_WIDE = 28, MODE_PANEL = 29, MODE_INTERLACE_WIDTH = 30, MODE_ONE_WIDTH = 31, MODE_ONE_HEIGHT = 32, MODE_ONE_SIZE = 33, MODE_NO_REDUCED = 34, MODE_NO_STEREO = 35, MODE_NO_420 = 36, MODE_STALE = -3, MODE_BAD = -2, MODE_ERROR = -1, }; struct drm_framebuffer; struct drm_mode_fb_cmd2; struct drm_format_info; struct drm_display_mode; struct drm_mode_config_funcs { struct drm_framebuffer * (*fb_create)(struct drm_device *, struct drm_file *, const struct drm_mode_fb_cmd2 *); const struct drm_format_info * (*get_format_info)(const struct drm_mode_fb_cmd2 *); void (*output_poll_changed)(struct drm_device *); enum drm_mode_status (*mode_valid)(struct drm_device *, const struct drm_display_mode *); int (*atomic_check)(struct drm_device *, struct drm_atomic_state *); int (*atomic_commit)(struct drm_device *, struct drm_atomic_state *, bool); struct drm_atomic_state * (*atomic_state_alloc)(struct drm_device *); void (*atomic_state_clear)(struct drm_atomic_state *); void (*atomic_state_free)(struct drm_atomic_state *); }; struct drm_mode_fb_cmd2 { __u32 fb_id; __u32 width; __u32 height; __u32 pixel_format; __u32 flags; __u32 handles[4]; __u32 pitches[4]; __u32 offsets[4]; __u64 modifier[4]; }; struct drm_format_info { u32 format; u8 depth; u8 num_planes; union { u8 cpp[4]; u8 char_per_block[4]; }; u8 
block_w[4]; u8 block_h[4]; u8 hsub; u8 vsub; bool has_alpha; bool is_yuv; bool is_color_indexed; }; struct drm_display_mode { int clock; u16 hdisplay; u16 hsync_start; u16 hsync_end; u16 htotal; u16 hskew; u16 vdisplay; u16 vsync_start; u16 vsync_end; u16 vtotal; u16 vscan; u32 flags; int crtc_clock; u16 crtc_hdisplay; u16 crtc_hblank_start; u16 crtc_hblank_end; u16 crtc_hsync_start; u16 crtc_hsync_end; u16 crtc_htotal; u16 crtc_hskew; u16 crtc_vdisplay; u16 crtc_vblank_start; u16 crtc_vblank_end; u16 crtc_vsync_start; u16 crtc_vsync_end; u16 crtc_vtotal; u16 width_mm; u16 height_mm; u8 type; bool expose_to_userspace; struct list_head head; char name[32]; enum drm_mode_status status; enum hdmi_picture_aspect picture_aspect_ratio; }; struct drm_mode_config_helper_funcs { void (*atomic_commit_tail)(struct drm_atomic_state *); int (*atomic_commit_setup)(struct drm_atomic_state *); }; struct drm_debugfs_info { const char *name; int (*show)(struct seq_file *, void *); u32 driver_features; void *data; }; enum drm_gem_object_status { DRM_GEM_OBJECT_RESIDENT = 1, DRM_GEM_OBJECT_PURGEABLE = 2, }; enum drm_color_encoding { DRM_COLOR_YCBCR_BT601 = 0, DRM_COLOR_YCBCR_BT709 = 1, DRM_COLOR_YCBCR_BT2020 = 2, DRM_COLOR_ENCODING_MAX = 3, }; enum drm_color_range { DRM_COLOR_YCBCR_LIMITED_RANGE = 0, DRM_COLOR_YCBCR_FULL_RANGE = 1, DRM_COLOR_RANGE_MAX = 2, }; enum drm_scaling_filter { DRM_SCALING_FILTER_DEFAULT = 0, DRM_SCALING_FILTER_NEAREST_NEIGHBOR = 1, }; enum drm_plane_type { DRM_PLANE_TYPE_OVERLAY = 0, DRM_PLANE_TYPE_PRIMARY = 1, DRM_PLANE_TYPE_CURSOR = 2, }; enum drm_connector_registration_state { DRM_CONNECTOR_INITIALIZING = 0, DRM_CONNECTOR_REGISTERED = 1, DRM_CONNECTOR_UNREGISTERED = 2, }; enum drm_connector_status { connector_status_connected = 1, connector_status_disconnected = 2, connector_status_unknown = 3, }; enum subpixel_order { SubPixelUnknown = 0, SubPixelHorizontalRGB = 1, SubPixelHorizontalBGR = 2, SubPixelVerticalRGB = 3, SubPixelVerticalBGR = 4, SubPixelNone = 5, }; enum drm_link_status { DRM_LINK_STATUS_GOOD = 0, DRM_LINK_STATUS_BAD = 1, }; enum drm_mode_subconnector { DRM_MODE_SUBCONNECTOR_Automatic = 0, DRM_MODE_SUBCONNECTOR_Unknown = 0, DRM_MODE_SUBCONNECTOR_VGA = 1, DRM_MODE_SUBCONNECTOR_DVID = 3, DRM_MODE_SUBCONNECTOR_DVIA = 4, DRM_MODE_SUBCONNECTOR_Composite = 5, DRM_MODE_SUBCONNECTOR_SVIDEO = 6, DRM_MODE_SUBCONNECTOR_Component = 8, DRM_MODE_SUBCONNECTOR_SCART = 9, DRM_MODE_SUBCONNECTOR_DisplayPort = 10, DRM_MODE_SUBCONNECTOR_HDMIA = 11, DRM_MODE_SUBCONNECTOR_Native = 15, DRM_MODE_SUBCONNECTOR_Wireless = 18, }; enum drm_colorspace { DRM_MODE_COLORIMETRY_DEFAULT = 0, DRM_MODE_COLORIMETRY_NO_DATA = 0, DRM_MODE_COLORIMETRY_SMPTE_170M_YCC = 1, DRM_MODE_COLORIMETRY_BT709_YCC = 2, DRM_MODE_COLORIMETRY_XVYCC_601 = 3, DRM_MODE_COLORIMETRY_XVYCC_709 = 4, DRM_MODE_COLORIMETRY_SYCC_601 = 5, DRM_MODE_COLORIMETRY_OPYCC_601 = 6, DRM_MODE_COLORIMETRY_OPRGB = 7, DRM_MODE_COLORIMETRY_BT2020_CYCC = 8, DRM_MODE_COLORIMETRY_BT2020_RGB = 9, DRM_MODE_COLORIMETRY_BT2020_YCC = 10, DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65 = 11, DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER = 12, DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED = 13, DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT = 14, DRM_MODE_COLORIMETRY_BT601_YCC = 15, DRM_MODE_COLORIMETRY_COUNT = 16, }; enum drm_privacy_screen_status { PRIVACY_SCREEN_DISABLED = 0, PRIVACY_SCREEN_ENABLED = 1, PRIVACY_SCREEN_DISABLED_LOCKED = 2, PRIVACY_SCREEN_ENABLED_LOCKED = 3, }; enum drm_connector_force { DRM_FORCE_UNSPECIFIED = 0, DRM_FORCE_OFF = 1, DRM_FORCE_ON = 2, DRM_FORCE_ON_DIGITAL = 3, }; 
enum drm_panel_orientation { DRM_MODE_PANEL_ORIENTATION_UNKNOWN = -1, DRM_MODE_PANEL_ORIENTATION_NORMAL = 0, DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP = 1, DRM_MODE_PANEL_ORIENTATION_LEFT_UP = 2, DRM_MODE_PANEL_ORIENTATION_RIGHT_UP = 3, }; enum drm_connector_tv_mode { DRM_MODE_TV_MODE_NTSC = 0, DRM_MODE_TV_MODE_NTSC_443 = 1, DRM_MODE_TV_MODE_NTSC_J = 2, DRM_MODE_TV_MODE_PAL = 3, DRM_MODE_TV_MODE_PAL_M = 4, DRM_MODE_TV_MODE_PAL_N = 5, DRM_MODE_TV_MODE_SECAM = 6, DRM_MODE_TV_MODE_MAX = 7, }; enum drm_debug_category { DRM_UT_CORE = 0, DRM_UT_DRIVER = 1, DRM_UT_KMS = 2, DRM_UT_PRIME = 3, DRM_UT_ATOMIC = 4, DRM_UT_VBL = 5, DRM_UT_STATE = 6, DRM_UT_LEASE = 7, DRM_UT_DP = 8, DRM_UT_DRMRES = 9, }; enum drm_bridge_attach_flags { DRM_BRIDGE_ATTACH_NO_CONNECTOR = 1, }; enum drm_bridge_ops { DRM_BRIDGE_OP_DETECT = 1, DRM_BRIDGE_OP_EDID = 2, DRM_BRIDGE_OP_HPD = 4, DRM_BRIDGE_OP_MODES = 8, }; enum drm_driver_feature { DRIVER_GEM = 1, DRIVER_MODESET = 2, DRIVER_RENDER = 8, DRIVER_ATOMIC = 16, DRIVER_SYNCOBJ = 32, DRIVER_SYNCOBJ_TIMELINE = 64, DRIVER_COMPUTE_ACCEL = 128, DRIVER_GEM_GPUVA = 256, DRIVER_CURSOR_HOTSPOT = 512, DRIVER_USE_AGP = 33554432, DRIVER_LEGACY = 67108864, DRIVER_PCI_DMA = 134217728, DRIVER_SG = 268435456, DRIVER_HAVE_DMA = 536870912, DRIVER_HAVE_IRQ = 1073741824, }; struct drm_crtc; struct drm_pending_vblank_event; struct drm_crtc_commit { struct drm_crtc *crtc; struct kref ref; struct completion flip_done; struct completion hw_done; struct completion cleanup_done; struct list_head commit_entry; struct drm_pending_vblank_event *event; bool abort_completion; }; struct drm_object_properties; struct drm_mode_object { uint32_t id; uint32_t type; struct drm_object_properties *properties; struct kref refcount; void (*free_cb)(struct kref *); }; struct drm_object_properties { int count; struct drm_property *properties[96]; uint64_t values[96]; }; struct drm_crtc_crc_entry; struct drm_crtc_crc { spinlock_t lock; const char *source; bool opened; bool overflow; struct drm_crtc_crc_entry *entries; int head; int tail; size_t values_cnt; wait_queue_head_t wq; }; struct drm_plane; struct drm_crtc_funcs; struct drm_crtc_helper_funcs; struct drm_crtc_state; struct drm_self_refresh_data; struct drm_crtc { struct drm_device *dev; struct device_node *port; struct list_head head; char *name; struct drm_modeset_lock mutex; struct drm_mode_object base; struct drm_plane *primary; struct drm_plane *cursor; unsigned int index; int cursor_x; int cursor_y; bool enabled; struct drm_display_mode mode; struct drm_display_mode hwmode; int x; int y; const struct drm_crtc_funcs *funcs; uint32_t gamma_size; uint16_t *gamma_store; const struct drm_crtc_helper_funcs *helper_private; struct drm_object_properties properties; struct drm_property *scaling_filter_property; struct drm_crtc_state *state; struct list_head commit_list; spinlock_t commit_lock; struct dentry *debugfs_entry; struct drm_crtc_crc crc; unsigned int fence_context; spinlock_t fence_lock; unsigned long fence_seqno; char timeline_name[32]; struct drm_self_refresh_data *self_refresh_data; }; typedef unsigned int drm_magic_t; struct drm_prime_file_private { struct mutex lock; struct rb_root dmabufs; struct rb_root handles; }; struct drm_file { bool authenticated; bool stereo_allowed; bool universal_planes; bool atomic; bool aspect_ratio_allowed; bool writeback_connectors; bool was_master; bool is_master; bool supports_virtualized_cursor_plane; struct drm_master *master; spinlock_t master_lookup_lock; struct pid __attribute__((btf_type_tag("rcu"))) *pid; u64 
client_id; drm_magic_t magic; struct list_head lhead; struct drm_minor *minor; struct idr object_idr; spinlock_t table_lock; struct idr syncobj_idr; spinlock_t syncobj_table_lock; struct file *filp; void *driver_priv; struct list_head fbs; struct mutex fbs_lock; struct list_head blobs; wait_queue_head_t event_wait; struct list_head pending_event_list; struct list_head event_list; int event_space; struct mutex event_read_lock; struct drm_prime_file_private prime; }; struct drm_minor { int index; int type; struct device *kdev; struct drm_device *dev; struct dentry *debugfs_root; struct list_head debugfs_list; struct mutex debugfs_lock; }; struct drm_mm; struct drm_mm_node { unsigned long color; u64 start; u64 size; struct drm_mm *mm; struct list_head node_list; struct list_head hole_stack; struct rb_node rb; struct rb_node rb_hole_size; struct rb_node rb_hole_addr; u64 __subtree_last; u64 hole_size; u64 subtree_max_hole; unsigned long flags; }; struct drm_vma_offset_node { rwlock_t vm_lock; struct drm_mm_node vm_node; struct rb_root vm_files; void *driver_private; }; struct dma_resv_list; struct dma_resv { struct ww_mutex lock; struct dma_resv_list __attribute__((btf_type_tag("rcu"))) *fences; }; struct drm_gem_object_funcs; struct drm_gem_lru; struct drm_gem_object { struct kref refcount; unsigned int handle_count; struct drm_device *dev; struct file *filp; struct drm_vma_offset_node vma_node; size_t size; int name; struct dma_buf *dma_buf; struct dma_buf_attachment *import_attach; struct dma_resv *resv; struct dma_resv _resv; struct { struct list_head list; } gpuva; const struct drm_gem_object_funcs *funcs; struct list_head lru_node; struct drm_gem_lru *lru; }; struct drm_mm { void (*color_adjust)(const struct drm_mm_node *, unsigned long, u64 *, u64 *); struct list_head hole_stack; struct drm_mm_node head_node; struct rb_root_cached interval_tree; struct rb_root_cached holes_size; struct rb_root holes_addr; unsigned long scan_active; }; struct dma_fence; struct dma_resv_list { struct callback_head rcu; u32 num_fences; u32 max_fences; struct dma_fence __attribute__((btf_type_tag("rcu"))) *table[0]; }; struct drm_gem_object_funcs { void (*free)(struct drm_gem_object *); int (*open)(struct drm_gem_object *, struct drm_file *); void (*close)(struct drm_gem_object *, struct drm_file *); void (*print_info)(struct drm_printer *, unsigned int, const struct drm_gem_object *); struct dma_buf * (*export)(struct drm_gem_object *, int); int (*pin)(struct drm_gem_object *); void (*unpin)(struct drm_gem_object *); struct sg_table * (*get_sg_table)(struct drm_gem_object *); int (*vmap)(struct drm_gem_object *, struct iosys_map *); void (*vunmap)(struct drm_gem_object *, struct iosys_map *); int (*mmap)(struct drm_gem_object *, struct vm_area_struct *); int (*evict)(struct drm_gem_object *); enum drm_gem_object_status (*status)(struct drm_gem_object *); const struct vm_operations_struct *vm_ops; }; struct drm_gem_lru { struct mutex *lock; long count; struct list_head list; }; struct drm_vblank_crtc { struct drm_device *dev; wait_queue_head_t queue; struct timer_list disable_timer; seqlock_t seqlock; atomic64_t count; ktime_t time; atomic_t refcount; u32 last; u32 max_vblank_count; unsigned int inmodeset; unsigned int pipe; int framedur_ns; int linedur_ns; struct drm_display_mode hwmode; bool enabled; struct kthread_worker *worker; struct list_head pending_work; wait_queue_head_t work_wait_queue; }; struct drm_framebuffer_funcs; struct drm_framebuffer { struct drm_device *dev; struct list_head head; struct 
drm_mode_object base; char comm[16]; const struct drm_format_info *format; const struct drm_framebuffer_funcs *funcs; unsigned int pitches[4]; unsigned int offsets[4]; uint64_t modifier; unsigned int width; unsigned int height; int flags; int hot_x; int hot_y; struct list_head filp_head; struct drm_gem_object *obj[4]; }; struct drm_property { struct list_head head; struct drm_mode_object base; uint32_t flags; char name[32]; uint32_t num_values; uint64_t *values; struct drm_device *dev; struct list_head enum_list; }; struct drm_clip_rect; struct drm_framebuffer_funcs { void (*destroy)(struct drm_framebuffer *); int (*create_handle)(struct drm_framebuffer *, struct drm_file *, unsigned int *); int (*dirty)(struct drm_framebuffer *, struct drm_file *, unsigned int, unsigned int, struct drm_clip_rect *, unsigned int); }; struct drm_clip_rect { unsigned short x1; unsigned short y1; unsigned short x2; unsigned short y2; }; struct __drm_planes_state; struct __drm_crtcs_state; struct __drm_connnectors_state; struct __drm_private_objs_state; struct drm_atomic_state { struct kref ref; struct drm_device *dev; bool allow_modeset: 1; bool legacy_cursor_update: 1; bool async_update: 1; bool duplicated: 1; struct __drm_planes_state *planes; struct __drm_crtcs_state *crtcs; int num_connector; struct __drm_connnectors_state *connectors; int num_private_objs; struct __drm_private_objs_state *private_objs; struct drm_modeset_acquire_ctx *acquire_ctx; struct drm_crtc_commit *fake_commit; struct work_struct commit_work; }; struct drm_plane_state; struct __drm_planes_state { struct drm_plane *ptr; struct drm_plane_state *state; struct drm_plane_state *old_state; struct drm_plane_state *new_state; }; struct drm_plane_funcs; struct drm_plane_helper_funcs; struct drm_plane { struct drm_device *dev; struct list_head head; char *name; struct drm_modeset_lock mutex; struct drm_mode_object base; uint32_t possible_crtcs; uint32_t *format_types; unsigned int format_count; bool format_default; uint64_t *modifiers; unsigned int modifier_count; struct drm_crtc *crtc; struct drm_framebuffer *fb; struct drm_framebuffer *old_fb; const struct drm_plane_funcs *funcs; struct drm_object_properties properties; enum drm_plane_type type; unsigned int index; const struct drm_plane_helper_funcs *helper_private; struct drm_plane_state *state; struct drm_property *alpha_property; struct drm_property *zpos_property; struct drm_property *rotation_property; struct drm_property *blend_mode_property; struct drm_property *color_encoding_property; struct drm_property *color_range_property; struct drm_property *scaling_filter_property; }; struct drm_plane_funcs { int (*update_plane)(struct drm_plane *, struct drm_crtc *, struct drm_framebuffer *, int, int, unsigned int, unsigned int, uint32_t, uint32_t, uint32_t, uint32_t, struct drm_modeset_acquire_ctx *); int (*disable_plane)(struct drm_plane *, struct drm_modeset_acquire_ctx *); void (*destroy)(struct drm_plane *); void (*reset)(struct drm_plane *); int (*set_property)(struct drm_plane *, struct drm_property *, uint64_t); struct drm_plane_state * (*atomic_duplicate_state)(struct drm_plane *); void (*atomic_destroy_state)(struct drm_plane *, struct drm_plane_state *); int (*atomic_set_property)(struct drm_plane *, struct drm_plane_state *, struct drm_property *, uint64_t); int (*atomic_get_property)(struct drm_plane *, const struct drm_plane_state *, struct drm_property *, uint64_t *); int (*late_register)(struct drm_plane *); void (*early_unregister)(struct drm_plane *); void 
(*atomic_print_state)(struct drm_printer *, const struct drm_plane_state *); bool (*format_mod_supported)(struct drm_plane *, uint32_t, uint64_t); }; struct drm_rect { int x1; int y1; int x2; int y2; }; struct drm_property_blob; struct drm_plane_state { struct drm_plane *plane; struct drm_crtc *crtc; struct drm_framebuffer *fb; struct dma_fence *fence; int32_t crtc_x; int32_t crtc_y; uint32_t crtc_w; uint32_t crtc_h; uint32_t src_x; uint32_t src_y; uint32_t src_h; uint32_t src_w; u16 alpha; uint16_t pixel_blend_mode; unsigned int rotation; unsigned int zpos; unsigned int normalized_zpos; enum drm_color_encoding color_encoding; enum drm_color_range color_range; struct drm_property_blob *fb_damage_clips; bool ignore_damage_clips; struct drm_rect src; struct drm_rect dst; bool visible; enum drm_scaling_filter scaling_filter; struct drm_crtc_commit *commit; struct drm_atomic_state *state; }; struct dma_fence_ops; struct dma_fence { spinlock_t *lock; const struct dma_fence_ops *ops; union { struct list_head cb_list; ktime_t timestamp; struct callback_head rcu; }; u64 context; u64 seqno; unsigned long flags; struct kref refcount; int error; }; struct dma_fence_ops { bool use_64bit_seqno; const char * (*get_driver_name)(struct dma_fence *); const char * (*get_timeline_name)(struct dma_fence *); bool (*enable_signaling)(struct dma_fence *); bool (*signaled)(struct dma_fence *); long (*wait)(struct dma_fence *, bool, long); void (*release)(struct dma_fence *); void (*fence_value_str)(struct dma_fence *, char *, int); void (*timeline_value_str)(struct dma_fence *, char *, int); void (*set_deadline)(struct dma_fence *, ktime_t); }; struct drm_property_blob { struct drm_mode_object base; struct drm_device *dev; struct list_head head_global; struct list_head head_file; size_t length; void *data; }; struct drm_plane_helper_funcs { int (*prepare_fb)(struct drm_plane *, struct drm_plane_state *); void (*cleanup_fb)(struct drm_plane *, struct drm_plane_state *); int (*begin_fb_access)(struct drm_plane *, struct drm_plane_state *); void (*end_fb_access)(struct drm_plane *, struct drm_plane_state *); int (*atomic_check)(struct drm_plane *, struct drm_atomic_state *); void (*atomic_update)(struct drm_plane *, struct drm_atomic_state *); void (*atomic_enable)(struct drm_plane *, struct drm_atomic_state *); void (*atomic_disable)(struct drm_plane *, struct drm_atomic_state *); int (*atomic_async_check)(struct drm_plane *, struct drm_atomic_state *); void (*atomic_async_update)(struct drm_plane *, struct drm_atomic_state *); }; struct __drm_crtcs_state { struct drm_crtc *ptr; struct drm_crtc_state *state; struct drm_crtc_state *old_state; struct drm_crtc_state *new_state; struct drm_crtc_commit *commit; s32 __attribute__((btf_type_tag("user"))) *out_fence_ptr; u64 last_vblank_count; }; struct drm_crtc_state { struct drm_crtc *crtc; bool enable; bool active; bool planes_changed: 1; bool mode_changed: 1; bool active_changed: 1; bool connectors_changed: 1; bool zpos_changed: 1; bool color_mgmt_changed: 1; bool no_vblank: 1; u32 plane_mask; u32 connector_mask; u32 encoder_mask; struct drm_display_mode adjusted_mode; struct drm_display_mode mode; struct drm_property_blob *mode_blob; struct drm_property_blob *degamma_lut; struct drm_property_blob *ctm; struct drm_property_blob *gamma_lut; u32 target_vblank; bool async_flip; bool vrr_enabled; bool self_refresh_active; enum drm_scaling_filter scaling_filter; struct drm_pending_vblank_event *event; struct drm_crtc_commit *commit; struct drm_atomic_state *state; }; struct 
drm_event { __u32 type; __u32 length; }; struct drm_event_vblank { struct drm_event base; __u64 user_data; __u32 tv_sec; __u32 tv_usec; __u32 sequence; __u32 crtc_id; }; struct drm_event_crtc_sequence { struct drm_event base; __u64 user_data; __s64 time_ns; __u64 sequence; }; struct drm_pending_event { struct completion *completion; void (*completion_release)(struct completion *); struct drm_event *event; struct dma_fence *fence; struct drm_file *file_priv; struct list_head link; struct list_head pending_link; }; struct drm_pending_vblank_event { struct drm_pending_event base; unsigned int pipe; u64 sequence; union { struct drm_event base; struct drm_event_vblank vbl; struct drm_event_crtc_sequence seq; } event; }; struct drm_connector; struct drm_connector_state; struct __drm_connnectors_state { struct drm_connector *ptr; struct drm_connector_state *state; struct drm_connector_state *old_state; struct drm_connector_state *new_state; s32 __attribute__((btf_type_tag("user"))) *out_fence_ptr; }; struct drm_scrambling { bool supported; bool low_rates; }; struct drm_scdc { bool supported; bool read_request; struct drm_scrambling scrambling; }; struct drm_hdmi_dsc_cap { bool v_1p2; bool native_420; bool all_bpp; u8 bpc_supported; u8 max_slices; int clk_per_slice; u8 max_lanes; u8 max_frl_rate_per_lane; u8 total_chunk_kbytes; }; struct drm_hdmi_info { struct drm_scdc scdc; unsigned long y420_vdb_modes[4]; unsigned long y420_cmdb_modes[4]; u8 y420_dc_modes; u8 max_frl_rate_per_lane; u8 max_lanes; struct drm_hdmi_dsc_cap dsc_cap; }; struct drm_monitor_range_info { u16 min_vfreq; u16 max_vfreq; }; struct drm_luminance_range_info { u32 min_luminance; u32 max_luminance; }; struct drm_display_info { unsigned int width_mm; unsigned int height_mm; unsigned int bpc; enum subpixel_order subpixel_order; int panel_orientation; u32 color_formats; const u32 *bus_formats; unsigned int num_bus_formats; u32 bus_flags; int max_tmds_clock; bool dvi_dual; bool is_hdmi; bool has_audio; bool has_hdmi_infoframe; bool rgb_quant_range_selectable; u8 edid_hdmi_rgb444_dc_modes; u8 edid_hdmi_ycbcr444_dc_modes; u8 cea_rev; struct drm_hdmi_info hdmi; bool non_desktop; struct drm_monitor_range_info monitor_range; struct drm_luminance_range_info luminance_range; u8 mso_stream_count; u8 mso_pixel_overlap; u32 max_dsc_bpp; u8 *vics; int vics_len; u32 quirks; }; struct drm_privacy_screen; struct drm_connector_tv_margins { unsigned int bottom; unsigned int left; unsigned int right; unsigned int top; }; struct drm_cmdline_mode { char name[32]; bool specified; bool refresh_specified; bool bpp_specified; unsigned int pixel_clock; int xres; int yres; int bpp; int refresh; bool rb; bool interlace; bool cvt; bool margins; enum drm_connector_force force; unsigned int rotation_reflection; enum drm_panel_orientation panel_orientation; struct drm_connector_tv_margins tv_margins; enum drm_connector_tv_mode tv_mode; bool tv_mode_specified; }; struct hdr_static_metadata { __u8 eotf; __u8 metadata_type; __u16 max_cll; __u16 max_fall; __u16 min_cll; }; struct hdr_sink_metadata { __u32 metadata_type; union { struct hdr_static_metadata hdmi_type1; }; }; struct drm_connector_funcs; struct drm_connector_helper_funcs; struct drm_edid; struct drm_encoder; struct drm_tile_group; struct drm_connector { struct drm_device *dev; struct device *kdev; struct device_attribute *attr; struct fwnode_handle *fwnode; struct list_head head; struct list_head global_connector_list_entry; struct drm_mode_object base; char *name; struct mutex mutex; unsigned int index; 
int connector_type; int connector_type_id; bool interlace_allowed; bool doublescan_allowed; bool stereo_allowed; bool ycbcr_420_allowed; enum drm_connector_registration_state registration_state; struct list_head modes; enum drm_connector_status status; struct list_head probed_modes; struct drm_display_info display_info; const struct drm_connector_funcs *funcs; struct drm_property_blob *edid_blob_ptr; struct drm_object_properties properties; struct drm_property *scaling_mode_property; struct drm_property *vrr_capable_property; struct drm_property *colorspace_property; struct drm_property_blob *path_blob_ptr; struct drm_property *max_bpc_property; struct drm_privacy_screen *privacy_screen; struct notifier_block privacy_screen_notifier; struct drm_property *privacy_screen_sw_state_property; struct drm_property *privacy_screen_hw_state_property; uint8_t polled; int dpms; const struct drm_connector_helper_funcs *helper_private; struct drm_cmdline_mode cmdline_mode; enum drm_connector_force force; const struct drm_edid *edid_override; struct mutex edid_override_mutex; u64 epoch_counter; u32 possible_encoders; struct drm_encoder *encoder; uint8_t eld[128]; bool latency_present[2]; int video_latency[2]; int audio_latency[2]; struct i2c_adapter *ddc; int null_edid_counter; unsigned int bad_edid_counter; bool edid_corrupt; u8 real_edid_checksum; struct dentry *debugfs_entry; struct drm_connector_state *state; struct drm_property_blob *tile_blob_ptr; bool has_tile; struct drm_tile_group *tile_group; bool tile_is_single_monitor; uint8_t num_h_tile; uint8_t num_v_tile; uint8_t tile_h_loc; uint8_t tile_v_loc; uint16_t tile_h_size; uint16_t tile_v_size; struct llist_node free_node; struct hdr_sink_metadata hdr_sink_metadata; }; struct drm_connector_funcs { int (*dpms)(struct drm_connector *, int); void (*reset)(struct drm_connector *); enum drm_connector_status (*detect)(struct drm_connector *, bool); void (*force)(struct drm_connector *); int (*fill_modes)(struct drm_connector *, uint32_t, uint32_t); int (*set_property)(struct drm_connector *, struct drm_property *, uint64_t); int (*late_register)(struct drm_connector *); void (*early_unregister)(struct drm_connector *); void (*destroy)(struct drm_connector *); struct drm_connector_state * (*atomic_duplicate_state)(struct drm_connector *); void (*atomic_destroy_state)(struct drm_connector *, struct drm_connector_state *); int (*atomic_set_property)(struct drm_connector *, struct drm_connector_state *, struct drm_property *, uint64_t); int (*atomic_get_property)(struct drm_connector *, const struct drm_connector_state *, struct drm_property *, uint64_t *); void (*atomic_print_state)(struct drm_printer *, const struct drm_connector_state *); void (*oob_hotplug_event)(struct drm_connector *); void (*debugfs_init)(struct drm_connector *, struct dentry *); }; struct drm_tv_connector_state { enum drm_mode_subconnector select_subconnector; enum drm_mode_subconnector subconnector; struct drm_connector_tv_margins margins; unsigned int legacy_mode; unsigned int mode; unsigned int brightness; unsigned int contrast; unsigned int flicker_reduction; unsigned int overscan; unsigned int saturation; unsigned int hue; }; struct drm_writeback_job; struct drm_connector_state { struct drm_connector *connector; struct drm_crtc *crtc; struct drm_encoder *best_encoder; enum drm_link_status link_status; struct drm_atomic_state *state; struct drm_crtc_commit *commit; struct drm_tv_connector_state tv; bool self_refresh_aware; enum hdmi_picture_aspect picture_aspect_ratio; 
unsigned int content_type; unsigned int hdcp_content_type; unsigned int scaling_mode; unsigned int content_protection; enum drm_colorspace colorspace; struct drm_writeback_job *writeback_job; u8 max_requested_bpc; u8 max_bpc; enum drm_privacy_screen_status privacy_screen_sw_state; struct drm_property_blob *hdr_output_metadata; }; struct drm_encoder_funcs; struct drm_encoder_helper_funcs; struct drm_encoder { struct drm_device *dev; struct list_head head; struct drm_mode_object base; char *name; int encoder_type; unsigned int index; uint32_t possible_crtcs; uint32_t possible_clones; struct drm_crtc *crtc; struct list_head bridge_chain; const struct drm_encoder_funcs *funcs; const struct drm_encoder_helper_funcs *helper_private; }; struct drm_encoder_funcs { void (*reset)(struct drm_encoder *); void (*destroy)(struct drm_encoder *); int (*late_register)(struct drm_encoder *); void (*early_unregister)(struct drm_encoder *); }; struct drm_encoder_helper_funcs { void (*dpms)(struct drm_encoder *, int); enum drm_mode_status (*mode_valid)(struct drm_encoder *, const struct drm_display_mode *); bool (*mode_fixup)(struct drm_encoder *, const struct drm_display_mode *, struct drm_display_mode *); void (*prepare)(struct drm_encoder *); void (*commit)(struct drm_encoder *); void (*mode_set)(struct drm_encoder *, struct drm_display_mode *, struct drm_display_mode *); void (*atomic_mode_set)(struct drm_encoder *, struct drm_crtc_state *, struct drm_connector_state *); enum drm_connector_status (*detect)(struct drm_encoder *, struct drm_connector *); void (*atomic_disable)(struct drm_encoder *, struct drm_atomic_state *); void (*atomic_enable)(struct drm_encoder *, struct drm_atomic_state *); void (*disable)(struct drm_encoder *); void (*enable)(struct drm_encoder *); int (*atomic_check)(struct drm_encoder *, struct drm_crtc_state *, struct drm_connector_state *); }; struct drm_writeback_connector; struct drm_writeback_job { struct drm_writeback_connector *connector; bool prepared; struct work_struct cleanup_work; struct list_head list_entry; struct drm_framebuffer *fb; struct dma_fence *out_fence; void *priv; }; struct drm_writeback_connector { struct drm_connector base; struct drm_encoder encoder; struct drm_property_blob *pixel_formats_blob_ptr; spinlock_t job_lock; struct list_head job_queue; unsigned int fence_context; spinlock_t fence_lock; unsigned long fence_seqno; char timeline_name[32]; }; struct drm_connector_helper_funcs { int (*get_modes)(struct drm_connector *); int (*detect_ctx)(struct drm_connector *, struct drm_modeset_acquire_ctx *, bool); enum drm_mode_status (*mode_valid)(struct drm_connector *, struct drm_display_mode *); int (*mode_valid_ctx)(struct drm_connector *, struct drm_display_mode *, struct drm_modeset_acquire_ctx *, enum drm_mode_status *); struct drm_encoder * (*best_encoder)(struct drm_connector *); struct drm_encoder * (*atomic_best_encoder)(struct drm_connector *, struct drm_atomic_state *); int (*atomic_check)(struct drm_connector *, struct drm_atomic_state *); void (*atomic_commit)(struct drm_connector *, struct drm_atomic_state *); int (*prepare_writeback_job)(struct drm_writeback_connector *, struct drm_writeback_job *); void (*cleanup_writeback_job)(struct drm_writeback_connector *, struct drm_writeback_job *); void (*enable_hpd)(struct drm_connector *); void (*disable_hpd)(struct drm_connector *); }; struct edid; struct drm_edid { size_t size; const struct edid *edid; }; struct drm_tile_group { struct kref refcount; struct drm_device *dev; int id; u8 
group_data[8]; }; struct drm_private_obj; struct drm_private_state; struct __drm_private_objs_state { struct drm_private_obj *ptr; struct drm_private_state *state; struct drm_private_state *old_state; struct drm_private_state *new_state; }; struct drm_private_state_funcs; struct drm_private_obj { struct list_head head; struct drm_modeset_lock lock; struct drm_private_state *state; const struct drm_private_state_funcs *funcs; }; struct drm_private_state { struct drm_atomic_state *state; struct drm_private_obj *obj; }; struct drm_private_state_funcs { struct drm_private_state * (*atomic_duplicate_state)(struct drm_private_obj *); void (*atomic_destroy_state)(struct drm_private_obj *, struct drm_private_state *); void (*atomic_print_state)(struct drm_printer *, const struct drm_private_state *); }; struct drm_vma_offset_manager { rwlock_t vm_lock; struct drm_mm vm_addr_space_mm; }; struct drm_mode_set; struct drm_crtc_funcs { void (*reset)(struct drm_crtc *); int (*cursor_set)(struct drm_crtc *, struct drm_file *, uint32_t, uint32_t, uint32_t); int (*cursor_set2)(struct drm_crtc *, struct drm_file *, uint32_t, uint32_t, uint32_t, int32_t, int32_t); int (*cursor_move)(struct drm_crtc *, int, int); int (*gamma_set)(struct drm_crtc *, u16 *, u16 *, u16 *, uint32_t, struct drm_modeset_acquire_ctx *); void (*destroy)(struct drm_crtc *); int (*set_config)(struct drm_mode_set *, struct drm_modeset_acquire_ctx *); int (*page_flip)(struct drm_crtc *, struct drm_framebuffer *, struct drm_pending_vblank_event *, uint32_t, struct drm_modeset_acquire_ctx *); int (*page_flip_target)(struct drm_crtc *, struct drm_framebuffer *, struct drm_pending_vblank_event *, uint32_t, uint32_t, struct drm_modeset_acquire_ctx *); int (*set_property)(struct drm_crtc *, struct drm_property *, uint64_t); struct drm_crtc_state * (*atomic_duplicate_state)(struct drm_crtc *); void (*atomic_destroy_state)(struct drm_crtc *, struct drm_crtc_state *); int (*atomic_set_property)(struct drm_crtc *, struct drm_crtc_state *, struct drm_property *, uint64_t); int (*atomic_get_property)(struct drm_crtc *, const struct drm_crtc_state *, struct drm_property *, uint64_t *); int (*late_register)(struct drm_crtc *); void (*early_unregister)(struct drm_crtc *); int (*set_crc_source)(struct drm_crtc *, const char *); int (*verify_crc_source)(struct drm_crtc *, const char *, size_t *); const char * const * (*get_crc_sources)(struct drm_crtc *, size_t *); void (*atomic_print_state)(struct drm_printer *, const struct drm_crtc_state *); u32 (*get_vblank_counter)(struct drm_crtc *); int (*enable_vblank)(struct drm_crtc *); void (*disable_vblank)(struct drm_crtc *); bool (*get_vblank_timestamp)(struct drm_crtc *, int *, ktime_t *, bool); }; struct drm_mode_set { struct drm_framebuffer *fb; struct drm_crtc *crtc; struct drm_display_mode *mode; uint32_t x; uint32_t y; struct drm_connector **connectors; size_t num_connectors; }; enum mode_set_atomic { LEAVE_ATOMIC_MODE_SET = 0, ENTER_ATOMIC_MODE_SET = 1, }; struct drm_crtc_helper_funcs { void (*dpms)(struct drm_crtc *, int); void (*prepare)(struct drm_crtc *); void (*commit)(struct drm_crtc *); enum drm_mode_status (*mode_valid)(struct drm_crtc *, const struct drm_display_mode *); bool (*mode_fixup)(struct drm_crtc *, const struct drm_display_mode *, struct drm_display_mode *); int (*mode_set)(struct drm_crtc *, struct drm_display_mode *, struct drm_display_mode *, int, int, struct drm_framebuffer *); void (*mode_set_nofb)(struct drm_crtc *); int (*mode_set_base)(struct drm_crtc *, int, int, struct 
drm_framebuffer *); int (*mode_set_base_atomic)(struct drm_crtc *, struct drm_framebuffer *, int, int, enum mode_set_atomic); void (*disable)(struct drm_crtc *); int (*atomic_check)(struct drm_crtc *, struct drm_atomic_state *); void (*atomic_begin)(struct drm_crtc *, struct drm_atomic_state *); void (*atomic_flush)(struct drm_crtc *, struct drm_atomic_state *); void (*atomic_enable)(struct drm_crtc *, struct drm_atomic_state *); void (*atomic_disable)(struct drm_crtc *, struct drm_atomic_state *); bool (*get_scanout_position)(struct drm_crtc *, bool, int *, int *, ktime_t *, ktime_t *, const struct drm_display_mode *); }; struct drm_crtc_crc_entry { bool has_frame_counter; uint32_t frame; uint32_t crcs[10]; }; struct drm_bridge_timings; struct drm_bridge_funcs; struct drm_bridge { struct drm_private_obj base; struct drm_device *dev; struct drm_encoder *encoder; struct list_head chain_node; struct device_node *of_node; struct list_head list; const struct drm_bridge_timings *timings; const struct drm_bridge_funcs *funcs; void *driver_private; enum drm_bridge_ops ops; int type; bool interlace_allowed; bool pre_enable_prev_first; struct i2c_adapter *ddc; struct mutex hpd_mutex; void (*hpd_cb)(void *, enum drm_connector_status); void *hpd_data; }; struct drm_bridge_timings { u32 input_bus_flags; u32 setup_time_ps; u32 hold_time_ps; bool dual_link; }; struct drm_bridge_state; struct drm_bridge_funcs { int (*attach)(struct drm_bridge *, enum drm_bridge_attach_flags); void (*detach)(struct drm_bridge *); enum drm_mode_status (*mode_valid)(struct drm_bridge *, const struct drm_display_info *, const struct drm_display_mode *); bool (*mode_fixup)(struct drm_bridge *, const struct drm_display_mode *, struct drm_display_mode *); void (*disable)(struct drm_bridge *); void (*post_disable)(struct drm_bridge *); void (*mode_set)(struct drm_bridge *, const struct drm_display_mode *, const struct drm_display_mode *); void (*pre_enable)(struct drm_bridge *); void (*enable)(struct drm_bridge *); void (*atomic_pre_enable)(struct drm_bridge *, struct drm_bridge_state *); void (*atomic_enable)(struct drm_bridge *, struct drm_bridge_state *); void (*atomic_disable)(struct drm_bridge *, struct drm_bridge_state *); void (*atomic_post_disable)(struct drm_bridge *, struct drm_bridge_state *); struct drm_bridge_state * (*atomic_duplicate_state)(struct drm_bridge *); void (*atomic_destroy_state)(struct drm_bridge *, struct drm_bridge_state *); u32 * (*atomic_get_output_bus_fmts)(struct drm_bridge *, struct drm_bridge_state *, struct drm_crtc_state *, struct drm_connector_state *, unsigned int *); u32 * (*atomic_get_input_bus_fmts)(struct drm_bridge *, struct drm_bridge_state *, struct drm_crtc_state *, struct drm_connector_state *, u32, unsigned int *); int (*atomic_check)(struct drm_bridge *, struct drm_bridge_state *, struct drm_crtc_state *, struct drm_connector_state *); struct drm_bridge_state * (*atomic_reset)(struct drm_bridge *); enum drm_connector_status (*detect)(struct drm_bridge *); int (*get_modes)(struct drm_bridge *, struct drm_connector *); const struct drm_edid * (*edid_read)(struct drm_bridge *, struct drm_connector *); struct edid * (*get_edid)(struct drm_bridge *, struct drm_connector *); void (*hpd_notify)(struct drm_bridge *, enum drm_connector_status); void (*hpd_enable)(struct drm_bridge *); void (*hpd_disable)(struct drm_bridge *); void (*debugfs_init)(struct drm_bridge *, struct dentry *); }; struct drm_bus_cfg { u32 format; u32 flags; }; struct drm_bridge_state { struct drm_private_state 
base; struct drm_bridge *bridge; struct drm_bus_cfg input_bus_cfg; struct drm_bus_cfg output_bus_cfg; }; struct drm_connector_list_iter { struct drm_device *dev; struct drm_connector *conn; }; struct drm_mode_rect { __s32 x1; __s32 y1; __s32 x2; __s32 y2; }; struct drm_debugfs_entry { struct drm_device *dev; struct drm_debugfs_info file; struct list_head list; }; struct drm_mode_modeinfo { __u32 clock; __u16 hdisplay; __u16 hsync_start; __u16 hsync_end; __u16 htotal; __u16 hskew; __u16 vdisplay; __u16 vsync_start; __u16 vsync_end; __u16 vtotal; __u16 vscan; __u32 vrefresh; __u32 flags; __u32 type; char name[32]; }; struct dma_fence_cb; typedef void (*dma_fence_func_t)(struct dma_fence *, struct dma_fence_cb *); struct dma_fence_cb { struct list_head node; dma_fence_func_t func; }; struct sync_file { struct file *file; char user_name[32]; struct list_head sync_file_list; wait_queue_head_t wq; unsigned long flags; struct dma_fence *fence; struct dma_fence_cb cb; }; struct drm_out_fence_state { s32 __attribute__((btf_type_tag("user"))) *out_fence_ptr; struct sync_file *sync_file; int fd; }; struct drm_mode_atomic { __u32 flags; __u32 count_objs; __u64 objs_ptr; __u64 count_props_ptr; __u64 props_ptr; __u64 prop_values_ptr; __u64 reserved; __u64 user_data; }; struct drm_auth { drm_magic_t magic; }; struct drm_prop_enum_list { int type; const char *name; }; struct est_timings { u8 t1; u8 t2; u8 mfg_rsvd; }; struct std_timing { u8 hsize; u8 vfreq_aspect; }; struct detailed_pixel_timing { u8 hactive_lo; u8 hblank_lo; u8 hactive_hblank_hi; u8 vactive_lo; u8 vblank_lo; u8 vactive_vblank_hi; u8 hsync_offset_lo; u8 hsync_pulse_width_lo; u8 vsync_offset_pulse_width_lo; u8 hsync_vsync_offset_pulse_width_hi; u8 width_mm_lo; u8 height_mm_lo; u8 width_height_mm_hi; u8 hborder; u8 vborder; u8 misc; }; struct detailed_data_string { u8 str[13]; }; struct detailed_data_monitor_range { u8 min_vfreq; u8 max_vfreq; u8 min_hfreq_khz; u8 max_hfreq_khz; u8 pixel_clock_mhz; u8 flags; union { struct { u8 reserved; u8 hfreq_start_khz; u8 c; __le16 m; u8 k; u8 j; } __attribute__((packed)) gtf2; struct { u8 version; u8 data1; u8 data2; u8 supported_aspects; u8 flags; u8 supported_scalings; u8 preferred_refresh; } cvt; } formula; }; struct detailed_data_wpindex { u8 white_yx_lo; u8 white_x_hi; u8 white_y_hi; u8 gamma; }; struct cvt_timing { u8 code[3]; }; struct detailed_non_pixel { u8 pad1; u8 type; u8 pad2; union { struct detailed_data_string str; struct detailed_data_monitor_range range; struct detailed_data_wpindex color; struct std_timing timings[6]; struct cvt_timing cvt[4]; } data; }; struct detailed_timing { __le16 pixel_clock; union { struct detailed_pixel_timing pixel_data; struct detailed_non_pixel other_data; } data; }; struct edid { u8 header[8]; u8 mfg_id[2]; u8 prod_code[2]; u32 serial; u8 mfg_week; u8 mfg_year; u8 version; u8 revision; u8 input; u8 width_cm; u8 height_cm; u8 gamma; u8 features; u8 red_green_lo; u8 blue_white_lo; u8 red_x; u8 red_y; u8 green_x; u8 green_y; u8 blue_x; u8 blue_y; u8 white_x; u8 white_y; struct est_timings established_timings; struct std_timing standard_timings[8]; struct detailed_timing detailed_timings[4]; u8 extensions; u8 checksum; }; struct drm_client_funcs; struct drm_client_dev { struct drm_device *dev; const char *name; struct list_head list; const struct drm_client_funcs *funcs; struct drm_file *file; struct mutex modeset_mutex; struct drm_mode_set *modesets; bool hotplug_failed; }; struct drm_client_funcs { struct module *owner; void (*unregister)(struct 
drm_client_dev *); int (*restore)(struct drm_client_dev *); int (*hotplug)(struct drm_client_dev *); }; struct drm_client_buffer { struct drm_client_dev *client; u32 pitch; struct drm_gem_object *gem; struct iosys_map map; struct drm_framebuffer *fb; }; struct drm_mode_fb_cmd { __u32 fb_id; __u32 width; __u32 height; __u32 pitch; __u32 bpp; __u32 depth; __u32 handle; }; struct drm_client_offset { int x; int y; }; enum drm_color_lut_tests { DRM_COLOR_LUT_EQUAL_CHANNELS = 1, DRM_COLOR_LUT_NON_DECREASING = 2, }; struct drm_color_lut { __u16 red; __u16 green; __u16 blue; __u16 reserved; }; struct drm_mode_crtc_lut { __u32 crtc_id; __u32 gamma_size; __u64 red; __u64 green; __u64 blue; }; struct drm_conn_prop_enum_list { int type; const char *name; struct ida ida; }; typedef void (*drmres_release_t)(struct drm_device *, void *); struct drm_panel_funcs; struct drm_panel { struct device *dev; struct backlight_device *backlight; const struct drm_panel_funcs *funcs; int connector_type; struct list_head list; struct list_head followers; struct mutex follower_lock; bool prepare_prev_first; bool prepared; bool enabled; }; struct drm_panel_funcs { int (*prepare)(struct drm_panel *); int (*enable)(struct drm_panel *); int (*disable)(struct drm_panel *); int (*unprepare)(struct drm_panel *); int (*get_modes)(struct drm_panel *, struct drm_connector *); enum drm_panel_orientation (*get_orientation)(struct drm_panel *); int (*get_timings)(struct drm_panel *, unsigned int, struct display_timing *); void (*debugfs_init)(struct drm_panel *, struct dentry *); }; struct drm_mode_connector_set_property { __u64 value; __u32 prop_id; __u32 connector_id; }; struct drm_mode_obj_set_property { __u64 value; __u32 prop_id; __u32 obj_id; __u32 obj_type; }; struct drm_mode_get_connector { __u64 encoders_ptr; __u64 modes_ptr; __u64 props_ptr; __u64 prop_values_ptr; __u32 count_modes; __u32 count_props; __u32 count_encoders; __u32 encoder_id; __u32 connector_id; __u32 connector_type; __u32 connector_type_id; __u32 connection; __u32 mm_width; __u32 mm_height; __u32 subpixel; __u32 pad; }; struct drm_mode_crtc { __u64 set_connectors_ptr; __u32 count_connectors; __u32 crtc_id; __u32 fb_id; __u32 x; __u32 y; __u32 gamma_size; __u32 mode_valid; struct drm_mode_modeinfo mode; }; struct displayid_block { u8 tag; u8 rev; u8 num_bytes; }; struct displayid_header { u8 rev; u8 bytes; u8 prod_id; u8 ext_count; }; struct displayid_iter { const struct drm_edid *drm_edid; const u8 *section; int length; int idx; int ext_index; u8 version; u8 primary_use; }; enum drm_minor_type { DRM_MINOR_PRIMARY = 0, DRM_MINOR_CONTROL = 1, DRM_MINOR_RENDER = 2, DRM_MINOR_ACCEL = 32, }; struct drm_mode_map_dumb { __u32 handle; __u32 pad; __u64 offset; }; struct drm_mode_destroy_dumb { __u32 handle; }; struct edid_quirk { u32 panel_id; u32 quirks; }; struct minimode { short w; short h; short r; short rb; }; struct stereo_mandatory_mode { int width; int height; int vrefresh; unsigned int flags; }; enum edid_block_status { EDID_BLOCK_OK = 0, EDID_BLOCK_READ_FAIL = 1, EDID_BLOCK_NULL = 2, EDID_BLOCK_ZERO = 3, EDID_BLOCK_HEADER_CORRUPT = 4, EDID_BLOCK_HEADER_REPAIR = 5, EDID_BLOCK_HEADER_FIXED = 6, EDID_BLOCK_CHECKSUM = 7, EDID_BLOCK_VERSION = 8, }; struct cea_db { u8 tag_length; u8 data[0]; }; struct displayid_vesa_vendor_specific_block { struct displayid_block base; u8 oui[3]; u8 data_structure_type; u8 mso; }; struct displayid_detailed_timings_1 { u8 pixel_clock[3]; u8 flags; u8 hactive[2]; u8 hblank[2]; u8 hsync[2]; u8 hsw[2]; u8 vactive[2]; u8 vblank[2]; 
u8 vsync[2]; u8 vsw[2]; }; struct displayid_detailed_timing_block { struct displayid_block base; struct displayid_detailed_timings_1 timings[0]; }; struct displayid_tiled_block { struct displayid_block base; u8 tile_cap; u8 topo[3]; u8 tile_size[4]; u8 tile_pixel_bezel[5]; u8 topology_id[8]; }; typedef void detailed_cb(const struct detailed_timing *, void *); struct drm_edid_iter { const struct drm_edid *drm_edid; int index; }; struct cea_db_iter { struct drm_edid_iter edid_iter; struct displayid_iter displayid_iter; const u8 *collection; int index; int end; }; struct cea_sad { u8 format; u8 channels; u8 freq; u8 byte2; }; struct detailed_mode_closure { struct drm_connector *connector; const struct drm_edid *drm_edid; bool preferred; int modes; }; typedef int read_block_fn(void *, u8 *, unsigned int, size_t); struct drm_mode_get_encoder { __u32 encoder_id; __u32 encoder_type; __u32 crtc_id; __u32 possible_crtcs; __u32 possible_clones; }; enum dma_resv_usage { DMA_RESV_USAGE_KERNEL = 0, DMA_RESV_USAGE_WRITE = 1, DMA_RESV_USAGE_READ = 2, DMA_RESV_USAGE_BOOKKEEP = 3, }; struct drm_memory_stats { u64 shared; u64 private; u64 resident; u64 purgeable; u64 active; }; struct drm_mode_rmfb_work { struct work_struct work; struct list_head fbs; }; struct drm_mode_fb_dirty_cmd { __u32 fb_id; __u32 flags; __u32 color; __u32 num_clips; __u64 clips_ptr; }; struct dma_buf_poll_cb_t { struct dma_fence_cb cb; wait_queue_head_t *poll; __poll_t active; }; struct dma_buf_ops; struct dma_buf_sysfs_entry; struct dma_buf { size_t size; struct file *file; struct list_head attachments; const struct dma_buf_ops *ops; unsigned int vmapping_counter; struct iosys_map vmap_ptr; const char *exp_name; const char *name; spinlock_t name_lock; struct module *owner; struct list_head list_node; void *priv; struct dma_resv *resv; wait_queue_head_t poll; struct dma_buf_poll_cb_t cb_in; struct dma_buf_poll_cb_t cb_out; struct dma_buf_sysfs_entry *sysfs_entry; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct dma_buf_ops { bool cache_sgt_mapping; int (*attach)(struct dma_buf *, struct dma_buf_attachment *); void (*detach)(struct dma_buf *, struct dma_buf_attachment *); int (*pin)(struct dma_buf_attachment *); void (*unpin)(struct dma_buf_attachment *); struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, enum dma_data_direction); void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction); void (*release)(struct dma_buf *); int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); int (*begin_cpu_access_partial)(struct dma_buf *, enum dma_data_direction, unsigned int, unsigned int); int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); int (*end_cpu_access_partial)(struct dma_buf *, enum dma_data_direction, unsigned int, unsigned int); int (*mmap)(struct dma_buf *, struct vm_area_struct *); int (*vmap)(struct dma_buf *, struct iosys_map *); void (*vunmap)(struct dma_buf *, struct iosys_map *); int (*get_flags)(struct dma_buf *, unsigned long *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct dma_buf_attach_ops; struct dma_buf_attachment { struct dma_buf *dmabuf; struct device *dev; struct list_head node; struct sg_table *sgt; enum dma_data_direction dir; bool peer2peer; const struct dma_buf_attach_ops *importer_ops; void *importer_priv; void *priv; unsigned long dma_map_attrs; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct dma_buf_attach_ops { bool allow_peer2peer; void (*move_notify)(struct dma_buf_attachment 
*); u64 android_kabi_reserved1; }; struct dma_buf_sysfs_entry { union { struct kobject kobj; struct work_struct sysfs_add_work; }; struct dma_buf *dmabuf; }; struct drm_gem_close { __u32 handle; __u32 pad; }; struct drm_gem_flink { __u32 handle; __u32 name; }; struct drm_gem_open { __u32 name; __u32 handle; __u64 size; }; enum drm_stat_type { _DRM_STAT_LOCK = 0, _DRM_STAT_OPENS = 1, _DRM_STAT_CLOSES = 2, _DRM_STAT_IOCTLS = 3, _DRM_STAT_LOCKS = 4, _DRM_STAT_UNLOCKS = 5, _DRM_STAT_VALUE = 6, _DRM_STAT_BYTE = 7, _DRM_STAT_COUNT = 8, _DRM_STAT_IRQ = 9, _DRM_STAT_PRIMARY = 10, _DRM_STAT_SECONDARY = 11, _DRM_STAT_DMA = 12, _DRM_STAT_SPECIAL = 13, _DRM_STAT_MISSED = 14, }; struct drm_unique { __kernel_size_t unique_len; char __attribute__((btf_type_tag("user"))) *unique; }; struct drm_client { int idx; int auth; unsigned long pid; unsigned long uid; unsigned long magic; unsigned long iocs; }; struct drm_version { int version_major; int version_minor; int version_patchlevel; __kernel_size_t name_len; char __attribute__((btf_type_tag("user"))) *name; __kernel_size_t date_len; char __attribute__((btf_type_tag("user"))) *date; __kernel_size_t desc_len; char __attribute__((btf_type_tag("user"))) *desc; }; struct drm_stats { unsigned long count; struct { unsigned long value; enum drm_stat_type type; } data[15]; }; struct drm_set_version { int drm_di_major; int drm_di_minor; int drm_dd_major; int drm_dd_minor; }; struct drm_get_cap { __u64 capability; __u64 value; }; struct drm_set_client_cap { __u64 capability; __u64 value; }; struct drm_mode_create_lease { __u64 object_ids; __u32 object_count; __u32 flags; __u32 lessee_id; __u32 fd; }; struct drm_mode_list_lessees { __u32 count_lessees; __u32 pad; __u64 lessees_ptr; }; struct drm_mode_get_lease { __u32 count_objects; __u32 pad; __u64 objects_ptr; }; struct drm_mode_revoke_lease { __u32 lessee_id; }; struct drmres_node { struct list_head entry; drmres_release_t release; const char *name; size_t size; }; struct drmres { struct drmres_node node; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; u8 data[0]; }; enum drm_mm_insert_mode { DRM_MM_INSERT_BEST = 0, DRM_MM_INSERT_LOW = 1, DRM_MM_INSERT_HIGH = 2, DRM_MM_INSERT_EVICT = 3, DRM_MM_INSERT_ONCE = 2147483648, DRM_MM_INSERT_HIGHEST = 2147483650, DRM_MM_INSERT_LOWEST = 2147483649, }; struct drm_mm_scan { struct drm_mm *mm; u64 size; u64 alignment; u64 remainder_mask; u64 range_start; u64 range_end; u64 hit_start; u64 hit_end; unsigned long color; enum drm_mm_insert_mode mode; }; struct drm_mode_card_res { __u64 fb_id_ptr; __u64 crtc_id_ptr; __u64 connector_id_ptr; __u64 encoder_id_ptr; __u32 count_fbs; __u32 count_crtcs; __u32 count_connectors; __u32 count_encoders; __u32 min_width; __u32 max_width; __u32 min_height; __u32 max_height; }; struct drm_mode_obj_get_properties { __u64 props_ptr; __u64 prop_values_ptr; __u32 count_props; __u32 obj_id; __u32 obj_type; }; struct analog_param_range { unsigned int min; unsigned int typ; unsigned int max; }; struct analog_param_field { unsigned int even; unsigned int odd; }; struct analog_parameters { unsigned int num_lines; unsigned int line_duration_ns; struct analog_param_range hact_ns; struct analog_param_range hfp_ns; struct analog_param_range hslen_ns; struct analog_param_range hbp_ns; struct analog_param_range hblk_ns; unsigned int bt601_hfp; struct analog_param_field vfp_lines; struct analog_param_field vslen_lines; struct analog_param_field vbp_lines; }; struct drm_named_mode { const char *name; 
unsigned int pixel_clock_khz; unsigned int xres; unsigned int yres; unsigned int flags; unsigned int tv_mode; }; enum drm_mode_analog { DRM_MODE_ANALOG_NTSC = 0, DRM_MODE_ANALOG_PAL = 1, }; enum drm_bus_flags { DRM_BUS_FLAG_DE_LOW = 1, DRM_BUS_FLAG_DE_HIGH = 2, DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE = 4, DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE = 8, DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE = 8, DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE = 4, DRM_BUS_FLAG_DATA_MSB_TO_LSB = 16, DRM_BUS_FLAG_DATA_LSB_TO_MSB = 32, DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE = 64, DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE = 128, DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE = 128, DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE = 64, DRM_BUS_FLAG_SHARP_SIGNALS = 256, }; struct drm_format_modifier { __u64 formats; __u32 offset; __u32 pad; __u64 modifier; }; struct drm_format_modifier_blob { __u32 version; __u32 flags; __u32 count_formats; __u32 formats_offset; __u32 count_modifiers; __u32 modifiers_offset; }; struct drm_mode_cursor2 { __u32 flags; __u32 crtc_id; __s32 x; __s32 y; __u32 width; __u32 height; __u32 handle; __s32 hot_x; __s32 hot_y; }; struct drm_mode_get_plane_res { __u64 plane_id_ptr; __u32 count_planes; }; struct drm_mode_get_plane { __u32 plane_id; __u32 crtc_id; __u32 fb_id; __u32 possible_crtcs; __u32 gamma_size; __u32 count_format_types; __u64 format_type_ptr; }; struct drm_mode_set_plane { __u32 plane_id; __u32 crtc_id; __u32 fb_id; __u32 flags; __s32 crtc_x; __s32 crtc_y; __u32 crtc_w; __u32 crtc_h; __u32 src_x; __u32 src_y; __u32 src_h; __u32 src_w; }; struct drm_mode_cursor { __u32 flags; __u32 crtc_id; __s32 x; __s32 y; __u32 width; __u32 height; __u32 handle; }; struct drm_mode_crtc_page_flip_target { __u32 crtc_id; __u32 fb_id; __u32 flags; __u32 sequence; __u64 user_data; }; struct drm_prime_member { struct dma_buf *dma_buf; uint32_t handle; struct rb_node dmabuf_rb; struct rb_node handle_rb; }; struct dma_buf_export_info { const char *exp_name; struct module *owner; const struct dma_buf_ops *ops; size_t size; int flags; struct dma_resv *resv; void *priv; unsigned long dma_map_attrs; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct drm_prime_handle { __u32 handle; __u32 flags; __s32 fd; }; struct drm_print_iterator { void *data; ssize_t start; ssize_t remain; ssize_t offset; }; struct drm_property_enum { uint64_t value; struct list_head head; char name[32]; }; struct drm_mode_get_property { __u64 values_ptr; __u64 enum_blob_ptr; __u32 prop_id; __u32 flags; char name[32]; __u32 count_values; __u32 count_enum_blobs; }; struct drm_mode_property_enum { __u64 value; char name[32]; }; struct drm_mode_get_blob { __u32 blob_id; __u32 length; __u64 data; }; struct drm_mode_create_blob { __u64 data; __u32 length; __u32 blob_id; }; struct drm_mode_destroy_blob { __u32 blob_id; }; enum dma_fence_flag_bits { DMA_FENCE_FLAG_SIGNALED_BIT = 0, DMA_FENCE_FLAG_TIMESTAMP_BIT = 1, DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT = 2, DMA_FENCE_FLAG_USER_BITS = 3, }; struct syncobj_wait_entry { struct list_head node; struct task_struct *task; struct dma_fence *fence; struct dma_fence_cb fence_cb; u64 point; }; struct drm_syncobj; struct syncobj_eventfd_entry { struct list_head node; struct dma_fence *fence; struct dma_fence_cb fence_cb; struct drm_syncobj *syncobj; struct eventfd_ctx *ev_fd_ctx; u64 point; u32 flags; }; struct drm_syncobj { struct kref refcount; struct dma_fence __attribute__((btf_type_tag("rcu"))) *fence; struct list_head cb_list; struct list_head ev_fd_list; spinlock_t lock; struct file *file; }; struct dma_fence_chain { struct dma_fence base; struct 
dma_fence __attribute__((btf_type_tag("rcu"))) *prev; u64 prev_seqno; struct dma_fence *fence; union { struct dma_fence_cb cb; struct irq_work work; }; spinlock_t lock; }; struct drm_syncobj_transfer { __u32 src_handle; __u32 dst_handle; __u64 src_point; __u64 dst_point; __u32 flags; __u32 pad; }; struct dma_fence_unwrap { struct dma_fence *chain; struct dma_fence *array; unsigned int index; }; struct drm_syncobj_wait { __u64 handles; __s64 timeout_nsec; __u32 count_handles; __u32 flags; __u32 first_signaled; __u32 pad; }; struct drm_syncobj_timeline_wait { __u64 handles; __u64 points; __s64 timeout_nsec; __u32 count_handles; __u32 flags; __u32 first_signaled; __u32 pad; }; struct drm_syncobj_create { __u32 handle; __u32 flags; }; struct drm_syncobj_destroy { __u32 handle; __u32 pad; }; struct drm_syncobj_handle { __u32 handle; __u32 flags; __s32 fd; __u32 pad; }; struct drm_syncobj_eventfd { __u32 handle; __u32 flags; __u64 point; __s32 fd; __u32 pad; }; struct drm_syncobj_array { __u64 handles; __u32 count_handles; __u32 pad; }; struct drm_syncobj_timeline_array { __u64 handles; __u64 points; __u32 count_handles; __u32 flags; }; struct class_attribute { struct attribute attr; ssize_t (*show)(const struct class *, const struct class_attribute *, char *); ssize_t (*store)(const struct class *, const struct class_attribute *, const char *, size_t); }; struct class_attribute_string { struct class_attribute attr; char *str; }; struct component_ops { int (*bind)(struct device *, struct device *, void *); void (*unbind)(struct device *, struct device *, void *); }; typedef void (*btf_trace_drm_vblank_event)(void *, int, unsigned int, ktime_t, bool); typedef void (*btf_trace_drm_vblank_event_queued)(void *, struct drm_file *, int, unsigned int); typedef void (*btf_trace_drm_vblank_event_delivered)(void *, struct drm_file *, int, unsigned int); struct trace_event_raw_drm_vblank_event { struct trace_entry ent; int crtc; unsigned int seq; ktime_t time; bool high_prec; char __data[0]; }; struct trace_event_raw_drm_vblank_event_queued { struct trace_entry ent; struct drm_file *file; int crtc; unsigned int seq; char __data[0]; }; struct trace_event_raw_drm_vblank_event_delivered { struct trace_entry ent; struct drm_file *file; int crtc; unsigned int seq; char __data[0]; }; struct trace_event_data_offsets_drm_vblank_event {}; struct trace_event_data_offsets_drm_vblank_event_queued {}; struct trace_event_data_offsets_drm_vblank_event_delivered {}; enum drm_vblank_seq_type { _DRM_VBLANK_ABSOLUTE = 0, _DRM_VBLANK_RELATIVE = 1, _DRM_VBLANK_HIGH_CRTC_MASK = 62, _DRM_VBLANK_EVENT = 67108864, _DRM_VBLANK_FLIP = 134217728, _DRM_VBLANK_NEXTONMISS = 268435456, _DRM_VBLANK_SECONDARY = 536870912, _DRM_VBLANK_SIGNAL = 1073741824, }; struct drm_wait_vblank_request { enum drm_vblank_seq_type type; unsigned int sequence; unsigned long signal; }; struct drm_wait_vblank_reply { enum drm_vblank_seq_type type; unsigned int sequence; long tval_sec; long tval_usec; }; union drm_wait_vblank { struct drm_wait_vblank_request request; struct drm_wait_vblank_reply reply; }; typedef bool (*drm_vblank_get_scanout_position_func)(struct drm_crtc *, bool, int *, int *, ktime_t *, ktime_t *, const struct drm_display_mode *); struct drm_modeset_ctl { __u32 crtc; __u32 cmd; }; struct drm_crtc_get_sequence { __u32 crtc_id; __u32 active; __u64 sequence; __s64 sequence_ns; }; struct drm_crtc_queue_sequence { __u32 crtc_id; __u32 flags; __u64 sequence; __u64 user_data; }; struct drm_vblank_work { struct kthread_work base; struct 
drm_vblank_crtc *vblank; u64 count; int cancelling; struct list_head node; }; struct drm_vma_offset_file { struct rb_node vm_rb; struct drm_file *vm_tag; unsigned long vm_count; }; struct drm_gpuva_op; struct drm_gpuva_fn_ops { struct drm_gpuva_op * (*op_alloc)(); void (*op_free)(struct drm_gpuva_op *); int (*sm_step_map)(struct drm_gpuva_op *, void *); int (*sm_step_remap)(struct drm_gpuva_op *, void *); int (*sm_step_unmap)(struct drm_gpuva_op *, void *); }; struct drm_gpuva_op_map { struct { u64 addr; u64 range; } va; struct { u64 offset; struct drm_gem_object *obj; } gem; }; struct drm_gpuva_op_unmap; struct drm_gpuva_op_remap { struct drm_gpuva_op_map *prev; struct drm_gpuva_op_map *next; struct drm_gpuva_op_unmap *unmap; }; struct drm_gpuva; struct drm_gpuva_op_unmap { struct drm_gpuva *va; bool keep; }; struct drm_gpuva_op_prefetch { struct drm_gpuva *va; }; enum drm_gpuva_op_type { DRM_GPUVA_OP_MAP = 0, DRM_GPUVA_OP_REMAP = 1, DRM_GPUVA_OP_UNMAP = 2, DRM_GPUVA_OP_PREFETCH = 3, }; struct drm_gpuva_op { struct list_head entry; enum drm_gpuva_op_type op; union { struct drm_gpuva_op_map map; struct drm_gpuva_op_remap remap; struct drm_gpuva_op_unmap unmap; struct drm_gpuva_op_prefetch prefetch; }; }; enum drm_gpuva_flags { DRM_GPUVA_INVALIDATED = 1, DRM_GPUVA_SPARSE = 2, DRM_GPUVA_USERBITS = 4, }; struct drm_gpuva_manager; struct drm_gpuva { struct drm_gpuva_manager *mgr; enum drm_gpuva_flags flags; struct { u64 addr; u64 range; } va; struct { u64 offset; struct drm_gem_object *obj; struct list_head entry; } gem; struct { struct rb_node node; struct list_head entry; u64 __subtree_last; } rb; }; struct drm_gpuva_manager { const char *name; u64 mm_start; u64 mm_range; struct { struct rb_root_cached tree; struct list_head list; } rb; struct drm_gpuva kernel_alloc_node; const struct drm_gpuva_fn_ops *ops; }; struct drm_gpuva_ops { struct list_head list; }; typedef int drm_ioctl_compat_t(struct file *, unsigned int, unsigned long); struct drm_version_32 { int version_major; int version_minor; int version_patchlevel; u32 name_len; u32 name; u32 date_len; u32 date; u32 desc_len; u32 desc; }; typedef struct drm_version_32 drm_version32_t; struct drm_unique32 { u32 unique_len; u32 unique; }; typedef struct drm_unique32 drm_unique32_t; struct drm_client32 { int idx; int auth; u32 pid; u32 uid; u32 magic; u32 iocs; }; typedef struct drm_client32 drm_client32_t; struct drm_stats32 { u32 count; struct { u32 value; enum drm_stat_type type; } data[15]; }; typedef struct drm_stats32 drm_stats32_t; struct drm_wait_vblank_request32 { enum drm_vblank_seq_type type; unsigned int sequence; u32 signal; }; struct drm_wait_vblank_reply32 { enum drm_vblank_seq_type type; unsigned int sequence; s32 tval_sec; s32 tval_usec; }; union drm_wait_vblank32 { struct drm_wait_vblank_request32 request; struct drm_wait_vblank_reply32 reply; }; typedef union drm_wait_vblank32 drm_wait_vblank32_t; enum { FB_BLANK_UNBLANK = 0, FB_BLANK_NORMAL = 1, FB_BLANK_VSYNC_SUSPEND = 2, FB_BLANK_HSYNC_SUSPEND = 3, FB_BLANK_POWERDOWN = 4, }; struct drm_panel_follower_funcs; struct drm_panel_follower { const struct drm_panel_follower_funcs *funcs; struct list_head list; struct drm_panel *panel; }; struct drm_panel_follower_funcs { int (*panel_prepared)(struct drm_panel_follower *); int (*panel_unpreparing)(struct drm_panel_follower *); }; enum drm_of_lvds_pixels { DRM_OF_LVDS_EVEN = 1, DRM_OF_LVDS_ODD = 2, }; enum drm_lvds_dual_link_pixels { DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS = 0, DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS = 1, }; enum 
mipi_dsi_pixel_format { MIPI_DSI_FMT_RGB888 = 0, MIPI_DSI_FMT_RGB666 = 1, MIPI_DSI_FMT_RGB666_PACKED = 2, MIPI_DSI_FMT_RGB565 = 3, }; struct component_master_ops { int (*bind)(struct device *); void (*unbind)(struct device *); }; struct of_endpoint { unsigned int port; unsigned int id; const struct device_node *local_node; }; struct mipi_dsi_host_ops; struct mipi_dsi_host { struct device *dev; const struct mipi_dsi_host_ops *ops; struct list_head list; }; struct mipi_dsi_device; struct mipi_dsi_msg; struct mipi_dsi_host_ops { int (*attach)(struct mipi_dsi_host *, struct mipi_dsi_device *); int (*detach)(struct mipi_dsi_host *, struct mipi_dsi_device *); ssize_t (*transfer)(struct mipi_dsi_host *, const struct mipi_dsi_msg *); }; struct drm_dsc_config; struct mipi_dsi_device { struct mipi_dsi_host *host; struct device dev; bool attached; char name[20]; unsigned int channel; unsigned int lanes; enum mipi_dsi_pixel_format format; unsigned long mode_flags; unsigned long hs_rate; unsigned long lp_rate; struct drm_dsc_config *dsc; }; struct mipi_dsi_msg { u8 channel; u8 type; u16 flags; size_t tx_len; const void *tx_buf; size_t rx_len; void *rx_buf; }; struct drm_info_list; struct drm_info_node { struct drm_minor *minor; const struct drm_info_list *info_ent; struct list_head list; struct dentry *dent; }; struct drm_info_list { const char *name; int (*show)(struct seq_file *, void *); u32 driver_features; void *data; }; struct drm_gem_shmem_object { struct drm_gem_object base; struct page **pages; unsigned int pages_use_count; int madv; struct list_head madv_list; struct sg_table *sgt; void *vaddr; unsigned int vmap_use_count; bool pages_mark_dirty_on_put: 1; bool pages_mark_accessed_on_put: 1; bool map_wc: 1; }; struct drm_bridge_connector { struct drm_connector base; struct drm_encoder *encoder; struct drm_bridge *bridge_edid; struct drm_bridge *bridge_hpd; struct drm_bridge *bridge_detect; struct drm_bridge *bridge_modes; }; struct drm_atomic_helper_damage_iter { struct drm_rect plane_src; const struct drm_rect *clips; uint32_t num_clips; uint32_t curr_clip; bool full_update; }; enum i2c_alert_protocol { I2C_PROTOCOL_SMBUS_ALERT = 0, I2C_PROTOCOL_SMBUS_HOST_NOTIFY = 1, }; struct i2c_device_id; struct i2c_board_info; struct i2c_driver { unsigned int class; int (*probe)(struct i2c_client *); void (*remove)(struct i2c_client *); void (*shutdown)(struct i2c_client *); void (*alert)(struct i2c_client *, enum i2c_alert_protocol, unsigned int); int (*command)(struct i2c_client *, unsigned int, void *); struct device_driver driver; const struct i2c_device_id *id_table; int (*detect)(struct i2c_client *, struct i2c_board_info *); const unsigned short *address_list; struct list_head clients; u32 flags; }; struct i2c_device_id { char name[20]; kernel_ulong_t driver_data; }; struct i2c_board_info { char type[20]; unsigned short flags; unsigned short addr; const char *dev_name; void *platform_data; struct device_node *of_node; struct fwnode_handle *fwnode; const struct software_node *swnode; const struct resource *resources; unsigned int num_resources; int irq; }; struct drm_encoder_slave; struct drm_i2c_encoder_driver { struct i2c_driver i2c_driver; int (*encoder_init)(struct i2c_client *, struct drm_device *, struct drm_encoder_slave *); }; struct drm_encoder_slave_funcs; struct drm_encoder_slave { struct drm_encoder base; const struct drm_encoder_slave_funcs *slave_funcs; void *slave_priv; void *bus_priv; }; struct drm_encoder_slave_funcs { void (*set_config)(struct drm_encoder *, void *); void 
(*destroy)(struct drm_encoder *); void (*dpms)(struct drm_encoder *, int); void (*save)(struct drm_encoder *); void (*restore)(struct drm_encoder *); bool (*mode_fixup)(struct drm_encoder *, const struct drm_display_mode *, struct drm_display_mode *); int (*mode_valid)(struct drm_encoder *, struct drm_display_mode *); void (*mode_set)(struct drm_encoder *, struct drm_display_mode *, struct drm_display_mode *); enum drm_connector_status (*detect)(struct drm_encoder *, struct drm_connector *); int (*get_modes)(struct drm_encoder *, struct drm_connector *); int (*create_resources)(struct drm_encoder *, struct drm_connector *); int (*set_property)(struct drm_encoder *, struct drm_connector *, struct drm_property *, uint64_t); }; struct drm_flip_work; typedef void (*drm_flip_func_t)(struct drm_flip_work *, void *); struct drm_flip_work { const char *name; drm_flip_func_t func; struct work_struct worker; struct list_head queued; struct list_head commited; spinlock_t lock; }; struct drm_flip_task { struct list_head node; void *data; }; struct drm_shadow_plane_state { struct drm_plane_state base; struct iosys_map map[4]; struct iosys_map data[4]; }; struct drm_simple_display_pipe_funcs; struct drm_simple_display_pipe { struct drm_crtc crtc; struct drm_plane plane; struct drm_encoder encoder; struct drm_connector *connector; const struct drm_simple_display_pipe_funcs *funcs; }; struct drm_simple_display_pipe_funcs { enum drm_mode_status (*mode_valid)(struct drm_simple_display_pipe *, const struct drm_display_mode *); void (*enable)(struct drm_simple_display_pipe *, struct drm_crtc_state *, struct drm_plane_state *); void (*disable)(struct drm_simple_display_pipe *); int (*check)(struct drm_simple_display_pipe *, struct drm_plane_state *, struct drm_crtc_state *); void (*update)(struct drm_simple_display_pipe *, struct drm_plane_state *); int (*prepare_fb)(struct drm_simple_display_pipe *, struct drm_plane_state *); void (*cleanup_fb)(struct drm_simple_display_pipe *, struct drm_plane_state *); int (*begin_fb_access)(struct drm_simple_display_pipe *, struct drm_plane_state *); void (*end_fb_access)(struct drm_simple_display_pipe *, struct drm_plane_state *); int (*enable_vblank)(struct drm_simple_display_pipe *); void (*disable_vblank)(struct drm_simple_display_pipe *); void (*reset_crtc)(struct drm_simple_display_pipe *); struct drm_crtc_state * (*duplicate_crtc_state)(struct drm_simple_display_pipe *); void (*destroy_crtc_state)(struct drm_simple_display_pipe *, struct drm_crtc_state *); void (*reset_plane)(struct drm_simple_display_pipe *); struct drm_plane_state * (*duplicate_plane_state)(struct drm_simple_display_pipe *); void (*destroy_plane_state)(struct drm_simple_display_pipe *, struct drm_plane_state *); }; struct drm_afbc_framebuffer { struct drm_framebuffer base; u32 block_width; u32 block_height; u32 aligned_width; u32 aligned_height; u32 offset; u32 afbc_size; }; struct drm_fb_helper_funcs; struct drm_fb_helper { struct drm_client_dev client; struct drm_client_buffer *buffer; struct drm_framebuffer *fb; struct drm_device *dev; const struct drm_fb_helper_funcs *funcs; struct fb_info *info; u32 pseudo_palette[17]; struct drm_clip_rect damage_clip; spinlock_t damage_lock; struct work_struct damage_work; struct work_struct resume_work; struct mutex lock; struct list_head kernel_fb_list; bool delayed_hotplug; bool deferred_setup; int preferred_bpp; }; struct drm_fb_helper_surface_size; struct drm_fb_helper_funcs { int (*fb_probe)(struct drm_fb_helper *, struct drm_fb_helper_surface_size *); 
int (*fb_dirty)(struct drm_fb_helper *, struct drm_clip_rect *); }; struct drm_fb_helper_surface_size { u32 fb_width; u32 fb_height; u32 surface_width; u32 surface_height; u32 surface_bpp; u32 surface_depth; }; struct ewma_psr_time { unsigned long internal; }; struct drm_self_refresh_data { struct drm_crtc *crtc; struct delayed_work entry_work; struct mutex avg_mutex; struct ewma_psr_time entry_avg_ms; struct ewma_psr_time exit_avg_ms; }; struct panel_bridge { struct drm_bridge bridge; struct drm_connector connector; struct drm_panel *panel; u32 connector_type; }; enum { MIPI_DSI_V_SYNC_START = 1, MIPI_DSI_V_SYNC_END = 17, MIPI_DSI_H_SYNC_START = 33, MIPI_DSI_H_SYNC_END = 49, MIPI_DSI_COMPRESSION_MODE = 7, MIPI_DSI_END_OF_TRANSMISSION = 8, MIPI_DSI_COLOR_MODE_OFF = 2, MIPI_DSI_COLOR_MODE_ON = 18, MIPI_DSI_SHUTDOWN_PERIPHERAL = 34, MIPI_DSI_TURN_ON_PERIPHERAL = 50, MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM = 3, MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM = 19, MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM = 35, MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM = 4, MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM = 20, MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM = 36, MIPI_DSI_DCS_SHORT_WRITE = 5, MIPI_DSI_DCS_SHORT_WRITE_PARAM = 21, MIPI_DSI_DCS_READ = 6, MIPI_DSI_EXECUTE_QUEUE = 22, MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE = 55, MIPI_DSI_NULL_PACKET = 9, MIPI_DSI_BLANKING_PACKET = 25, MIPI_DSI_GENERIC_LONG_WRITE = 41, MIPI_DSI_DCS_LONG_WRITE = 57, MIPI_DSI_PICTURE_PARAMETER_SET = 10, MIPI_DSI_COMPRESSED_PIXEL_STREAM = 11, MIPI_DSI_LOOSELY_PACKED_PIXEL_STREAM_YCBCR20 = 12, MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR24 = 28, MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16 = 44, MIPI_DSI_PACKED_PIXEL_STREAM_30 = 13, MIPI_DSI_PACKED_PIXEL_STREAM_36 = 29, MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12 = 61, MIPI_DSI_PACKED_PIXEL_STREAM_16 = 14, MIPI_DSI_PACKED_PIXEL_STREAM_18 = 30, MIPI_DSI_PIXEL_STREAM_3BYTE_18 = 46, MIPI_DSI_PACKED_PIXEL_STREAM_24 = 62, }; enum { MIPI_DCS_NOP = 0, MIPI_DCS_SOFT_RESET = 1, MIPI_DCS_GET_COMPRESSION_MODE = 3, MIPI_DCS_GET_DISPLAY_ID = 4, MIPI_DCS_GET_ERROR_COUNT_ON_DSI = 5, MIPI_DCS_GET_RED_CHANNEL = 6, MIPI_DCS_GET_GREEN_CHANNEL = 7, MIPI_DCS_GET_BLUE_CHANNEL = 8, MIPI_DCS_GET_DISPLAY_STATUS = 9, MIPI_DCS_GET_POWER_MODE = 10, MIPI_DCS_GET_ADDRESS_MODE = 11, MIPI_DCS_GET_PIXEL_FORMAT = 12, MIPI_DCS_GET_DISPLAY_MODE = 13, MIPI_DCS_GET_SIGNAL_MODE = 14, MIPI_DCS_GET_DIAGNOSTIC_RESULT = 15, MIPI_DCS_ENTER_SLEEP_MODE = 16, MIPI_DCS_EXIT_SLEEP_MODE = 17, MIPI_DCS_ENTER_PARTIAL_MODE = 18, MIPI_DCS_ENTER_NORMAL_MODE = 19, MIPI_DCS_GET_IMAGE_CHECKSUM_RGB = 20, MIPI_DCS_GET_IMAGE_CHECKSUM_CT = 21, MIPI_DCS_EXIT_INVERT_MODE = 32, MIPI_DCS_ENTER_INVERT_MODE = 33, MIPI_DCS_SET_GAMMA_CURVE = 38, MIPI_DCS_SET_DISPLAY_OFF = 40, MIPI_DCS_SET_DISPLAY_ON = 41, MIPI_DCS_SET_COLUMN_ADDRESS = 42, MIPI_DCS_SET_PAGE_ADDRESS = 43, MIPI_DCS_WRITE_MEMORY_START = 44, MIPI_DCS_WRITE_LUT = 45, MIPI_DCS_READ_MEMORY_START = 46, MIPI_DCS_SET_PARTIAL_ROWS = 48, MIPI_DCS_SET_PARTIAL_COLUMNS = 49, MIPI_DCS_SET_SCROLL_AREA = 51, MIPI_DCS_SET_TEAR_OFF = 52, MIPI_DCS_SET_TEAR_ON = 53, MIPI_DCS_SET_ADDRESS_MODE = 54, MIPI_DCS_SET_SCROLL_START = 55, MIPI_DCS_EXIT_IDLE_MODE = 56, MIPI_DCS_ENTER_IDLE_MODE = 57, MIPI_DCS_SET_PIXEL_FORMAT = 58, MIPI_DCS_WRITE_MEMORY_CONTINUE = 60, MIPI_DCS_SET_3D_CONTROL = 61, MIPI_DCS_READ_MEMORY_CONTINUE = 62, MIPI_DCS_GET_3D_CONTROL = 63, MIPI_DCS_SET_VSYNC_TIMING = 64, MIPI_DCS_SET_TEAR_SCANLINE = 68, MIPI_DCS_GET_SCANLINE = 69, MIPI_DCS_SET_DISPLAY_BRIGHTNESS = 81, MIPI_DCS_GET_DISPLAY_BRIGHTNESS = 82, MIPI_DCS_WRITE_CONTROL_DISPLAY = 83, 
MIPI_DCS_GET_CONTROL_DISPLAY = 84, MIPI_DCS_WRITE_POWER_SAVE = 85, MIPI_DCS_GET_POWER_SAVE = 86, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS = 94, MIPI_DCS_GET_CABC_MIN_BRIGHTNESS = 95, MIPI_DCS_READ_DDB_START = 161, MIPI_DCS_READ_PPS_START = 162, MIPI_DCS_READ_DDB_CONTINUE = 168, MIPI_DCS_READ_PPS_CONTINUE = 169, }; enum mipi_dsi_dcs_tear_mode { MIPI_DSI_DCS_TEAR_MODE_VBLANK = 0, MIPI_DSI_DCS_TEAR_MODE_VHBLANK = 1, }; struct drm_dsc_rc_range_parameters { u8 range_min_qp; u8 range_max_qp; u8 range_bpg_offset; }; struct drm_dsc_config { u8 line_buf_depth; u8 bits_per_component; bool convert_rgb; u8 slice_count; u16 slice_width; u16 slice_height; bool simple_422; u16 pic_width; u16 pic_height; u8 rc_tgt_offset_high; u8 rc_tgt_offset_low; u16 bits_per_pixel; u8 rc_edge_factor; u8 rc_quant_incr_limit1; u8 rc_quant_incr_limit0; u16 initial_xmit_delay; u16 initial_dec_delay; bool block_pred_enable; u8 first_line_bpg_offset; u16 initial_offset; u16 rc_buf_thresh[14]; struct drm_dsc_rc_range_parameters rc_range_params[15]; u16 rc_model_size; u8 flatness_min_qp; u8 flatness_max_qp; u8 initial_scale_value; u16 scale_decrement_interval; u16 scale_increment_interval; u16 nfl_bpg_offset; u16 slice_bpg_offset; u16 final_offset; bool vbr_enable; u8 mux_word_size; u16 slice_chunk_size; u16 rc_bits; u8 dsc_version_minor; u8 dsc_version_major; bool native_422; bool native_420; u8 second_line_bpg_offset; u16 nsl_bpg_offset; u16 second_line_offset_adj; }; struct mipi_dsi_driver { struct device_driver driver; int (*probe)(struct mipi_dsi_device *); void (*remove)(struct mipi_dsi_device *); void (*shutdown)(struct mipi_dsi_device *); }; struct mipi_dsi_device_info { char type[20]; u32 channel; struct device_node *node; }; struct mipi_dsi_packet { size_t size; u8 header[4]; size_t payload_length; const u8 *payload; }; struct drm_dsc_picture_parameter_set { u8 dsc_version; u8 pps_identifier; u8 pps_reserved; u8 pps_3; u8 pps_4; u8 bits_per_pixel_low; __be16 pic_height; __be16 pic_width; __be16 slice_height; __be16 slice_width; __be16 chunk_size; u8 initial_xmit_delay_high; u8 initial_xmit_delay_low; __be16 initial_dec_delay; u8 pps20_reserved; u8 initial_scale_value; __be16 scale_increment_interval; u8 scale_decrement_interval_high; u8 scale_decrement_interval_low; u8 pps26_reserved; u8 first_line_bpg_offset; __be16 nfl_bpg_offset; __be16 slice_bpg_offset; __be16 initial_offset; __be16 final_offset; u8 flatness_min_qp; u8 flatness_max_qp; __be16 rc_model_size; u8 rc_edge_factor; u8 rc_quant_incr_limit0; u8 rc_quant_incr_limit1; u8 rc_tgt_offset; u8 rc_buf_thresh[14]; __be16 rc_range_parameters[15]; u8 native_422_420; u8 second_line_bpg_offset; __be16 nsl_bpg_offset; __be16 second_line_offset_adj; u32 pps_long_94_reserved; u32 pps_long_98_reserved; u32 pps_long_102_reserved; u32 pps_long_106_reserved; u32 pps_long_110_reserved; u32 pps_long_114_reserved; u32 pps_long_118_reserved; u32 pps_long_122_reserved; __be16 pps_short_126_reserved; } __attribute__((packed)); struct ttm_kmap_iter; struct ttm_kmap_iter_ops { void (*map_local)(struct ttm_kmap_iter *, struct iosys_map *, unsigned long); void (*unmap_local)(struct ttm_kmap_iter *, struct iosys_map *); bool maps_tt; }; struct ttm_kmap_iter { const struct ttm_kmap_iter_ops *ops; }; enum ttm_caching { ttm_uncached = 0, ttm_write_combined = 1, ttm_cached = 2, }; enum ttm_bo_type { ttm_bo_type_device = 0, ttm_bo_type_kernel = 1, ttm_bo_type_sg = 2, }; struct ttm_tt; struct ttm_kmap_iter_tt { struct ttm_kmap_iter base; struct ttm_tt *tt; pgprot_t prot; }; struct ttm_tt { struct 
page **pages; uint32_t page_flags; uint32_t num_pages; struct sg_table *sg; dma_addr_t *dma_address; struct file *swap_storage; enum ttm_caching caching; }; struct ttm_device; struct ttm_resource; struct ttm_lru_bulk_move; struct ttm_buffer_object { struct drm_gem_object base; struct ttm_device *bdev; enum ttm_bo_type type; uint32_t page_alignment; void (*destroy)(struct ttm_buffer_object *); struct kref kref; struct ttm_resource *resource; struct ttm_tt *ttm; bool deleted; struct ttm_lru_bulk_move *bulk_move; unsigned int priority; unsigned int pin_count; struct work_struct delayed_delete; struct sg_table *sg; }; struct ttm_resource_manager_func; struct ttm_resource_manager { bool use_type; bool use_tt; struct ttm_device *bdev; uint64_t size; const struct ttm_resource_manager_func *func; spinlock_t move_lock; struct dma_fence *move; struct list_head lru[4]; uint64_t usage; }; struct ttm_pool; struct ttm_pool_type { struct ttm_pool *pool; unsigned int order; enum ttm_caching caching; struct list_head shrinker_list; spinlock_t lock; struct list_head pages; }; struct ttm_pool { struct device *dev; int nid; bool use_dma_alloc; bool use_dma32; struct { struct ttm_pool_type orders[11]; } caching[3]; }; struct ttm_device_funcs; struct ttm_device { struct list_head device_list; const struct ttm_device_funcs *funcs; struct ttm_resource_manager sysman; struct ttm_resource_manager *man_drv[8]; struct drm_vma_offset_manager *vma_manager; struct ttm_pool pool; spinlock_t lru_lock; struct list_head pinned; struct address_space *dev_mapping; struct workqueue_struct *wq; }; struct ttm_operation_ctx; struct ttm_place; struct ttm_placement; struct ttm_device_funcs { struct ttm_tt * (*ttm_tt_create)(struct ttm_buffer_object *, uint32_t); int (*ttm_tt_populate)(struct ttm_device *, struct ttm_tt *, struct ttm_operation_ctx *); void (*ttm_tt_unpopulate)(struct ttm_device *, struct ttm_tt *); void (*ttm_tt_destroy)(struct ttm_device *, struct ttm_tt *); bool (*eviction_valuable)(struct ttm_buffer_object *, const struct ttm_place *); void (*evict_flags)(struct ttm_buffer_object *, struct ttm_placement *); int (*move)(struct ttm_buffer_object *, bool, struct ttm_operation_ctx *, struct ttm_resource *, struct ttm_place *); void (*delete_mem_notify)(struct ttm_buffer_object *); void (*swap_notify)(struct ttm_buffer_object *); int (*io_mem_reserve)(struct ttm_device *, struct ttm_resource *); void (*io_mem_free)(struct ttm_device *, struct ttm_resource *); unsigned long (*io_mem_pfn)(struct ttm_buffer_object *, unsigned long); int (*access_memory)(struct ttm_buffer_object *, unsigned long, void *, int, int); void (*release_notify)(struct ttm_buffer_object *); }; struct ttm_operation_ctx { bool interruptible; bool no_wait_gpu; bool gfp_retry_mayfail; bool allow_res_evict; bool force_alloc; struct dma_resv *resv; uint64_t bytes_moved; }; struct ttm_place { unsigned int fpfn; unsigned int lpfn; uint32_t mem_type; uint32_t flags; }; struct ttm_bus_placement { void *addr; phys_addr_t offset; bool is_iomem; enum ttm_caching caching; }; struct ttm_resource { unsigned long start; size_t size; uint32_t mem_type; uint32_t placement; struct ttm_bus_placement bus; struct ttm_buffer_object *bo; struct list_head lru; }; struct ttm_resource_manager_func { int (*alloc)(struct ttm_resource_manager *, struct ttm_buffer_object *, const struct ttm_place *, struct ttm_resource **); void (*free)(struct ttm_resource_manager *, struct ttm_resource *); bool (*intersects)(struct ttm_resource_manager *, struct ttm_resource *, const struct 
ttm_place *, size_t); bool (*compatible)(struct ttm_resource_manager *, struct ttm_resource *, const struct ttm_place *, size_t); void (*debug)(struct ttm_resource_manager *, struct drm_printer *); }; struct ttm_lru_bulk_move_pos { struct ttm_resource *first; struct ttm_resource *last; }; struct ttm_lru_bulk_move { struct ttm_lru_bulk_move_pos pos[32]; }; struct ttm_placement { unsigned int num_placement; const struct ttm_place *placement; unsigned int num_busy_placement; const struct ttm_place *busy_placement; }; struct dma_resv_iter { struct dma_resv *obj; enum dma_resv_usage usage; struct dma_fence *fence; enum dma_resv_usage fence_usage; unsigned int index; struct dma_resv_list *fences; unsigned int num_fences; bool is_restarted; }; struct ttm_resource_cursor { unsigned int priority; }; struct ttm_bo_kmap_obj { void *virtual; struct page *page; enum { ttm_bo_map_iomap = 129, ttm_bo_map_vmap = 2, ttm_bo_map_kmap = 3, ttm_bo_map_premapped = 132, } bo_kmap_type; struct ttm_buffer_object *bo; }; struct ttm_transfer_obj { struct ttm_buffer_object base; struct ttm_buffer_object *bo; }; struct ttm_kmap_iter_linear_io { struct ttm_kmap_iter base; struct iosys_map dmap; bool needs_unmap; }; struct ttm_validate_buffer { struct list_head head; struct ttm_buffer_object *bo; unsigned int num_shared; }; struct ttm_range_mgr_node { struct ttm_resource base; struct drm_mm_node mm_nodes[0]; }; struct ttm_range_manager { struct ttm_resource_manager manager; struct drm_mm mm; spinlock_t lock; }; struct io_mapping; struct ttm_kmap_iter_iomap { struct ttm_kmap_iter base; struct io_mapping *iomap; struct sg_table *st; resource_size_t start; struct { struct scatterlist *sg; unsigned long i; unsigned long end; unsigned long offs; } cache; }; struct io_mapping { resource_size_t base; unsigned long size; pgprot_t prot; void *iomem; }; struct ttm_pool_dma { dma_addr_t addr; unsigned long vaddr; }; struct ttm_global { struct page *dummy_read_page; struct list_head device_list; atomic_t bo_count; }; typedef void (*btf_trace_gpu_mem_total)(void *, uint32_t, uint32_t, uint64_t); struct trace_event_raw_gpu_mem_total { struct trace_entry ent; uint32_t gpu_id; uint32_t pid; uint64_t size; char __data[0]; }; struct trace_event_data_offsets_gpu_mem_total {}; struct aggregate_device; struct component { struct list_head node; struct aggregate_device *adev; bool bound; const struct component_ops *ops; int subcomponent; struct device *dev; }; struct component_match; struct aggregate_device { struct list_head node; bool bound; const struct component_master_ops *ops; struct device *parent; struct component_match *match; }; struct component_match_array; struct component_match { size_t alloc; size_t num; struct component_match_array *compare; }; struct component_match_array { void *data; int (*compare)(struct device *, void *); int (*compare_typed)(struct device *, int, void *); void (*release)(struct device *, void *); struct component *component; bool duplicate; }; struct device_private { struct klist klist_children; struct klist_node knode_parent; struct klist_node knode_driver; struct klist_node knode_bus; struct klist_node knode_class; struct list_head deferred_probe; struct device_driver *async_driver; char *deferred_probe_reason; struct device *device; u8 dead: 1; }; struct driver_private { struct kobject kobj; struct klist klist_devices; struct klist_node knode_bus; struct module_kobject *mkobj; struct device_driver *driver; }; struct wake_irq { struct device *dev; unsigned int status; int irq; const char *name; }; enum 
dpm_order { DPM_ORDER_NONE = 0, DPM_ORDER_DEV_AFTER_PARENT = 1, DPM_ORDER_PARENT_BEFORE_DEV = 2, DPM_ORDER_DEV_LAST = 3, }; struct dev_ext_attribute { struct device_attribute attr; void *var; }; struct fwnode_link { struct fwnode_handle *supplier; struct list_head s_hook; struct fwnode_handle *consumer; struct list_head c_hook; u8 flags; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; }; struct class_dir { struct kobject kobj; const struct class *class; }; struct root_device { struct device dev; struct module *owner; }; struct subsys_private { struct kset subsys; struct kset *devices_kset; struct list_head interfaces; struct mutex mutex; struct kset *drivers_kset; struct klist klist_devices; struct klist klist_drivers; struct blocking_notifier_head bus_notifier; unsigned int drivers_autoprobe: 1; const struct bus_type *bus; struct device *dev_root; struct kset glue_dirs; const struct class *class; struct lock_class_key lock_key; }; union device_attr_group_devres { const struct attribute_group *group; const struct attribute_group **groups; }; struct subsys_interface { const char *name; struct bus_type *subsys; struct list_head node; int (*add_dev)(struct device *, struct subsys_interface *); void (*remove_dev)(struct device *, struct subsys_interface *); }; struct subsys_dev_iter { struct klist_iter ki; const struct device_type *type; }; struct device_attach_data { struct device *dev; bool check_async; bool want_async; bool have_async; }; struct class_compat { struct kobject *kobj; }; struct platform_object { struct platform_device pdev; char name[0]; }; struct irq_affinity_devres { unsigned int count; unsigned int irq[0]; }; struct cpu_attr { struct device_attribute attr; const struct cpumask * const map; }; struct probe; struct kobj_map { struct probe *probes[255]; struct mutex *lock; }; struct probe { struct probe *next; dev_t dev; unsigned long range; struct module *owner; kobj_probe_t *get; int (*lock)(dev_t, void *); void *data; }; struct devres_node { struct list_head entry; dr_release_t release; const char *name; size_t size; }; struct devres { struct devres_node node; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; u8 data[0]; }; struct devres_group { struct devres_node node[2]; void *id; int color; }; struct action_devres { void *data; void (*action)(void *); }; struct pages_devres { unsigned long addr; unsigned int order; }; struct attribute_container; struct internal_container { struct klist_node node; struct attribute_container *cont; struct device classdev; }; struct attribute_container { struct list_head node; struct klist containers; struct class *class; const struct attribute_group *grp; struct device_attribute **attrs; int (*match)(struct attribute_container *, struct device *); unsigned long flags; }; struct transport_container; struct transport_class { struct class class; int (*setup)(struct transport_container *, struct device *, struct device *); int (*configure)(struct transport_container *, struct device *, struct device *); int (*remove)(struct transport_container *, struct device *, struct device *); }; struct transport_container { struct attribute_container ac; const struct attribute_group *statistics; }; struct anon_transport_class { struct transport_class tclass; struct attribute_container container; }; struct container_dev { struct device dev; int (*offline)(struct container_dev *); }; typedef enum { PHY_INTERFACE_MODE_NA = 0, PHY_INTERFACE_MODE_INTERNAL = 1, 
PHY_INTERFACE_MODE_MII = 2, PHY_INTERFACE_MODE_GMII = 3, PHY_INTERFACE_MODE_SGMII = 4, PHY_INTERFACE_MODE_TBI = 5, PHY_INTERFACE_MODE_REVMII = 6, PHY_INTERFACE_MODE_RMII = 7, PHY_INTERFACE_MODE_REVRMII = 8, PHY_INTERFACE_MODE_RGMII = 9, PHY_INTERFACE_MODE_RGMII_ID = 10, PHY_INTERFACE_MODE_RGMII_RXID = 11, PHY_INTERFACE_MODE_RGMII_TXID = 12, PHY_INTERFACE_MODE_RTBI = 13, PHY_INTERFACE_MODE_SMII = 14, PHY_INTERFACE_MODE_XGMII = 15, PHY_INTERFACE_MODE_XLGMII = 16, PHY_INTERFACE_MODE_MOCA = 17, PHY_INTERFACE_MODE_PSGMII = 18, PHY_INTERFACE_MODE_QSGMII = 19, PHY_INTERFACE_MODE_TRGMII = 20, PHY_INTERFACE_MODE_100BASEX = 21, PHY_INTERFACE_MODE_1000BASEX = 22, PHY_INTERFACE_MODE_2500BASEX = 23, PHY_INTERFACE_MODE_5GBASER = 24, PHY_INTERFACE_MODE_RXAUI = 25, PHY_INTERFACE_MODE_XAUI = 26, PHY_INTERFACE_MODE_10GBASER = 27, PHY_INTERFACE_MODE_25GBASER = 28, PHY_INTERFACE_MODE_USXGMII = 29, PHY_INTERFACE_MODE_10GKR = 30, PHY_INTERFACE_MODE_QUSGMII = 31, PHY_INTERFACE_MODE_1000BASEKX = 32, PHY_INTERFACE_MODE_MAX = 33, } phy_interface_t; typedef void * (*devcon_match_fn_t)(const struct fwnode_handle *, const char *, void *); struct cache_type_info { const char *size_prop; const char *line_size_props[2]; const char *nr_sets_prop; }; struct swnode { struct kobject kobj; struct fwnode_handle fwnode; const struct software_node *node; int id; struct ida child_ids; struct list_head entry; struct list_head children; struct swnode *parent; unsigned int allocated: 1; unsigned int managed: 1; }; struct software_node_ref_args { const struct software_node *node; unsigned int nargs; u64 args[8]; }; struct auxiliary_device; struct auxiliary_device_id; struct auxiliary_driver { int (*probe)(struct auxiliary_device *, const struct auxiliary_device_id *); void (*remove)(struct auxiliary_device *); void (*shutdown)(struct auxiliary_device *); int (*suspend)(struct auxiliary_device *, pm_message_t); int (*resume)(struct auxiliary_device *); const char *name; struct device_driver driver; const struct auxiliary_device_id *id_table; }; struct auxiliary_device { struct device dev; const char *name; u32 id; }; struct auxiliary_device_id { char name[32]; kernel_ulong_t driver_data; }; struct dev_pm_domain_attach_data { const char * const *pd_names; const u32 num_pd_names; const u32 pd_flags; }; struct dev_pm_domain_list { struct device **pd_devs; struct device_link **pd_links; u32 num_pds; }; enum pm_qos_flags_status { PM_QOS_FLAGS_UNDEFINED = -1, PM_QOS_FLAGS_NONE = 0, PM_QOS_FLAGS_SOME = 1, PM_QOS_FLAGS_ALL = 2, }; typedef int (*pm_callback_t)(struct device *); struct suspend_stats { int success; int fail; int failed_freeze; int failed_prepare; int failed_suspend; int failed_suspend_late; int failed_suspend_noirq; int failed_resume; int failed_resume_early; int failed_resume_noirq; int last_failed_dev; char failed_devs[80]; int last_failed_errno; int errno[2]; int last_failed_step; u64 last_hw_sleep; u64 total_hw_sleep; u64 max_hw_sleep; enum suspend_stat_step failed_steps[2]; }; enum genpd_notication { GENPD_NOTIFY_PRE_OFF = 0, GENPD_NOTIFY_OFF = 1, GENPD_NOTIFY_PRE_ON = 2, GENPD_NOTIFY_ON = 3, }; struct gpd_link { struct generic_pm_domain *parent; struct list_head parent_node; struct generic_pm_domain *child; struct list_head child_node; unsigned int performance_state; unsigned int prev_performance_state; }; struct of_genpd_provider { struct list_head link; struct device_node *node; genpd_xlate_t xlate; void *data; }; struct gpd_timing_data; struct generic_pm_domain_data { struct pm_domain_data base; struct gpd_timing_data 
*td; struct notifier_block nb; struct notifier_block *power_nb; int cpu; unsigned int performance_state; unsigned int default_pstate; unsigned int rpm_pstate; void *data; }; struct gpd_timing_data { s64 suspend_latency_ns; s64 resume_latency_ns; s64 effective_constraint_ns; ktime_t next_wakeup; bool constraint_changed; bool cached_suspend_ok; }; enum pce_status { PCE_STATUS_NONE = 0, PCE_STATUS_ACQUIRED = 1, PCE_STATUS_PREPARED = 2, PCE_STATUS_ENABLED = 3, PCE_STATUS_ERROR = 4, }; struct pm_clock_entry { struct list_head node; char *con_id; struct clk *clk; enum pce_status status; bool enabled_when_prepared; }; struct pm_clk_notifier_block { struct notifier_block nb; struct dev_pm_domain *pm_domain; char *con_ids[0]; }; struct firmware_fallback_config { unsigned int force_sysfs_fallback; unsigned int ignore_sysfs_fallback; int old_timeout; int loading_timeout; }; struct firmware_cache { spinlock_t lock; struct list_head head; int state; }; enum fw_status { FW_STATUS_UNKNOWN = 0, FW_STATUS_LOADING = 1, FW_STATUS_DONE = 2, FW_STATUS_ABORTED = 3, }; enum fw_opt { FW_OPT_UEVENT = 1, FW_OPT_NOWAIT = 2, FW_OPT_USERHELPER = 4, FW_OPT_NO_WARN = 8, FW_OPT_NOCACHE = 16, FW_OPT_NOFALLBACK_SYSFS = 32, FW_OPT_FALLBACK_PLATFORM = 64, FW_OPT_PARTIAL = 128, }; struct fw_state { struct completion completion; enum fw_status status; }; struct fw_priv { struct kref ref; struct list_head list; struct firmware_cache *fwc; struct fw_state fw_st; void *data; size_t size; size_t allocated_size; size_t offset; u32 opt_flags; bool is_paged_buf; struct page **pages; int nr_pages; int page_array_size; bool need_uevent; struct list_head pending_list; const char *fw_name; }; struct firmware; struct firmware_work { struct work_struct work; struct module *module; const char *name; struct device *device; void *context; void (*cont)(const struct firmware *, void *); u32 opt_flags; }; struct firmware { size_t size; const u8 *data; void *priv; }; struct fw_sysfs { bool nowait; struct device dev; struct fw_priv *fw_priv; struct firmware *fw; void *fw_upload_priv; }; struct builtin_fw { char *name; void *data; unsigned long size; }; struct for_each_memory_block_cb_data { walk_memory_blocks_func_t func; void *arg; }; typedef void (*btf_trace_regmap_reg_write)(void *, struct regmap *, unsigned int, unsigned int); struct regmap_format { size_t buf_size; size_t reg_bytes; size_t pad_bytes; size_t val_bytes; s8 reg_shift; void (*format_write)(struct regmap *, unsigned int, unsigned int); void (*format_reg)(void *, unsigned int, unsigned int); void (*format_val)(void *, unsigned int, unsigned int); unsigned int (*parse_val)(const void *); void (*parse_inplace)(void *); }; struct regcache_ops; struct reg_sequence; struct hwspinlock; struct regmap { union { struct mutex mutex; struct { spinlock_t spinlock; unsigned long spinlock_flags; }; struct { raw_spinlock_t raw_spinlock; unsigned long raw_spinlock_flags; }; }; regmap_lock lock; regmap_unlock unlock; void *lock_arg; gfp_t alloc_flags; unsigned int reg_base; struct device *dev; void *work_buf; struct regmap_format format; const struct regmap_bus *bus; void *bus_context; const char *name; bool async; spinlock_t async_lock; wait_queue_head_t async_waitq; struct list_head async_list; struct list_head async_free; int async_ret; bool debugfs_disable; struct dentry *debugfs; const char *debugfs_name; unsigned int debugfs_reg_len; unsigned int debugfs_val_len; unsigned int debugfs_tot_len; struct list_head debugfs_off_cache; struct mutex cache_lock; unsigned int max_register; bool 
(*writeable_reg)(struct device *, unsigned int); bool (*readable_reg)(struct device *, unsigned int); bool (*volatile_reg)(struct device *, unsigned int); bool (*precious_reg)(struct device *, unsigned int); bool (*writeable_noinc_reg)(struct device *, unsigned int); bool (*readable_noinc_reg)(struct device *, unsigned int); const struct regmap_access_table *wr_table; const struct regmap_access_table *rd_table; const struct regmap_access_table *volatile_table; const struct regmap_access_table *precious_table; const struct regmap_access_table *wr_noinc_table; const struct regmap_access_table *rd_noinc_table; int (*reg_read)(void *, unsigned int, unsigned int *); int (*reg_write)(void *, unsigned int, unsigned int); int (*reg_update_bits)(void *, unsigned int, unsigned int, unsigned int); int (*read)(void *, const void *, size_t, void *, size_t); int (*write)(void *, const void *, size_t); bool defer_caching; unsigned long read_flag_mask; unsigned long write_flag_mask; int reg_shift; int reg_stride; int reg_stride_order; bool force_write_field; const struct regcache_ops *cache_ops; enum regcache_type cache_type; unsigned int cache_size_raw; unsigned int cache_word_size; unsigned int num_reg_defaults; unsigned int num_reg_defaults_raw; bool cache_only; bool cache_bypass; bool cache_free; struct reg_default *reg_defaults; const void *reg_defaults_raw; void *cache; bool cache_dirty; bool no_sync_defaults; struct reg_sequence *patch; int patch_regs; bool use_single_read; bool use_single_write; bool can_multi_write; size_t max_raw_read; size_t max_raw_write; struct rb_root range_tree; void *selector_work_buf; struct hwspinlock *hwlock; bool can_sleep; }; struct regmap_async { struct list_head list; struct regmap *map; void *work_buf; }; struct regcache_ops { const char *name; enum regcache_type type; int (*init)(struct regmap *); int (*exit)(struct regmap *); void (*debugfs_init)(struct regmap *); int (*read)(struct regmap *, unsigned int, unsigned int *); int (*write)(struct regmap *, unsigned int, unsigned int); int (*sync)(struct regmap *, unsigned int, unsigned int); int (*drop)(struct regmap *, unsigned int, unsigned int); }; struct reg_sequence { unsigned int reg; unsigned int def; unsigned int delay_us; }; typedef void (*btf_trace_regmap_reg_read)(void *, struct regmap *, unsigned int, unsigned int); typedef void (*btf_trace_regmap_reg_read_cache)(void *, struct regmap *, unsigned int, unsigned int); typedef void (*btf_trace_regmap_bulk_write)(void *, struct regmap *, unsigned int, const void *, int); typedef void (*btf_trace_regmap_bulk_read)(void *, struct regmap *, unsigned int, const void *, int); typedef void (*btf_trace_regmap_hw_read_start)(void *, struct regmap *, unsigned int, int); typedef void (*btf_trace_regmap_hw_read_done)(void *, struct regmap *, unsigned int, int); typedef void (*btf_trace_regmap_hw_write_start)(void *, struct regmap *, unsigned int, int); typedef void (*btf_trace_regmap_hw_write_done)(void *, struct regmap *, unsigned int, int); typedef void (*btf_trace_regcache_sync)(void *, struct regmap *, const char *, const char *); typedef void (*btf_trace_regmap_cache_only)(void *, struct regmap *, bool); typedef void (*btf_trace_regmap_cache_bypass)(void *, struct regmap *, bool); typedef void (*btf_trace_regmap_async_write_start)(void *, struct regmap *, unsigned int, int); typedef void (*btf_trace_regmap_async_io_complete)(void *, struct regmap *); typedef void (*btf_trace_regmap_async_complete_start)(void *, struct regmap *); typedef void 
(*btf_trace_regmap_async_complete_done)(void *, struct regmap *); typedef void (*btf_trace_regcache_drop_region)(void *, struct regmap *, unsigned int, unsigned int); struct trace_event_raw_regmap_reg { struct trace_entry ent; u32 __data_loc_name; unsigned int reg; unsigned int val; char __data[0]; }; struct trace_event_raw_regmap_bulk { struct trace_entry ent; u32 __data_loc_name; unsigned int reg; u32 __data_loc_buf; int val_len; char __data[0]; }; struct trace_event_raw_regmap_block { struct trace_entry ent; u32 __data_loc_name; unsigned int reg; int count; char __data[0]; }; struct trace_event_raw_regcache_sync { struct trace_entry ent; u32 __data_loc_name; u32 __data_loc_status; u32 __data_loc_type; char __data[0]; }; struct trace_event_raw_regmap_bool { struct trace_entry ent; u32 __data_loc_name; int flag; char __data[0]; }; struct trace_event_raw_regmap_async { struct trace_entry ent; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_regcache_drop_region { struct trace_entry ent; u32 __data_loc_name; unsigned int from; unsigned int to; char __data[0]; }; struct regmap_range_node { struct rb_node node; const char *name; struct regmap *map; unsigned int range_min; unsigned int range_max; unsigned int selector_reg; unsigned int selector_mask; int selector_shift; unsigned int window_start; unsigned int window_len; }; struct trace_event_data_offsets_regmap_reg { u32 name; }; struct trace_event_data_offsets_regmap_bulk { u32 name; u32 buf; }; struct trace_event_data_offsets_regmap_block { u32 name; }; struct trace_event_data_offsets_regmap_bool { u32 name; }; struct trace_event_data_offsets_regmap_async { u32 name; }; struct trace_event_data_offsets_regcache_drop_region { u32 name; }; struct regmap_field { struct regmap *regmap; unsigned int mask; unsigned int shift; unsigned int reg; unsigned int id_size; unsigned int id_offset; }; struct reg_field { unsigned int reg; unsigned int lsb; unsigned int msb; unsigned int id_size; unsigned int id_offset; }; struct trace_event_data_offsets_regcache_sync { u32 name; u32 status; u32 type; }; struct regcache_rbtree_node { void *block; unsigned long *cache_present; unsigned int base_reg; unsigned int blklen; struct rb_node node; }; struct regcache_rbtree_ctx { struct rb_root root; struct regcache_rbtree_node *cached_rbnode; }; struct regmap_debugfs_node { struct regmap *map; struct list_head link; }; struct regmap_debugfs_off_cache { struct list_head list; off_t min; off_t max; unsigned int base_reg; unsigned int max_reg; }; struct spi_delay { u16 value; u8 unit; }; struct spi_controller; struct spi_statistics; struct spi_device { struct device dev; struct spi_controller *controller; struct spi_controller *master; u32 max_speed_hz; u8 chip_select; u8 bits_per_word; bool rt; u32 mode; int irq; void *controller_state; void *controller_data; char modalias[32]; const char *driver_override; struct gpio_desc *cs_gpiod; struct spi_delay word_delay; struct spi_delay cs_setup; struct spi_delay cs_hold; struct spi_delay cs_inactive; struct spi_statistics __attribute__((btf_type_tag("percpu"))) *pcpu_statistics; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct spi_message; struct spi_transfer; struct spi_controller_mem_ops; struct spi_controller_mem_caps; struct spi_controller { struct device dev; struct list_head list; s16 bus_num; u16 num_chipselect; u16 dma_alignment; u32 mode_bits; u32 buswidth_override_bits; u32 bits_per_word_mask; u32 min_speed_hz; u32 max_speed_hz; u16 flags; bool devm_allocated; union { bool slave; bool 
target; }; size_t (*max_transfer_size)(struct spi_device *); size_t (*max_message_size)(struct spi_device *); struct mutex io_mutex; struct mutex add_lock; spinlock_t bus_lock_spinlock; struct mutex bus_lock_mutex; bool bus_lock_flag; int (*setup)(struct spi_device *); int (*set_cs_timing)(struct spi_device *); int (*transfer)(struct spi_device *, struct spi_message *); void (*cleanup)(struct spi_device *); bool (*can_dma)(struct spi_controller *, struct spi_device *, struct spi_transfer *); struct device *dma_map_dev; struct device *cur_rx_dma_dev; struct device *cur_tx_dma_dev; bool queued; struct kthread_worker *kworker; struct kthread_work pump_messages; spinlock_t queue_lock; struct list_head queue; struct spi_message *cur_msg; struct completion cur_msg_completion; bool cur_msg_incomplete; bool cur_msg_need_completion; bool busy; bool running; bool rt; bool auto_runtime_pm; bool cur_msg_mapped; char last_cs; bool last_cs_mode_high; bool fallback; struct completion xfer_completion; size_t max_dma_len; int (*prepare_transfer_hardware)(struct spi_controller *); int (*transfer_one_message)(struct spi_controller *, struct spi_message *); int (*unprepare_transfer_hardware)(struct spi_controller *); int (*prepare_message)(struct spi_controller *, struct spi_message *); int (*unprepare_message)(struct spi_controller *, struct spi_message *); union { int (*slave_abort)(struct spi_controller *); int (*target_abort)(struct spi_controller *); }; void (*set_cs)(struct spi_device *, bool); int (*transfer_one)(struct spi_controller *, struct spi_device *, struct spi_transfer *); void (*handle_err)(struct spi_controller *, struct spi_message *); const struct spi_controller_mem_ops *mem_ops; const struct spi_controller_mem_caps *mem_caps; struct gpio_desc **cs_gpiods; bool use_gpio_descriptors; s8 unused_native_cs; s8 max_native_cs; struct spi_statistics __attribute__((btf_type_tag("percpu"))) *pcpu_statistics; struct dma_chan *dma_tx; struct dma_chan *dma_rx; void *dummy_rx; void *dummy_tx; int (*fw_translate_cs)(struct spi_controller *, unsigned int); bool ptp_sts_supported; unsigned long irq_flags; bool queue_empty; bool must_async; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct ptp_system_timestamp; struct spi_transfer { const void *tx_buf; void *rx_buf; unsigned int len; u16 error; dma_addr_t tx_dma; dma_addr_t rx_dma; struct sg_table tx_sg; struct sg_table rx_sg; unsigned int dummy_data: 1; unsigned int cs_off: 1; unsigned int cs_change: 1; unsigned int tx_nbits: 3; unsigned int rx_nbits: 3; unsigned int timestamped: 1; u8 bits_per_word; struct spi_delay delay; struct spi_delay cs_change_delay; struct spi_delay word_delay; u32 speed_hz; u32 effective_speed_hz; unsigned int ptp_sts_word_pre; unsigned int ptp_sts_word_post; struct ptp_system_timestamp *ptp_sts; struct list_head transfer_list; u64 android_kabi_reserved1; }; struct spi_message { struct list_head transfers; struct spi_device *spi; unsigned int is_dma_mapped: 1; bool prepared; int status; void (*complete)(void *); void *context; unsigned int frame_length; unsigned int actual_length; struct list_head queue; void *state; struct list_head resources; u64 android_kabi_reserved1; struct spi_transfer t[0]; }; struct spi_mem; struct spi_mem_op; struct spi_mem_dirmap_desc; struct spi_controller_mem_ops { int (*adjust_op_size)(struct spi_mem *, struct spi_mem_op *); bool (*supports_op)(struct spi_mem *, const struct spi_mem_op *); int (*exec_op)(struct spi_mem *, const struct spi_mem_op *); const char * (*get_name)(struct 
spi_mem *); int (*dirmap_create)(struct spi_mem_dirmap_desc *); void (*dirmap_destroy)(struct spi_mem_dirmap_desc *); ssize_t (*dirmap_read)(struct spi_mem_dirmap_desc *, u64, size_t, void *); ssize_t (*dirmap_write)(struct spi_mem_dirmap_desc *, u64, size_t, const void *); int (*poll_status)(struct spi_mem *, const struct spi_mem_op *, u16, u16, unsigned long, unsigned long, unsigned long); }; struct spi_controller_mem_caps { bool dtr; bool ecc; }; struct spi_statistics { struct u64_stats_sync syncp; u64_stats_t messages; u64_stats_t transfers; u64_stats_t errors; u64_stats_t timedout; u64_stats_t spi_sync; u64_stats_t spi_sync_immediate; u64_stats_t spi_async; u64_stats_t bytes; u64_stats_t bytes_rx; u64_stats_t bytes_tx; u64_stats_t transfer_bytes_histo[17]; u64_stats_t transfers_split_maxsize; }; struct regmap_async_spi { struct regmap_async core; struct spi_message m; struct spi_transfer t[2]; }; struct spmi_controller; struct spmi_device { struct device dev; struct spmi_controller *ctrl; u8 usid; }; struct spmi_controller { struct device dev; unsigned int nr; int (*cmd)(struct spmi_controller *, u8, u8); int (*read_cmd)(struct spmi_controller *, u8, u8, u16, u8 *, size_t); int (*write_cmd)(struct spmi_controller *, u8, u8, u16, const u8 *, size_t); u64 android_kabi_reserved1; }; struct regmap_mmio_context { void *regs; unsigned int val_bytes; bool big_endian; bool attached_clk; struct clk *clk; void (*reg_write)(struct regmap_mmio_context *, unsigned int, unsigned int); unsigned int (*reg_read)(struct regmap_mmio_context *, unsigned int); }; struct regmap_irq_chip; struct regmap_irq_chip_data { struct mutex lock; struct irq_chip irq_chip; struct regmap *map; const struct regmap_irq_chip *chip; int irq_base; struct irq_domain *domain; int irq; int wake_count; void *status_reg_buf; unsigned int *main_status_buf; unsigned int *status_buf; unsigned int *mask_buf; unsigned int *mask_buf_def; unsigned int *wake_buf; unsigned int *type_buf; unsigned int *type_buf_def; unsigned int **config_buf; unsigned int irq_reg_stride; unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *, unsigned int, int); unsigned int clear_status: 1; }; struct regmap_irq_sub_irq_map; struct regmap_irq; struct regmap_irq_chip { const char *name; unsigned int main_status; unsigned int num_main_status_bits; struct regmap_irq_sub_irq_map *sub_reg_offsets; int num_main_regs; unsigned int status_base; unsigned int mask_base; unsigned int unmask_base; unsigned int ack_base; unsigned int wake_base; const unsigned int *config_base; unsigned int irq_reg_stride; unsigned int init_ack_masked: 1; unsigned int mask_unmask_non_inverted: 1; unsigned int use_ack: 1; unsigned int ack_invert: 1; unsigned int clear_ack: 1; unsigned int status_invert: 1; unsigned int wake_invert: 1; unsigned int type_in_mask: 1; unsigned int clear_on_unmask: 1; unsigned int runtime_pm: 1; unsigned int no_status: 1; int num_regs; const struct regmap_irq *irqs; int num_irqs; int num_config_bases; int num_config_regs; int (*handle_pre_irq)(void *); int (*handle_post_irq)(void *); int (*handle_mask_sync)(int, unsigned int, unsigned int, void *); int (*set_type_config)(unsigned int **, unsigned int, const struct regmap_irq *, int, void *); unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *, unsigned int, int); void *irq_drv_data; }; struct regmap_irq_sub_irq_map { unsigned int num_regs; unsigned int *offset; }; struct regmap_irq_type { unsigned int type_reg_offset; unsigned int type_reg_mask; unsigned int type_rising_val; unsigned int 
type_falling_val; unsigned int type_level_low_val; unsigned int type_level_high_val; unsigned int types_supported; }; struct regmap_irq { unsigned int reg_offset; unsigned int mask; struct regmap_irq_type type; }; struct soc_device { struct device dev; struct soc_device_attribute *attr; int soc_dev_num; }; struct devcd_entry { struct device devcd_dev; void *data; size_t datalen; struct mutex mutex; bool delete_work; struct module *owner; ssize_t (*read)(char *, loff_t, size_t, void *, size_t); void (*free)(void *); struct delayed_work del_wk; struct device *failing_dev; }; struct platform_msi_priv_data { struct device *dev; void *host_data; msi_alloc_info_t arg; irq_write_msi_msg_t write_msg; int devid; }; typedef void (*btf_trace_thermal_pressure_update)(void *, int, unsigned long); struct cpu_topology { int thread_id; int core_id; int cluster_id; int package_id; cpumask_t thread_sibling; cpumask_t core_sibling; cpumask_t cluster_sibling; cpumask_t llc_sibling; }; struct trace_event_raw_thermal_pressure_update { struct trace_entry ent; unsigned long thermal_pressure; int cpu; char __data[0]; }; struct trace_event_data_offsets_thermal_pressure_update {}; typedef void (*btf_trace_devres_log)(void *, struct device *, const char *, void *, const char *, size_t); struct trace_event_raw_devres { struct trace_entry ent; u32 __data_loc_devname; struct device *dev; const char *op; void *node; const char *name; size_t size; char __data[0]; }; struct trace_event_data_offsets_devres { u32 devname; }; struct brd_device { int brd_number; struct gendisk *brd_disk; struct list_head brd_list; struct xarray brd_pages; u64 brd_nr_pages; }; enum { Lo_unbound = 0, Lo_bound = 1, Lo_rundown = 2, Lo_deleting = 3, }; enum { LO_FLAGS_READ_ONLY = 1, LO_FLAGS_AUTOCLEAR = 4, LO_FLAGS_PARTSCAN = 8, LO_FLAGS_DIRECT_IO = 16, }; struct loop_device { int lo_number; loff_t lo_offset; loff_t lo_sizelimit; int lo_flags; char lo_file_name[64]; struct file *lo_backing_file; struct block_device *lo_device; gfp_t old_gfp_mask; spinlock_t lo_lock; int lo_state; spinlock_t lo_work_lock; struct workqueue_struct *workqueue; struct work_struct rootcg_work; struct list_head rootcg_cmd_list; struct list_head idle_worker_list; struct rb_root worker_tree; struct timer_list timer; bool use_dio; bool sysfs_inited; struct request_queue *lo_queue; struct blk_mq_tag_set tag_set; struct gendisk *lo_disk; struct mutex lo_mutex; bool idr_visible; }; struct loop_worker { struct rb_node rb_node; struct work_struct work; struct list_head cmd_list; struct list_head idle_list; struct loop_device *lo; struct cgroup_subsys_state *blkcg_css; unsigned long last_ran_at; }; struct loop_cmd { struct list_head list_entry; bool use_aio; atomic_t ref; long ret; struct kiocb iocb; struct bio_vec *bvec; struct cgroup_subsys_state *blkcg_css; struct cgroup_subsys_state *memcg_css; }; struct compat_loop_info { compat_int_t lo_number; compat_dev_t lo_device; compat_ulong_t lo_inode; compat_dev_t lo_rdevice; compat_int_t lo_offset; compat_int_t lo_encrypt_type; compat_int_t lo_encrypt_key_size; compat_int_t lo_flags; char lo_name[64]; unsigned char lo_encrypt_key[32]; compat_ulong_t lo_init[2]; char reserved[4]; }; typedef unsigned int __kernel_old_dev_t; struct loop_info { int lo_number; __kernel_old_dev_t lo_device; unsigned long lo_inode; __kernel_old_dev_t lo_rdevice; int lo_offset; int lo_encrypt_type; int lo_encrypt_key_size; int lo_flags; char lo_name[64]; unsigned char lo_encrypt_key[32]; unsigned long lo_init[2]; char reserved[4]; }; struct loop_info64 { 
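/* Descriptive note, not part of the bpftool-generated output: this layout is
 * shared with userspace, exchanged via the LOOP_GET_STATUS64 / LOOP_SET_STATUS64
 * ioctls and embedded in struct loop_config for LOOP_CONFIGURE. */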
__u64 lo_device; __u64 lo_inode; __u64 lo_rdevice; __u64 lo_offset; __u64 lo_sizelimit; __u32 lo_number; __u32 lo_encrypt_type; __u32 lo_encrypt_key_size; __u32 lo_flags; __u8 lo_file_name[64]; __u8 lo_crypt_name[64]; __u8 lo_encrypt_key[32]; __u64 lo_init[2]; }; struct loop_config { __u32 fd; __u32 block_size; struct loop_info64 info; __u64 __reserved[8]; }; struct ublk_io { __u64 addr; unsigned int flags; int res; struct io_uring_cmd *cmd; }; struct ublk_device; struct ublk_queue { int q_id; int q_depth; unsigned long flags; struct task_struct *ubq_daemon; char *io_cmd_buf; struct llist_head io_cmds; unsigned long io_addr; unsigned int max_io_sz; bool force_abort; bool timeout; unsigned short nr_io_ready; spinlock_t cancel_lock; struct ublk_device *dev; struct ublk_io ios[0]; }; struct ublksrv_ctrl_dev_info { __u16 nr_hw_queues; __u16 queue_depth; __u16 state; __u16 pad0; __u32 max_io_buf_bytes; __u32 dev_id; __s32 ublksrv_pid; __u32 pad1; __u64 flags; __u64 ublksrv_flags; __u32 owner_uid; __u32 owner_gid; __u64 reserved1; __u64 reserved2; }; struct ublk_param_basic { __u32 attrs; __u8 logical_bs_shift; __u8 physical_bs_shift; __u8 io_opt_shift; __u8 io_min_shift; __u32 max_sectors; __u32 chunk_sectors; __u64 dev_sectors; __u64 virt_boundary_mask; }; struct ublk_param_discard { __u32 discard_alignment; __u32 discard_granularity; __u32 max_discard_sectors; __u32 max_write_zeroes_sectors; __u16 max_discard_segments; __u16 reserved0; }; struct ublk_param_devt { __u32 char_major; __u32 char_minor; __u32 disk_major; __u32 disk_minor; }; struct ublk_param_zoned { __u32 max_open_zones; __u32 max_active_zones; __u32 max_zone_append_sectors; __u8 reserved[20]; }; struct ublk_params { __u32 len; __u32 types; struct ublk_param_basic basic; struct ublk_param_discard discard; struct ublk_param_devt devt; struct ublk_param_zoned zoned; }; struct ublk_device { struct gendisk *ub_disk; char *__queues; unsigned int queue_size; struct ublksrv_ctrl_dev_info dev_info; struct blk_mq_tag_set tag_set; struct cdev cdev; struct device cdev_dev; unsigned long state; int ub_number; struct mutex mutex; spinlock_t mm_lock; struct mm_struct *mm; struct ublk_params params; struct completion completion; unsigned int nr_queues_ready; unsigned int nr_privileged_daemon; struct delayed_work monitor_work; struct work_struct quiesce_work; struct work_struct stop_work; }; struct ublksrv_ctrl_cmd { __u32 dev_id; __u16 queue_id; __u16 len; __u64 addr; __u64 data[1]; __u16 dev_path_len; __u16 pad; __u32 reserved; }; struct ublk_rq_data { struct llist_node node; struct kref ref; __u64 sector; __u32 operation; __u32 nr_zones; }; struct ublksrv_io_desc { __u32 op_flags; union { __u32 nr_sectors; __u32 nr_zones; }; __u64 start_sector; __u64 addr; }; struct ublk_uring_cmd_pdu { struct ublk_queue *ubq; }; struct ublk_io_iter { struct page *pages[32]; struct bio *bio; struct bvec_iter iter; }; struct ublksrv_io_cmd { __u16 q_id; __u16 tag; __s32 result; union { __u64 addr; __u64 zone_append_lba; }; }; struct ublk_params_header { __u32 len; __u32 types; }; struct sram_config { int (*init)(); bool map_only_reserved; }; struct sram_reserve { struct list_head list; u32 start; u32 size; struct resource res; bool export; bool pool; bool protect_exec; const char *label; }; struct sram_partition { void *base; struct gen_pool *pool; struct bin_attribute battr; struct mutex lock; struct list_head list; }; struct sram_dev { const struct sram_config *config; struct device *dev; void *virt_base; bool no_memory_wc; struct gen_pool *pool; struct 
sram_partition *partition; u32 partitions; }; struct io_stats { u64 read_bytes; u64 write_bytes; u64 rchar; u64 wchar; u64 fsync; }; struct uid_entry { uid_t uid; u64 utime; u64 stime; int state; struct io_stats io[4]; struct hlist_node hash; }; struct update_stats_work { uid_t uid; struct task_io_accounting ioac; u64 utime; u64 stime; struct llist_node node; }; struct task_entry { char comm[256]; pid_t pid; struct io_stats io[4]; struct hlist_node hash; }; struct mfd_cell_acpi_match; struct mfd_cell { const char *name; int id; int level; int (*suspend)(struct platform_device *); int (*resume)(struct platform_device *); void *platform_data; size_t pdata_size; const struct mfd_cell_acpi_match *acpi_match; const struct software_node *swnode; const char *of_compatible; u64 of_reg; bool use_of_reg; int num_resources; const struct resource *resources; bool ignore_resource_conflicts; bool pm_runtime_no_callbacks; int num_parent_supplies; const char * const *parent_supplies; }; struct mfd_cell_acpi_match { const char *pnpid; const unsigned long long adr; }; struct mfd_of_node_entry { struct list_head list; struct device *dev; struct device_node *np; }; struct syscon { struct device_node *np; struct regmap *regmap; struct reset_control *reset; struct list_head list; }; struct syscon_platform_data { const char *label; }; enum nvdimm_passphrase_type { NVDIMM_USER = 0, NVDIMM_MASTER = 1, }; enum nvdimm_fwa_state { NVDIMM_FWA_INVALID = 0, NVDIMM_FWA_IDLE = 1, NVDIMM_FWA_ARMED = 2, NVDIMM_FWA_BUSY = 3, NVDIMM_FWA_ARM_OVERFLOW = 4, }; enum nvdimm_fwa_result { NVDIMM_FWA_RESULT_INVALID = 0, NVDIMM_FWA_RESULT_NONE = 1, NVDIMM_FWA_RESULT_SUCCESS = 2, NVDIMM_FWA_RESULT_NOTSTAGED = 3, NVDIMM_FWA_RESULT_NEEDRESET = 4, NVDIMM_FWA_RESULT_FAIL = 5, }; enum nvdimm_fwa_trigger { NVDIMM_FWA_ARM = 0, NVDIMM_FWA_DISARM = 1, }; enum nvdimm_fwa_capability { NVDIMM_FWA_CAP_INVALID = 0, NVDIMM_FWA_CAP_NONE = 1, NVDIMM_FWA_CAP_QUIESCE = 2, NVDIMM_FWA_CAP_LIVE = 3, }; enum { ND_CMD_IMPLEMENTED = 0, ND_CMD_ARS_CAP = 1, ND_CMD_ARS_START = 2, ND_CMD_ARS_STATUS = 3, ND_CMD_CLEAR_ERROR = 4, ND_CMD_SMART = 1, ND_CMD_SMART_THRESHOLD = 2, ND_CMD_DIMM_FLAGS = 3, ND_CMD_GET_CONFIG_SIZE = 4, ND_CMD_GET_CONFIG_DATA = 5, ND_CMD_SET_CONFIG_DATA = 6, ND_CMD_VENDOR_EFFECT_LOG_SIZE = 7, ND_CMD_VENDOR_EFFECT_LOG = 8, ND_CMD_VENDOR = 9, ND_CMD_CALL = 10, }; struct nvdimm_bus; struct nvdimm_map { struct nvdimm_bus *nvdimm_bus; struct list_head list; resource_size_t offset; unsigned long flags; size_t size; union { void *mem; void *iomem; }; struct kref kref; }; struct badrange { struct list_head list; spinlock_t lock; }; struct nvdimm_bus_descriptor; struct nvdimm_bus { struct nvdimm_bus_descriptor *nd_desc; wait_queue_head_t wait; struct list_head list; struct device dev; int id; int probe_active; atomic_t ioctl_active; struct list_head mapping_list; struct mutex reconfig_mutex; struct badrange badrange; }; struct nvdimm; typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *, struct nvdimm *, unsigned int, void *, unsigned int, int *); struct nvdimm_bus_fw_ops; struct nvdimm_bus_descriptor { const struct attribute_group **attr_groups; unsigned long cmd_mask; unsigned long dimm_family_mask; unsigned long bus_family_mask; struct module *module; char *provider_name; struct device_node *of_node; ndctl_fn ndctl; int (*flush_probe)(struct nvdimm_bus_descriptor *); int (*clear_to_send)(struct nvdimm_bus_descriptor *, struct nvdimm *, unsigned int, void *); const struct nvdimm_bus_fw_ops *fw_ops; }; struct nvdimm_security_ops; struct nvdimm_fw_ops; 
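/*
 * Illustrative note, not part of the bpftool-generated output: the
 * btf_trace_regmap_reg_write typedef earlier in this header mirrors the BTF
 * signature of the regmap_reg_write tracepoint (the first void * is the
 * raw-tracepoint context, the remaining parameters are the event arguments).
 * A minimal sketch of a libbpf-style consumer of this header is shown below;
 * it assumes the usual libbpf helper headers (<bpf/bpf_helpers.h>,
 * <bpf/bpf_tracing.h>, <bpf/bpf_core_read.h>) are included from the .bpf.c
 * file next to this vmlinux.h, and the program and section names are
 * placeholders chosen for the example.
 *
 *   char LICENSE[] SEC("license") = "GPL";
 *
 *   SEC("tp_btf/regmap_reg_write")
 *   int BPF_PROG(on_regmap_reg_write, struct regmap *map,
 *                unsigned int reg, unsigned int val)
 *   {
 *           unsigned int max = BPF_CORE_READ(map, max_register);
 *
 *           bpf_printk("regmap write reg=0x%x val=0x%x max=0x%x", reg, val, max);
 *           return 0;
 *   }
 */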
struct nvdimm { unsigned long flags; void *provider_data; unsigned long cmd_mask; struct device dev; atomic_t busy; int id; int num_flush; struct resource *flush_wpq; const char *dimm_id; struct { const struct nvdimm_security_ops *ops; unsigned long flags; unsigned long ext_flags; unsigned int overwrite_tmo; struct kernfs_node *overwrite_state; } sec; struct delayed_work dwork; const struct nvdimm_fw_ops *fw_ops; }; struct nvdimm_key_data; struct nvdimm_security_ops { unsigned long (*get_flags)(struct nvdimm *, enum nvdimm_passphrase_type); int (*freeze)(struct nvdimm *); int (*change_key)(struct nvdimm *, const struct nvdimm_key_data *, const struct nvdimm_key_data *, enum nvdimm_passphrase_type); int (*unlock)(struct nvdimm *, const struct nvdimm_key_data *); int (*disable)(struct nvdimm *, const struct nvdimm_key_data *); int (*erase)(struct nvdimm *, const struct nvdimm_key_data *, enum nvdimm_passphrase_type); int (*overwrite)(struct nvdimm *, const struct nvdimm_key_data *); int (*query_overwrite)(struct nvdimm *); int (*disable_master)(struct nvdimm *, const struct nvdimm_key_data *); }; struct nvdimm_key_data { u8 data[32]; }; struct nvdimm_fw_ops { enum nvdimm_fwa_state (*activate_state)(struct nvdimm *); enum nvdimm_fwa_result (*activate_result)(struct nvdimm *); int (*arm)(struct nvdimm *, enum nvdimm_fwa_trigger); }; struct nvdimm_bus_fw_ops { enum nvdimm_fwa_state (*activate_state)(struct nvdimm_bus_descriptor *); enum nvdimm_fwa_capability (*capability)(struct nvdimm_bus_descriptor *); int (*activate)(struct nvdimm_bus_descriptor *); }; struct nd_cmd_desc { int in_num; int out_num; u32 in_sizes[5]; int out_sizes[5]; }; enum nvdimm_event { NVDIMM_REVALIDATE_POISON = 0, NVDIMM_REVALIDATE_REGION = 1, }; struct nd_device_driver { struct device_driver drv; unsigned long type; int (*probe)(struct device *); void (*remove)(struct device *); void (*shutdown)(struct device *); void (*notify)(struct device *, enum nvdimm_event); }; enum nd_async_mode { ND_SYNC = 0, ND_ASYNC = 1, }; enum nd_ioctl_mode { BUS_IOCTL = 0, DIMM_IOCTL = 1, }; enum { NDD_UNARMED = 1, NDD_LOCKED = 2, NDD_SECURITY_OVERWRITE = 3, NDD_WORK_PENDING = 4, NDD_LABELING = 6, NDD_INCOHERENT = 7, NDD_REGISTER_SYNC = 8, ND_IOCTL_MAX_BUFLEN = 4194304, ND_CMD_MAX_ELEM = 5, ND_CMD_MAX_ENVELOPE = 256, ND_MAX_MAPPINGS = 32, ND_REGION_PAGEMAP = 0, ND_REGION_PERSIST_CACHE = 1, ND_REGION_PERSIST_MEMCTRL = 2, ND_REGION_ASYNC = 3, ND_REGION_CXL = 4, DPA_RESOURCE_ADJUSTED = 1, }; enum nvdimm_claim_class { NVDIMM_CCLASS_NONE = 0, NVDIMM_CCLASS_BTT = 1, NVDIMM_CCLASS_BTT2 = 2, NVDIMM_CCLASS_PFN = 3, NVDIMM_CCLASS_DAX = 4, NVDIMM_CCLASS_UNKNOWN = 5, }; enum nd_pfn_mode { PFN_MODE_NONE = 0, PFN_MODE_RAM = 1, PFN_MODE_PMEM = 2, }; struct nd_cmd_pkg { __u64 nd_family; __u64 nd_command; __u32 nd_size_in; __u32 nd_size_out; __u32 nd_reserved2[9]; __u32 nd_fw_size; unsigned char nd_payload[0]; }; struct nd_cmd_clear_error { __u64 address; __u64 length; __u32 status; __u8 reserved[4]; __u64 cleared; }; struct nd_namespace_common { int force_raw; struct device dev; struct device *claim; enum nvdimm_claim_class claim_class; int (*rw_bytes)(struct nd_namespace_common *, resource_size_t, void *, size_t, int, unsigned long); }; struct nd_namespace_io { struct nd_namespace_common common; struct resource res; resource_size_t size; void *addr; struct badblocks bb; }; struct badrange_entry { u64 start; u64 length; struct list_head list; }; struct clear_badblocks_context { resource_size_t phys; resource_size_t cleared; }; struct nvdimm_drvdata; struct 
nd_mapping { struct nvdimm *nvdimm; u64 start; u64 size; int position; struct list_head labels; struct mutex lock; struct nvdimm_drvdata *ndd; }; struct nd_interleave_set; struct nd_percpu_lane; struct nd_region { struct device dev; struct ida ns_ida; struct ida btt_ida; struct ida pfn_ida; struct ida dax_ida; unsigned long flags; struct device *ns_seed; struct device *btt_seed; struct device *pfn_seed; struct device *dax_seed; unsigned long align; u16 ndr_mappings; u64 ndr_size; u64 ndr_start; int id; int num_lanes; int ro; int numa_node; int target_node; void *provider_data; struct kernfs_node *bb_state; struct badblocks bb; struct nd_interleave_set *nd_set; struct nd_percpu_lane __attribute__((btf_type_tag("percpu"))) *lane; int (*flush)(struct nd_region *, struct bio *); struct nd_mapping mapping[0]; }; struct nd_interleave_set { u64 cookie1; u64 cookie2; u64 altcookie; guid_t type_guid; }; struct nd_percpu_lane { int count; spinlock_t lock; }; struct nd_cmd_get_config_size { __u32 status; __u32 config_size; __u32 max_xfer; }; struct nvdimm_drvdata { struct device *dev; int nslabel_size; struct nd_cmd_get_config_size nsarea; void *data; bool cxl; int ns_current; int ns_next; struct resource dpa; struct kref kref; }; struct nd_cmd_vendor_hdr { __u32 opcode; __u32 in_length; __u8 in_buf[0]; }; struct nd_cmd_set_config_hdr { __u32 in_offset; __u32 in_length; __u8 in_buf[0]; }; struct btt; struct nd_btt { struct device dev; struct nd_namespace_common *ndns; struct btt *btt; unsigned long lbasize; u64 size; uuid_t *uuid; int id; int initial_offset; u16 version_major; u16 version_minor; }; struct nd_cmd_ars_cap { __u64 address; __u64 length; __u32 status; __u32 max_ars_out; __u32 clear_err_unit; __u16 flags; __u16 reserved; }; struct nd_pfn_sb; struct nd_pfn { int id; uuid_t *uuid; struct device dev; unsigned long align; unsigned long npfns; enum nd_pfn_mode mode; struct nd_pfn_sb *pfn_sb; struct nd_namespace_common *ndns; }; struct nd_pfn_sb { u8 signature[16]; u8 uuid[16]; u8 parent_uuid[16]; __le32 flags; __le16 version_major; __le16 version_minor; __le64 dataoff; __le64 npfns; __le32 mode; __le32 start_pad; __le32 end_trunc; __le32 align; __le32 page_size; __le16 page_struct_size; u8 padding[3994]; __le64 checksum; }; struct nd_dax { struct nd_pfn nd_pfn; }; enum nvdimm_security_bits { NVDIMM_SECURITY_DISABLED = 0, NVDIMM_SECURITY_UNLOCKED = 1, NVDIMM_SECURITY_LOCKED = 2, NVDIMM_SECURITY_FROZEN = 3, NVDIMM_SECURITY_OVERWRITE = 4, }; struct nd_cmd_get_config_data_hdr { __u32 in_offset; __u32 in_length; __u32 status; __u8 out_buf[0]; }; struct nd_label_id { char id[50]; }; struct nvdimm_pmu { struct pmu pmu; struct device *dev; int cpu; struct hlist_node node; enum cpuhp_state cpuhp_state; struct cpumask arch_cpumask; }; enum { ND_MAX_LANES = 256, INT_LBASIZE_ALIGNMENT = 64, NVDIMM_IO_ATOMIC = 1, }; struct nd_namespace_label; struct nd_label_ent { struct list_head list; unsigned long flags; struct nd_namespace_label *label; }; struct nvdimm_cxl_label { u8 type[16]; u8 uuid[16]; u8 name[64]; __le32 flags; __le16 nrange; __le16 position; __le64 dpa; __le64 rawsize; __le32 slot; __le32 align; u8 region_uuid[16]; u8 abstraction_uuid[16]; __le16 lbasize; u8 reserved[86]; __le64 checksum; }; struct nvdimm_efi_label { u8 uuid[16]; u8 name[64]; __le32 flags; __le16 nlabel; __le16 position; __le64 isetcookie; __le64 lbasize; __le64 dpa; __le64 rawsize; __le32 slot; u8 align; u8 reserved[3]; guid_t type_guid; guid_t abstraction_guid; u8 reserved2[88]; __le64 checksum; }; struct nd_namespace_label { 
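/* Descriptive note, not generated output: the on-media namespace label is
 * either the CXL layout or the EFI layout below; which variant applies is
 * tracked by nvdimm_drvdata.cxl. */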
union { struct nvdimm_cxl_label cxl; struct nvdimm_efi_label efi; }; }; struct nd_region_data { int ns_count; int ns_active; unsigned int hints_shift; void *flush_wpq[0]; }; struct nd_namespace_index { u8 sig[16]; u8 flags[3]; u8 labelsize; __le32 seq; __le64 myoff; __le64 mysize; __le64 otheroff; __le64 labeloff; __le32 nslot; __le16 major; __le16 minor; __le64 checksum; u8 free[0]; }; struct nd_mapping_desc; struct nd_region_desc { struct resource *res; struct nd_mapping_desc *mapping; u16 num_mappings; const struct attribute_group **attr_groups; struct nd_interleave_set *nd_set; void *provider_data; int num_lanes; int numa_node; int target_node; unsigned long flags; int memregion; struct device_node *of_node; int (*flush)(struct nd_region *, struct bio *); }; struct nd_mapping_desc { struct nvdimm *nvdimm; u64 start; u64 size; int position; }; struct conflict_context { struct nd_region *nd_region; resource_size_t start; resource_size_t size; }; enum { ND_MIN_NAMESPACE_SIZE = 4096, }; enum alloc_loc { ALLOC_ERR = 0, ALLOC_BEFORE = 1, ALLOC_MID = 2, ALLOC_AFTER = 3, }; enum nd_label_flags { ND_LABEL_REAP = 0, }; enum { NSINDEX_SIG_LEN = 16, NSINDEX_ALIGN = 256, NSINDEX_SEQ_MASK = 3, NSLABEL_UUID_LEN = 16, NSLABEL_NAME_LEN = 64, NSLABEL_FLAG_ROLABEL = 1, NSLABEL_FLAG_LOCAL = 2, NSLABEL_FLAG_BTT = 4, NSLABEL_FLAG_UPDATING = 8, BTT_ALIGN = 4096, BTTINFO_SIG_LEN = 16, BTTINFO_UUID_LEN = 16, BTTINFO_FLAG_ERROR = 1, BTTINFO_MAJOR_VERSION = 1, ND_LABEL_MIN_SIZE = 1024, ND_LABEL_ID_SIZE = 50, ND_NSINDEX_INIT = 1, }; struct nd_namespace_pmem { struct nd_namespace_io nsio; unsigned long lbasize; char *alt_name; uuid_t *uuid; int id; }; struct btt { struct gendisk *btt_disk; struct list_head arena_list; struct dentry *debugfs_dir; struct nd_btt *nd_btt; u64 nlba; unsigned long long rawsize; u32 lbasize; u32 sector_size; struct nd_region *nd_region; struct mutex init_lock; int init_state; int num_arenas; struct badblocks *phys_bb; }; struct nd_gen_sb { char reserved[4088]; __le64 checksum; }; struct btt_sb { u8 signature[16]; u8 uuid[16]; u8 parent_uuid[16]; __le32 flags; __le16 version_major; __le16 version_minor; __le32 external_lbasize; __le32 external_nlba; __le32 internal_lbasize; __le32 internal_nlba; __le32 nfree; __le32 infosize; __le64 nextoff; __le64 dataoff; __le64 mapoff; __le64 logoff; __le64 info2off; u8 padding[3968]; __le64 checksum; }; enum dax_access_mode { DAX_ACCESS = 0, DAX_RECOVERY_WRITE = 1, }; struct dax_operations { long (*direct_access)(struct dax_device *, unsigned long, long, enum dax_access_mode, void **, pfn_t *); bool (*dax_supported)(struct dax_device *, struct block_device *, int, sector_t, sector_t); int (*zero_page_range)(struct dax_device *, unsigned long, size_t); size_t (*recovery_write)(struct dax_device *, unsigned long, void *, size_t, struct iov_iter *); }; struct pmem_device { phys_addr_t phys_addr; phys_addr_t data_offset; u64 pfn_flags; void *virt_addr; size_t size; u32 pfn_pad; struct kernfs_node *bb_state; struct badblocks bb; struct dax_device *dax_dev; struct gendisk *disk; struct dev_pagemap pgmap; }; enum btt_init_state { INIT_UNCHECKED = 0, INIT_NOTFOUND = 1, INIT_READY = 2, }; enum log_ent_request { LOG_NEW_ENT = 0, LOG_OLD_ENT = 1, }; struct free_entry; struct aligned_lock; struct arena_info { u64 size; u64 external_lba_start; u32 internal_nlba; u32 internal_lbasize; u32 external_nlba; u32 external_lbasize; u32 nfree; u16 version_major; u16 version_minor; u32 sector_size; u64 nextoff; u64 infooff; u64 dataoff; u64 mapoff; u64 logoff; u64 info2off; 
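/* Descriptive note, not generated output: the remaining members hold the
 * arena's in-memory runtime state: free-list entries, the read tracking
 * table (rtt) and the LBA-map locks. */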
struct free_entry *freelist; u32 *rtt; struct aligned_lock *map_locks; struct nd_btt *nd_btt; struct list_head list; struct dentry *debugfs_dir; u32 flags; struct mutex err_lock; int log_index[2]; }; struct free_entry { u32 block; u8 sub; u8 seq; u8 has_err; }; struct aligned_lock { union { spinlock_t lock; u8 cacheline_padding[64]; }; }; struct log_entry { __le32 lba; __le32 old_map; __le32 new_map; __le32 seq; }; struct log_group { struct log_entry ent[4]; }; struct bio_integrity_payload { struct bio *bip_bio; struct bvec_iter bip_iter; unsigned short bip_vcnt; unsigned short bip_max_vcnt; unsigned short bip_flags; int: 0; struct bvec_iter bio_iter; struct work_struct bip_work; struct bio_vec *bip_vec; struct bio_vec bip_inline_vecs[0]; }; struct of_pmem_private { struct nvdimm_bus_descriptor bus_desc; struct nvdimm_bus *bus; }; enum dax_device_flags { DAXDEV_ALIVE = 0, DAXDEV_WRITE_CACHE = 1, DAXDEV_SYNC = 2, DAXDEV_NOCACHE = 3, DAXDEV_NOMC = 4, }; struct dax_holder_operations; struct dax_device { struct inode inode; struct cdev cdev; void *private; unsigned long flags; const struct dax_operations *ops; void *holder_data; const struct dax_holder_operations *holder_ops; }; struct dax_holder_operations { int (*notify_failure)(struct dax_device *, u64, u64, int); }; enum dax_driver_type { DAXDRV_KMEM_TYPE = 0, DAXDRV_DEVICE_TYPE = 1, }; enum id_action { ID_REMOVE = 0, ID_ADD = 1, }; struct dax_id { struct list_head list; char dev_name[30]; }; struct dax_region; struct dev_dax_range; struct dev_dax { struct dax_region *region; struct dax_device *dax_dev; unsigned int align; int target_node; bool dyn_id; int id; struct ida ida; struct device dev; struct dev_pagemap *pgmap; int nr_range; struct dev_dax_range *ranges; }; struct dax_region { int id; int target_node; struct kref kref; struct device *dev; unsigned int align; struct ida ida; struct resource res; struct device *seed; struct device *youngest; }; struct dax_mapping; struct dev_dax_range { unsigned long pgoff; struct range range; struct dax_mapping *mapping; }; struct dax_mapping { struct device dev; int range_id; int id; }; struct dax_device_driver { struct device_driver drv; struct list_head ids; enum dax_driver_type type; int (*probe)(struct dev_dax *); void (*remove)(struct dev_dax *); }; struct dev_dax_data { struct dax_region *dax_region; struct dev_pagemap *pgmap; resource_size_t size; int id; }; struct dma_buf_list { struct list_head head; struct mutex lock; }; struct dma_buf_import_sync_file { __u32 flags; __s32 fd; }; struct dma_buf_export_sync_file { __u32 flags; __s32 fd; }; struct dma_buf_sync { __u64 flags; }; typedef void (*btf_trace_dma_fence_emit)(void *, struct dma_fence *); typedef void (*btf_trace_dma_fence_init)(void *, struct dma_fence *); typedef void (*btf_trace_dma_fence_destroy)(void *, struct dma_fence *); typedef void (*btf_trace_dma_fence_enable_signal)(void *, struct dma_fence *); typedef void (*btf_trace_dma_fence_signaled)(void *, struct dma_fence *); typedef void (*btf_trace_dma_fence_wait_start)(void *, struct dma_fence *); typedef void (*btf_trace_dma_fence_wait_end)(void *, struct dma_fence *); struct trace_event_raw_dma_fence { struct trace_entry ent; u32 __data_loc_driver; u32 __data_loc_timeline; unsigned int context; unsigned int seqno; char __data[0]; }; struct default_wait_cb { struct dma_fence_cb base; struct task_struct *task; }; struct trace_event_data_offsets_dma_fence { u32 driver; u32 timeline; }; struct dma_fence_array; struct dma_fence_array_cb { struct dma_fence_cb cb; struct 
dma_fence_array *array; }; struct dma_fence_array { struct dma_fence base; spinlock_t lock; unsigned int num_fences; atomic_t num_pending; struct dma_fence **fences; struct irq_work work; }; struct dma_heap_ops; struct dma_heap { const char *name; const struct dma_heap_ops *ops; void *priv; dev_t heap_devt; struct list_head list; struct cdev heap_cdev; struct kref refcount; struct device *heap_dev; }; struct dma_heap_ops { struct dma_buf * (*allocate)(struct dma_heap *, unsigned long, u32, u64); long (*get_pool_size)(struct dma_heap *); }; struct dma_heap_allocation_data { __u64 len; __u32 fd; __u32 fd_flags; __u64 heap_flags; }; struct dma_heap_export_info { const char *name; const struct dma_heap_ops *ops; void *priv; }; enum df_reason { DF_NORMAL = 0, DF_UNDER_PRESSURE = 1, }; struct deferred_freelist_item { size_t nr_pages; void (*free)(struct deferred_freelist_item *, enum df_reason); struct list_head list; }; enum { POOL_LOWPAGE = 0, POOL_HIGHPAGE = 1, POOL_TYPE_SIZE = 2, }; struct dmabuf_page_pool { int count[2]; struct list_head items[2]; spinlock_t lock; gfp_t gfp_mask; unsigned int order; struct list_head list; }; struct sync_merge_data { char name[32]; __s32 fd2; __s32 fence; __u32 flags; __u32 pad; }; struct sync_file_info { char name[32]; __s32 status; __u32 flags; __u32 num_fences; __u32 pad; __u64 sync_fence_info; }; struct sync_fence_info { char obj_name[32]; char driver_name[32]; __s32 status; __u32 flags; __u64 timestamp_ns; }; struct dma_buf_stats_attribute { struct attribute attr; ssize_t (*show)(struct dma_buf *, struct dma_buf_stats_attribute *, char *); }; struct scsi_cmnd; typedef void (*btf_trace_scsi_dispatch_cmd_start)(void *, struct scsi_cmnd *); enum scsi_cmnd_submitter { SUBMITTED_BY_BLOCK_LAYER = 0, SUBMITTED_BY_SCSI_ERROR_HANDLER = 1, SUBMITTED_BY_SCSI_RESET_IOCTL = 2, } __attribute__((mode(byte))); struct scsi_data_buffer { struct sg_table table; unsigned int length; }; struct scsi_device; struct scsi_cmnd { struct scsi_device *device; struct list_head eh_entry; struct delayed_work abort_work; struct callback_head rcu; int eh_eflags; int budget_token; unsigned long jiffies_at_alloc; int retries; int allowed; unsigned char prot_op; unsigned char prot_type; unsigned char prot_flags; enum scsi_cmnd_submitter submitter; unsigned short cmd_len; enum dma_data_direction sc_data_direction; unsigned char cmnd[32]; struct scsi_data_buffer sdb; struct scsi_data_buffer *prot_sdb; unsigned int underflow; unsigned int transfersize; unsigned int resid_len; unsigned int sense_len; unsigned char *sense_buffer; int flags; unsigned long state; unsigned int extra_len; unsigned char *host_scribble; int result; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; typedef __u64 blist_flags_t; enum scsi_device_state { SDEV_CREATED = 1, SDEV_RUNNING = 2, SDEV_CANCEL = 3, SDEV_DEL = 4, SDEV_QUIESCE = 5, SDEV_OFFLINE = 6, SDEV_TRANSPORT_OFFLINE = 7, SDEV_BLOCK = 8, SDEV_CREATED_BLOCK = 9, }; struct Scsi_Host; struct scsi_vpd; struct scsi_target; struct scsi_device_handler; struct scsi_device { struct Scsi_Host *host; struct request_queue *request_queue; struct list_head siblings; struct list_head same_target_siblings; struct sbitmap budget_map; atomic_t device_blocked; atomic_t restarts; spinlock_t list_lock; struct list_head starved_entry; unsigned short queue_depth; unsigned short max_queue_depth; unsigned short last_queue_full_depth; unsigned short last_queue_full_count; unsigned long last_queue_full_time; unsigned long 
queue_ramp_up_period; unsigned long last_queue_ramp_up; unsigned int id; unsigned int channel; u64 lun; unsigned int manufacturer; unsigned int sector_size; void *hostdata; unsigned char type; char scsi_level; char inq_periph_qual; struct mutex inquiry_mutex; unsigned char inquiry_len; unsigned char *inquiry; const char *vendor; const char *model; const char *rev; struct scsi_vpd __attribute__((btf_type_tag("rcu"))) *vpd_pg0; struct scsi_vpd __attribute__((btf_type_tag("rcu"))) *vpd_pg83; struct scsi_vpd __attribute__((btf_type_tag("rcu"))) *vpd_pg80; struct scsi_vpd __attribute__((btf_type_tag("rcu"))) *vpd_pg89; struct scsi_vpd __attribute__((btf_type_tag("rcu"))) *vpd_pgb0; struct scsi_vpd __attribute__((btf_type_tag("rcu"))) *vpd_pgb1; struct scsi_vpd __attribute__((btf_type_tag("rcu"))) *vpd_pgb2; struct scsi_target *sdev_target; blist_flags_t sdev_bflags; unsigned int eh_timeout; unsigned int manage_system_start_stop: 1; unsigned int manage_runtime_start_stop: 1; unsigned int manage_shutdown: 1; unsigned int force_runtime_start_on_system_start: 1; unsigned int removable: 1; unsigned int changed: 1; unsigned int busy: 1; unsigned int lockable: 1; unsigned int locked: 1; unsigned int borken: 1; unsigned int disconnect: 1; unsigned int soft_reset: 1; unsigned int sdtr: 1; unsigned int wdtr: 1; unsigned int ppr: 1; unsigned int tagged_supported: 1; unsigned int simple_tags: 1; unsigned int was_reset: 1; unsigned int expecting_cc_ua: 1; unsigned int use_10_for_rw: 1; unsigned int use_10_for_ms: 1; unsigned int set_dbd_for_ms: 1; unsigned int read_before_ms: 1; unsigned int no_report_opcodes: 1; unsigned int no_write_same: 1; unsigned int use_16_for_rw: 1; unsigned int use_16_for_sync: 1; unsigned int skip_ms_page_8: 1; unsigned int skip_ms_page_3f: 1; unsigned int skip_vpd_pages: 1; unsigned int try_vpd_pages: 1; unsigned int use_192_bytes_for_3f: 1; unsigned int no_start_on_add: 1; unsigned int allow_restart: 1; unsigned int no_start_on_resume: 1; unsigned int start_stop_pwr_cond: 1; unsigned int no_uld_attach: 1; unsigned int select_no_atn: 1; unsigned int fix_capacity: 1; unsigned int guess_capacity: 1; unsigned int retry_hwerror: 1; unsigned int last_sector_bug: 1; unsigned int no_read_disc_info: 1; unsigned int no_read_capacity_16: 1; unsigned int try_rc_10_first: 1; unsigned int security_supported: 1; unsigned int is_visible: 1; unsigned int wce_default_on: 1; unsigned int no_dif: 1; unsigned int broken_fua: 1; unsigned int lun_in_cdb: 1; unsigned int unmap_limit_for_ws: 1; unsigned int rpm_autosuspend: 1; unsigned int ignore_media_change: 1; unsigned int silence_suspend: 1; unsigned int no_vpd_size: 1; unsigned int cdl_supported: 1; unsigned int cdl_enable: 1; unsigned int queue_stopped; bool offline_already; atomic_t disk_events_disable_depth; unsigned long supported_events[1]; unsigned long pending_events[1]; struct list_head event_list; struct work_struct event_work; unsigned int max_device_blocked; atomic_t iorequest_cnt; atomic_t iodone_cnt; atomic_t ioerr_cnt; atomic_t iotmo_cnt; struct device sdev_gendev; struct device sdev_dev; struct work_struct requeue_work; struct scsi_device_handler *handler; void *handler_data; size_t dma_drain_len; void *dma_drain_buf; unsigned int sg_timeout; unsigned int sg_reserved_size; struct bsg_device *bsg_dev; unsigned char access_state; struct mutex state_mutex; enum scsi_device_state sdev_state; struct task_struct *quiesced_by; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; 
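/* Descriptive note, not generated output: sdev_data is a variable-size area
 * for SCSI transport-class per-device data, sized at allocation time from
 * scsi_transport_template->device_size. */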
unsigned long sdev_data[0]; }; enum scsi_host_state { SHOST_CREATED = 1, SHOST_RUNNING = 2, SHOST_CANCEL = 3, SHOST_DEL = 4, SHOST_RECOVERY = 5, SHOST_CANCEL_RECOVERY = 6, SHOST_DEL_RECOVERY = 7, }; struct scsi_host_template; struct scsi_transport_template; struct Scsi_Host { struct list_head __devices; struct list_head __targets; struct list_head starved_list; spinlock_t default_lock; spinlock_t *host_lock; struct mutex scan_mutex; struct list_head eh_abort_list; struct list_head eh_cmd_q; struct task_struct *ehandler; struct completion *eh_action; wait_queue_head_t host_wait; const struct scsi_host_template *hostt; struct scsi_transport_template *transportt; struct kref tagset_refcnt; struct completion tagset_freed; struct blk_mq_tag_set tag_set; atomic_t host_blocked; unsigned int host_failed; unsigned int host_eh_scheduled; unsigned int host_no; int eh_deadline; unsigned long last_reset; unsigned int max_channel; unsigned int max_id; u64 max_lun; unsigned int unique_id; unsigned short max_cmd_len; int this_id; int can_queue; short cmd_per_lun; unsigned short sg_tablesize; unsigned short sg_prot_tablesize; unsigned int max_sectors; unsigned int opt_sectors; unsigned int max_segment_size; unsigned long dma_boundary; unsigned long virt_boundary_mask; unsigned int nr_hw_queues; unsigned int nr_maps; unsigned int active_mode: 2; unsigned int host_self_blocked: 1; unsigned int reverse_ordering: 1; unsigned int tmf_in_progress: 1; unsigned int async_scan: 1; unsigned int eh_noresume: 1; unsigned int no_write_same: 1; unsigned int host_tagset: 1; unsigned int queuecommand_may_block: 1; unsigned int short_inquiry: 1; unsigned int no_scsi2_lun_in_cdb: 1; char work_q_name[20]; struct workqueue_struct *work_q; struct workqueue_struct *tmf_work_q; unsigned int max_host_blocked; unsigned int prot_capabilities; unsigned char prot_guard_type; unsigned long base; unsigned long io_port; unsigned char n_io_port; unsigned char dma_channel; unsigned int irq; enum scsi_host_state shost_state; struct device shost_gendev; struct device shost_dev; void *shost_data; struct device *dma_dev; int rpm_autosuspend_delay; u64 android_kabi_reserved1; unsigned long hostdata[0]; }; enum scsi_timeout_action { SCSI_EH_DONE = 0, SCSI_EH_RESET_TIMER = 1, SCSI_EH_NOT_HANDLED = 2, }; struct scsi_host_template { unsigned int cmd_size; int (*queuecommand)(struct Scsi_Host *, struct scsi_cmnd *); void (*commit_rqs)(struct Scsi_Host *, u16); struct module *module; const char *name; const char * (*info)(struct Scsi_Host *); int (*ioctl)(struct scsi_device *, unsigned int, void __attribute__((btf_type_tag("user"))) *); int (*compat_ioctl)(struct scsi_device *, unsigned int, void __attribute__((btf_type_tag("user"))) *); int (*init_cmd_priv)(struct Scsi_Host *, struct scsi_cmnd *); int (*exit_cmd_priv)(struct Scsi_Host *, struct scsi_cmnd *); int (*eh_abort_handler)(struct scsi_cmnd *); int (*eh_device_reset_handler)(struct scsi_cmnd *); int (*eh_target_reset_handler)(struct scsi_cmnd *); int (*eh_bus_reset_handler)(struct scsi_cmnd *); int (*eh_host_reset_handler)(struct scsi_cmnd *); int (*slave_alloc)(struct scsi_device *); int (*slave_configure)(struct scsi_device *); void (*slave_destroy)(struct scsi_device *); int (*target_alloc)(struct scsi_target *); void (*target_destroy)(struct scsi_target *); int (*scan_finished)(struct Scsi_Host *, unsigned long); void (*scan_start)(struct Scsi_Host *); int (*change_queue_depth)(struct scsi_device *, int); void (*map_queues)(struct Scsi_Host *); int (*mq_poll)(struct Scsi_Host *, 
unsigned int); bool (*dma_need_drain)(struct request *); int (*bios_param)(struct scsi_device *, struct block_device *, sector_t, int *); void (*unlock_native_capacity)(struct scsi_device *); int (*show_info)(struct seq_file *, struct Scsi_Host *); int (*write_info)(struct Scsi_Host *, char *, int); enum scsi_timeout_action (*eh_timed_out)(struct scsi_cmnd *); bool (*eh_should_retry_cmd)(struct scsi_cmnd *); int (*host_reset)(struct Scsi_Host *, int); const char *proc_name; int can_queue; int this_id; unsigned short sg_tablesize; unsigned short sg_prot_tablesize; unsigned int max_sectors; unsigned int max_segment_size; unsigned long dma_boundary; unsigned long virt_boundary_mask; short cmd_per_lun; int tag_alloc_policy; unsigned int track_queue_depth: 1; unsigned int supported_mode: 2; unsigned int emulated: 1; unsigned int skip_settle_delay: 1; unsigned int no_write_same: 1; unsigned int host_tagset: 1; unsigned int queuecommand_may_block: 1; unsigned int max_host_blocked; const struct attribute_group **shost_groups; const struct attribute_group **sdev_groups; u64 vendor_id; u64 android_oem_data1; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; enum scsi_target_state { STARGET_CREATED = 1, STARGET_RUNNING = 2, STARGET_REMOVE = 3, STARGET_CREATED_REMOVE = 4, STARGET_DEL = 5, }; struct scsi_target { struct scsi_device *starget_sdev_user; struct list_head siblings; struct list_head devices; struct device dev; struct kref reap_ref; unsigned int channel; unsigned int id; unsigned int create: 1; unsigned int single_lun: 1; unsigned int pdt_1f_for_no_lun: 1; unsigned int no_report_luns: 1; unsigned int expecting_lun_change: 1; atomic_t target_busy; atomic_t target_blocked; unsigned int can_queue; unsigned int max_target_blocked; char scsi_level; enum scsi_target_state state; void *hostdata; unsigned long starget_data[0]; }; struct scsi_vpd { struct callback_head rcu; int len; unsigned char data[0]; }; typedef void (*btf_trace_scsi_dispatch_cmd_error)(void *, struct scsi_cmnd *, int); typedef void (*btf_trace_scsi_dispatch_cmd_done)(void *, struct scsi_cmnd *); typedef void (*btf_trace_scsi_dispatch_cmd_timeout)(void *, struct scsi_cmnd *); typedef void (*btf_trace_scsi_eh_wakeup)(void *, struct Scsi_Host *); enum scsi_vpd_parameters { SCSI_VPD_HEADER_SIZE = 4, SCSI_VPD_LIST_SIZE = 36, }; struct trace_event_raw_scsi_dispatch_cmd_start { struct trace_entry ent; unsigned int host_no; unsigned int channel; unsigned int id; unsigned int lun; unsigned int opcode; unsigned int cmd_len; int driver_tag; int scheduler_tag; unsigned int data_sglen; unsigned int prot_sglen; unsigned char prot_op; u32 __data_loc_cmnd; char __data[0]; }; struct trace_event_raw_scsi_dispatch_cmd_error { struct trace_entry ent; unsigned int host_no; unsigned int channel; unsigned int id; unsigned int lun; int rtn; unsigned int opcode; unsigned int cmd_len; int driver_tag; int scheduler_tag; unsigned int data_sglen; unsigned int prot_sglen; unsigned char prot_op; u32 __data_loc_cmnd; char __data[0]; }; struct trace_event_raw_scsi_cmd_done_timeout_template { struct trace_entry ent; unsigned int host_no; unsigned int channel; unsigned int id; unsigned int lun; int result; unsigned int opcode; unsigned int cmd_len; int driver_tag; int scheduler_tag; unsigned int data_sglen; unsigned int prot_sglen; unsigned char prot_op; u32 __data_loc_cmnd; u8 sense_key; u8 asc; u8 ascq; char __data[0]; }; struct trace_event_raw_scsi_eh_wakeup { struct trace_entry ent; unsigned int 
host_no; char __data[0]; }; struct scsi_driver { struct device_driver gendrv; int (*resume)(struct device *); void (*rescan)(struct device *); blk_status_t (*init_command)(struct scsi_cmnd *); void (*uninit_command)(struct scsi_cmnd *); int (*done)(struct scsi_cmnd *); int (*eh_action)(struct scsi_cmnd *, int); void (*eh_reset)(struct scsi_cmnd *); }; struct trace_event_data_offsets_scsi_dispatch_cmd_start { u32 cmnd; }; struct trace_event_data_offsets_scsi_dispatch_cmd_error { u32 cmnd; }; struct trace_event_data_offsets_scsi_cmd_done_timeout_template { u32 cmnd; }; struct scsi_sense_hdr { u8 response_code; u8 sense_key; u8 asc; u8 ascq; u8 byte4; u8 byte5; u8 byte6; u8 additional_length; }; struct scsi_exec_args { unsigned char *sense; unsigned int sense_len; struct scsi_sense_hdr *sshdr; blk_mq_req_flags_t req_flags; int scmd_flags; int *resid; }; struct scsi_mode_data { __u32 length; __u16 block_descriptor_length; __u8 medium_type; __u8 device_specific; __u8 header_length; __u8 longlba: 1; }; struct trace_event_data_offsets_scsi_eh_wakeup {}; enum scsi_host_status { DID_OK = 0, DID_NO_CONNECT = 1, DID_BUS_BUSY = 2, DID_TIME_OUT = 3, DID_BAD_TARGET = 4, DID_ABORT = 5, DID_PARITY = 6, DID_ERROR = 7, DID_RESET = 8, DID_BAD_INTR = 9, DID_PASSTHROUGH = 10, DID_SOFT_ERROR = 11, DID_IMM_RETRY = 12, DID_REQUEUE = 13, DID_TRANSPORT_DISRUPTED = 14, DID_TRANSPORT_FAILFAST = 15, DID_TRANSPORT_MARGINAL = 20, }; struct scsi_transport_template { struct transport_container host_attrs; struct transport_container target_attrs; struct transport_container device_attrs; int (*user_scan)(struct Scsi_Host *, uint, uint, u64); int device_size; int device_private_offset; int target_size; int target_private_offset; int host_size; unsigned int create_work_queue: 1; void (*eh_strategy_handler)(struct Scsi_Host *); }; struct scsi_host_busy_iter_data { bool (*fn)(struct scsi_cmnd *, void *); void *priv; }; enum scsi_msg_byte { COMMAND_COMPLETE = 0, EXTENDED_MESSAGE = 1, SAVE_POINTERS = 2, RESTORE_POINTERS = 3, DISCONNECT = 4, INITIATOR_ERROR = 5, ABORT_TASK_SET = 6, MESSAGE_REJECT = 7, NOP = 8, MSG_PARITY_ERROR = 9, LINKED_CMD_COMPLETE = 10, LINKED_FLG_CMD_COMPLETE = 11, TARGET_RESET = 12, ABORT_TASK = 13, CLEAR_TASK_SET = 14, INITIATE_RECOVERY = 15, RELEASE_RECOVERY = 16, TERMINATE_IO_PROC = 17, CLEAR_ACA = 22, LOGICAL_UNIT_RESET = 23, SIMPLE_QUEUE_TAG = 32, HEAD_OF_QUEUE_TAG = 33, ORDERED_QUEUE_TAG = 34, IGNORE_WIDE_RESIDUE = 35, ACA = 36, QAS_REQUEST = 85, BUS_DEVICE_RESET = 12, ABORT = 6, }; enum sam_status { SAM_STAT_GOOD = 0, SAM_STAT_CHECK_CONDITION = 2, SAM_STAT_CONDITION_MET = 4, SAM_STAT_BUSY = 8, SAM_STAT_INTERMEDIATE = 16, SAM_STAT_INTERMEDIATE_CONDITION_MET = 20, SAM_STAT_RESERVATION_CONFLICT = 24, SAM_STAT_COMMAND_TERMINATED = 34, SAM_STAT_TASK_SET_FULL = 40, SAM_STAT_ACA_ACTIVE = 48, SAM_STAT_TASK_ABORTED = 64, }; struct request_sense; struct cdrom_generic_command { unsigned char cmd[12]; unsigned char __attribute__((btf_type_tag("user"))) *buffer; unsigned int buflen; int stat; struct request_sense __attribute__((btf_type_tag("user"))) *sense; unsigned char data_direction; int quiet; int timeout; union { void __attribute__((btf_type_tag("user"))) *reserved[1]; void __attribute__((btf_type_tag("user"))) *unused; }; }; struct request_sense { __u8 error_code: 7; __u8 valid: 1; __u8 segment_number; __u8 sense_key: 4; __u8 reserved2: 1; __u8 ili: 1; __u8 reserved1: 2; __u8 information[4]; __u8 add_sense_len; __u8 command_info[4]; __u8 asc; __u8 ascq; __u8 fruc; __u8 sks[3]; __u8 asb[46]; }; struct 
scsi_ioctl_command { unsigned int inlen; unsigned int outlen; unsigned char data[0]; }; struct scsi_idlun { __u32 dev_id; __u32 host_unique_id; }; struct sg_io_hdr { int interface_id; int dxfer_direction; unsigned char cmd_len; unsigned char mx_sb_len; unsigned short iovec_count; unsigned int dxfer_len; void __attribute__((btf_type_tag("user"))) *dxferp; unsigned char __attribute__((btf_type_tag("user"))) *cmdp; void __attribute__((btf_type_tag("user"))) *sbp; unsigned int timeout; unsigned int flags; int pack_id; void __attribute__((btf_type_tag("user"))) *usr_ptr; unsigned char status; unsigned char masked_status; unsigned char msg_status; unsigned char sb_len_wr; unsigned short host_status; unsigned short driver_status; int resid; unsigned int duration; unsigned int info; }; struct compat_cdrom_generic_command { unsigned char cmd[12]; compat_caddr_t buffer; compat_uint_t buflen; compat_int_t stat; compat_caddr_t sense; unsigned char data_direction; unsigned char pad[3]; compat_int_t quiet; compat_int_t timeout; compat_caddr_t unused; }; struct compat_sg_io_hdr { compat_int_t interface_id; compat_int_t dxfer_direction; unsigned char cmd_len; unsigned char mx_sb_len; unsigned short iovec_count; compat_uint_t dxfer_len; compat_uint_t dxferp; compat_uptr_t cmdp; compat_uptr_t sbp; compat_uint_t timeout; compat_uint_t flags; compat_int_t pack_id; compat_uptr_t usr_ptr; unsigned char status; unsigned char masked_status; unsigned char msg_status; unsigned char sb_len_wr; unsigned short host_status; unsigned short driver_status; compat_int_t resid; compat_uint_t duration; compat_uint_t info; }; enum scsi_disposition { NEEDS_RETRY = 8193, SUCCESS = 8194, FAILED = 8195, QUEUED = 8196, SOFT_ERROR = 8197, ADD_TO_MLQUEUE = 8198, TIMEOUT_ERROR = 8199, SCSI_RETURN_NOT_HANDLED = 8200, FAST_IO_FAIL = 8201, }; enum scsi_ml_status { SCSIML_STAT_OK = 0, SCSIML_STAT_RESV_CONFLICT = 1, SCSIML_STAT_NOSPC = 2, SCSIML_STAT_MED_ERROR = 3, SCSIML_STAT_TGT_FAILURE = 4, SCSIML_STAT_DL_TIMEOUT = 5, }; enum scsi_prot_operations { SCSI_PROT_NORMAL = 0, SCSI_PROT_READ_INSERT = 1, SCSI_PROT_WRITE_STRIP = 2, SCSI_PROT_READ_STRIP = 3, SCSI_PROT_WRITE_INSERT = 4, SCSI_PROT_READ_PASS = 5, SCSI_PROT_WRITE_PASS = 6, }; enum scsi_device_event { SDEV_EVT_MEDIA_CHANGE = 1, SDEV_EVT_INQUIRY_CHANGE_REPORTED = 2, SDEV_EVT_CAPACITY_CHANGE_REPORTED = 3, SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED = 4, SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED = 5, SDEV_EVT_LUN_CHANGE_REPORTED = 6, SDEV_EVT_ALUA_STATE_CHANGE_REPORTED = 7, SDEV_EVT_POWER_ON_RESET_OCCURRED = 8, SDEV_EVT_FIRST = 1, SDEV_EVT_LAST = 8, SDEV_EVT_MAXBITS = 9, }; typedef void (*activate_complete)(void *, int); struct scsi_device_handler { struct list_head list; struct module *module; const char *name; enum scsi_disposition (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *); int (*attach)(struct scsi_device *); void (*detach)(struct scsi_device *); int (*activate)(struct scsi_device *, activate_complete, void *); blk_status_t (*prep_fn)(struct scsi_device *, struct request *); int (*set_params)(struct scsi_device *, const char *); void (*rescan)(struct scsi_device *); }; struct scsi_eh_save { int result; unsigned int resid_len; int eh_eflags; enum dma_data_direction data_direction; unsigned int underflow; unsigned char cmd_len; unsigned char prot_op; unsigned char cmnd[32]; struct scsi_data_buffer sdb; struct scatterlist sense_sgl; }; enum { ACTION_FAIL = 0, ACTION_REPREP = 1, ACTION_DELAYED_REPREP = 2, ACTION_RETRY = 3, ACTION_DELAYED_RETRY = 4, }; enum 
scsi_host_prot_capabilities { SHOST_DIF_TYPE1_PROTECTION = 1, SHOST_DIF_TYPE2_PROTECTION = 2, SHOST_DIF_TYPE3_PROTECTION = 4, SHOST_DIX_TYPE0_PROTECTION = 8, SHOST_DIX_TYPE1_PROTECTION = 16, SHOST_DIX_TYPE2_PROTECTION = 32, SHOST_DIX_TYPE3_PROTECTION = 64, }; struct scsi_event { enum scsi_device_event evt_type; struct list_head node; }; enum scsi_scan_mode { SCSI_SCAN_INITIAL = 0, SCSI_SCAN_RESCAN = 1, SCSI_SCAN_MANUAL = 2, }; enum scsi_timeouts { SCSI_DEFAULT_EH_TIMEOUT = 2500, }; struct async_scan_data { struct list_head list; struct Scsi_Host *shost; struct completion prev_finished; }; struct scsi_lun { __u8 scsi_lun[8]; }; enum scsi_devinfo_key { SCSI_DEVINFO_GLOBAL = 0, SCSI_DEVINFO_SPI = 1, }; struct scsi_dev_info_list { struct list_head dev_info_list; char vendor[8]; char model[16]; blist_flags_t flags; unsigned int compatible; }; struct scsi_dev_info_list_table { struct list_head node; struct list_head scsi_dev_info_list; const char *name; int key; }; struct scsi_varlen_cdb_hdr { __u8 opcode; __u8 control; __u8 misc[5]; __u8 additional_cdb_length; __be16 service_action; }; enum scsi_pr_type { SCSI_PR_WRITE_EXCLUSIVE = 1, SCSI_PR_EXCLUSIVE_ACCESS = 3, SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY = 5, SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY = 6, SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS = 7, SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS = 8, }; enum pr_status { PR_STS_SUCCESS = 0, PR_STS_IOERR = 2, PR_STS_RESERVATION_CONFLICT = 24, PR_STS_RETRY_PATH_FAILURE = 917504, PR_STS_PATH_FAST_FAILED = 983040, PR_STS_PATH_FAILED = 65536, }; enum { SD_DEF_XFER_BLOCKS = 65535, SD_MAX_XFER_BLOCKS = 4294967295, SD_MAX_WS10_BLOCKS = 65535, SD_MAX_WS16_BLOCKS = 8388607, }; enum { SD_LBP_FULL = 0, SD_LBP_UNMAP = 1, SD_LBP_WS16 = 2, SD_LBP_WS10 = 3, SD_LBP_ZERO = 4, SD_LBP_DISABLE = 5, }; enum t10_dif_type { T10_PI_TYPE0_PROTECTION = 0, T10_PI_TYPE1_PROTECTION = 1, T10_PI_TYPE2_PROTECTION = 2, T10_PI_TYPE3_PROTECTION = 3, }; enum { SD_ZERO_WRITE = 0, SD_ZERO_WS = 1, SD_ZERO_WS16_UNMAP = 2, SD_ZERO_WS10_UNMAP = 3, }; enum bip_flags { BIP_BLOCK_INTEGRITY = 1, BIP_MAPPED_INTEGRITY = 2, BIP_CTRL_NOCHECK = 4, BIP_DISK_NOCHECK = 8, BIP_IP_CHECKSUM = 16, }; enum scsi_prot_flags { SCSI_PROT_TRANSFER_PI = 1, SCSI_PROT_GUARD_CHECK = 2, SCSI_PROT_REF_CHECK = 4, SCSI_PROT_REF_INCREMENT = 8, SCSI_PROT_IP_CHECKSUM = 16, }; enum { SD_EXT_CDB_SIZE = 32, SD_MEMPOOL_SIZE = 2, }; struct opal_dev; struct zoned_disk_info { u32 nr_zones; u32 zone_blocks; }; struct scsi_disk { struct scsi_device *device; struct device disk_dev; struct gendisk *disk; struct opal_dev *opal_dev; struct zoned_disk_info early_zone_info; struct zoned_disk_info zone_info; u32 zones_optimal_open; u32 zones_optimal_nonseq; u32 zones_max_open; u32 zone_starting_lba_gran; u32 *zones_wp_offset; spinlock_t zones_wp_offset_lock; u32 *rev_wp_offset; struct mutex rev_mutex; struct work_struct zone_wp_offset_work; char *zone_wp_update_buf; atomic_t openers; sector_t capacity; int max_retries; u32 min_xfer_blocks; u32 max_xfer_blocks; u32 opt_xfer_blocks; u32 max_ws_blocks; u32 max_unmap_blocks; u32 unmap_granularity; u32 unmap_alignment; u32 index; unsigned int physical_block_size; unsigned int max_medium_access_timeouts; unsigned int medium_access_timed_out; u8 media_present; u8 write_prot; u8 protection_type; u8 provisioning_mode; u8 zeroing_mode; u8 nr_actuators; bool suspended; unsigned int ATO: 1; unsigned int cache_override: 1; unsigned int WCE: 1; unsigned int RCD: 1; unsigned int DPOFUA: 1; unsigned int first_scan: 1; unsigned int lbpme: 1; unsigned int lbprz: 1; unsigned int lbpu: 1; 
unsigned int lbpws: 1; unsigned int lbpws10: 1; unsigned int lbpvpd: 1; unsigned int ws10: 1; unsigned int ws16: 1; unsigned int rc_basis: 2; unsigned int zoned: 2; unsigned int urswrz: 1; unsigned int security: 1; unsigned int ignore_medium_access_errors: 1; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; typedef void (*btf_trace_scsi_prepare_zone_append)(void *, struct scsi_cmnd *, sector_t, unsigned int); typedef void (*btf_trace_scsi_zone_wp_update)(void *, struct scsi_cmnd *, sector_t, unsigned int, unsigned int); enum zbc_zone_type { ZBC_ZONE_TYPE_CONV = 1, ZBC_ZONE_TYPE_SEQWRITE_REQ = 2, ZBC_ZONE_TYPE_SEQWRITE_PREF = 3, ZBC_ZONE_TYPE_SEQ_OR_BEFORE_REQ = 4, ZBC_ZONE_TYPE_GAP = 5, }; enum zbc_zone_cond { ZBC_ZONE_COND_NO_WP = 0, ZBC_ZONE_COND_EMPTY = 1, ZBC_ZONE_COND_IMP_OPEN = 2, ZBC_ZONE_COND_EXP_OPEN = 3, ZBC_ZONE_COND_CLOSED = 4, ZBC_ZONE_COND_READONLY = 13, ZBC_ZONE_COND_FULL = 14, ZBC_ZONE_COND_OFFLINE = 15, }; enum zbc_zone_alignment_method { ZBC_CONSTANT_ZONE_LENGTH = 1, ZBC_CONSTANT_ZONE_START_OFFSET = 8, }; struct trace_event_raw_scsi_prepare_zone_append { struct trace_entry ent; unsigned int host_no; unsigned int channel; unsigned int id; unsigned int lun; sector_t lba; unsigned int wp_offset; char __data[0]; }; struct trace_event_raw_scsi_zone_wp_update { struct trace_entry ent; unsigned int host_no; unsigned int channel; unsigned int id; unsigned int lun; sector_t rq_sector; unsigned int wp_offset; unsigned int good_bytes; char __data[0]; }; struct trace_event_data_offsets_scsi_prepare_zone_append {}; struct trace_event_data_offsets_scsi_zone_wp_update {}; struct nvme_command; typedef void (*btf_trace_nvme_setup_cmd)(void *, struct request *, struct nvme_command *); struct nvme_sgl_desc { __le64 addr; __le32 length; __u8 rsvd[3]; __u8 type; }; struct nvme_keyed_sgl_desc { __le64 addr; __u8 length[3]; __u8 key[4]; __u8 type; }; union nvme_data_ptr { struct { __le64 prp1; __le64 prp2; }; struct nvme_sgl_desc sgl; struct nvme_keyed_sgl_desc ksgl; }; struct nvme_common_command { __u8 opcode; __u8 flags; __u16 command_id; __le32 nsid; __le32 cdw2[2]; __le64 metadata; union nvme_data_ptr dptr; union { struct { __le32 cdw10; __le32 cdw11; __le32 cdw12; __le32 cdw13; __le32 cdw14; __le32 cdw15; }; struct { __le32 cdw10; __le32 cdw11; __le32 cdw12; __le32 cdw13; __le32 cdw14; __le32 cdw15; } cdws; }; }; struct nvme_rw_command { __u8 opcode; __u8 flags; __u16 command_id; __le32 nsid; __le32 cdw2; __le32 cdw3; __le64 metadata; union nvme_data_ptr dptr; __le64 slba; __le16 length; __le16 control; __le32 dsmgmt; __le32 reftag; __le16 apptag; __le16 appmask; }; struct nvme_identify { __u8 opcode; __u8 flags; __u16 command_id; __le32 nsid; __u64 rsvd2[2]; union nvme_data_ptr dptr; __u8 cns; __u8 rsvd3; __le16 ctrlid; __u8 rsvd11[3]; __u8 csi; __u32 rsvd12[4]; }; struct nvme_features { __u8 opcode; __u8 flags; __u16 command_id; __le32 nsid; __u64 rsvd2[2]; union nvme_data_ptr dptr; __le32 fid; __le32 dword11; __le32 dword12; __le32 dword13; __le32 dword14; __le32 dword15; }; struct nvme_create_cq { __u8 opcode; __u8 flags; __u16 command_id; __u32 rsvd1[5]; __le64 prp1; __u64 rsvd8; __le16 cqid; __le16 qsize; __le16 cq_flags; __le16 irq_vector; __u32 rsvd12[4]; }; struct nvme_create_sq { __u8 opcode; __u8 flags; __u16 command_id; __u32 rsvd1[5]; __le64 prp1; __u64 rsvd8; __le16 sqid; __le16 qsize; __le16 sq_flags; __le16 cqid; __u32 rsvd12[4]; }; struct nvme_delete_queue { __u8 opcode; __u8 flags; __u16 command_id; __u32 rsvd1[9]; __le16 qid; __u16 rsvd10; __u32 rsvd11[5]; }; 
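/*
 * Illustrative sketch, not part of the BTF-generated dump: a hypothetical
 * helper (nvme_rw_example_fill) showing how the struct nvme_rw_command
 * fields above describe a block transfer.  slba is the starting logical
 * block address and length is a zero-based block count, so a request for
 * nr_blocks blocks stores nr_blocks - 1.  Real kernel code converts these
 * values with cpu_to_le64()/cpu_to_le16(); plain assignments are used here
 * because the __leXX typedefs in this dump carry no byte-order annotation.
 */
static inline void nvme_rw_example_fill(struct nvme_rw_command *rw,
					__u16 command_id, __u32 nsid,
					__u64 slba, __u32 nr_blocks)
{
	rw->opcode = 0x02;		/* nvme_cmd_read, see enum nvme_opcode below */
	rw->flags = 0;
	rw->command_id = command_id;
	rw->nsid = nsid;
	rw->slba = slba;
	rw->length = (__u16)(nr_blocks - 1);	/* zero-based block count */
	rw->control = 0;
	rw->dsmgmt = 0;
}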
struct nvme_download_firmware { __u8 opcode; __u8 flags; __u16 command_id; __u32 rsvd1[5]; union nvme_data_ptr dptr; __le32 numd; __le32 offset; __u32 rsvd12[4]; }; struct nvme_format_cmd { __u8 opcode; __u8 flags; __u16 command_id; __le32 nsid; __u64 rsvd2[4]; __le32 cdw10; __u32 rsvd11[5]; }; struct nvme_dsm_cmd { __u8 opcode; __u8 flags; __u16 command_id; __le32 nsid; __u64 rsvd2[2]; union nvme_data_ptr dptr; __le32 nr; __le32 attributes; __u32 rsvd12[4]; }; struct nvme_write_zeroes_cmd { __u8 opcode; __u8 flags; __u16 command_id; __le32 nsid; __u64 rsvd2; __le64 metadata; union nvme_data_ptr dptr; __le64 slba; __le16 length; __le16 control; __le32 dsmgmt; __le32 reftag; __le16 apptag; __le16 appmask; }; struct nvme_zone_mgmt_send_cmd { __u8 opcode; __u8 flags; __u16 command_id; __le32 nsid; __le32 cdw2[2]; __le64 metadata; union nvme_data_ptr dptr; __le64 slba; __le32 cdw12; __u8 zsa; __u8 select_all; __u8 rsvd13[2]; __le32 cdw14[2]; }; struct nvme_zone_mgmt_recv_cmd { __u8 opcode; __u8 flags; __u16 command_id; __le32 nsid; __le64 rsvd2[2]; union nvme_data_ptr dptr; __le64 slba; __le32 numd; __u8 zra; __u8 zrasf; __u8 pr; __u8 rsvd13; __le32 cdw14[2]; }; struct nvme_abort_cmd { __u8 opcode; __u8 flags; __u16 command_id; __u32 rsvd1[9]; __le16 sqid; __u16 cid; __u32 rsvd11[5]; }; struct nvme_get_log_page_command { __u8 opcode; __u8 flags; __u16 command_id; __le32 nsid; __u64 rsvd2[2]; union nvme_data_ptr dptr; __u8 lid; __u8 lsp; __le16 numdl; __le16 numdu; __u16 rsvd11; union { struct { __le32 lpol; __le32 lpou; }; __le64 lpo; }; __u8 rsvd14[3]; __u8 csi; __u32 rsvd15; }; struct nvmf_common_command { __u8 opcode; __u8 resv1; __u16 command_id; __u8 fctype; __u8 resv2[35]; __u8 ts[24]; }; struct nvmf_connect_command { __u8 opcode; __u8 resv1; __u16 command_id; __u8 fctype; __u8 resv2[19]; union nvme_data_ptr dptr; __le16 recfmt; __le16 qid; __le16 sqsize; __u8 cattr; __u8 resv3; __le32 kato; __u8 resv4[12]; }; struct nvmf_property_set_command { __u8 opcode; __u8 resv1; __u16 command_id; __u8 fctype; __u8 resv2[35]; __u8 attrib; __u8 resv3[3]; __le32 offset; __le64 value; __u8 resv4[8]; }; struct nvmf_property_get_command { __u8 opcode; __u8 resv1; __u16 command_id; __u8 fctype; __u8 resv2[35]; __u8 attrib; __u8 resv3[3]; __le32 offset; __u8 resv4[16]; }; struct nvmf_auth_common_command { __u8 opcode; __u8 resv1; __u16 command_id; __u8 fctype; __u8 resv2[19]; union nvme_data_ptr dptr; __u8 resv3; __u8 spsp0; __u8 spsp1; __u8 secp; __le32 al_tl; __u8 resv4[16]; }; struct nvmf_auth_send_command { __u8 opcode; __u8 resv1; __u16 command_id; __u8 fctype; __u8 resv2[19]; union nvme_data_ptr dptr; __u8 resv3; __u8 spsp0; __u8 spsp1; __u8 secp; __le32 tl; __u8 resv4[16]; }; struct nvmf_auth_receive_command { __u8 opcode; __u8 resv1; __u16 command_id; __u8 fctype; __u8 resv2[19]; union nvme_data_ptr dptr; __u8 resv3; __u8 spsp0; __u8 spsp1; __u8 secp; __le32 al; __u8 resv4[16]; }; struct nvme_dbbuf { __u8 opcode; __u8 flags; __u16 command_id; __u32 rsvd1[5]; __le64 prp1; __le64 prp2; __u32 rsvd12[6]; }; struct nvme_directive_cmd { __u8 opcode; __u8 flags; __u16 command_id; __le32 nsid; __u64 rsvd2[2]; union nvme_data_ptr dptr; __le32 numd; __u8 doper; __u8 dtype; __le16 dspec; __u8 endir; __u8 tdtype; __u16 rsvd15; __u32 rsvd16[3]; }; struct nvme_command { union { struct nvme_common_command common; struct nvme_rw_command rw; struct nvme_identify identify; struct nvme_features features; struct nvme_create_cq create_cq; struct nvme_create_sq create_sq; struct nvme_delete_queue delete_queue; struct 
nvme_download_firmware dlfw; struct nvme_format_cmd format; struct nvme_dsm_cmd dsm; struct nvme_write_zeroes_cmd write_zeroes; struct nvme_zone_mgmt_send_cmd zms; struct nvme_zone_mgmt_recv_cmd zmr; struct nvme_abort_cmd abort; struct nvme_get_log_page_command get_log_page; struct nvmf_common_command fabrics; struct nvmf_connect_command connect; struct nvmf_property_set_command prop_set; struct nvmf_property_get_command prop_get; struct nvmf_auth_common_command auth_common; struct nvmf_auth_send_command auth_send; struct nvmf_auth_receive_command auth_receive; struct nvme_dbbuf dbbuf; struct nvme_directive_cmd directive; }; }; typedef void (*btf_trace_nvme_complete_rq)(void *, struct request *); struct nvme_ctrl; typedef void (*btf_trace_nvme_async_event)(void *, struct nvme_ctrl *, u32); enum nvme_ctrl_state { NVME_CTRL_NEW = 0, NVME_CTRL_LIVE = 1, NVME_CTRL_RESETTING = 2, NVME_CTRL_CONNECTING = 3, NVME_CTRL_DELETING = 4, NVME_CTRL_DELETING_NOIO = 5, NVME_CTRL_DEAD = 6, }; struct nvme_id_power_state { __le16 max_power; __u8 rsvd2; __u8 flags; __le32 entry_lat; __le32 exit_lat; __u8 read_tput; __u8 read_lat; __u8 write_tput; __u8 write_lat; __le16 idle_power; __u8 idle_scale; __u8 rsvd19; __le16 active_power; __u8 active_work_scale; __u8 rsvd23[9]; }; struct nvme_fault_inject {}; enum nvme_ctrl_type { NVME_CTRL_IO = 1, NVME_CTRL_DISC = 2, NVME_CTRL_ADMIN = 3, }; enum nvme_dctype { NVME_DCTYPE_NOT_REPORTED = 0, NVME_DCTYPE_DDC = 1, NVME_DCTYPE_CDC = 2, }; struct nvme_ctrl_ops; struct nvme_subsystem; struct nvme_effects_log; struct nvme_ana_rsp_hdr; struct nvmf_ctrl_options; struct nvme_ctrl { bool comp_seen; bool identified; enum nvme_ctrl_state state; spinlock_t lock; struct mutex scan_lock; const struct nvme_ctrl_ops *ops; struct request_queue *admin_q; struct request_queue *connect_q; struct request_queue *fabrics_q; struct device *dev; int instance; int numa_node; struct blk_mq_tag_set *tagset; struct blk_mq_tag_set *admin_tagset; struct list_head namespaces; struct rw_semaphore namespaces_rwsem; struct device ctrl_device; struct device *device; struct cdev cdev; struct work_struct reset_work; struct work_struct delete_work; wait_queue_head_t state_wq; struct nvme_subsystem *subsys; struct list_head subsys_entry; struct opal_dev *opal_dev; char name[12]; u16 cntlid; u16 mtfa; u32 ctrl_config; u32 queue_count; u64 cap; u32 max_hw_sectors; u32 max_segments; u32 max_integrity_segments; u32 max_discard_sectors; u32 max_discard_segments; u32 max_zeroes_sectors; u32 max_zone_append; u16 crdt[3]; u16 oncs; u32 dmrsl; u16 oacs; u16 sqsize; u32 max_namespaces; atomic_t abort_limit; u8 vwc; u32 vs; u32 sgls; u16 kas; u8 npss; u8 apsta; u16 wctemp; u16 cctemp; u32 oaes; u32 aen_result; u32 ctratt; unsigned int shutdown_timeout; unsigned int kato; bool subsystem; unsigned long quirks; struct nvme_id_power_state psd[32]; struct nvme_effects_log *effects; struct xarray cels; struct work_struct scan_work; struct work_struct async_event_work; struct delayed_work ka_work; struct delayed_work failfast_work; struct nvme_command ka_cmd; unsigned long ka_last_check_time; struct work_struct fw_act_work; unsigned long events; u8 anacap; u8 anatt; u32 anagrpmax; u32 nanagrpid; struct mutex ana_lock; struct nvme_ana_rsp_hdr *ana_log_buf; size_t ana_log_size; struct timer_list anatt_timer; struct work_struct ana_work; u64 ps_max_latency_us; bool apst_enabled; u16 hmmaxd; u32 hmpre; u32 hmmin; u32 hmminds; u32 ioccsz; u32 iorcsz; u16 icdoff; u16 maxcmd; int nr_reconnects; unsigned long flags; struct 
nvmf_ctrl_options *opts; struct page *discard_page; unsigned long discard_page_busy; struct nvme_fault_inject fault_inject; enum nvme_ctrl_type cntrltype; enum nvme_dctype dctype; }; struct nvme_ctrl_ops { const char *name; struct module *module; unsigned int flags; const struct attribute_group **dev_attr_groups; int (*reg_read32)(struct nvme_ctrl *, u32, u32 *); int (*reg_write32)(struct nvme_ctrl *, u32, u32); int (*reg_read64)(struct nvme_ctrl *, u32, u64 *); void (*free_ctrl)(struct nvme_ctrl *); void (*submit_async_event)(struct nvme_ctrl *); void (*delete_ctrl)(struct nvme_ctrl *); void (*stop_ctrl)(struct nvme_ctrl *); int (*get_address)(struct nvme_ctrl *, char *, int); void (*print_device_info)(struct nvme_ctrl *); bool (*supports_pci_p2pdma)(struct nvme_ctrl *); }; enum nvme_subsys_type { NVME_NQN_DISC = 1, NVME_NQN_NVME = 2, NVME_NQN_CURR = 3, }; enum nvme_iopolicy { NVME_IOPOLICY_NUMA = 0, NVME_IOPOLICY_RR = 1, }; struct nvme_subsystem { int instance; struct device dev; struct kref ref; struct list_head entry; struct mutex lock; struct list_head ctrls; struct list_head nsheads; char subnqn[223]; char serial[20]; char model[40]; char firmware_rev[8]; u8 cmic; enum nvme_subsys_type subtype; u16 vendor_id; u16 awupf; struct ida ns_ida; enum nvme_iopolicy iopolicy; }; struct nvme_effects_log { __le32 acs[256]; __le32 iocs[256]; __u8 resv[2048]; }; struct nvme_ana_rsp_hdr { __le64 chgcnt; __le16 ngrps; __le16 rsvd10[3]; }; struct nvmf_host; struct nvmf_ctrl_options { unsigned int mask; int max_reconnects; char *transport; char *subsysnqn; char *traddr; char *trsvcid; char *host_traddr; char *host_iface; size_t queue_size; unsigned int nr_io_queues; unsigned int reconnect_delay; bool discovery_nqn; bool duplicate_connect; unsigned int kato; struct nvmf_host *host; char *dhchap_secret; char *dhchap_ctrl_secret; bool disable_sqflow; bool hdr_digest; bool data_digest; unsigned int nr_write_queues; unsigned int nr_poll_queues; int tos; int fast_io_fail_tmo; }; struct nvmf_host { struct kref ref; struct list_head list; char nqn[223]; uuid_t id; }; typedef void (*btf_trace_nvme_sq)(void *, struct request *, __le16, int); struct nvme_core_quirk_entry { u16 vid; const char *mn; const char *fr; unsigned long quirks; }; enum nvme_disposition { COMPLETE = 0, RETRY = 1, FAILOVER = 2, AUTHENTICATE = 3, }; enum { NVME_SC_SUCCESS = 0, NVME_SC_INVALID_OPCODE = 1, NVME_SC_INVALID_FIELD = 2, NVME_SC_CMDID_CONFLICT = 3, NVME_SC_DATA_XFER_ERROR = 4, NVME_SC_POWER_LOSS = 5, NVME_SC_INTERNAL = 6, NVME_SC_ABORT_REQ = 7, NVME_SC_ABORT_QUEUE = 8, NVME_SC_FUSED_FAIL = 9, NVME_SC_FUSED_MISSING = 10, NVME_SC_INVALID_NS = 11, NVME_SC_CMD_SEQ_ERROR = 12, NVME_SC_SGL_INVALID_LAST = 13, NVME_SC_SGL_INVALID_COUNT = 14, NVME_SC_SGL_INVALID_DATA = 15, NVME_SC_SGL_INVALID_METADATA = 16, NVME_SC_SGL_INVALID_TYPE = 17, NVME_SC_CMB_INVALID_USE = 18, NVME_SC_PRP_INVALID_OFFSET = 19, NVME_SC_ATOMIC_WU_EXCEEDED = 20, NVME_SC_OP_DENIED = 21, NVME_SC_SGL_INVALID_OFFSET = 22, NVME_SC_RESERVED = 23, NVME_SC_HOST_ID_INCONSIST = 24, NVME_SC_KA_TIMEOUT_EXPIRED = 25, NVME_SC_KA_TIMEOUT_INVALID = 26, NVME_SC_ABORTED_PREEMPT_ABORT = 27, NVME_SC_SANITIZE_FAILED = 28, NVME_SC_SANITIZE_IN_PROGRESS = 29, NVME_SC_SGL_INVALID_GRANULARITY = 30, NVME_SC_CMD_NOT_SUP_CMB_QUEUE = 31, NVME_SC_NS_WRITE_PROTECTED = 32, NVME_SC_CMD_INTERRUPTED = 33, NVME_SC_TRANSIENT_TR_ERR = 34, NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY = 36, NVME_SC_INVALID_IO_CMD_SET = 44, NVME_SC_LBA_RANGE = 128, NVME_SC_CAP_EXCEEDED = 129, NVME_SC_NS_NOT_READY = 130, 
NVME_SC_RESERVATION_CONFLICT = 131, NVME_SC_FORMAT_IN_PROGRESS = 132, NVME_SC_CQ_INVALID = 256, NVME_SC_QID_INVALID = 257, NVME_SC_QUEUE_SIZE = 258, NVME_SC_ABORT_LIMIT = 259, NVME_SC_ABORT_MISSING = 260, NVME_SC_ASYNC_LIMIT = 261, NVME_SC_FIRMWARE_SLOT = 262, NVME_SC_FIRMWARE_IMAGE = 263, NVME_SC_INVALID_VECTOR = 264, NVME_SC_INVALID_LOG_PAGE = 265, NVME_SC_INVALID_FORMAT = 266, NVME_SC_FW_NEEDS_CONV_RESET = 267, NVME_SC_INVALID_QUEUE = 268, NVME_SC_FEATURE_NOT_SAVEABLE = 269, NVME_SC_FEATURE_NOT_CHANGEABLE = 270, NVME_SC_FEATURE_NOT_PER_NS = 271, NVME_SC_FW_NEEDS_SUBSYS_RESET = 272, NVME_SC_FW_NEEDS_RESET = 273, NVME_SC_FW_NEEDS_MAX_TIME = 274, NVME_SC_FW_ACTIVATE_PROHIBITED = 275, NVME_SC_OVERLAPPING_RANGE = 276, NVME_SC_NS_INSUFFICIENT_CAP = 277, NVME_SC_NS_ID_UNAVAILABLE = 278, NVME_SC_NS_ALREADY_ATTACHED = 280, NVME_SC_NS_IS_PRIVATE = 281, NVME_SC_NS_NOT_ATTACHED = 282, NVME_SC_THIN_PROV_NOT_SUPP = 283, NVME_SC_CTRL_LIST_INVALID = 284, NVME_SC_SELT_TEST_IN_PROGRESS = 285, NVME_SC_BP_WRITE_PROHIBITED = 286, NVME_SC_CTRL_ID_INVALID = 287, NVME_SC_SEC_CTRL_STATE_INVALID = 288, NVME_SC_CTRL_RES_NUM_INVALID = 289, NVME_SC_RES_ID_INVALID = 290, NVME_SC_PMR_SAN_PROHIBITED = 291, NVME_SC_ANA_GROUP_ID_INVALID = 292, NVME_SC_ANA_ATTACH_FAILED = 293, NVME_SC_BAD_ATTRIBUTES = 384, NVME_SC_INVALID_PI = 385, NVME_SC_READ_ONLY = 386, NVME_SC_ONCS_NOT_SUPPORTED = 387, NVME_SC_CONNECT_FORMAT = 384, NVME_SC_CONNECT_CTRL_BUSY = 385, NVME_SC_CONNECT_INVALID_PARAM = 386, NVME_SC_CONNECT_RESTART_DISC = 387, NVME_SC_CONNECT_INVALID_HOST = 388, NVME_SC_DISCOVERY_RESTART = 400, NVME_SC_AUTH_REQUIRED = 401, NVME_SC_ZONE_BOUNDARY_ERROR = 440, NVME_SC_ZONE_FULL = 441, NVME_SC_ZONE_READ_ONLY = 442, NVME_SC_ZONE_OFFLINE = 443, NVME_SC_ZONE_INVALID_WRITE = 444, NVME_SC_ZONE_TOO_MANY_ACTIVE = 445, NVME_SC_ZONE_TOO_MANY_OPEN = 446, NVME_SC_ZONE_INVALID_TRANSITION = 447, NVME_SC_WRITE_FAULT = 640, NVME_SC_READ_ERROR = 641, NVME_SC_GUARD_CHECK = 642, NVME_SC_APPTAG_CHECK = 643, NVME_SC_REFTAG_CHECK = 644, NVME_SC_COMPARE_FAILED = 645, NVME_SC_ACCESS_DENIED = 646, NVME_SC_UNWRITTEN_BLOCK = 647, NVME_SC_INTERNAL_PATH_ERROR = 768, NVME_SC_ANA_PERSISTENT_LOSS = 769, NVME_SC_ANA_INACCESSIBLE = 770, NVME_SC_ANA_TRANSITION = 771, NVME_SC_CTRL_PATH_ERROR = 864, NVME_SC_HOST_PATH_ERROR = 880, NVME_SC_HOST_ABORTED_CMD = 881, NVME_SC_CRD = 6144, NVME_SC_MORE = 8192, NVME_SC_DNR = 16384, }; enum { NVME_REQ_CANCELLED = 1, NVME_REQ_USERCMD = 2, NVME_MPATH_IO_STATS = 4, }; enum nvme_ana_state { NVME_ANA_OPTIMIZED = 1, NVME_ANA_NONOPTIMIZED = 2, NVME_ANA_INACCESSIBLE = 3, NVME_ANA_PERSISTENT_LOSS = 4, NVME_ANA_CHANGE = 15, }; enum { NVME_CMD_FUSE_FIRST = 1, NVME_CMD_FUSE_SECOND = 2, NVME_CMD_SGL_METABUF = 64, NVME_CMD_SGL_METASEG = 128, NVME_CMD_SGL_ALL = 192, }; enum nvme_ctrl_flags { NVME_CTRL_FAILFAST_EXPIRED = 0, NVME_CTRL_ADMIN_Q_STOPPED = 1, NVME_CTRL_STARTED_ONCE = 2, NVME_CTRL_STOPPED = 3, NVME_CTRL_SKIP_ID_CNS_CS = 4, NVME_CTRL_DIRTY_CAPABILITY = 5, NVME_CTRL_FROZEN = 6, }; enum nvmf_capsule_command { nvme_fabrics_type_property_set = 0, nvme_fabrics_type_connect = 1, nvme_fabrics_type_property_get = 4, nvme_fabrics_type_auth_send = 5, nvme_fabrics_type_auth_receive = 6, }; enum nvme_zone_mgmt_action { NVME_ZONE_CLOSE = 1, NVME_ZONE_FINISH = 2, NVME_ZONE_OPEN = 3, NVME_ZONE_RESET = 4, NVME_ZONE_OFFLINE = 5, NVME_ZONE_SET_DESC_EXT = 16, }; enum nvme_opcode { nvme_cmd_flush = 0, nvme_cmd_write = 1, nvme_cmd_read = 2, nvme_cmd_write_uncor = 4, nvme_cmd_compare = 5, nvme_cmd_write_zeroes = 8, nvme_cmd_dsm = 9, nvme_cmd_verify = 
12, nvme_cmd_resv_register = 13, nvme_cmd_resv_report = 14, nvme_cmd_resv_acquire = 17, nvme_cmd_resv_release = 21, nvme_cmd_zone_mgmt_send = 121, nvme_cmd_zone_mgmt_recv = 122, nvme_cmd_zone_append = 125, nvme_cmd_vendor_start = 128, }; enum { NVME_CMD_EFFECTS_CSUPP = 1, NVME_CMD_EFFECTS_LBCC = 2, NVME_CMD_EFFECTS_NCC = 4, NVME_CMD_EFFECTS_NIC = 8, NVME_CMD_EFFECTS_CCC = 16, NVME_CMD_EFFECTS_CSE_MASK = 458752, NVME_CMD_EFFECTS_UUID_SEL = 524288, NVME_CMD_EFFECTS_SCOPE_MASK = 4293918720, }; enum nvme_admin_opcode { nvme_admin_delete_sq = 0, nvme_admin_create_sq = 1, nvme_admin_get_log_page = 2, nvme_admin_delete_cq = 4, nvme_admin_create_cq = 5, nvme_admin_identify = 6, nvme_admin_abort_cmd = 8, nvme_admin_set_features = 9, nvme_admin_get_features = 10, nvme_admin_async_event = 12, nvme_admin_ns_mgmt = 13, nvme_admin_activate_fw = 16, nvme_admin_download_fw = 17, nvme_admin_dev_self_test = 20, nvme_admin_ns_attach = 21, nvme_admin_keep_alive = 24, nvme_admin_directive_send = 25, nvme_admin_directive_recv = 26, nvme_admin_virtual_mgmt = 28, nvme_admin_nvme_mi_send = 29, nvme_admin_nvme_mi_recv = 30, nvme_admin_dbbuf = 124, nvme_admin_format_nvm = 128, nvme_admin_security_send = 129, nvme_admin_security_recv = 130, nvme_admin_sanitize_nvm = 132, nvme_admin_get_lba_status = 134, nvme_admin_vendor_start = 192, }; enum { NVME_QUEUE_PHYS_CONTIG = 1, NVME_CQ_IRQ_ENABLED = 2, NVME_SQ_PRIO_URGENT = 0, NVME_SQ_PRIO_HIGH = 2, NVME_SQ_PRIO_MEDIUM = 4, NVME_SQ_PRIO_LOW = 6, NVME_FEAT_ARBITRATION = 1, NVME_FEAT_POWER_MGMT = 2, NVME_FEAT_LBA_RANGE = 3, NVME_FEAT_TEMP_THRESH = 4, NVME_FEAT_ERR_RECOVERY = 5, NVME_FEAT_VOLATILE_WC = 6, NVME_FEAT_NUM_QUEUES = 7, NVME_FEAT_IRQ_COALESCE = 8, NVME_FEAT_IRQ_CONFIG = 9, NVME_FEAT_WRITE_ATOMIC = 10, NVME_FEAT_ASYNC_EVENT = 11, NVME_FEAT_AUTO_PST = 12, NVME_FEAT_HOST_MEM_BUF = 13, NVME_FEAT_TIMESTAMP = 14, NVME_FEAT_KATO = 15, NVME_FEAT_HCTM = 16, NVME_FEAT_NOPSC = 17, NVME_FEAT_RRL = 18, NVME_FEAT_PLM_CONFIG = 19, NVME_FEAT_PLM_WINDOW = 20, NVME_FEAT_HOST_BEHAVIOR = 22, NVME_FEAT_SANITIZE = 23, NVME_FEAT_SW_PROGRESS = 128, NVME_FEAT_HOST_ID = 129, NVME_FEAT_RESV_MASK = 130, NVME_FEAT_RESV_PERSIST = 131, NVME_FEAT_WRITE_PROTECT = 132, NVME_FEAT_VENDOR_START = 192, NVME_FEAT_VENDOR_END = 255, NVME_LOG_ERROR = 1, NVME_LOG_SMART = 2, NVME_LOG_FW_SLOT = 3, NVME_LOG_CHANGED_NS = 4, NVME_LOG_CMD_EFFECTS = 5, NVME_LOG_DEVICE_SELF_TEST = 6, NVME_LOG_TELEMETRY_HOST = 7, NVME_LOG_TELEMETRY_CTRL = 8, NVME_LOG_ENDURANCE_GROUP = 9, NVME_LOG_ANA = 12, NVME_LOG_DISC = 112, NVME_LOG_RESERVATION = 128, NVME_FWACT_REPL = 0, NVME_FWACT_REPL_ACTV = 8, NVME_FWACT_ACTV = 16, }; enum nvme_quirks { NVME_QUIRK_STRIPE_SIZE = 1, NVME_QUIRK_IDENTIFY_CNS = 2, NVME_QUIRK_DEALLOCATE_ZEROES = 4, NVME_QUIRK_DELAY_BEFORE_CHK_RDY = 8, NVME_QUIRK_NO_APST = 16, NVME_QUIRK_NO_DEEPEST_PS = 32, NVME_QUIRK_MEDIUM_PRIO_SQ = 128, NVME_QUIRK_IGNORE_DEV_SUBNQN = 256, NVME_QUIRK_DISABLE_WRITE_ZEROES = 512, NVME_QUIRK_SIMPLE_SUSPEND = 1024, NVME_QUIRK_SINGLE_VECTOR = 2048, NVME_QUIRK_128_BYTES_SQES = 4096, NVME_QUIRK_SHARED_TAGS = 8192, NVME_QUIRK_NO_TEMP_THRESH_CHANGE = 16384, NVME_QUIRK_NO_NS_DESC_LIST = 32768, NVME_QUIRK_DMA_ADDRESS_BITS_48 = 65536, NVME_QUIRK_SKIP_CID_GEN = 131072, NVME_QUIRK_BOGUS_NID = 262144, NVME_QUIRK_NO_SECONDARY_TEMP_THRESH = 524288, NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND = 1048576, }; enum { NVME_CAP_CSS_NVM = 1, NVME_CAP_CSS_CSI = 64, }; enum { NVME_CAP_CRMS_CRWMS = 576460752303423488ULL, NVME_CAP_CRMS_CRIMS = 1152921504606846976ULL, }; enum { NVME_AER_ERROR = 0, NVME_AER_SMART = 1, 
NVME_AER_NOTICE = 2, NVME_AER_CSS = 6, NVME_AER_VS = 7, }; enum { NVME_AER_ERROR_PERSIST_INT_ERR = 3, }; enum nvmf_fabrics_opcode { nvme_fabrics_command = 127, }; enum nvme_ns_features { NVME_NS_EXT_LBAS = 1, NVME_NS_METADATA_SUPPORTED = 2, NVME_NS_DEAC = 3, }; enum { NVME_RW_LR = 32768, NVME_RW_FUA = 16384, NVME_RW_APPEND_PIREMAP = 512, NVME_RW_DSM_FREQ_UNSPEC = 0, NVME_RW_DSM_FREQ_TYPICAL = 1, NVME_RW_DSM_FREQ_RARE = 2, NVME_RW_DSM_FREQ_READS = 3, NVME_RW_DSM_FREQ_WRITES = 4, NVME_RW_DSM_FREQ_RW = 5, NVME_RW_DSM_FREQ_ONCE = 6, NVME_RW_DSM_FREQ_PREFETCH = 7, NVME_RW_DSM_FREQ_TEMP = 8, NVME_RW_DSM_LATENCY_NONE = 0, NVME_RW_DSM_LATENCY_IDLE = 16, NVME_RW_DSM_LATENCY_NORM = 32, NVME_RW_DSM_LATENCY_LOW = 48, NVME_RW_DSM_SEQ_REQ = 64, NVME_RW_DSM_COMPRESSED = 128, NVME_RW_PRINFO_PRCHK_REF = 1024, NVME_RW_PRINFO_PRCHK_APP = 2048, NVME_RW_PRINFO_PRCHK_GUARD = 4096, NVME_RW_PRINFO_PRACT = 8192, NVME_RW_DTYPE_STREAMS = 16, NVME_WZ_DEAC = 512, }; enum { NVME_NS_FEAT_THIN = 1, NVME_NS_FEAT_ATOMICS = 2, NVME_NS_FEAT_IO_OPT = 16, NVME_NS_ATTR_RO = 1, NVME_NS_FLBAS_LBA_MASK = 15, NVME_NS_FLBAS_LBA_UMASK = 96, NVME_NS_FLBAS_LBA_SHIFT = 1, NVME_NS_FLBAS_META_EXT = 16, NVME_NS_NMIC_SHARED = 1, NVME_LBAF_RP_BEST = 0, NVME_LBAF_RP_BETTER = 1, NVME_LBAF_RP_GOOD = 2, NVME_LBAF_RP_DEGRADED = 3, NVME_NS_DPC_PI_LAST = 16, NVME_NS_DPC_PI_FIRST = 8, NVME_NS_DPC_PI_TYPE3 = 4, NVME_NS_DPC_PI_TYPE2 = 2, NVME_NS_DPC_PI_TYPE1 = 1, NVME_NS_DPS_PI_FIRST = 8, NVME_NS_DPS_PI_MASK = 7, NVME_NS_DPS_PI_TYPE1 = 1, NVME_NS_DPS_PI_TYPE2 = 2, NVME_NS_DPS_PI_TYPE3 = 3, }; enum { NVME_NVM_NS_16B_GUARD = 0, NVME_NVM_NS_32B_GUARD = 1, NVME_NVM_NS_64B_GUARD = 2, }; enum { NVME_DSMGMT_IDR = 1, NVME_DSMGMT_IDW = 2, NVME_DSMGMT_AD = 4, }; enum { NVME_ID_CNS_NS = 0, NVME_ID_CNS_CTRL = 1, NVME_ID_CNS_NS_ACTIVE_LIST = 2, NVME_ID_CNS_NS_DESC_LIST = 3, NVME_ID_CNS_CS_NS = 5, NVME_ID_CNS_CS_CTRL = 6, NVME_ID_CNS_NS_CS_INDEP = 8, NVME_ID_CNS_NS_PRESENT_LIST = 16, NVME_ID_CNS_NS_PRESENT = 17, NVME_ID_CNS_CTRL_NS_LIST = 18, NVME_ID_CNS_CTRL_LIST = 19, NVME_ID_CNS_SCNDRY_CTRL_LIST = 21, NVME_ID_CNS_NS_GRANULARITY = 22, NVME_ID_CNS_UUID_LIST = 23, }; enum { NVME_CTRL_CMIC_MULTI_PORT = 1, NVME_CTRL_CMIC_MULTI_CTRL = 2, NVME_CTRL_CMIC_ANA = 8, NVME_CTRL_ONCS_COMPARE = 1, NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 2, NVME_CTRL_ONCS_DSM = 4, NVME_CTRL_ONCS_WRITE_ZEROES = 8, NVME_CTRL_ONCS_RESERVATIONS = 32, NVME_CTRL_ONCS_TIMESTAMP = 64, NVME_CTRL_VWC_PRESENT = 1, NVME_CTRL_OACS_SEC_SUPP = 1, NVME_CTRL_OACS_NS_MNGT_SUPP = 8, NVME_CTRL_OACS_DIRECTIVES = 32, NVME_CTRL_OACS_DBBUF_SUPP = 256, NVME_CTRL_LPA_CMD_EFFECTS_LOG = 2, NVME_CTRL_CTRATT_128_ID = 1, NVME_CTRL_CTRATT_NON_OP_PSP = 2, NVME_CTRL_CTRATT_NVM_SETS = 4, NVME_CTRL_CTRATT_READ_RECV_LVLS = 8, NVME_CTRL_CTRATT_ENDURANCE_GROUPS = 16, NVME_CTRL_CTRATT_PREDICTABLE_LAT = 32, NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 128, NVME_CTRL_CTRATT_UUID_LIST = 512, }; enum { NVME_CSI_NVM = 0, NVME_CSI_ZNS = 2, }; enum { NVME_PS_FLAGS_MAX_POWER_SCALE = 1, NVME_PS_FLAGS_NON_OP_STATE = 2, }; enum { NVME_ENABLE_ACRE = 1, NVME_ENABLE_LBAFEE = 1, }; enum nvme_ctrl_attr { NVME_CTRL_ATTR_HID_128_BIT = 1, NVME_CTRL_ATTR_TBKAS = 64, NVME_CTRL_ATTR_ELBAS = 32768, }; enum { NVME_AER_NOTICE_NS_CHANGED = 0, NVME_AER_NOTICE_FW_ACT_STARTING = 1, NVME_AER_NOTICE_ANA = 3, NVME_AER_NOTICE_DISC_CHANGED = 240, }; enum { NVME_AEN_CFG_NS_ATTR = 256, NVME_AEN_CFG_FW_ACT = 512, NVME_AEN_CFG_ANA_CHANGE = 2048, NVME_AEN_CFG_DISC_CHANGE = -2147483648, }; enum { NVME_NIDT_EUI64 = 1, NVME_NIDT_NGUID = 2, NVME_NIDT_UUID = 3, NVME_NIDT_CSI = 4, }; 
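/*
 * Illustrative sketch, not part of the BTF-generated dump: hypothetical
 * helpers showing how the NVME_SC_* constants above decompose the
 * phase-stripped completion status word carried in struct
 * nvme_request::status (defined further down in this file).  Bits 7:0 hold
 * the status code, bits 10:8 the status code type (type 3 is "path
 * related"), and NVME_SC_CRD/NVME_SC_MORE/NVME_SC_DNR occupy the hint and
 * retry-control bits above them.
 */
static inline int nvme_status_example_is_path_error(__u16 status)
{
	/* Status code type 3 covers the ANA and host-path error group. */
	return (status & 0x700) == 0x300;
}

static inline int nvme_status_example_may_retry(__u16 status)
{
	/* The Do-Not-Retry bit forbids retrying the command on any path. */
	return !(status & NVME_SC_DNR);
}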
enum { NVME_NSTAT_NRDY = 1, }; enum { NVME_ID_NS_NVM_STS_MASK = 127, NVME_ID_NS_NVM_GUARD_SHIFT = 7, NVME_ID_NS_NVM_GUARD_MASK = 3, }; struct nvme_ns_head; struct nvme_ns { struct list_head list; struct nvme_ctrl *ctrl; struct request_queue *queue; struct gendisk *disk; enum nvme_ana_state ana_state; u32 ana_grpid; struct list_head siblings; struct kref kref; struct nvme_ns_head *head; int lba_shift; u16 ms; u16 pi_size; u16 sgs; u32 sws; u8 pi_type; u8 guard_type; u64 zsze; unsigned long features; unsigned long flags; struct cdev cdev; struct device cdev_device; struct nvme_fault_inject fault_inject; }; struct nvme_ns_ids { u8 eui64[8]; u8 nguid[16]; uuid_t uuid; u8 csi; }; struct nvme_ns_head { struct list_head list; struct srcu_struct srcu; struct nvme_subsystem *subsys; unsigned int ns_id; struct nvme_ns_ids ids; struct list_head entry; struct kref ref; bool shared; int instance; struct nvme_effects_log *effects; struct cdev cdev; struct device cdev_device; struct gendisk *disk; struct bio_list requeue_list; spinlock_t requeue_lock; struct work_struct requeue_work; struct mutex lock; unsigned long flags; struct nvme_ns __attribute__((btf_type_tag("rcu"))) *current_path[0]; }; struct trace_event_raw_nvme_setup_cmd { struct trace_entry ent; char disk[32]; int ctrl_id; int qid; u8 opcode; u8 flags; u8 fctype; u16 cid; u32 nsid; bool metadata; u8 cdw10[24]; char __data[0]; }; struct trace_event_raw_nvme_complete_rq { struct trace_entry ent; char disk[32]; int ctrl_id; int qid; int cid; u64 result; u8 retries; u8 flags; u16 status; char __data[0]; }; struct trace_event_raw_nvme_async_event { struct trace_entry ent; int ctrl_id; u32 result; char __data[0]; }; struct trace_event_raw_nvme_sq { struct trace_entry ent; int ctrl_id; char disk[32]; int qid; u16 sq_head; u16 sq_tail; char __data[0]; }; union nvme_result { __le16 u16; __le32 u32; __le64 u64; }; struct nvme_id_ctrl { __le16 vid; __le16 ssvid; char sn[20]; char mn[40]; char fr[8]; __u8 rab; __u8 ieee[3]; __u8 cmic; __u8 mdts; __le16 cntlid; __le32 ver; __le32 rtd3r; __le32 rtd3e; __le32 oaes; __le32 ctratt; __u8 rsvd100[11]; __u8 cntrltype; __u8 fguid[16]; __le16 crdt1; __le16 crdt2; __le16 crdt3; __u8 rsvd134[122]; __le16 oacs; __u8 acl; __u8 aerl; __u8 frmw; __u8 lpa; __u8 elpe; __u8 npss; __u8 avscc; __u8 apsta; __le16 wctemp; __le16 cctemp; __le16 mtfa; __le32 hmpre; __le32 hmmin; __u8 tnvmcap[16]; __u8 unvmcap[16]; __le32 rpmbs; __le16 edstt; __u8 dsto; __u8 fwug; __le16 kas; __le16 hctma; __le16 mntmt; __le16 mxtmt; __le32 sanicap; __le32 hmminds; __le16 hmmaxd; __u8 rsvd338[4]; __u8 anatt; __u8 anacap; __le32 anagrpmax; __le32 nanagrpid; __u8 rsvd352[160]; __u8 sqes; __u8 cqes; __le16 maxcmd; __le32 nn; __le16 oncs; __le16 fuses; __u8 fna; __u8 vwc; __le16 awun; __le16 awupf; __u8 nvscc; __u8 nwpc; __le16 acwu; __u8 rsvd534[2]; __le32 sgls; __le32 mnan; __u8 rsvd544[224]; char subnqn[256]; __u8 rsvd1024[768]; __le32 ioccsz; __le32 iorcsz; __le16 icdoff; __u8 ctrattr; __u8 msdbd; __u8 rsvd1804[2]; __u8 dctype; __u8 rsvd1807[241]; struct nvme_id_power_state psd[32]; __u8 vs[1024]; }; struct nvme_feat_host_behavior { __u8 acre; __u8 etdas; __u8 lbafee; __u8 resv1[509]; }; struct nvme_id_ctrl_nvm { __u8 vsl; __u8 wzsl; __u8 wusl; __u8 dmrl; __le32 dmrsl; __le64 dmsl; __u8 rsvd16[4080]; }; struct nvme_fw_slot_info_log { __u8 afi; __u8 rsvd1[7]; __le64 frs[7]; __u8 rsvd64[448]; }; struct nvme_ns_info { struct nvme_ns_ids ids; u32 nsid; __le32 anagrpid; bool is_shared; bool is_readonly; bool is_ready; bool is_removed; }; struct 
nvme_ns_id_desc { __u8 nidt; __u8 nidl; __le16 reserved; }; struct nvme_lbaf { __le16 ms; __u8 ds; __u8 rp; }; struct nvme_id_ns { __le64 nsze; __le64 ncap; __le64 nuse; __u8 nsfeat; __u8 nlbaf; __u8 flbas; __u8 mc; __u8 dpc; __u8 dps; __u8 nmic; __u8 rescap; __u8 fpi; __u8 dlfeat; __le16 nawun; __le16 nawupf; __le16 nacwu; __le16 nabsn; __le16 nabo; __le16 nabspf; __le16 noiob; __u8 nvmcap[16]; __le16 npwg; __le16 npwa; __le16 npdg; __le16 npda; __le16 nows; __u8 rsvd74[18]; __le32 anagrpid; __u8 rsvd96[3]; __u8 nsattr; __le16 nvmsetid; __le16 endgid; __u8 nguid[16]; __u8 eui64[8]; struct nvme_lbaf lbaf[64]; __u8 vs[3712]; }; struct nvme_id_ns_cs_indep { __u8 nsfeat; __u8 nmic; __u8 rescap; __u8 fpi; __le32 anagrpid; __u8 nsattr; __u8 rsvd9; __le16 nvmsetid; __le16 endgid; __u8 nstat; __u8 rsvd15[4081]; }; struct nvme_id_ns_nvm { __le64 lbstm; __u8 pic; __u8 rsvd9[3]; __le32 elbaf[64]; __u8 rsvd268[3828]; }; struct trace_event_data_offsets_nvme_setup_cmd {}; struct trace_event_data_offsets_nvme_complete_rq {}; struct trace_event_data_offsets_nvme_async_event {}; struct trace_event_data_offsets_nvme_sq {}; struct nvme_request { struct nvme_command *cmd; union nvme_result result; u8 genctr; u8 retries; u8 flags; u16 status; unsigned long start_time; struct nvme_ctrl *ctrl; }; struct nvme_dsm_range { __le32 cattr; __le32 nlb; __le64 slba; }; struct nvme_feat_auto_pst { __le64 entries[32]; }; enum { NVME_IOCTL_VEC = 1, NVME_IOCTL_PARTITION = 2, }; struct nvme_uring_cmd_pdu { union { struct bio *bio; struct request *req; }; u32 meta_len; u32 nvme_status; union { struct { void *meta; void __attribute__((btf_type_tag("user"))) *meta_buffer; }; u64 result; } u; }; struct nvme_user_io { __u8 opcode; __u8 flags; __u16 control; __u16 nblocks; __u16 rsvd; __u64 metadata; __u64 addr; __u64 slba; __u32 dsmgmt; __u32 reftag; __u16 apptag; __u16 appmask; }; struct nvme_uring_cmd { __u8 opcode; __u8 flags; __u16 rsvd1; __u32 nsid; __u32 cdw2; __u32 cdw3; __u64 metadata; __u64 addr; __u32 metadata_len; __u32 data_len; __u32 cdw10; __u32 cdw11; __u32 cdw12; __u32 cdw13; __u32 cdw14; __u32 cdw15; __u32 timeout_ms; __u32 rsvd2; }; struct nvme_uring_data { __u64 metadata; __u64 addr; __u32 data_len; __u32 metadata_len; __u32 timeout_ms; }; struct nvme_passthru_cmd { __u8 opcode; __u8 flags; __u16 rsvd1; __u32 nsid; __u32 cdw2; __u32 cdw3; __u64 metadata; __u64 addr; __u32 metadata_len; __u32 data_len; __u32 cdw10; __u32 cdw11; __u32 cdw12; __u32 cdw13; __u32 cdw14; __u32 cdw15; __u32 timeout_ms; __u32 result; }; struct nvme_passthru_cmd64 { __u8 opcode; __u8 flags; __u16 rsvd1; __u32 nsid; __u32 cdw2; __u32 cdw3; __u64 metadata; __u64 addr; __u32 metadata_len; union { __u32 data_len; __u32 vec_cnt; }; __u32 cdw10; __u32 cdw11; __u32 cdw12; __u32 cdw13; __u32 cdw14; __u32 cdw15; __u32 timeout_ms; __u32 rsvd2; __u64 result; }; enum nvme_pr_type { NVME_PR_WRITE_EXCLUSIVE = 1, NVME_PR_EXCLUSIVE_ACCESS = 2, NVME_PR_WRITE_EXCLUSIVE_REG_ONLY = 3, NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY = 4, NVME_PR_WRITE_EXCLUSIVE_ALL_REGS = 5, NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS = 6, }; enum nvme_eds { NVME_EXTENDED_DATA_STRUCT = 1, }; struct nvme_registered_ctrl { __le16 cntlid; __u8 rcsts; __u8 rsvd3[5]; __le64 hostid; __le64 rkey; }; struct nvme_reservation_status { __le32 gen; __u8 rtype; __u8 regctl[2]; __u8 resv5[2]; __u8 ptpls; __u8 resv10[14]; struct nvme_registered_ctrl regctl_ds[0]; }; struct nvme_registered_ctrl_ext { __le16 cntlid; __u8 rcsts; __u8 rsvd3[5]; __le64 rkey; __u8 hostid[16]; __u8 rsvd32[32]; }; struct 
nvme_reservation_status_ext { __le32 gen; __u8 rtype; __u8 regctl[2]; __u8 resv5[2]; __u8 ptpls; __u8 resv10[14]; __u8 rsvd24[40]; struct nvme_registered_ctrl_ext regctl_eds[0]; }; struct nvme_ana_group_desc { __le32 grpid; __le32 nnsids; __le64 chgcnt; __u8 state; __u8 rsvd17[15]; __le32 nsids[0]; }; enum { NVME_ZRA_ZONE_REPORT = 0, NVME_ZRASF_ZONE_REPORT_ALL = 0, NVME_ZRASF_ZONE_STATE_EMPTY = 1, NVME_ZRASF_ZONE_STATE_IMP_OPEN = 2, NVME_ZRASF_ZONE_STATE_EXP_OPEN = 3, NVME_ZRASF_ZONE_STATE_CLOSED = 4, NVME_ZRASF_ZONE_STATE_READONLY = 5, NVME_ZRASF_ZONE_STATE_FULL = 6, NVME_ZRASF_ZONE_STATE_OFFLINE = 7, NVME_REPORT_ZONE_PARTIAL = 1, }; enum { NVME_ZONE_TYPE_SEQWRITE_REQ = 2, }; struct nvme_id_ctrl_zns { __u8 zasl; __u8 rsvd1[4095]; }; struct nvme_zone_descriptor { __u8 zt; __u8 zs; __u8 za; __u8 rsvd3[5]; __le64 zcap; __le64 zslba; __le64 wp; __u8 rsvd32[32]; }; struct nvme_zns_lbafe { __le64 zsze; __u8 zdes; __u8 rsvd9[7]; }; struct nvme_id_ns_zns { __le16 zoc; __le16 ozcs; __le32 mar; __le32 mor; __le32 rrl; __le32 frl; __u8 rsvd20[2796]; struct nvme_zns_lbafe lbafe[64]; __u8 vs[256]; }; struct nvme_zone_report { __le64 nr_zones; __u8 resv8[56]; struct nvme_zone_descriptor entries[0]; }; enum { NVME_HOST_MEM_ENABLE = 1, NVME_HOST_MEM_RETURN = 2, }; enum { NVME_CMBMSC_CRE = 1, NVME_CMBMSC_CMSE = 2, }; enum { NVME_CMBSZ_SQS = 1, NVME_CMBSZ_CQS = 2, NVME_CMBSZ_LISTS = 4, NVME_CMBSZ_RDS = 8, NVME_CMBSZ_WDS = 16, NVME_CMBSZ_SZ_SHIFT = 12, NVME_CMBSZ_SZ_MASK = 1048575, NVME_CMBSZ_SZU_SHIFT = 8, NVME_CMBSZ_SZU_MASK = 15, }; enum { NVME_SGL_FMT_DATA_DESC = 0, NVME_SGL_FMT_SEG_DESC = 2, NVME_SGL_FMT_LAST_SEG_DESC = 3, NVME_KEY_SGL_FMT_DATA_DESC = 4, NVME_TRANSPORT_SGL_DATA_DESC = 5, }; struct nvme_queue; struct nvme_host_mem_buf_desc; struct nvme_dev { struct nvme_queue *queues; struct blk_mq_tag_set tagset; struct blk_mq_tag_set admin_tagset; u32 *dbs; struct device *dev; struct dma_pool *prp_page_pool; struct dma_pool *prp_small_pool; unsigned int online_queues; unsigned int max_qid; unsigned int io_queues[3]; unsigned int num_vecs; u32 q_depth; int io_sqes; u32 db_stride; void *bar; unsigned long bar_mapped_size; struct mutex shutdown_lock; bool subsystem; u64 cmb_size; bool cmb_use_sqes; u32 cmbsz; u32 cmbloc; struct nvme_ctrl ctrl; u32 last_ps; bool hmb; mempool_t *iod_mempool; __le32 *dbbuf_dbs; dma_addr_t dbbuf_dbs_dma_addr; __le32 *dbbuf_eis; dma_addr_t dbbuf_eis_dma_addr; u64 host_mem_size; u32 nr_host_mem_descs; dma_addr_t host_mem_descs_dma; struct nvme_host_mem_buf_desc *host_mem_descs; void **host_mem_desc_bufs; unsigned int nr_allocated_queues; unsigned int nr_write_queues; unsigned int nr_poll_queues; }; struct nvme_completion; struct nvme_queue { struct nvme_dev *dev; spinlock_t sq_lock; void *sq_cmds; long: 64; long: 64; long: 64; long: 64; long: 64; spinlock_t cq_poll_lock; struct nvme_completion *cqes; dma_addr_t sq_dma_addr; dma_addr_t cq_dma_addr; u32 *q_db; u32 q_depth; u16 cq_vector; u16 sq_tail; u16 last_sq_tail; u16 cq_head; u16 qid; u8 cq_phase; u8 sqes; unsigned long flags; __le32 *dbbuf_sq_db; __le32 *dbbuf_cq_db; __le32 *dbbuf_sq_ei; __le32 *dbbuf_cq_ei; struct completion delete_done; }; struct nvme_completion { union nvme_result result; __le16 sq_head; __le16 sq_id; __u16 command_id; __le16 status; }; struct nvme_host_mem_buf_desc { __le64 addr; __le32 size; __u32 rsvd; }; union nvme_descriptor { struct nvme_sgl_desc *sg_list; __le64 *prp_list; }; struct nvme_iod { struct nvme_request req; struct nvme_command cmd; bool aborted; s8 nr_allocations; unsigned int dma_len; 
dma_addr_t first_dma; dma_addr_t meta_dma; struct sg_table sgt; union nvme_descriptor list[5]; }; typedef void (*btf_trace_spi_controller_idle)(void *, struct spi_controller *); struct ptp_system_timestamp { struct timespec64 pre_ts; struct timespec64 post_ts; }; struct spi_mem { struct spi_device *spi; void *drvpriv; const char *name; }; enum spi_mem_data_dir { SPI_MEM_NO_DATA = 0, SPI_MEM_DATA_IN = 1, SPI_MEM_DATA_OUT = 2, }; struct spi_mem_op { struct { u8 nbytes; u8 buswidth; u8 dtr: 1; u8 __pad: 7; u16 opcode; } cmd; struct { u8 nbytes; u8 buswidth; u8 dtr: 1; u8 __pad: 7; u64 val; } addr; struct { u8 nbytes; u8 buswidth; u8 dtr: 1; u8 __pad: 7; } dummy; struct { u8 buswidth; u8 dtr: 1; u8 ecc: 1; u8 __pad: 6; enum spi_mem_data_dir dir; unsigned int nbytes; union { void *in; const void *out; } buf; } data; }; struct spi_mem_dirmap_info { struct spi_mem_op op_tmpl; u64 offset; u64 length; }; struct spi_mem_dirmap_desc { struct spi_mem *mem; struct spi_mem_dirmap_info info; unsigned int nodirmap; void *priv; }; typedef void (*btf_trace_spi_controller_busy)(void *, struct spi_controller *); typedef void (*btf_trace_spi_setup)(void *, struct spi_device *, int); typedef void (*btf_trace_spi_set_cs)(void *, struct spi_device *, bool); typedef void (*btf_trace_spi_message_submit)(void *, struct spi_message *); typedef void (*btf_trace_spi_message_start)(void *, struct spi_message *); typedef void (*btf_trace_spi_message_done)(void *, struct spi_message *); typedef void (*btf_trace_spi_transfer_start)(void *, struct spi_message *, struct spi_transfer *); typedef void (*btf_trace_spi_transfer_stop)(void *, struct spi_message *, struct spi_transfer *); struct spi_board_info { char modalias[32]; const void *platform_data; const struct software_node *swnode; void *controller_data; int irq; u32 max_speed_hz; u16 bus_num; u16 chip_select; u32 mode; u64 android_kabi_reserved1; }; struct boardinfo { struct list_head list; struct spi_board_info board_info; }; struct trace_event_raw_spi_controller { struct trace_entry ent; int bus_num; char __data[0]; }; struct trace_event_raw_spi_setup { struct trace_entry ent; int bus_num; int chip_select; unsigned long mode; unsigned int bits_per_word; unsigned int max_speed_hz; int status; char __data[0]; }; struct trace_event_raw_spi_set_cs { struct trace_entry ent; int bus_num; int chip_select; unsigned long mode; bool enable; char __data[0]; }; struct trace_event_raw_spi_message { struct trace_entry ent; int bus_num; int chip_select; struct spi_message *msg; char __data[0]; }; struct trace_event_raw_spi_message_done { struct trace_entry ent; int bus_num; int chip_select; struct spi_message *msg; unsigned int frame; unsigned int actual; char __data[0]; }; struct trace_event_raw_spi_transfer { struct trace_entry ent; int bus_num; int chip_select; struct spi_transfer *xfer; int len; u32 __data_loc_rx_buf; u32 __data_loc_tx_buf; char __data[0]; }; struct spi_device_id; struct spi_driver { const struct spi_device_id *id_table; int (*probe)(struct spi_device *); void (*remove)(struct spi_device *); void (*shutdown)(struct spi_device *); struct device_driver driver; u64 android_kabi_reserved1; }; struct spi_device_id { char name[32]; kernel_ulong_t driver_data; }; typedef void (*spi_res_release_t)(struct spi_controller *, struct spi_message *, void *); struct spi_res { struct list_head entry; spi_res_release_t release; unsigned long long data[0]; }; struct trace_event_data_offsets_spi_transfer { u32 rx_buf; u32 tx_buf; }; struct acpi_device; struct 
spi_replaced_transfers; typedef void (*spi_replaced_release_t)(struct spi_controller *, struct spi_message *, struct spi_replaced_transfers *); struct spi_replaced_transfers { spi_replaced_release_t release; void *extradata; struct list_head replaced_transfers; struct list_head *replaced_after; size_t inserted; struct spi_transfer inserted_transfers[0]; }; struct trace_event_data_offsets_spi_controller {}; struct trace_event_data_offsets_spi_setup {}; struct trace_event_data_offsets_spi_set_cs {}; struct trace_event_data_offsets_spi_message {}; struct trace_event_data_offsets_spi_message_done {}; struct spi_mem_driver { struct spi_driver spidrv; int (*probe)(struct spi_mem *); int (*remove)(struct spi_mem *); void (*shutdown)(struct spi_mem *); }; typedef void (*btf_trace_spmi_write_begin)(void *, u8, u8, u16, u8, const u8 *); typedef void (*btf_trace_spmi_write_end)(void *, u8, u8, u16, int); typedef void (*btf_trace_spmi_read_begin)(void *, u8, u8, u16); typedef void (*btf_trace_spmi_read_end)(void *, u8, u8, u16, int, u8, const u8 *); typedef void (*btf_trace_spmi_cmd)(void *, u8, u8, int); struct trace_event_raw_spmi_write_begin { struct trace_entry ent; u8 opcode; u8 sid; u16 addr; u8 len; u32 __data_loc_buf; char __data[0]; }; struct trace_event_raw_spmi_write_end { struct trace_entry ent; u8 opcode; u8 sid; u16 addr; int ret; char __data[0]; }; struct trace_event_raw_spmi_read_begin { struct trace_entry ent; u8 opcode; u8 sid; u16 addr; char __data[0]; }; struct trace_event_raw_spmi_read_end { struct trace_entry ent; u8 opcode; u8 sid; u16 addr; int ret; u8 len; u32 __data_loc_buf; char __data[0]; }; struct trace_event_raw_spmi_cmd { struct trace_entry ent; u8 opcode; u8 sid; int ret; char __data[0]; }; struct spmi_driver { struct device_driver driver; int (*probe)(struct spmi_device *); void (*remove)(struct spmi_device *); void (*shutdown)(struct spmi_device *); u64 android_kabi_reserved1; }; struct trace_event_data_offsets_spmi_write_begin { u32 buf; }; struct trace_event_data_offsets_spmi_read_end { u32 buf; }; struct trace_event_data_offsets_spmi_write_end {}; struct trace_event_data_offsets_spmi_read_begin {}; struct trace_event_data_offsets_spmi_cmd {}; enum { NETIF_F_SG_BIT = 0, NETIF_F_IP_CSUM_BIT = 1, __UNUSED_NETIF_F_1 = 2, NETIF_F_HW_CSUM_BIT = 3, NETIF_F_IPV6_CSUM_BIT = 4, NETIF_F_HIGHDMA_BIT = 5, NETIF_F_FRAGLIST_BIT = 6, NETIF_F_HW_VLAN_CTAG_TX_BIT = 7, NETIF_F_HW_VLAN_CTAG_RX_BIT = 8, NETIF_F_HW_VLAN_CTAG_FILTER_BIT = 9, NETIF_F_VLAN_CHALLENGED_BIT = 10, NETIF_F_GSO_BIT = 11, NETIF_F_LLTX_BIT = 12, NETIF_F_NETNS_LOCAL_BIT = 13, NETIF_F_GRO_BIT = 14, NETIF_F_LRO_BIT = 15, NETIF_F_GSO_SHIFT = 16, NETIF_F_TSO_BIT = 16, NETIF_F_GSO_ROBUST_BIT = 17, NETIF_F_TSO_ECN_BIT = 18, NETIF_F_TSO_MANGLEID_BIT = 19, NETIF_F_TSO6_BIT = 20, NETIF_F_FSO_BIT = 21, NETIF_F_GSO_GRE_BIT = 22, NETIF_F_GSO_GRE_CSUM_BIT = 23, NETIF_F_GSO_IPXIP4_BIT = 24, NETIF_F_GSO_IPXIP6_BIT = 25, NETIF_F_GSO_UDP_TUNNEL_BIT = 26, NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT = 27, NETIF_F_GSO_PARTIAL_BIT = 28, NETIF_F_GSO_TUNNEL_REMCSUM_BIT = 29, NETIF_F_GSO_SCTP_BIT = 30, NETIF_F_GSO_ESP_BIT = 31, NETIF_F_GSO_UDP_BIT = 32, NETIF_F_GSO_UDP_L4_BIT = 33, NETIF_F_GSO_FRAGLIST_BIT = 34, NETIF_F_GSO_LAST = 34, NETIF_F_FCOE_CRC_BIT = 35, NETIF_F_SCTP_CRC_BIT = 36, NETIF_F_FCOE_MTU_BIT = 37, NETIF_F_NTUPLE_BIT = 38, NETIF_F_RXHASH_BIT = 39, NETIF_F_RXCSUM_BIT = 40, NETIF_F_NOCACHE_COPY_BIT = 41, NETIF_F_LOOPBACK_BIT = 42, NETIF_F_RXFCS_BIT = 43, NETIF_F_RXALL_BIT = 44, NETIF_F_HW_VLAN_STAG_TX_BIT = 45, 
NETIF_F_HW_VLAN_STAG_RX_BIT = 46, NETIF_F_HW_VLAN_STAG_FILTER_BIT = 47, NETIF_F_HW_L2FW_DOFFLOAD_BIT = 48, NETIF_F_HW_TC_BIT = 49, NETIF_F_HW_ESP_BIT = 50, NETIF_F_HW_ESP_TX_CSUM_BIT = 51, NETIF_F_RX_UDP_TUNNEL_PORT_BIT = 52, NETIF_F_HW_TLS_TX_BIT = 53, NETIF_F_HW_TLS_RX_BIT = 54, NETIF_F_GRO_HW_BIT = 55, NETIF_F_HW_TLS_RECORD_BIT = 56, NETIF_F_GRO_FRAGLIST_BIT = 57, NETIF_F_HW_MACSEC_BIT = 58, NETIF_F_GRO_UDP_FWD_BIT = 59, NETIF_F_HW_HSR_TAG_INS_BIT = 60, NETIF_F_HW_HSR_TAG_RM_BIT = 61, NETIF_F_HW_HSR_FWD_BIT = 62, NETIF_F_HW_HSR_DUP_BIT = 63, NETDEV_FEATURE_COUNT = 64, }; enum { SKBTX_HW_TSTAMP = 1, SKBTX_SW_TSTAMP = 2, SKBTX_IN_PROGRESS = 4, SKBTX_HW_TSTAMP_USE_CYCLES = 8, SKBTX_WIFI_STATUS = 16, SKBTX_HW_TSTAMP_NETDEV = 32, SKBTX_SCHED_TSTAMP = 64, }; enum { IFLA_UNSPEC = 0, IFLA_ADDRESS = 1, IFLA_BROADCAST = 2, IFLA_IFNAME = 3, IFLA_MTU = 4, IFLA_LINK = 5, IFLA_QDISC = 6, IFLA_STATS = 7, IFLA_COST = 8, IFLA_PRIORITY = 9, IFLA_MASTER = 10, IFLA_WIRELESS = 11, IFLA_PROTINFO = 12, IFLA_TXQLEN = 13, IFLA_MAP = 14, IFLA_WEIGHT = 15, IFLA_OPERSTATE = 16, IFLA_LINKMODE = 17, IFLA_LINKINFO = 18, IFLA_NET_NS_PID = 19, IFLA_IFALIAS = 20, IFLA_NUM_VF = 21, IFLA_VFINFO_LIST = 22, IFLA_STATS64 = 23, IFLA_VF_PORTS = 24, IFLA_PORT_SELF = 25, IFLA_AF_SPEC = 26, IFLA_GROUP = 27, IFLA_NET_NS_FD = 28, IFLA_EXT_MASK = 29, IFLA_PROMISCUITY = 30, IFLA_NUM_TX_QUEUES = 31, IFLA_NUM_RX_QUEUES = 32, IFLA_CARRIER = 33, IFLA_PHYS_PORT_ID = 34, IFLA_CARRIER_CHANGES = 35, IFLA_PHYS_SWITCH_ID = 36, IFLA_LINK_NETNSID = 37, IFLA_PHYS_PORT_NAME = 38, IFLA_PROTO_DOWN = 39, IFLA_GSO_MAX_SEGS = 40, IFLA_GSO_MAX_SIZE = 41, IFLA_PAD = 42, IFLA_XDP = 43, IFLA_EVENT = 44, IFLA_NEW_NETNSID = 45, IFLA_IF_NETNSID = 46, IFLA_TARGET_NETNSID = 46, IFLA_CARRIER_UP_COUNT = 47, IFLA_CARRIER_DOWN_COUNT = 48, IFLA_NEW_IFINDEX = 49, IFLA_MIN_MTU = 50, IFLA_MAX_MTU = 51, IFLA_PROP_LIST = 52, IFLA_ALT_IFNAME = 53, IFLA_PERM_ADDRESS = 54, IFLA_PROTO_DOWN_REASON = 55, IFLA_PARENT_DEV_NAME = 56, IFLA_PARENT_DEV_BUS_NAME = 57, IFLA_GRO_MAX_SIZE = 58, IFLA_TSO_MAX_SIZE = 59, IFLA_TSO_MAX_SEGS = 60, IFLA_ALLMULTI = 61, IFLA_DEVLINK_PORT = 62, IFLA_GSO_IPV4_MAX_SIZE = 63, IFLA_GRO_IPV4_MAX_SIZE = 64, __IFLA_MAX = 65, }; struct ip_tunnel_parm { char name[16]; int link; __be16 i_flags; __be16 o_flags; __be32 i_key; __be32 o_key; struct iphdr iph; }; struct nh_info; struct nh_group; struct nexthop { struct rb_node rb_node; struct list_head fi_list; struct list_head f6i_list; struct list_head fdb_list; struct list_head grp_list; struct net *net; u32 id; u8 protocol; u8 nh_flags; bool is_group; refcount_t refcnt; struct callback_head rcu; union { struct nh_info __attribute__((btf_type_tag("rcu"))) *nh_info; struct nh_group __attribute__((btf_type_tag("rcu"))) *nh_grp; }; }; struct fib_info; struct fib_nh { struct fib_nh_common nh_common; struct hlist_node nh_hash; struct fib_info *nh_parent; __be32 nh_saddr; int nh_saddr_genid; }; struct nh_info { struct hlist_node dev_hash; struct nexthop *nh_parent; u8 family; bool reject_nh; bool fdb_nh; union { struct fib_nh_common fib_nhc; struct fib_nh fib_nh; struct fib6_nh fib6_nh; }; }; struct fib_info { struct hlist_node fib_hash; struct hlist_node fib_lhash; struct list_head nh_list; struct net *fib_net; refcount_t fib_treeref; refcount_t fib_clntref; unsigned int fib_flags; unsigned char fib_dead; unsigned char fib_protocol; unsigned char fib_scope; unsigned char fib_type; __be32 fib_prefsrc; u32 fib_tb_id; u32 fib_priority; struct dst_metrics *fib_metrics; int fib_nhs; bool fib_nh_is_v6; bool nh_updated; 
bool pfsrc_removed; struct nexthop *nh; struct callback_head rcu; struct fib_nh fib_nh[0]; }; struct nh_grp_entry { struct nexthop *nh; u8 weight; union { struct { atomic_t upper_bound; } hthr; struct { struct list_head uw_nh_entry; u16 count_buckets; u16 wants_buckets; } res; }; struct list_head nh_list; struct nexthop *nh_parent; }; struct nh_res_table; struct nh_group { struct nh_group *spare; u16 num_nh; bool is_multipath; bool hash_threshold; bool resilient; bool fdb_nh; bool has_v4; struct nh_res_table __attribute__((btf_type_tag("rcu"))) *res_table; struct nh_grp_entry nh_entries[0]; }; struct nh_res_bucket { struct nh_grp_entry __attribute__((btf_type_tag("rcu"))) *nh_entry; atomic_long_t used_time; unsigned long migrated_time; bool occupied; u8 nh_flags; }; struct nh_res_table { struct net *net; u32 nhg_id; struct delayed_work upkeep_dw; struct list_head uw_nh_entries; unsigned long unbalanced_since; u32 idle_timer; u32 unbalanced_timer; u16 num_nh_buckets; struct nh_res_bucket nh_buckets[0]; }; enum noise_lengths { NOISE_PUBLIC_KEY_LEN = 32, NOISE_SYMMETRIC_KEY_LEN = 32, NOISE_TIMESTAMP_LEN = 12, NOISE_AUTHTAG_LEN = 16, NOISE_HASH_LEN = 32, }; enum index_hashtable_type { INDEX_HASHTABLE_HANDSHAKE = 1, INDEX_HASHTABLE_KEYPAIR = 2, }; enum noise_handshake_state { HANDSHAKE_ZEROED = 0, HANDSHAKE_CREATED_INITIATION = 1, HANDSHAKE_CONSUMED_INITIATION = 2, HANDSHAKE_CREATED_RESPONSE = 3, HANDSHAKE_CONSUMED_RESPONSE = 4, }; enum message_type { MESSAGE_INVALID = 0, MESSAGE_HANDSHAKE_INITIATION = 1, MESSAGE_HANDSHAKE_RESPONSE = 2, MESSAGE_HANDSHAKE_COOKIE = 3, MESSAGE_DATA = 4, }; enum limits { REKEY_AFTER_MESSAGES = 1152921504606846976ULL, REJECT_AFTER_MESSAGES = 18446744073709543486ULL, REKEY_TIMEOUT = 5ULL, REKEY_TIMEOUT_JITTER_MAX_JIFFIES = 83ULL, REKEY_AFTER_TIME = 120ULL, REJECT_AFTER_TIME = 180ULL, INITIATIONS_PER_SECOND = 50ULL, MAX_PEERS_PER_DEVICE = 1048576ULL, KEEPALIVE_TIMEOUT = 10ULL, MAX_TIMER_HANDSHAKES = 18ULL, MAX_QUEUED_INCOMING_HANDSHAKES = 4096ULL, MAX_STAGED_PACKETS = 128ULL, MAX_QUEUED_PACKETS = 1024ULL, }; enum curve25519_lengths { CURVE25519_KEY_SIZE = 32, }; struct wg_peer; struct index_hashtable_entry { struct wg_peer *peer; struct hlist_node index_hash; enum index_hashtable_type type; __le32 index; }; struct noise_symmetric_key { u8 key[32]; u64 birthdate; bool is_valid; }; struct noise_replay_counter { u64 counter; spinlock_t lock; unsigned long backtrack[128]; }; struct noise_keypair { struct index_hashtable_entry entry; struct noise_symmetric_key sending; atomic64_t sending_counter; struct noise_symmetric_key receiving; struct noise_replay_counter receiving_counter; __le32 remote_index; bool i_am_the_initiator; struct kref refcount; struct callback_head rcu; u64 internal_id; }; struct prev_queue { struct sk_buff *head; struct sk_buff *tail; struct sk_buff *peeked; struct { struct sk_buff *next; struct sk_buff *prev; } empty; atomic_t count; }; struct noise_keypairs { struct noise_keypair __attribute__((btf_type_tag("rcu"))) *current_keypair; struct noise_keypair __attribute__((btf_type_tag("rcu"))) *previous_keypair; struct noise_keypair __attribute__((btf_type_tag("rcu"))) *next_keypair; spinlock_t keypair_update_lock; }; struct endpoint { union { struct sockaddr addr; struct sockaddr_in addr4; struct sockaddr_in6 addr6; }; union { struct { struct in_addr src4; int src_if4; }; struct in6_addr src6; }; }; struct dst_cache_pcpu; struct dst_cache { struct dst_cache_pcpu __attribute__((btf_type_tag("percpu"))) *cache; unsigned long reset_ts; }; struct 
noise_static_identity; struct noise_handshake { struct index_hashtable_entry entry; enum noise_handshake_state state; u64 last_initiation_consumption; struct noise_static_identity *static_identity; u8 ephemeral_private[32]; u8 remote_static[32]; u8 remote_ephemeral[32]; u8 precomputed_static_static[32]; u8 preshared_key[32]; u8 hash[32]; u8 chaining_key[32]; u8 latest_timestamp[12]; __le32 remote_index; struct rw_semaphore lock; }; struct cookie { u64 birthdate; bool is_valid; u8 cookie[16]; bool have_sent_mac1; u8 last_mac1_sent[16]; u8 cookie_decryption_key[32]; u8 message_mac1_key[32]; struct rw_semaphore lock; }; struct gro_list { struct list_head list; int count; }; struct napi_struct { struct list_head poll_list; unsigned long state; int weight; int defer_hard_irqs_count; unsigned long gro_bitmask; int (*poll)(struct napi_struct *, int); int list_owner; struct net_device *dev; struct gro_list gro_hash[8]; struct sk_buff *skb; struct list_head rx_list; int rx_count; unsigned int napi_id; struct hrtimer timer; struct task_struct *thread; struct list_head dev_list; struct hlist_node napi_hash_node; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct wg_device; struct wg_peer { struct wg_device *device; struct prev_queue tx_queue; struct prev_queue rx_queue; struct sk_buff_head staged_packet_queue; int serial_work_cpu; bool is_dead; struct noise_keypairs keypairs; struct endpoint endpoint; struct dst_cache endpoint_cache; rwlock_t endpoint_lock; struct noise_handshake handshake; atomic64_t last_sent_handshake; struct work_struct transmit_handshake_work; struct work_struct clear_peer_work; struct work_struct transmit_packet_work; struct cookie latest_cookie; struct hlist_node pubkey_hash; u64 rx_bytes; u64 tx_bytes; struct timer_list timer_retransmit_handshake; struct timer_list timer_send_keepalive; struct timer_list timer_new_handshake; struct timer_list timer_zero_key_material; struct timer_list timer_persistent_keepalive; unsigned int timer_handshake_attempts; u16 persistent_keepalive_interval; bool timer_need_another_keepalive; bool sent_lastminute_handshake; struct timespec64 walltime_last_handshake; struct kref refcount; struct callback_head rcu; struct list_head peer_list; struct list_head allowedips_list; struct napi_struct napi; u64 internal_id; }; struct multicore_worker; struct crypt_queue { struct ptr_ring ring; struct multicore_worker __attribute__((btf_type_tag("percpu"))) *worker; int last_cpu; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct noise_static_identity { u8 static_public[32]; u8 static_private[32]; struct rw_semaphore lock; bool has_identity; }; struct cookie_checker { u8 secret[32]; u8 cookie_encryption_key[32]; u8 message_mac1_key[32]; u64 secret_birthdate; struct rw_semaphore secret_lock; struct wg_device *device; }; struct allowedips_node; struct allowedips { struct allowedips_node __attribute__((btf_type_tag("rcu"))) *root4; struct allowedips_node __attribute__((btf_type_tag("rcu"))) *root6; u64 seq; }; struct pubkey_hashtable; struct index_hashtable; struct wg_device { struct net_device *dev; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct crypt_queue encrypt_queue; struct crypt_queue decrypt_queue; struct crypt_queue handshake_queue; struct sock __attribute__((btf_type_tag("rcu"))) *sock4; struct sock __attribute__((btf_type_tag("rcu"))) *sock6; struct net __attribute__((btf_type_tag("rcu"))) *creating_net; struct noise_static_identity 
static_identity; struct workqueue_struct *packet_crypt_wq; struct workqueue_struct *handshake_receive_wq; struct workqueue_struct *handshake_send_wq; struct cookie_checker cookie_checker; struct pubkey_hashtable *peer_hashtable; struct index_hashtable *index_hashtable; struct allowedips peer_allowedips; struct mutex device_update_lock; struct mutex socket_update_lock; struct list_head device_list; struct list_head peer_list; atomic_t handshake_queue_len; unsigned int num_peers; unsigned int device_update_gen; u32 fwmark; u16 incoming_port; long: 64; long: 64; long: 64; }; struct multicore_worker { void *ptr; struct work_struct work; }; struct pubkey_hashtable { struct hlist_head hashtable[2048]; siphash_key_t key; struct mutex lock; }; struct index_hashtable { struct hlist_head hashtable[8192]; spinlock_t lock; }; struct allowedips_node { struct wg_peer __attribute__((btf_type_tag("rcu"))) *peer; struct allowedips_node __attribute__((btf_type_tag("rcu"))) *bit[2]; u8 cidr; u8 bit_at_a; u8 bit_at_b; u8 bitlen; long: 0; u8 bits[16]; unsigned long parent_bit_packed; union { struct list_head peer_list; struct callback_head rcu; }; }; struct dst_cache_pcpu { unsigned long refresh_ts; struct dst_entry *dst; u32 cookie; union { struct in_addr in_saddr; struct in6_addr in6_saddr; }; }; struct message_header { __le32 type; }; struct message_macs { u8 mac1[16]; u8 mac2[16]; }; struct message_handshake_initiation { struct message_header header; __le32 sender_index; u8 unencrypted_ephemeral[32]; u8 encrypted_static[48]; u8 encrypted_timestamp[28]; struct message_macs macs; }; struct message_handshake_response { struct message_header header; __le32 sender_index; __le32 receiver_index; u8 unencrypted_ephemeral[32]; u8 encrypted_nothing[16]; struct message_macs macs; }; struct in_ifaddr { struct hlist_node hash; struct in_ifaddr __attribute__((btf_type_tag("rcu"))) *ifa_next; struct in_device *ifa_dev; struct callback_head callback_head; __be32 ifa_local; __be32 ifa_address; __be32 ifa_mask; __u32 ifa_rt_priority; __be32 ifa_broadcast; unsigned char ifa_scope; unsigned char ifa_prefixlen; unsigned char ifa_proto; __u32 ifa_flags; char ifa_label[16]; __u32 ifa_valid_lft; __u32 ifa_preferred_lft; unsigned long ifa_cstamp; unsigned long ifa_tstamp; }; struct ip_sf_list; struct ip_mc_list { struct in_device *interface; __be32 multiaddr; unsigned int sfmode; struct ip_sf_list *sources; struct ip_sf_list *tomb; unsigned long sfcount[2]; union { struct ip_mc_list *next; struct ip_mc_list __attribute__((btf_type_tag("rcu"))) *next_rcu; }; struct ip_mc_list __attribute__((btf_type_tag("rcu"))) *next_hash; struct timer_list timer; int users; refcount_t refcnt; spinlock_t lock; char tm_running; char reporter; char unsolicit_count; char loaded; unsigned char gsquery; unsigned char crcount; struct callback_head rcu; }; enum macsec_offload { MACSEC_OFFLOAD_OFF = 0, MACSEC_OFFLOAD_PHY = 1, MACSEC_OFFLOAD_MAC = 2, __MACSEC_OFFLOAD_END = 3, MACSEC_OFFLOAD_MAX = 2, }; struct macsec_secy; struct macsec_rx_sc; struct macsec_rx_sa; struct macsec_tx_sa; struct macsec_tx_sc_stats; struct macsec_tx_sa_stats; struct macsec_rx_sc_stats; struct macsec_rx_sa_stats; struct macsec_dev_stats; struct macsec_context { union { struct net_device *netdev; struct phy_device *phydev; }; enum macsec_offload offload; struct macsec_secy *secy; struct macsec_rx_sc *rx_sc; struct { bool update_pn; unsigned char assoc_num; u8 key[128]; union { struct macsec_rx_sa *rx_sa; struct macsec_tx_sa *tx_sa; }; } sa; union { struct macsec_tx_sc_stats 
*tx_sc_stats; struct macsec_tx_sa_stats *tx_sa_stats; struct macsec_rx_sc_stats *rx_sc_stats; struct macsec_rx_sa_stats *rx_sa_stats; struct macsec_dev_stats *dev_stats; } stats; }; typedef u64 sci_t; enum macsec_validation_type { MACSEC_VALIDATE_DISABLED = 0, MACSEC_VALIDATE_CHECK = 1, MACSEC_VALIDATE_STRICT = 2, __MACSEC_VALIDATE_END = 3, MACSEC_VALIDATE_MAX = 2, }; struct pcpu_tx_sc_stats; struct metadata_dst; struct macsec_tx_sc { bool active; u8 encoding_sa; bool encrypt; bool send_sci; bool end_station; bool scb; struct macsec_tx_sa __attribute__((btf_type_tag("rcu"))) *sa[4]; struct pcpu_tx_sc_stats __attribute__((btf_type_tag("percpu"))) *stats; struct metadata_dst *md_dst; }; struct macsec_secy { struct net_device *netdev; unsigned int n_rx_sc; sci_t sci; u16 key_len; u16 icv_len; enum macsec_validation_type validate_frames; bool xpn; bool operational; bool protect_frames; bool replay_protect; u32 replay_window; struct macsec_tx_sc tx_sc; struct macsec_rx_sc __attribute__((btf_type_tag("rcu"))) *rx_sc; }; union salt { struct { u32 ssci; u64 pn; } __attribute__((packed)); u8 bytes[12]; }; typedef union salt salt_t; struct macsec_key { u8 id[16]; struct crypto_aead *tfm; salt_t salt; }; typedef u32 ssci_t; union pn { struct { u32 lower; u32 upper; }; u64 full64; }; typedef union pn pn_t; struct macsec_tx_sa { struct macsec_key key; ssci_t ssci; spinlock_t lock; union { pn_t next_pn_halves; u64 next_pn; }; refcount_t refcnt; bool active; struct macsec_tx_sa_stats __attribute__((btf_type_tag("percpu"))) *stats; struct callback_head rcu; }; struct macsec_tx_sa_stats { __u32 OutPktsProtected; __u32 OutPktsEncrypted; }; struct macsec_tx_sc_stats { __u64 OutPktsProtected; __u64 OutPktsEncrypted; __u64 OutOctetsProtected; __u64 OutOctetsEncrypted; }; struct pcpu_tx_sc_stats { struct macsec_tx_sc_stats stats; struct u64_stats_sync syncp; }; struct ip_tunnel_key { __be64 tun_id; union { struct { __be32 src; __be32 dst; } ipv4; struct { struct in6_addr src; struct in6_addr dst; } ipv6; } u; __be16 tun_flags; u8 tos; u8 ttl; __be32 label; u32 nhid; __be16 tp_src; __be16 tp_dst; __u8 flow_flags; }; struct ip_tunnel_encap { u16 type; u16 flags; __be16 sport; __be16 dport; }; struct ip_tunnel_info { struct ip_tunnel_key key; struct ip_tunnel_encap encap; struct dst_cache dst_cache; u8 options_len; u8 mode; }; struct hw_port_info { struct net_device *lower_dev; u32 port_id; }; struct macsec_info { sci_t sci; }; struct xfrm_md_info { u32 if_id; int link; struct dst_entry *dst_orig; }; enum metadata_type { METADATA_IP_TUNNEL = 0, METADATA_HW_PORT_MUX = 1, METADATA_MACSEC = 2, METADATA_XFRM = 3, }; struct metadata_dst { struct dst_entry dst; enum metadata_type type; union { struct ip_tunnel_info tun_info; struct hw_port_info port_info; struct macsec_info macsec_info; struct xfrm_md_info xfrm_info; } u; }; struct pcpu_rx_sc_stats; struct macsec_rx_sc { struct macsec_rx_sc __attribute__((btf_type_tag("rcu"))) *next; sci_t sci; bool active; struct macsec_rx_sa __attribute__((btf_type_tag("rcu"))) *sa[4]; struct pcpu_rx_sc_stats __attribute__((btf_type_tag("percpu"))) *stats; refcount_t refcnt; struct callback_head callback_head; }; struct macsec_rx_sa { struct macsec_key key; ssci_t ssci; spinlock_t lock; union { pn_t next_pn_halves; u64 next_pn; }; refcount_t refcnt; bool active; struct macsec_rx_sa_stats __attribute__((btf_type_tag("percpu"))) *stats; struct macsec_rx_sc *sc; struct callback_head rcu; }; struct macsec_rx_sa_stats { __u32 InPktsOK; __u32 InPktsInvalid; __u32 InPktsNotValid; __u32 
InPktsNotUsingSA; __u32 InPktsUnusedSA; }; struct macsec_rx_sc_stats { __u64 InOctetsValidated; __u64 InOctetsDecrypted; __u64 InPktsUnchecked; __u64 InPktsDelayed; __u64 InPktsOK; __u64 InPktsInvalid; __u64 InPktsLate; __u64 InPktsNotValid; __u64 InPktsNotUsingSA; __u64 InPktsUnusedSA; }; struct pcpu_rx_sc_stats { struct macsec_rx_sc_stats stats; struct u64_stats_sync syncp; }; struct macsec_dev_stats { __u64 OutPktsUntagged; __u64 InPktsUntagged; __u64 OutPktsTooLong; __u64 InPktsNoTag; __u64 InPktsBadTag; __u64 InPktsUnknownSCI; __u64 InPktsNoSCI; __u64 InPktsOverrun; }; enum message_alignments { MESSAGE_PADDING_MULTIPLE = 16, MESSAGE_MINIMUM_LENGTH = 32, }; enum { WG_NETDEV_FEATURES = 1126357076009ULL, }; enum { IPV4_DEVCONF_FORWARDING = 1, IPV4_DEVCONF_MC_FORWARDING = 2, IPV4_DEVCONF_PROXY_ARP = 3, IPV4_DEVCONF_ACCEPT_REDIRECTS = 4, IPV4_DEVCONF_SECURE_REDIRECTS = 5, IPV4_DEVCONF_SEND_REDIRECTS = 6, IPV4_DEVCONF_SHARED_MEDIA = 7, IPV4_DEVCONF_RP_FILTER = 8, IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE = 9, IPV4_DEVCONF_BOOTP_RELAY = 10, IPV4_DEVCONF_LOG_MARTIANS = 11, IPV4_DEVCONF_TAG = 12, IPV4_DEVCONF_ARPFILTER = 13, IPV4_DEVCONF_MEDIUM_ID = 14, IPV4_DEVCONF_NOXFRM = 15, IPV4_DEVCONF_NOPOLICY = 16, IPV4_DEVCONF_FORCE_IGMP_VERSION = 17, IPV4_DEVCONF_ARP_ANNOUNCE = 18, IPV4_DEVCONF_ARP_IGNORE = 19, IPV4_DEVCONF_PROMOTE_SECONDARIES = 20, IPV4_DEVCONF_ARP_ACCEPT = 21, IPV4_DEVCONF_ARP_NOTIFY = 22, IPV4_DEVCONF_ACCEPT_LOCAL = 23, IPV4_DEVCONF_SRC_VMARK = 24, IPV4_DEVCONF_PROXY_ARP_PVLAN = 25, IPV4_DEVCONF_ROUTE_LOCALNET = 26, IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL = 27, IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL = 28, IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 29, IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 30, IPV4_DEVCONF_DROP_GRATUITOUS_ARP = 31, IPV4_DEVCONF_BC_FORWARDING = 32, IPV4_DEVCONF_ARP_EVICT_NOCARRIER = 33, __IPV4_DEVCONF_MAX = 34, }; enum in6_addr_gen_mode { IN6_ADDR_GEN_MODE_EUI64 = 0, IN6_ADDR_GEN_MODE_NONE = 1, IN6_ADDR_GEN_MODE_STABLE_PRIVACY = 2, IN6_ADDR_GEN_MODE_RANDOM = 3, }; struct packet_cb { u64 nonce; struct noise_keypair *keypair; atomic_t state; u32 mtu; u8 ds; }; enum { NAPI_STATE_SCHED = 0, NAPI_STATE_MISSED = 1, NAPI_STATE_DISABLE = 2, NAPI_STATE_NPSVC = 3, NAPI_STATE_LISTED = 4, NAPI_STATE_NO_BUSY_POLL = 5, NAPI_STATE_IN_BUSY_POLL = 6, NAPI_STATE_PREFER_BUSY_POLL = 7, NAPI_STATE_THREADED = 8, NAPI_STATE_SCHED_THREADED = 9, }; enum netdev_state_t { __LINK_STATE_START = 0, __LINK_STATE_PRESENT = 1, __LINK_STATE_NOCARRIER = 2, __LINK_STATE_LINKWATCH_PENDING = 3, __LINK_STATE_DORMANT = 4, __LINK_STATE_TESTING = 5, }; struct udp_hslot; struct udp_table { struct udp_hslot *hash; struct udp_hslot *hash2; unsigned int mask; unsigned int log; }; struct udp_hslot { struct hlist_head head; int count; spinlock_t lock; }; enum { HANDSHAKE_DSCP = 136, }; enum packet_state { PACKET_STATE_UNCRYPTED = 0, PACKET_STATE_CRYPTED = 1, PACKET_STATE_DEAD = 2, }; enum { INET_ECN_NOT_ECT = 0, INET_ECN_ECT_1 = 1, INET_ECN_ECT_0 = 2, INET_ECN_CE = 3, INET_ECN_MASK = 3, }; struct message_data { struct message_header header; __le32 key_idx; __le64 counter; u8 encrypted_data[0]; }; struct message_handshake_cookie { struct message_header header; __le32 receiver_index; u8 nonce[24]; u8 encrypted_cookie[32]; }; struct vlan_hdr { __be16 h_vlan_TCI; __be16 h_vlan_encapsulated_proto; }; struct flow_dissector_key_control { u16 thoff; u16 addr_type; u32 flags; }; struct flow_dissector_key_basic { __be16 n_proto; u8 ip_proto; u8 padding; }; struct flow_keys_basic { struct 
flow_dissector_key_control control; struct flow_dissector_key_basic basic; }; struct flow_dissector { unsigned long long used_keys; unsigned short offset[33]; }; enum cookie_mac_state { INVALID_MAC = 0, VALID_MAC_BUT_NO_COOKIE = 1, VALID_MAC_WITH_COOKIE_BUT_RATELIMITED = 2, VALID_MAC_WITH_COOKIE = 3, }; enum counter_values { COUNTER_BITS_TOTAL = 8192, COUNTER_REDUNDANT_BITS = 64, COUNTER_WINDOW_SIZE = 8128, }; enum gro_result { GRO_MERGED = 0, GRO_MERGED_FREE = 1, GRO_HELD = 2, GRO_NORMAL = 3, GRO_CONSUMED = 4, }; typedef enum gro_result gro_result_t; struct udp_tunnel_info { unsigned short type; sa_family_t sa_family; __be16 port; u8 hw_priv; }; struct udp_tunnel_nic_shared { struct udp_tunnel_nic *udp_tunnel_nic_info; struct list_head devices; }; enum rt_scope_t { RT_SCOPE_UNIVERSE = 0, RT_SCOPE_SITE = 200, RT_SCOPE_LINK = 253, RT_SCOPE_HOST = 254, RT_SCOPE_NOWHERE = 255, }; enum { RTAX_UNSPEC = 0, RTAX_LOCK = 1, RTAX_MTU = 2, RTAX_WINDOW = 3, RTAX_RTT = 4, RTAX_RTTVAR = 5, RTAX_SSTHRESH = 6, RTAX_CWND = 7, RTAX_ADVMSS = 8, RTAX_REORDERING = 9, RTAX_HOPLIMIT = 10, RTAX_INITCWND = 11, RTAX_FEATURES = 12, RTAX_RTO_MIN = 13, RTAX_INITRWND = 14, RTAX_QUICKACK = 15, RTAX_CC_ALGO = 16, RTAX_FASTOPEN_NO_COOKIE = 17, __RTAX_MAX = 18, }; struct udp_port_cfg { u8 family; union { struct in_addr local_ip; struct in6_addr local_ip6; }; union { struct in_addr peer_ip; struct in6_addr peer_ip6; }; __be16 local_udp_port; __be16 peer_udp_port; int bind_ifindex; unsigned int use_udp_checksums: 1; unsigned int use_udp6_tx_checksums: 1; unsigned int use_udp6_rx_checksums: 1; unsigned int ipv6_v6only: 1; }; typedef int (*udp_tunnel_encap_rcv_t)(struct sock *, struct sk_buff *); typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *, struct sk_buff *); typedef void (*udp_tunnel_encap_err_rcv_t)(struct sock *, struct sk_buff *, int, __be16, u32, u8 *); typedef void (*udp_tunnel_encap_destroy_t)(struct sock *); typedef struct sk_buff * (*udp_tunnel_gro_receive_t)(struct sock *, struct list_head *, struct sk_buff *); typedef int (*udp_tunnel_gro_complete_t)(struct sock *, struct sk_buff *, int); struct udp_tunnel_sock_cfg { void *sk_user_data; __u8 encap_type; udp_tunnel_encap_rcv_t encap_rcv; udp_tunnel_encap_err_lookup_t encap_err_lookup; udp_tunnel_encap_err_rcv_t encap_err_rcv; udp_tunnel_encap_destroy_t encap_destroy; udp_tunnel_gro_receive_t gro_receive; udp_tunnel_gro_complete_t gro_complete; }; typedef struct { unsigned long key[2]; } hsiphash_key_t; enum { PACKETS_PER_SECOND = 20, PACKETS_BURSTABLE = 5, PACKET_COST = 50000000, TOKEN_MAX = 250000000, }; struct ratelimiter_entry { u64 last_time_ns; u64 tokens; u64 ip; void *net; spinlock_t lock; struct hlist_node hash; struct callback_head rcu; }; enum cookie_values { COOKIE_SECRET_MAX_AGE = 120, COOKIE_SECRET_LATENCY = 5, COOKIE_NONCE_LEN = 24, COOKIE_LEN = 16, }; enum { COOKIE_KEY_LABEL_LEN = 8, }; enum wgdevice_attribute { WGDEVICE_A_UNSPEC = 0, WGDEVICE_A_IFINDEX = 1, WGDEVICE_A_IFNAME = 2, WGDEVICE_A_PRIVATE_KEY = 3, WGDEVICE_A_PUBLIC_KEY = 4, WGDEVICE_A_FLAGS = 5, WGDEVICE_A_LISTEN_PORT = 6, WGDEVICE_A_FWMARK = 7, WGDEVICE_A_PEERS = 8, __WGDEVICE_A_LAST = 9, }; enum wg_cmd { WG_CMD_GET_DEVICE = 0, WG_CMD_SET_DEVICE = 1, __WG_CMD_MAX = 2, }; enum wgpeer_attribute { WGPEER_A_UNSPEC = 0, WGPEER_A_PUBLIC_KEY = 1, WGPEER_A_PRESHARED_KEY = 2, WGPEER_A_FLAGS = 3, WGPEER_A_ENDPOINT = 4, WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL = 5, WGPEER_A_LAST_HANDSHAKE_TIME = 6, WGPEER_A_RX_BYTES = 7, WGPEER_A_TX_BYTES = 8, WGPEER_A_ALLOWEDIPS = 9, 
WGPEER_A_PROTOCOL_VERSION = 10, __WGPEER_A_LAST = 11, }; enum wgallowedip_attribute { WGALLOWEDIP_A_UNSPEC = 0, WGALLOWEDIP_A_FAMILY = 1, WGALLOWEDIP_A_IPADDR = 2, WGALLOWEDIP_A_CIDR_MASK = 3, __WGALLOWEDIP_A_LAST = 4, }; enum wgdevice_flag { WGDEVICE_F_REPLACE_PEERS = 1, __WGDEVICE_F_ALL = 1, }; enum wgpeer_flag { WGPEER_F_REMOVE_ME = 1, WGPEER_F_REPLACE_ALLOWEDIPS = 2, WGPEER_F_UPDATE_ONLY = 4, __WGPEER_F_ALL = 7, }; struct dump_ctx { struct wg_device *wg; struct wg_peer *next_peer; u64 allowedips_seq; struct allowedips_node *next_allowedip; }; struct genl_dumpit_info { struct genl_split_ops op; struct genl_info info; }; struct qdisc_walker { int stop; int skip; int count; int (*fn)(struct Qdisc *, unsigned long, struct qdisc_walker *); }; struct ifb_q_stats_desc { char desc[32]; size_t offset; }; enum netdev_queue_state_t { __QUEUE_STATE_DRV_XOFF = 0, __QUEUE_STATE_STACK_XOFF = 1, __QUEUE_STATE_FROZEN = 2, }; enum ethtool_stringset { ETH_SS_TEST = 0, ETH_SS_STATS = 1, ETH_SS_PRIV_FLAGS = 2, ETH_SS_NTUPLE_FILTERS = 3, ETH_SS_FEATURES = 4, ETH_SS_RSS_HASH_FUNCS = 5, ETH_SS_TUNABLES = 6, ETH_SS_PHY_STATS = 7, ETH_SS_PHY_TUNABLES = 8, ETH_SS_LINK_MODES = 9, ETH_SS_MSG_CLASSES = 10, ETH_SS_WOL_MODES = 11, ETH_SS_SOF_TIMESTAMPING = 12, ETH_SS_TS_TX_TYPES = 13, ETH_SS_TS_RX_FILTERS = 14, ETH_SS_UDP_TUNNEL_TYPES = 15, ETH_SS_STATS_STD = 16, ETH_SS_STATS_ETH_PHY = 17, ETH_SS_STATS_ETH_MAC = 18, ETH_SS_STATS_ETH_CTRL = 19, ETH_SS_STATS_RMON = 20, ETH_SS_COUNT = 21, }; struct ifb_q_stats { u64 packets; u64 bytes; struct u64_stats_sync sync; }; struct ifb_q_private { struct net_device *dev; struct tasklet_struct ifb_tasklet; int tasklet_pending; int txqnum; struct sk_buff_head rq; struct sk_buff_head tq; struct ifb_q_stats rx_stats; struct ifb_q_stats tx_stats; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct ifb_dev_private { struct ifb_q_private *tx_private; }; struct mdio_bus_stats { u64_stats_t transfers; u64_stats_t errors; u64_stats_t writes; u64_stats_t reads; struct u64_stats_sync syncp; }; struct mdio_device; struct phy_package_shared; struct mii_bus { struct module *owner; const char *name; char id[61]; void *priv; int (*read)(struct mii_bus *, int, int); int (*write)(struct mii_bus *, int, int, u16); int (*read_c45)(struct mii_bus *, int, int, int); int (*write_c45)(struct mii_bus *, int, int, int, u16); int (*reset)(struct mii_bus *); struct mdio_bus_stats stats[32]; struct mutex mdio_lock; struct device *parent; enum { MDIOBUS_ALLOCATED = 1, MDIOBUS_REGISTERED = 2, MDIOBUS_UNREGISTERED = 3, MDIOBUS_RELEASED = 4, } state; struct device dev; struct mdio_device *mdio_map[32]; u32 phy_mask; u32 phy_ignore_ta_mask; int irq[32]; int reset_delay_us; int reset_post_delay_us; struct gpio_desc *reset_gpiod; struct mutex shared_lock; struct phy_package_shared *shared[32]; }; struct mdio_device { struct device dev; struct mii_bus *bus; char modalias[32]; int (*bus_match)(struct device *, struct device_driver *); void (*device_free)(struct mdio_device *); void (*device_remove)(struct mdio_device *); int addr; int flags; struct gpio_desc *reset_gpio; struct reset_control *reset_ctrl; unsigned int reset_assert_delay; unsigned int reset_deassert_delay; }; struct phy_package_shared { int addr; refcount_t refcnt; unsigned long flags; size_t priv_size; void *priv; }; struct mdio_board_info { const char *bus_id; char modalias[32]; int mdio_addr; const void *platform_data; }; struct mdio_board_entry { struct list_head list; struct mdio_board_info board_info; }; struct 
phylib_stubs { int (*hwtstamp_get)(struct phy_device *, struct kernel_hwtstamp_config *); int (*hwtstamp_set)(struct phy_device *, struct kernel_hwtstamp_config *, struct netlink_ext_ack *); }; struct mdiobus_devres { struct mii_bus *mii; }; struct sfp_eeprom_id; struct sfp_upstream_ops { void (*attach)(void *, struct sfp_bus *); void (*detach)(void *, struct sfp_bus *); int (*module_insert)(void *, const struct sfp_eeprom_id *); void (*module_remove)(void *); int (*module_start)(void *); void (*module_stop)(void *); void (*link_down)(void *); void (*link_up)(void *); int (*connect_phy)(void *, struct phy_device *); void (*disconnect_phy)(void *); }; struct sfp_eeprom_base { u8 phys_id; u8 phys_ext_id; u8 connector; u8 if_1x_copper_passive: 1; u8 if_1x_copper_active: 1; u8 if_1x_lx: 1; u8 if_1x_sx: 1; u8 e10g_base_sr: 1; u8 e10g_base_lr: 1; u8 e10g_base_lrm: 1; u8 e10g_base_er: 1; u8 sonet_oc3_short_reach: 1; u8 sonet_oc3_smf_intermediate_reach: 1; u8 sonet_oc3_smf_long_reach: 1; u8 unallocated_5_3: 1; u8 sonet_oc12_short_reach: 1; u8 sonet_oc12_smf_intermediate_reach: 1; u8 sonet_oc12_smf_long_reach: 1; u8 unallocated_5_7: 1; u8 sonet_oc48_short_reach: 1; u8 sonet_oc48_intermediate_reach: 1; u8 sonet_oc48_long_reach: 1; u8 sonet_reach_bit2: 1; u8 sonet_reach_bit1: 1; u8 sonet_oc192_short_reach: 1; u8 escon_smf_1310_laser: 1; u8 escon_mmf_1310_led: 1; u8 e1000_base_sx: 1; u8 e1000_base_lx: 1; u8 e1000_base_cx: 1; u8 e1000_base_t: 1; u8 e100_base_lx: 1; u8 e100_base_fx: 1; u8 e_base_bx10: 1; u8 e_base_px: 1; u8 fc_tech_electrical_inter_enclosure: 1; u8 fc_tech_lc: 1; u8 fc_tech_sa: 1; u8 fc_ll_m: 1; u8 fc_ll_l: 1; u8 fc_ll_i: 1; u8 fc_ll_s: 1; u8 fc_ll_v: 1; u8 unallocated_8_0: 1; u8 unallocated_8_1: 1; u8 sfp_ct_passive: 1; u8 sfp_ct_active: 1; u8 fc_tech_ll: 1; u8 fc_tech_sl: 1; u8 fc_tech_sn: 1; u8 fc_tech_electrical_intra_enclosure: 1; u8 fc_media_sm: 1; u8 unallocated_9_1: 1; u8 fc_media_m5: 1; u8 fc_media_m6: 1; u8 fc_media_tv: 1; u8 fc_media_mi: 1; u8 fc_media_tp: 1; u8 fc_media_tw: 1; u8 fc_speed_100: 1; u8 unallocated_10_1: 1; u8 fc_speed_200: 1; u8 fc_speed_3200: 1; u8 fc_speed_400: 1; u8 fc_speed_1600: 1; u8 fc_speed_800: 1; u8 fc_speed_1200: 1; u8 encoding; u8 br_nominal; u8 rate_id; u8 link_len[6]; char vendor_name[16]; u8 extended_cc; char vendor_oui[3]; char vendor_pn[16]; char vendor_rev[4]; union { __be16 optical_wavelength; __be16 cable_compliance; struct { u8 sff8431_app_e: 1; u8 fc_pi_4_app_h: 1; u8 reserved60_2: 6; u8 reserved61: 8; } passive; struct { u8 sff8431_app_e: 1; u8 fc_pi_4_app_h: 1; u8 sff8431_lim: 1; u8 fc_pi_4_lim: 1; u8 reserved60_4: 4; u8 reserved61: 8; } active; }; u8 reserved62; u8 cc_base; }; struct sfp_eeprom_ext { __be16 options; u8 br_max; u8 br_min; char vendor_sn[16]; char datecode[8]; u8 diagmon; u8 enhopts; u8 sff8472_compliance; u8 cc_ext; }; struct sfp_eeprom_id { struct sfp_eeprom_base base; struct sfp_eeprom_ext ext; }; struct phy_c45_device_ids { u32 devices_in_package; u32 mmds_present; u32 device_ids[32]; }; enum phy_state { PHY_DOWN = 0, PHY_READY = 1, PHY_HALTED = 2, PHY_ERROR = 3, PHY_UP = 4, PHY_RUNNING = 5, PHY_NOLINK = 6, PHY_CABLETEST = 7, }; struct pse_control; struct phy_driver; struct phy_led_trigger; struct phylink; struct mii_timestamper; struct phy_device { struct mdio_device mdio; struct phy_driver *drv; struct device_link *devlink; u32 phy_id; struct phy_c45_device_ids c45_ids; unsigned int is_c45: 1; unsigned int is_internal: 1; unsigned int is_pseudo_fixed_link: 1; unsigned int is_gigabit_capable: 1; unsigned int 
has_fixups: 1; unsigned int suspended: 1; unsigned int suspended_by_mdio_bus: 1; unsigned int sysfs_links: 1; unsigned int loopback_enabled: 1; unsigned int downshifted_rate: 1; unsigned int is_on_sfp_module: 1; unsigned int mac_managed_pm: 1; unsigned int wol_enabled: 1; unsigned int autoneg: 1; unsigned int link: 1; unsigned int autoneg_complete: 1; unsigned int interrupts: 1; unsigned int irq_suspended: 1; unsigned int irq_rerun: 1; int rate_matching; enum phy_state state; u32 dev_flags; phy_interface_t interface; int speed; int duplex; int port; int pause; int asym_pause; u8 master_slave_get; u8 master_slave_set; u8 master_slave_state; unsigned long supported[2]; unsigned long advertising[2]; unsigned long lp_advertising[2]; unsigned long adv_old[2]; unsigned long supported_eee[2]; unsigned long advertising_eee[2]; bool eee_enabled; unsigned long host_interfaces[1]; u32 eee_broken_modes; struct phy_led_trigger *phy_led_triggers; unsigned int phy_num_led_triggers; struct phy_led_trigger *last_triggered; struct phy_led_trigger *led_link_trigger; struct list_head leds; int irq; void *priv; struct phy_package_shared *shared; struct sk_buff *skb; void *ehdr; struct nlattr *nest; struct delayed_work state_queue; struct mutex lock; bool sfp_bus_attached; struct sfp_bus *sfp_bus; struct phylink *phylink; struct net_device *attached_dev; struct mii_timestamper *mii_ts; struct pse_control *psec; u8 mdix; u8 mdix_ctrl; int pma_extable; unsigned int link_down_events; void (*phy_link_change)(struct phy_device *, bool); void (*adjust_link)(struct net_device *); const struct macsec_ops *macsec_ops; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct mdio_driver_common { struct device_driver driver; int flags; }; enum led_brightness { LED_OFF = 0, LED_ON = 1, LED_HALF = 127, LED_FULL = 255, }; struct phy_tdr_config; struct phy_plca_cfg; struct phy_plca_status; struct phy_driver { struct mdio_driver_common mdiodrv; u32 phy_id; char *name; u32 phy_id_mask; const unsigned long * const features; u32 flags; const void *driver_data; int (*soft_reset)(struct phy_device *); int (*config_init)(struct phy_device *); int (*probe)(struct phy_device *); int (*get_features)(struct phy_device *); int (*get_rate_matching)(struct phy_device *, phy_interface_t); int (*suspend)(struct phy_device *); int (*resume)(struct phy_device *); int (*config_aneg)(struct phy_device *); int (*aneg_done)(struct phy_device *); int (*read_status)(struct phy_device *); int (*config_intr)(struct phy_device *); irqreturn_t (*handle_interrupt)(struct phy_device *); void (*remove)(struct phy_device *); int (*match_phy_device)(struct phy_device *); int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *); void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *); void (*link_change_notify)(struct phy_device *); int (*read_mmd)(struct phy_device *, int, u16); int (*write_mmd)(struct phy_device *, int, u16, u16); int (*read_page)(struct phy_device *); int (*write_page)(struct phy_device *, int); int (*module_info)(struct phy_device *, struct ethtool_modinfo *); int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *); int (*cable_test_start)(struct phy_device *); int (*cable_test_tdr_start)(struct phy_device *, const struct phy_tdr_config *); int (*cable_test_get_status)(struct phy_device *, bool *); int (*get_sset_count)(struct phy_device *); void (*get_strings)(struct phy_device *, u8 *); void (*get_stats)(struct phy_device *, struct 
ethtool_stats *, u64 *); int (*get_tunable)(struct phy_device *, struct ethtool_tunable *, void *); int (*set_tunable)(struct phy_device *, struct ethtool_tunable *, const void *); int (*set_loopback)(struct phy_device *, bool); int (*get_sqi)(struct phy_device *); int (*get_sqi_max)(struct phy_device *); int (*get_plca_cfg)(struct phy_device *, struct phy_plca_cfg *); int (*set_plca_cfg)(struct phy_device *, const struct phy_plca_cfg *); int (*get_plca_status)(struct phy_device *, struct phy_plca_status *); int (*led_brightness_set)(struct phy_device *, u8, enum led_brightness); int (*led_blink_set)(struct phy_device *, u8, unsigned long *, unsigned long *); int (*led_hw_is_supported)(struct phy_device *, u8, unsigned long); int (*led_hw_control_set)(struct phy_device *, u8, unsigned long); int (*led_hw_control_get)(struct phy_device *, u8, unsigned long *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct phy_tdr_config { u32 first; u32 last; u32 step; s8 pair; }; struct phy_plca_cfg { int version; int enabled; int node_id; int node_cnt; int to_tmr; int burst_cnt; int burst_tmr; }; struct phy_plca_status { bool pst; }; struct phylink_link_state { unsigned long advertising[2]; unsigned long lp_advertising[2]; phy_interface_t interface; int speed; int duplex; int pause; int rate_matching; unsigned int link: 1; unsigned int an_complete: 1; }; struct phylink_mac_ops; struct phylink_config; struct phylink_pcs; struct phylink { struct net_device *netdev; const struct phylink_mac_ops *mac_ops; struct phylink_config *config; struct phylink_pcs *pcs; struct device *dev; unsigned int old_link_state: 1; unsigned long phylink_disable_state; struct phy_device *phydev; phy_interface_t link_interface; u8 cfg_link_an_mode; u8 cur_link_an_mode; u8 link_port; unsigned long supported[2]; struct phylink_link_state link_config; phy_interface_t cur_interface; struct gpio_desc *link_gpio; unsigned int link_irq; struct timer_list link_poll; void (*get_fixed_state)(struct net_device *, struct phylink_link_state *); struct mutex state_mutex; struct phylink_link_state phy_state; struct work_struct resolve; unsigned int pcs_neg_mode; unsigned int pcs_state; bool mac_link_dropped; bool using_mac_select_pcs; struct sfp_bus *sfp_bus; bool sfp_may_have_phy; unsigned long sfp_interfaces[1]; unsigned long sfp_support[2]; u8 sfp_port; }; struct phylink_mac_ops { void (*validate)(struct phylink_config *, unsigned long *, struct phylink_link_state *); struct phylink_pcs * (*mac_select_pcs)(struct phylink_config *, phy_interface_t); int (*mac_prepare)(struct phylink_config *, unsigned int, phy_interface_t); void (*mac_config)(struct phylink_config *, unsigned int, const struct phylink_link_state *); int (*mac_finish)(struct phylink_config *, unsigned int, phy_interface_t); void (*mac_link_down)(struct phylink_config *, unsigned int, phy_interface_t); void (*mac_link_up)(struct phylink_config *, struct phy_device *, unsigned int, phy_interface_t, int, int, bool, bool); }; enum phylink_op_type { PHYLINK_NETDEV = 0, PHYLINK_DEV = 1, }; struct phylink_config { struct device *dev; enum phylink_op_type type; bool poll_fixed_state; bool mac_managed_pm; bool ovr_an_inband; void (*get_fixed_state)(struct phylink_config *, struct phylink_link_state *); unsigned long supported_interfaces[1]; unsigned long mac_capabilities; }; struct phylink_pcs_ops; struct phylink_pcs { const struct phylink_pcs_ops *ops; struct phylink *phylink; bool neg_mode; bool poll; }; struct phylink_pcs_ops { int (*pcs_validate)(struct 
phylink_pcs *, unsigned long *, const struct phylink_link_state *); int (*pcs_enable)(struct phylink_pcs *); void (*pcs_disable)(struct phylink_pcs *); void (*pcs_pre_config)(struct phylink_pcs *, phy_interface_t); int (*pcs_post_config)(struct phylink_pcs *, phy_interface_t); void (*pcs_get_state)(struct phylink_pcs *, struct phylink_link_state *); int (*pcs_config)(struct phylink_pcs *, unsigned int, phy_interface_t, const unsigned long *, bool); void (*pcs_an_restart)(struct phylink_pcs *); void (*pcs_link_up)(struct phylink_pcs *, unsigned int, phy_interface_t, int, int); }; struct mii_timestamper { bool (*rxtstamp)(struct mii_timestamper *, struct sk_buff *, int); void (*txtstamp)(struct mii_timestamper *, struct sk_buff *, int); int (*hwtstamp)(struct mii_timestamper *, struct ifreq *); void (*link_state)(struct mii_timestamper *, struct phy_device *); int (*ts_info)(struct mii_timestamper *, struct ethtool_ts_info *); struct device *device; }; enum ethtool_link_mode_bit_indices { ETHTOOL_LINK_MODE_10baseT_Half_BIT = 0, ETHTOOL_LINK_MODE_10baseT_Full_BIT = 1, ETHTOOL_LINK_MODE_100baseT_Half_BIT = 2, ETHTOOL_LINK_MODE_100baseT_Full_BIT = 3, ETHTOOL_LINK_MODE_1000baseT_Half_BIT = 4, ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5, ETHTOOL_LINK_MODE_Autoneg_BIT = 6, ETHTOOL_LINK_MODE_TP_BIT = 7, ETHTOOL_LINK_MODE_AUI_BIT = 8, ETHTOOL_LINK_MODE_MII_BIT = 9, ETHTOOL_LINK_MODE_FIBRE_BIT = 10, ETHTOOL_LINK_MODE_BNC_BIT = 11, ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12, ETHTOOL_LINK_MODE_Pause_BIT = 13, ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14, ETHTOOL_LINK_MODE_2500baseX_Full_BIT = 15, ETHTOOL_LINK_MODE_Backplane_BIT = 16, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT = 18, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19, ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20, ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT = 21, ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT = 22, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26, ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT = 27, ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38, ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40, ETHTOOL_LINK_MODE_1000baseX_Full_BIT = 41, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT = 42, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT = 43, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44, ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45, ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46, ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47, ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48, ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49, ETHTOOL_LINK_MODE_FEC_RS_BIT = 50, ETHTOOL_LINK_MODE_FEC_BASER_BIT = 51, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT = 52, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT = 53, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT = 54, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT = 55, ETHTOOL_LINK_MODE_50000baseDR_Full_BIT = 56, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT = 57, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT = 58, 
ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT = 59, ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT = 60, ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT = 61, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT = 62, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT = 63, ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT = 64, ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT = 65, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT = 66, ETHTOOL_LINK_MODE_100baseT1_Full_BIT = 67, ETHTOOL_LINK_MODE_1000baseT1_Full_BIT = 68, ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT = 69, ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT = 70, ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT = 71, ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72, ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73, ETHTOOL_LINK_MODE_FEC_LLRS_BIT = 74, ETHTOOL_LINK_MODE_100000baseKR_Full_BIT = 75, ETHTOOL_LINK_MODE_100000baseSR_Full_BIT = 76, ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT = 77, ETHTOOL_LINK_MODE_100000baseCR_Full_BIT = 78, ETHTOOL_LINK_MODE_100000baseDR_Full_BIT = 79, ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT = 80, ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT = 81, ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT = 82, ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT = 83, ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT = 84, ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT = 85, ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT = 86, ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT = 87, ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT = 88, ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89, ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90, ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91, ETHTOOL_LINK_MODE_10baseT1L_Full_BIT = 92, ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT = 93, ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT = 94, ETHTOOL_LINK_MODE_800000baseDR8_Full_BIT = 95, ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT = 96, ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT = 97, ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT = 98, ETHTOOL_LINK_MODE_10baseT1S_Full_BIT = 99, ETHTOOL_LINK_MODE_10baseT1S_Half_BIT = 100, ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT = 101, __ETHTOOL_LINK_MODE_MASK_NBITS = 102, }; enum { MLO_PAUSE_NONE = 0, MLO_PAUSE_RX = 1, MLO_PAUSE_TX = 2, MLO_PAUSE_TXRX_MASK = 3, MLO_PAUSE_AN = 4, MLO_AN_PHY = 0, MLO_AN_FIXED = 1, MLO_AN_INBAND = 2, PHYLINK_PCS_NEG_NONE = 0, PHYLINK_PCS_NEG_ENABLED = 16, PHYLINK_PCS_NEG_OUTBAND = 32, PHYLINK_PCS_NEG_INBAND = 64, PHYLINK_PCS_NEG_INBAND_DISABLED = 64, PHYLINK_PCS_NEG_INBAND_ENABLED = 80, MAC_SYM_PAUSE = 1, MAC_ASYM_PAUSE = 2, MAC_10HD = 4, MAC_10FD = 8, MAC_10 = 12, MAC_100HD = 16, MAC_100FD = 32, MAC_100 = 48, MAC_1000HD = 64, MAC_1000FD = 128, MAC_1000 = 192, MAC_2500FD = 256, MAC_5000FD = 512, MAC_10000FD = 1024, MAC_20000FD = 2048, MAC_25000FD = 4096, MAC_40000FD = 8192, MAC_50000FD = 16384, MAC_56000FD = 32768, MAC_100000FD = 65536, MAC_200000FD = 131072, MAC_400000FD = 262144, }; enum { PHYLINK_DISABLE_STOPPED = 0, PHYLINK_DISABLE_LINK = 1, PHYLINK_DISABLE_MAC_WOL = 2, PCS_STATE_DOWN = 0, PCS_STATE_STARTING = 1, PCS_STATE_STARTED = 2, }; struct mii_ioctl_data { __u16 phy_id; __u16 reg_num; __u16 val_in; __u16 val_out; }; struct phy_setting { u32 speed; u8 duplex; u8 bit; }; struct fixed_phy_status { int link; int speed; int duplex; int pause; int asym_pause; }; struct led_classdev; struct led_hw_trigger_type; struct led_trigger { const char *name; int (*activate)(struct led_classdev *); void (*deactivate)(struct led_classdev *); struct led_hw_trigger_type *trigger_type; spinlock_t leddev_list_lock; struct list_head led_cdevs; struct list_head next_trig; const struct attribute_group **groups; }; 
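/*
 * A minimal usage sketch (not part of the bpftool-generated type dump): because
 * of the preserve_access_index pragma at the top of this header, the records
 * above carry CO-RE relocations, so a BPF program that includes this file can
 * dereference them against whatever kernel it runs on. The snippet below reads
 * link-state fields out of struct phy_device from a kprobe. It assumes libbpf's
 * <bpf/bpf_helpers.h>, <bpf/bpf_tracing.h> and <bpf/bpf_core_read.h> are
 * available, and that phy_print_status() is a valid kprobe attach point on the
 * target kernel; it is guarded out so the header itself remains a pure type dump.
 */
#if 0
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kprobe/phy_print_status")
int BPF_KPROBE(trace_phy_status, struct phy_device *phydev)
{
	/* CO-RE reads: member offsets are relocated via the running kernel's BTF. */
	enum phy_state state = BPF_CORE_READ(phydev, state);
	int speed = BPF_CORE_READ(phydev, speed);
	int duplex = BPF_CORE_READ(phydev, duplex);

	bpf_printk("phy state=%d speed=%d duplex=%d", state, speed, duplex);
	return 0;
}
#endif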
struct phy_led_trigger { struct led_trigger trigger; char name[76]; unsigned int speed; }; struct led_pattern; struct led_classdev { const char *name; unsigned int brightness; unsigned int max_brightness; unsigned int color; int flags; unsigned long work_flags; void (*brightness_set)(struct led_classdev *, enum led_brightness); int (*brightness_set_blocking)(struct led_classdev *, enum led_brightness); enum led_brightness (*brightness_get)(struct led_classdev *); int (*blink_set)(struct led_classdev *, unsigned long *, unsigned long *); int (*pattern_set)(struct led_classdev *, struct led_pattern *, u32, int); int (*pattern_clear)(struct led_classdev *); struct device *dev; const struct attribute_group **groups; struct list_head node; const char *default_trigger; unsigned long blink_delay_on; unsigned long blink_delay_off; struct timer_list blink_timer; int blink_brightness; int new_blink_brightness; void (*flash_resume)(struct led_classdev *); struct work_struct set_brightness_work; int delayed_set_value; unsigned long delayed_delay_on; unsigned long delayed_delay_off; struct rw_semaphore trigger_lock; struct led_trigger *trigger; struct list_head trig_list; void *trigger_data; bool activated; struct led_hw_trigger_type *trigger_type; const char *hw_control_trigger; int (*hw_control_is_supported)(struct led_classdev *, unsigned long); int (*hw_control_set)(struct led_classdev *, unsigned long); int (*hw_control_get)(struct led_classdev *, unsigned long *); struct device * (*hw_control_get_device)(struct led_classdev *); struct mutex led_access; }; struct led_pattern { u32 delta_t; int brightness; }; struct led_hw_trigger_type { int dummy; }; enum { ETHTOOL_MSG_KERNEL_NONE = 0, ETHTOOL_MSG_STRSET_GET_REPLY = 1, ETHTOOL_MSG_LINKINFO_GET_REPLY = 2, ETHTOOL_MSG_LINKINFO_NTF = 3, ETHTOOL_MSG_LINKMODES_GET_REPLY = 4, ETHTOOL_MSG_LINKMODES_NTF = 5, ETHTOOL_MSG_LINKSTATE_GET_REPLY = 6, ETHTOOL_MSG_DEBUG_GET_REPLY = 7, ETHTOOL_MSG_DEBUG_NTF = 8, ETHTOOL_MSG_WOL_GET_REPLY = 9, ETHTOOL_MSG_WOL_NTF = 10, ETHTOOL_MSG_FEATURES_GET_REPLY = 11, ETHTOOL_MSG_FEATURES_SET_REPLY = 12, ETHTOOL_MSG_FEATURES_NTF = 13, ETHTOOL_MSG_PRIVFLAGS_GET_REPLY = 14, ETHTOOL_MSG_PRIVFLAGS_NTF = 15, ETHTOOL_MSG_RINGS_GET_REPLY = 16, ETHTOOL_MSG_RINGS_NTF = 17, ETHTOOL_MSG_CHANNELS_GET_REPLY = 18, ETHTOOL_MSG_CHANNELS_NTF = 19, ETHTOOL_MSG_COALESCE_GET_REPLY = 20, ETHTOOL_MSG_COALESCE_NTF = 21, ETHTOOL_MSG_PAUSE_GET_REPLY = 22, ETHTOOL_MSG_PAUSE_NTF = 23, ETHTOOL_MSG_EEE_GET_REPLY = 24, ETHTOOL_MSG_EEE_NTF = 25, ETHTOOL_MSG_TSINFO_GET_REPLY = 26, ETHTOOL_MSG_CABLE_TEST_NTF = 27, ETHTOOL_MSG_CABLE_TEST_TDR_NTF = 28, ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY = 29, ETHTOOL_MSG_FEC_GET_REPLY = 30, ETHTOOL_MSG_FEC_NTF = 31, ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY = 32, ETHTOOL_MSG_STATS_GET_REPLY = 33, ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY = 34, ETHTOOL_MSG_MODULE_GET_REPLY = 35, ETHTOOL_MSG_MODULE_NTF = 36, ETHTOOL_MSG_PSE_GET_REPLY = 37, ETHTOOL_MSG_RSS_GET_REPLY = 38, ETHTOOL_MSG_PLCA_GET_CFG_REPLY = 39, ETHTOOL_MSG_PLCA_GET_STATUS_REPLY = 40, ETHTOOL_MSG_PLCA_NTF = 41, ETHTOOL_MSG_MM_GET_REPLY = 42, ETHTOOL_MSG_MM_NTF = 43, __ETHTOOL_MSG_KERNEL_CNT = 44, ETHTOOL_MSG_KERNEL_MAX = 43, }; struct ethtool_phy_ops { int (*get_sset_count)(struct phy_device *); int (*get_strings)(struct phy_device *, u8 *); int (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); int (*get_plca_cfg)(struct phy_device *, struct phy_plca_cfg *); int (*set_plca_cfg)(struct phy_device *, const struct phy_plca_cfg *, struct netlink_ext_ack *); int 
(*get_plca_status)(struct phy_device *, struct phy_plca_status *); int (*start_cable_test)(struct phy_device *, struct netlink_ext_ack *); int (*start_cable_test_tdr)(struct phy_device *, struct netlink_ext_ack *, const struct phy_tdr_config *); }; struct phy_fixup { struct list_head list; char bus_id[64]; u32 phy_uid; u32 phy_uid_mask; int (*run)(struct phy_device *); }; struct phy_led { struct list_head list; struct phy_device *phydev; struct led_classdev led_cdev; u8 index; }; struct led_init_data { struct fwnode_handle *fwnode; const char *default_label; const char *devicename; bool devname_mandatory; }; typedef void (*btf_trace_mdio_access)(void *, struct mii_bus *, char, u8, unsigned int, u16, int); struct trace_event_raw_mdio_access { struct trace_entry ent; char busid[61]; char read; u8 addr; u16 val; unsigned int regnum; char __data[0]; }; struct mdio_driver { struct mdio_driver_common mdiodrv; int (*probe)(struct mdio_device *); void (*remove)(struct mdio_device *); void (*shutdown)(struct mdio_device *); }; struct trace_event_data_offsets_mdio_access {}; struct mdio_bus_stat_attr { int addr; unsigned int field_offset; }; struct swmii_regs { u16 bmsr; u16 lpa; u16 lpagb; u16 estat; }; enum { SWMII_SPEED_10 = 0, SWMII_SPEED_100 = 1, SWMII_SPEED_1000 = 2, SWMII_DUPLEX_HALF = 0, SWMII_DUPLEX_FULL = 1, }; struct fixed_mdio_bus { struct mii_bus *mii_bus; struct list_head phys; }; struct fixed_phy { int addr; struct phy_device *phydev; struct fixed_phy_status status; bool no_carrier; int (*link_update)(struct net_device *, struct fixed_phy_status *); struct list_head node; struct gpio_desc *link_gpiod; }; struct wpan_phy; struct wpan_dev_header_ops; struct wpan_dev { struct wpan_phy *wpan_phy; int iftype; struct list_head list; struct net_device *netdev; const struct wpan_dev_header_ops *header_ops; struct net_device *lowpan_dev; u32 identifier; __le16 pan_id; __le16 short_addr; __le64 extended_addr; atomic_t bsn; atomic_t dsn; u8 min_be; u8 max_be; u8 csma_retries; s8 frame_retries; bool lbt; bool ackreq; }; enum nl802154_supported_bool_states { NL802154_SUPPORTED_BOOL_FALSE = 0, NL802154_SUPPORTED_BOOL_TRUE = 1, __NL802154_SUPPORTED_BOOL_INVALD = 2, NL802154_SUPPORTED_BOOL_BOTH = 3, __NL802154_SUPPORTED_BOOL_AFTER_LAST = 4, NL802154_SUPPORTED_BOOL_MAX = 3, }; struct wpan_phy_supported { u32 channels[32]; u32 cca_modes; u32 cca_opts; u32 iftypes; enum nl802154_supported_bool_states lbt; u8 min_minbe; u8 max_minbe; u8 min_maxbe; u8 max_maxbe; u8 min_csma_backoffs; u8 max_csma_backoffs; s8 min_frame_retries; s8 max_frame_retries; size_t tx_powers_size; size_t cca_ed_levels_size; const s32 *tx_powers; const s32 *cca_ed_levels; }; enum nl802154_cca_modes { __NL802154_CCA_INVALID = 0, NL802154_CCA_ENERGY = 1, NL802154_CCA_CARRIER = 2, NL802154_CCA_ENERGY_CARRIER = 3, NL802154_CCA_ALOHA = 4, NL802154_CCA_UWB_SHR = 5, NL802154_CCA_UWB_MULTIPLEXED = 6, __NL802154_CCA_ATTR_AFTER_LAST = 7, NL802154_CCA_ATTR_MAX = 6, }; enum nl802154_cca_opts { NL802154_CCA_OPT_ENERGY_CARRIER_AND = 0, NL802154_CCA_OPT_ENERGY_CARRIER_OR = 1, __NL802154_CCA_OPT_ATTR_AFTER_LAST = 2, NL802154_CCA_OPT_ATTR_MAX = 1, }; struct wpan_phy_cca { enum nl802154_cca_modes mode; enum nl802154_cca_opts opt; }; enum ieee802154_filtering_level { IEEE802154_FILTERING_NONE = 0, IEEE802154_FILTERING_1_FCS = 1, IEEE802154_FILTERING_2_PROMISCUOUS = 2, IEEE802154_FILTERING_3_SCAN = 3, IEEE802154_FILTERING_4_FRAME_FIELDS = 4, }; struct wpan_phy { const void *privid; unsigned long flags; u8 current_channel; u8 current_page; struct 
wpan_phy_supported supported; s32 transmit_power; struct wpan_phy_cca cca; __le64 perm_extended_addr; s32 cca_ed_level; u32 symbol_duration; u16 lifs_period; u16 sifs_period; struct device dev; possible_net_t _net; spinlock_t queue_lock; atomic_t ongoing_txs; atomic_t hold_txs; wait_queue_head_t sync_txq; enum ieee802154_filtering_level filtering; long: 64; char priv[0]; }; struct ieee802154_addr; struct wpan_dev_header_ops { int (*create)(struct sk_buff *, struct net_device *, const struct ieee802154_addr *, const struct ieee802154_addr *, unsigned int); }; struct ieee802154_addr { u8 mode; __le16 pan_id; union { __le16 short_addr; __le64 extended_addr; }; }; enum { NETIF_MSG_DRV_BIT = 0, NETIF_MSG_PROBE_BIT = 1, NETIF_MSG_LINK_BIT = 2, NETIF_MSG_TIMER_BIT = 3, NETIF_MSG_IFDOWN_BIT = 4, NETIF_MSG_IFUP_BIT = 5, NETIF_MSG_RX_ERR_BIT = 6, NETIF_MSG_TX_ERR_BIT = 7, NETIF_MSG_TX_QUEUED_BIT = 8, NETIF_MSG_INTR_BIT = 9, NETIF_MSG_TX_DONE_BIT = 10, NETIF_MSG_RX_STATUS_BIT = 11, NETIF_MSG_PKTDATA_BIT = 12, NETIF_MSG_HW_BIT = 13, NETIF_MSG_WOL_BIT = 14, NETIF_MSG_CLASS_COUNT = 15, }; enum { IFLA_TUN_UNSPEC = 0, IFLA_TUN_OWNER = 1, IFLA_TUN_GROUP = 2, IFLA_TUN_TYPE = 3, IFLA_TUN_PI = 4, IFLA_TUN_VNET_HDR = 5, IFLA_TUN_PERSIST = 6, IFLA_TUN_MULTI_QUEUE = 7, IFLA_TUN_NUM_QUEUES = 8, IFLA_TUN_NUM_DISABLED_QUEUES = 9, __IFLA_TUN_MAX = 10, }; enum { SKB_GSO_TCPV4 = 1, SKB_GSO_DODGY = 2, SKB_GSO_TCP_ECN = 4, SKB_GSO_TCP_FIXEDID = 8, SKB_GSO_TCPV6 = 16, SKB_GSO_FCOE = 32, SKB_GSO_GRE = 64, SKB_GSO_GRE_CSUM = 128, SKB_GSO_IPXIP4 = 256, SKB_GSO_IPXIP6 = 512, SKB_GSO_UDP_TUNNEL = 1024, SKB_GSO_UDP_TUNNEL_CSUM = 2048, SKB_GSO_PARTIAL = 4096, SKB_GSO_TUNNEL_REMCSUM = 8192, SKB_GSO_SCTP = 16384, SKB_GSO_ESP = 32768, SKB_GSO_UDP = 65536, SKB_GSO_UDP_L4 = 131072, SKB_GSO_FRAGLIST = 262144, }; enum xdp_mem_type { MEM_TYPE_PAGE_SHARED = 0, MEM_TYPE_PAGE_ORDER0 = 1, MEM_TYPE_PAGE_POOL = 2, MEM_TYPE_XSK_BUFF_POOL = 3, MEM_TYPE_MAX = 4, }; struct tun_struct; struct tun_flow_entry { struct hlist_node hash_link; struct callback_head rcu; struct tun_struct *tun; u32 rxhash; u32 rps_rxhash; int queue_index; long: 64; unsigned long updated; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct tap_filter { unsigned int count; u32 mask[2]; unsigned char addr[48]; }; struct tun_file; struct tun_prog; struct tun_struct { struct tun_file __attribute__((btf_type_tag("rcu"))) *tfiles[256]; unsigned int numqueues; unsigned int flags; kuid_t owner; kgid_t group; struct net_device *dev; netdev_features_t set_features; int align; int vnet_hdr_sz; int sndbuf; struct tap_filter txflt; struct sock_fprog fprog; bool filter_attached; u32 msg_enable; spinlock_t lock; struct hlist_head flows[1024]; struct timer_list flow_gc_timer; unsigned long ageing_time; unsigned int numdisabled; struct list_head disabled; void *security; u32 flow_count; u32 rx_batched; atomic_long_t rx_frame_errors; struct bpf_prog __attribute__((btf_type_tag("rcu"))) *xdp_prog; struct tun_prog __attribute__((btf_type_tag("rcu"))) *steering_prog; struct tun_prog __attribute__((btf_type_tag("rcu"))) *filter_prog; struct ethtool_link_ksettings link_ksettings; struct file *file; struct ifreq *ifr; }; struct tun_file { struct sock sk; long: 64; long: 64; long: 64; long: 64; long: 64; struct socket socket; struct tun_struct __attribute__((btf_type_tag("rcu"))) *tun; struct fasync_struct *fasync; unsigned int flags; union { u16 queue_index; unsigned int ifindex; }; struct napi_struct napi; bool napi_enabled; bool napi_frags_enabled; struct mutex 
napi_mutex; struct list_head next; struct tun_struct *detached; long: 64; long: 64; long: 64; long: 64; long: 64; struct ptr_ring tx_ring; struct xdp_rxq_info xdp_rxq; }; struct tun_prog { struct callback_head rcu; struct bpf_prog *prog; }; struct rps_sock_flow_table { u32 mask; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; u32 ents[0]; }; struct tun_pi { __u16 flags; __be16 proto; }; struct virtio_net_hdr { __u8 flags; __u8 gso_type; __virtio16 hdr_len; __virtio16 gso_size; __virtio16 csum_start; __virtio16 csum_offset; }; struct veth { __be16 h_vlan_proto; __be16 h_vlan_TCI; }; struct nf_conntrack { refcount_t use; }; struct tun_page { struct page *page; int count; }; struct tun_xdp_hdr { int buflen; struct virtio_net_hdr gso; }; struct tun_filter { __u16 flags; __u16 count; __u8 addr[0]; }; struct tun_msg_ctl { unsigned short type; unsigned short num; void *ptr; }; struct page_pool_params { unsigned int flags; unsigned int order; unsigned int pool_size; int nid; struct device *dev; struct napi_struct *napi; enum dma_data_direction dma_dir; unsigned int max_len; unsigned int offset; void (*init_callback)(struct page *, void *); void *init_arg; }; struct pp_alloc_cache { u32 count; struct page *cache[128]; }; struct page_pool { struct page_pool_params p; long frag_users; struct page *frag_page; unsigned int frag_offset; u32 pages_state_hold_cnt; struct delayed_work release_dw; void (*disconnect)(void *); unsigned long defer_start; unsigned long defer_warn; u32 xdp_mem_id; struct pp_alloc_cache alloc; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct ptr_ring ring; atomic_t pages_state_release_cnt; refcount_t user_cnt; u64 destroy_cnt; u64 android_kabi_reserved1; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct veth_q_stat_desc { char desc[32]; size_t offset; }; enum { VETH_INFO_UNSPEC = 0, VETH_INFO_PEER = 1, __VETH_INFO_MAX = 2, }; struct veth_stats { u64 rx_drops; u64 xdp_packets; u64 xdp_bytes; u64 xdp_redirect; u64 xdp_drops; u64 xdp_tx; u64 xdp_tx_err; u64 peer_tq_xdp_xmit; u64 peer_tq_xdp_xmit_err; }; struct veth_rq_stats { struct veth_stats vs; struct u64_stats_sync syncp; }; struct veth_rq { struct napi_struct xdp_napi; struct napi_struct __attribute__((btf_type_tag("rcu"))) *napi; struct net_device *dev; struct bpf_prog __attribute__((btf_type_tag("rcu"))) *xdp_prog; struct xdp_mem_info xdp_mem; struct veth_rq_stats stats; bool rx_notify_masked; long: 64; long: 64; long: 64; long: 64; struct ptr_ring xdp_ring; struct xdp_rxq_info xdp_rxq; struct page_pool *page_pool; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct ifinfomsg { unsigned char ifi_family; unsigned char __ifi_pad; unsigned short ifi_type; int ifi_index; unsigned int ifi_flags; unsigned int ifi_change; }; struct veth_priv { struct net_device __attribute__((btf_type_tag("rcu"))) *peer; atomic64_t dropped; struct bpf_prog *_xdp_prog; struct veth_rq *rq; unsigned int requested_headroom; }; struct veth_xdp_tx_bq { struct xdp_frame *q[16]; unsigned int count; }; struct veth_xdp_buff { struct xdp_buff xdp; struct sk_buff *skb; }; struct uio_mem; struct map_sysfs_entry { struct attribute attr; ssize_t (*show)(struct uio_mem *, char *); ssize_t (*store)(struct uio_mem *, const char *, size_t); }; struct uio_map; struct uio_mem { const char *name; phys_addr_t addr; unsigned long offs; resource_size_t size; int memtype; void *internal_addr; struct uio_map *map; }; struct uio_map { struct kobject kobj; struct uio_mem *mem; }; struct 
uio_port; struct portio_sysfs_entry { struct attribute attr; ssize_t (*show)(struct uio_port *, char *); ssize_t (*store)(struct uio_port *, const char *, size_t); }; struct uio_portio; struct uio_port { const char *name; unsigned long start; unsigned long size; int porttype; struct uio_portio *portio; }; struct uio_portio { struct kobject kobj; struct uio_port *port; }; struct uio_info; struct uio_device { struct module *owner; struct device dev; int minor; atomic_t event; struct fasync_struct *async_queue; wait_queue_head_t wait; struct uio_info *info; struct mutex info_lock; struct kobject *map_dir; struct kobject *portio_dir; u64 android_kabi_reserved1; }; struct uio_info { struct uio_device *uio_dev; const char *name; const char *version; struct uio_mem mem[5]; struct uio_port port[5]; long irq; unsigned long irq_flags; void *priv; irqreturn_t (*handler)(int, struct uio_info *); int (*mmap)(struct uio_info *, struct vm_area_struct *); int (*open)(struct uio_info *, struct inode *); int (*release)(struct uio_info *, struct inode *); int (*irqcontrol)(struct uio_info *, s32); u64 android_kabi_reserved1; }; struct uio_listener { struct uio_device *dev; s32 event_count; }; enum usb_otg_state { OTG_STATE_UNDEFINED = 0, OTG_STATE_B_IDLE = 1, OTG_STATE_B_SRP_INIT = 2, OTG_STATE_B_PERIPHERAL = 3, OTG_STATE_B_WAIT_ACON = 4, OTG_STATE_B_HOST = 5, OTG_STATE_A_IDLE = 6, OTG_STATE_A_WAIT_VRISE = 7, OTG_STATE_A_WAIT_BCON = 8, OTG_STATE_A_HOST = 9, OTG_STATE_A_SUSPEND = 10, OTG_STATE_A_PERIPHERAL = 11, OTG_STATE_A_WAIT_VFALL = 12, OTG_STATE_A_VBUS_ERR = 13, }; enum usb_device_speed { USB_SPEED_UNKNOWN = 0, USB_SPEED_LOW = 1, USB_SPEED_FULL = 2, USB_SPEED_HIGH = 3, USB_SPEED_WIRELESS = 4, USB_SPEED_SUPER = 5, USB_SPEED_SUPER_PLUS = 6, }; enum usb_ssp_rate { USB_SSP_GEN_UNKNOWN = 0, USB_SSP_GEN_2x1 = 1, USB_SSP_GEN_1x2 = 2, USB_SSP_GEN_2x2 = 3, }; enum usb_device_state { USB_STATE_NOTATTACHED = 0, USB_STATE_ATTACHED = 1, USB_STATE_POWERED = 2, USB_STATE_RECONNECTING = 3, USB_STATE_UNAUTHENTICATED = 4, USB_STATE_DEFAULT = 5, USB_STATE_ADDRESS = 6, USB_STATE_CONFIGURED = 7, USB_STATE_SUSPENDED = 8, }; enum usb_dr_mode { USB_DR_MODE_UNKNOWN = 0, USB_DR_MODE_HOST = 1, USB_DR_MODE_PERIPHERAL = 2, USB_DR_MODE_OTG = 3, }; struct usb_endpoint_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bEndpointAddress; __u8 bmAttributes; __le16 wMaxPacketSize; __u8 bInterval; __u8 bRefresh; __u8 bSynchAddress; } __attribute__((packed)); struct usb_otg_caps { u16 otg_rev; bool hnp_support; bool srp_support; bool adp_support; }; enum usb_interface_condition { USB_INTERFACE_UNBOUND = 0, USB_INTERFACE_BINDING = 1, USB_INTERFACE_BOUND = 2, USB_INTERFACE_UNBINDING = 3, }; enum usb_wireless_status { USB_WIRELESS_STATUS_NA = 0, USB_WIRELESS_STATUS_DISCONNECTED = 1, USB_WIRELESS_STATUS_CONNECTED = 2, }; enum usb3_link_state { USB3_LPM_U0 = 0, USB3_LPM_U1 = 1, USB3_LPM_U2 = 2, USB3_LPM_U3 = 3, }; enum usb_phy_type { USB_PHY_TYPE_UNDEFINED = 0, USB_PHY_TYPE_USB2 = 1, USB_PHY_TYPE_USB3 = 2, }; enum usb_phy_events { USB_EVENT_NONE = 0, USB_EVENT_VBUS = 1, USB_EVENT_ID = 2, USB_EVENT_CHARGER = 3, USB_EVENT_ENUMERATED = 4, }; enum usb_charger_type { UNKNOWN_TYPE = 0, SDP_TYPE = 1, DCP_TYPE = 2, CDP_TYPE = 3, ACA_TYPE = 4, }; enum usb_charger_state { USB_CHARGER_DEFAULT = 0, USB_CHARGER_PRESENT = 1, USB_CHARGER_ABSENT = 2, }; enum usb_dev_authorize_policy { USB_DEVICE_AUTHORIZE_NONE = 0, USB_DEVICE_AUTHORIZE_ALL = 1, USB_DEVICE_AUTHORIZE_INTERNAL = 2, }; enum usb_port_connect_type { USB_PORT_CONNECT_TYPE_UNKNOWN = 0, 
USB_PORT_CONNECT_TYPE_HOT_PLUG = 1, USB_PORT_CONNECT_TYPE_HARD_WIRED = 2, USB_PORT_NOT_USED = 3, }; struct usb_host_interface; struct usb_interface_assoc_descriptor; struct usb_interface { struct usb_host_interface *altsetting; struct usb_host_interface *cur_altsetting; unsigned int num_altsetting; struct usb_interface_assoc_descriptor *intf_assoc; int minor; enum usb_interface_condition condition; unsigned int sysfs_files_created: 1; unsigned int ep_devs_created: 1; unsigned int unregistering: 1; unsigned int needs_remote_wakeup: 1; unsigned int needs_altsetting0: 1; unsigned int needs_binding: 1; unsigned int resetting_device: 1; unsigned int authorized: 1; enum usb_wireless_status wireless_status; struct work_struct wireless_status_work; struct device dev; struct device *usb_dev; struct work_struct reset_ws; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct usb_interface_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bInterfaceNumber; __u8 bAlternateSetting; __u8 bNumEndpoints; __u8 bInterfaceClass; __u8 bInterfaceSubClass; __u8 bInterfaceProtocol; __u8 iInterface; }; struct usb_host_endpoint; struct usb_host_interface { struct usb_interface_descriptor desc; int extralen; unsigned char *extra; struct usb_host_endpoint *endpoint; char *string; }; struct usb_ss_ep_comp_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bMaxBurst; __u8 bmAttributes; __le16 wBytesPerInterval; }; struct usb_ssp_isoc_ep_comp_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 wReseved; __le32 dwBytesPerInterval; }; struct ep_device; struct usb_host_endpoint { struct usb_endpoint_descriptor desc; struct usb_ss_ep_comp_descriptor ss_ep_comp; struct usb_ssp_isoc_ep_comp_descriptor ssp_isoc_ep_comp; long: 0; struct list_head urb_list; void *hcpriv; struct ep_device *ep_dev; unsigned char *extra; int extralen; int enabled; int streams; long: 0; } __attribute__((packed)); struct usb_interface_assoc_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bFirstInterface; __u8 bInterfaceCount; __u8 bFunctionClass; __u8 bFunctionSubClass; __u8 bFunctionProtocol; __u8 iFunction; }; struct usb_descriptor_header { __u8 bLength; __u8 bDescriptorType; }; struct usb_device; struct each_dev_arg { void *data; int (*fn)(struct usb_device *, void *); }; struct usb_device_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 bcdUSB; __u8 bDeviceClass; __u8 bDeviceSubClass; __u8 bDeviceProtocol; __u8 bMaxPacketSize0; __le16 idVendor; __le16 idProduct; __le16 bcdDevice; __u8 iManufacturer; __u8 iProduct; __u8 iSerialNumber; __u8 bNumConfigurations; }; struct usb2_lpm_parameters { unsigned int besl; int timeout; }; struct usb3_lpm_parameters { unsigned int mel; unsigned int pel; unsigned int sel; int timeout; }; struct usb_tt; struct usb_bus; struct usb_host_bos; struct usb_host_config; struct usb_device { int devnum; char devpath[16]; u32 route; enum usb_device_state state; enum usb_device_speed speed; unsigned int rx_lanes; unsigned int tx_lanes; enum usb_ssp_rate ssp_rate; struct usb_tt *tt; int ttport; unsigned int toggle[2]; struct usb_device *parent; struct usb_bus *bus; struct usb_host_endpoint ep0; struct device dev; struct usb_device_descriptor descriptor; struct usb_host_bos *bos; struct usb_host_config *config; struct usb_host_config *actconfig; struct usb_host_endpoint *ep_in[16]; struct usb_host_endpoint *ep_out[16]; char **rawdescriptors; unsigned short bus_mA; u8 portnum; u8 level; u8 devaddr; unsigned int can_submit: 1; unsigned 
int persist_enabled: 1; unsigned int reset_in_progress: 1; unsigned int have_langid: 1; unsigned int authorized: 1; unsigned int authenticated: 1; unsigned int lpm_capable: 1; unsigned int lpm_devinit_allow: 1; unsigned int usb2_hw_lpm_capable: 1; unsigned int usb2_hw_lpm_besl_capable: 1; unsigned int usb2_hw_lpm_enabled: 1; unsigned int usb2_hw_lpm_allowed: 1; unsigned int usb3_lpm_u1_enabled: 1; unsigned int usb3_lpm_u2_enabled: 1; int string_langid; char *product; char *manufacturer; char *serial; struct list_head filelist; int maxchild; u32 quirks; atomic_t urbnum; unsigned long active_duration; unsigned long connect_time; unsigned int do_remote_wakeup: 1; unsigned int reset_resume: 1; unsigned int port_is_suspended: 1; int slot_id; struct usb2_lpm_parameters l1_params; struct usb3_lpm_parameters u1_params; struct usb3_lpm_parameters u2_params; unsigned int lpm_disable_count; u16 hub_delay; unsigned int use_generic_driver: 1; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct usb_tt { struct usb_device *hub; int multi; unsigned int think_time; void *hcpriv; spinlock_t lock; struct list_head clear_list; struct work_struct clear_work; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct usb_devmap { unsigned long devicemap[2]; }; struct mon_bus; struct usb_bus { struct device *controller; struct device *sysdev; int busnum; const char *bus_name; u8 uses_pio_for_control; u8 otg_port; unsigned int is_b_host: 1; unsigned int b_hnp_enable: 1; unsigned int no_stop_on_short: 1; unsigned int no_sg_constraint: 1; unsigned int sg_tablesize; int devnum_next; struct mutex devnum_next_mutex; struct usb_devmap devmap; struct usb_device *root_hub; struct usb_bus *hs_companion; int bandwidth_allocated; int bandwidth_int_reqs; int bandwidth_isoc_reqs; unsigned int resuming_ports; struct mon_bus *mon_bus; int monitored; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct usb_bos_descriptor; struct usb_ext_cap_descriptor; struct usb_ss_cap_descriptor; struct usb_ssp_cap_descriptor; struct usb_ss_container_id_descriptor; struct usb_ptm_cap_descriptor; struct usb_host_bos { struct usb_bos_descriptor *desc; struct usb_ext_cap_descriptor *ext_cap; struct usb_ss_cap_descriptor *ss_cap; struct usb_ssp_cap_descriptor *ssp_cap; struct usb_ss_container_id_descriptor *ss_id; struct usb_ptm_cap_descriptor *ptm_cap; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct usb_bos_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 wTotalLength; __u8 bNumDeviceCaps; } __attribute__((packed)); struct usb_ext_cap_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __le32 bmAttributes; } __attribute__((packed)); struct usb_ss_cap_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __u8 bmAttributes; __le16 wSpeedSupported; __u8 bFunctionalitySupport; __u8 bU1devExitLat; __le16 bU2DevExitLat; }; struct usb_ssp_cap_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __u8 bReserved; __le32 bmAttributes; __le16 wFunctionalitySupport; __le16 wReserved; union { __le32 legacy_padding; struct { struct {} __empty_bmSublinkSpeedAttr; __le32 bmSublinkSpeedAttr[0]; }; }; }; struct usb_ss_container_id_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __u8 bReserved; __u8 
ContainerID[16]; }; struct usb_ptm_cap_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; }; struct usb_config_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 wTotalLength; __u8 bNumInterfaces; __u8 bConfigurationValue; __u8 iConfiguration; __u8 bmAttributes; __u8 bMaxPower; } __attribute__((packed)); struct usb_interface_cache; struct usb_host_config { struct usb_config_descriptor desc; char *string; struct usb_interface_assoc_descriptor *intf_assoc[16]; struct usb_interface *interface[32]; struct usb_interface_cache *intf_cache[32]; unsigned char *extra; int extralen; }; struct usb_interface_cache { unsigned int num_altsetting; struct kref ref; struct usb_host_interface altsetting[0]; }; struct giveback_urb_bh { bool running; bool high_prio; spinlock_t lock; struct list_head head; struct tasklet_struct bh; struct usb_host_endpoint *completing_ep; }; struct urb; struct hc_driver; struct usb_phy; struct usb_phy_roothub; struct usb_hcd { struct usb_bus self; struct kref kref; const char *product_desc; int speed; char irq_descr[24]; struct timer_list rh_timer; struct urb *status_urb; struct work_struct wakeup_work; struct work_struct died_work; const struct hc_driver *driver; struct usb_phy *usb_phy; struct usb_phy_roothub *phy_roothub; unsigned long flags; enum usb_dev_authorize_policy dev_policy; unsigned int rh_registered: 1; unsigned int rh_pollable: 1; unsigned int msix_enabled: 1; unsigned int msi_enabled: 1; unsigned int skip_phy_initialization: 1; unsigned int uses_new_polling: 1; unsigned int has_tt: 1; unsigned int amd_resume_bug: 1; unsigned int can_do_streams: 1; unsigned int tpl_support: 1; unsigned int cant_recv_wakeups: 1; unsigned int irq; void *regs; resource_size_t rsrc_start; resource_size_t rsrc_len; unsigned int power_budget; struct giveback_urb_bh high_prio_bh; struct giveback_urb_bh low_prio_bh; struct mutex *address0_mutex; struct mutex *bandwidth_mutex; struct usb_hcd *shared_hcd; struct usb_hcd *primary_hcd; struct dma_pool *pool[4]; int state; struct gen_pool *localmem_pool; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; unsigned long hcd_priv[0]; }; typedef void (*usb_complete_t)(struct urb *); struct usb_iso_packet_descriptor { unsigned int offset; unsigned int length; unsigned int actual_length; int status; }; struct usb_anchor; struct urb { struct kref kref; int unlinked; void *hcpriv; atomic_t use_count; atomic_t reject; struct list_head urb_list; struct list_head anchor_list; struct usb_anchor *anchor; struct usb_device *dev; struct usb_host_endpoint *ep; unsigned int pipe; unsigned int stream_id; int status; unsigned int transfer_flags; void *transfer_buffer; dma_addr_t transfer_dma; struct scatterlist *sg; int num_mapped_sgs; int num_sgs; u32 transfer_buffer_length; u32 actual_length; unsigned char *setup_packet; dma_addr_t setup_dma; int start_frame; int number_of_packets; int interval; int error_count; void *context; usb_complete_t complete; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; struct usb_iso_packet_descriptor iso_frame_desc[0]; }; struct usb_anchor { struct list_head urb_list; wait_queue_head_t wait; spinlock_t lock; atomic_t suspend_wakeups; unsigned int poisoned: 1; }; struct hc_driver { const char *description; const char *product_desc; size_t hcd_priv_size; irqreturn_t (*irq)(struct usb_hcd *); int flags; int (*reset)(struct usb_hcd *); int (*start)(struct usb_hcd *); int 
(*pci_suspend)(struct usb_hcd *, bool); int (*pci_resume)(struct usb_hcd *, pm_message_t); int (*pci_poweroff_late)(struct usb_hcd *, bool); void (*stop)(struct usb_hcd *); void (*shutdown)(struct usb_hcd *); int (*get_frame_number)(struct usb_hcd *); int (*urb_enqueue)(struct usb_hcd *, struct urb *, gfp_t); int (*urb_dequeue)(struct usb_hcd *, struct urb *, int); int (*map_urb_for_dma)(struct usb_hcd *, struct urb *, gfp_t); void (*unmap_urb_for_dma)(struct usb_hcd *, struct urb *); void (*endpoint_disable)(struct usb_hcd *, struct usb_host_endpoint *); void (*endpoint_reset)(struct usb_hcd *, struct usb_host_endpoint *); int (*hub_status_data)(struct usb_hcd *, char *); int (*hub_control)(struct usb_hcd *, u16, u16, u16, char *, u16); int (*bus_suspend)(struct usb_hcd *); int (*bus_resume)(struct usb_hcd *); int (*start_port_reset)(struct usb_hcd *, unsigned int); unsigned long (*get_resuming_ports)(struct usb_hcd *); void (*relinquish_port)(struct usb_hcd *, int); int (*port_handed_over)(struct usb_hcd *, int); void (*clear_tt_buffer_complete)(struct usb_hcd *, struct usb_host_endpoint *); int (*alloc_dev)(struct usb_hcd *, struct usb_device *); void (*free_dev)(struct usb_hcd *, struct usb_device *); int (*alloc_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, unsigned int, gfp_t); int (*free_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, gfp_t); int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); int (*address_device)(struct usb_hcd *, struct usb_device *, unsigned int); int (*enable_device)(struct usb_hcd *, struct usb_device *); int (*update_hub_device)(struct usb_hcd *, struct usb_device *, struct usb_tt *, gfp_t); int (*reset_device)(struct usb_hcd *, struct usb_device *); int (*update_device)(struct usb_hcd *, struct usb_device *); int (*set_usb2_hw_lpm)(struct usb_hcd *, struct usb_device *, int); int (*enable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state); int (*disable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state); int (*find_raw_port_number)(struct usb_hcd *, int); int (*port_power)(struct usb_hcd *, int, bool); int (*submit_single_step_set_feature)(struct usb_hcd *, struct urb *, int); u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct usb_charger_current { unsigned int sdp_min; unsigned int sdp_max; unsigned int dcp_min; unsigned int dcp_max; unsigned int cdp_min; unsigned int cdp_max; unsigned int aca_min; unsigned int aca_max; }; struct usb_otg; struct usb_phy_io_ops; struct extcon_dev; struct usb_phy { struct device *dev; const char *label; unsigned int flags; enum usb_phy_type type; enum usb_phy_events last_event; struct usb_otg *otg; struct device *io_dev; struct usb_phy_io_ops *io_ops; void *io_priv; struct extcon_dev *edev; struct extcon_dev *id_edev; struct notifier_block vbus_nb; struct notifier_block id_nb; struct notifier_block type_nb; enum usb_charger_type chg_type; enum usb_charger_state chg_state; struct usb_charger_current chg_cur; struct work_struct chg_work; struct atomic_notifier_head notifier; u16 port_status; u16 port_change; struct list_head head; int (*init)(struct usb_phy *); void 
(*shutdown)(struct usb_phy *); int (*set_vbus)(struct usb_phy *, int); int (*set_power)(struct usb_phy *, unsigned int); int (*set_suspend)(struct usb_phy *, int); int (*set_wakeup)(struct usb_phy *, bool); int (*notify_connect)(struct usb_phy *, enum usb_device_speed); int (*notify_disconnect)(struct usb_phy *, enum usb_device_speed); enum usb_charger_type (*charger_detect)(struct usb_phy *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct usb_gadget; struct usb_otg { u8 default_a; struct phy *phy; struct usb_phy *usb_phy; struct usb_bus *host; struct usb_gadget *gadget; enum usb_otg_state state; int (*set_host)(struct usb_otg *, struct usb_bus *); int (*set_peripheral)(struct usb_otg *, struct usb_gadget *); int (*set_vbus)(struct usb_otg *, bool); int (*start_srp)(struct usb_otg *); int (*start_hnp)(struct usb_otg *); u64 android_kabi_reserved1; }; struct usb_phy_io_ops { int (*read)(struct usb_phy *, u32); int (*write)(struct usb_phy *, u32, u32); }; struct usb_hub_status { __le16 wHubStatus; __le16 wHubChange; }; struct usb_port_status { __le16 wPortStatus; __le16 wPortChange; __le32 dwExtPortStatus; }; struct usb_hub_descriptor; struct usb_port; struct usb_hub { struct device *intfdev; struct usb_device *hdev; struct kref kref; struct urb *urb; u8 (*buffer)[8]; union { struct usb_hub_status hub; struct usb_port_status port; } *status; struct mutex status_mutex; int error; int nerrors; unsigned long event_bits[1]; unsigned long change_bits[1]; unsigned long removed_bits[1]; unsigned long wakeup_bits[1]; unsigned long power_bits[1]; unsigned long child_usage_bits[1]; unsigned long warm_reset_bits[1]; struct usb_hub_descriptor *descriptor; struct usb_tt tt; unsigned int mA_per_port; unsigned int wakeup_enabled_descendants; unsigned int limited_power: 1; unsigned int quiescing: 1; unsigned int disconnected: 1; unsigned int in_reset: 1; unsigned int quirk_disable_autosuspend: 1; unsigned int quirk_check_port_auto_suspend: 1; unsigned int has_indicators: 1; u8 indicator[31]; struct delayed_work leds; struct delayed_work init_work; struct work_struct events; spinlock_t irq_urb_lock; struct timer_list irq_urb_retry; struct usb_port **ports; struct list_head onboard_hub_devs; }; struct usb_hub_descriptor { __u8 bDescLength; __u8 bDescriptorType; __u8 bNbrPorts; __le16 wHubCharacteristics; __u8 bPwrOn2PwrGood; __u8 bHubContrCurrent; union { struct { __u8 DeviceRemovable[4]; __u8 PortPwrCtrlMask[4]; } hs; struct { __u8 bHubHdrDecLat; __le16 wHubDelay; __le16 DeviceRemovable; } __attribute__((packed)) ss; } u; } __attribute__((packed)); typedef u32 usb_port_location_t; struct usb_dev_state; struct usb_port { struct usb_device *child; struct device dev; struct usb_dev_state *port_owner; struct usb_port *peer; struct dev_pm_qos_request *req; enum usb_port_connect_type connect_type; enum usb_device_state state; struct kernfs_node *state_kn; usb_port_location_t location; struct mutex status_lock; u32 over_current_count; u8 portnum; u32 quirks; unsigned int early_stop: 1; unsigned int ignore_event: 1; unsigned int is_superspeed: 1; unsigned int usb3_lpm_u1_permit: 1; unsigned int usb3_lpm_u2_permit: 1; }; struct usbdrv_wrap { struct device_driver driver; int for_devices; }; struct usb_device_id; struct usb_device_driver { const char *name; bool (*match)(struct usb_device *); int (*probe)(struct usb_device *); void (*disconnect)(struct usb_device *); int (*suspend)(struct usb_device *, pm_message_t); int (*resume)(struct 
usb_device *, pm_message_t); const struct attribute_group **dev_groups; struct usbdrv_wrap drvwrap; const struct usb_device_id *id_table; unsigned int supports_autosuspend: 1; unsigned int generic_subclass: 1; }; struct usb_device_id { __u16 match_flags; __u16 idVendor; __u16 idProduct; __u16 bcdDevice_lo; __u16 bcdDevice_hi; __u8 bDeviceClass; __u8 bDeviceSubClass; __u8 bDeviceProtocol; __u8 bInterfaceClass; __u8 bInterfaceSubClass; __u8 bInterfaceProtocol; __u8 bInterfaceNumber; kernel_ulong_t driver_info; }; struct usb_dynids { spinlock_t lock; struct list_head list; }; struct usb_driver { const char *name; int (*probe)(struct usb_interface *, const struct usb_device_id *); void (*disconnect)(struct usb_interface *); int (*unlocked_ioctl)(struct usb_interface *, unsigned int, void *); int (*suspend)(struct usb_interface *, pm_message_t); int (*resume)(struct usb_interface *); int (*reset_resume)(struct usb_interface *); int (*pre_reset)(struct usb_interface *); int (*post_reset)(struct usb_interface *); const struct usb_device_id *id_table; const struct attribute_group **dev_groups; struct usb_dynids dynids; struct usbdrv_wrap drvwrap; unsigned int no_dynamic_id: 1; unsigned int supports_autosuspend: 1; unsigned int disable_hub_initiated_lpm: 1; unsigned int soft_unbind: 1; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct find_interface_arg { int minor; struct device_driver *drv; }; enum hub_led_mode { INDICATOR_AUTO = 0, INDICATOR_CYCLE = 1, INDICATOR_GREEN_BLINK = 2, INDICATOR_GREEN_BLINK_OFF = 3, INDICATOR_AMBER_BLINK = 4, INDICATOR_AMBER_BLINK_OFF = 5, INDICATOR_ALT_BLINK = 6, INDICATOR_ALT_BLINK_OFF = 7, } __attribute__((mode(byte))); enum hub_quiescing_type { HUB_DISCONNECT = 0, HUB_PRE_RESET = 1, HUB_SUSPEND = 2, }; enum hub_activation_type { HUB_INIT = 0, HUB_INIT2 = 1, HUB_INIT3 = 2, HUB_POST_RESET = 3, HUB_RESUME = 4, HUB_RESET_RESUME = 5, }; struct usb_tt_clear { struct list_head clear_list; unsigned int tt; u16 devinfo; struct usb_hcd *hcd; struct usb_host_endpoint *ep; }; struct usb_otg_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bmAttributes; }; struct usb_qualifier_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 bcdUSB; __u8 bDeviceClass; __u8 bDeviceSubClass; __u8 bDeviceProtocol; __u8 bMaxPacketSize0; __u8 bNumConfigurations; __u8 bRESERVED; }; struct usbdevfs_hub_portinfo { char nports; char port[127]; }; struct usb_set_sel_req { __u8 u1_sel; __u8 u1_pel; __le16 u2_sel; __le16 u2_pel; }; struct usb_mon_operations { void (*urb_submit)(struct usb_bus *, struct urb *); void (*urb_submit_error)(struct usb_bus *, struct urb *, int); void (*urb_complete)(struct usb_bus *, struct urb *, int); }; enum usb_led_event { USB_LED_EVENT_HOST = 0, USB_LED_EVENT_GADGET = 1, }; struct usb_ctrlrequest { __u8 bRequestType; __u8 bRequest; __le16 wValue; __le16 wIndex; __le16 wLength; }; struct usb_cdc_union_desc { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __u8 bMasterInterface0; __u8 bSlaveInterface0; }; struct usb_cdc_country_functional_desc { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __u8 iCountryCodeRelDate; __le16 wCountyCode0; }; struct usb_cdc_header_desc { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __le16 bcdCDC; } __attribute__((packed)); struct usb_cdc_acm_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __u8 bmCapabilities; }; struct usb_cdc_ether_desc { __u8 bLength; __u8 bDescriptorType; __u8 
bDescriptorSubType; __u8 iMACAddress; __le32 bmEthernetStatistics; __le16 wMaxSegmentSize; __le16 wNumberMCFilters; __u8 bNumberPowerFilters; } __attribute__((packed)); struct usb_cdc_call_mgmt_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __u8 bmCapabilities; __u8 bDataInterface; }; struct usb_cdc_dmm_desc { __u8 bFunctionLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u16 bcdVersion; __le16 wMaxCommand; } __attribute__((packed)); struct usb_cdc_mdlm_desc { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __le16 bcdVersion; __u8 bGUID[16]; } __attribute__((packed)); struct usb_cdc_mdlm_detail_desc { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __u8 bGuidDescriptorType; __u8 bDetailData[0]; }; struct usb_cdc_ncm_desc { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __le16 bcdNcmVersion; __u8 bmNetworkCapabilities; } __attribute__((packed)); struct usb_cdc_mbim_desc { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __le16 bcdMBIMVersion; __le16 wMaxControlMessage; __u8 bNumberFilters; __u8 bMaxFilterSize; __le16 wMaxSegmentSize; __u8 bmNetworkCapabilities; } __attribute__((packed)); struct usb_cdc_mbim_extended_desc { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __le16 bcdMBIMExtendedVersion; __u8 bMaxOutstandingCommandMessages; __le16 wMTU; } __attribute__((packed)); struct set_config_request { struct usb_device *udev; int config; struct work_struct work; struct list_head node; }; struct usb_sg_request { int status; size_t bytes; spinlock_t lock; struct usb_device *dev; int pipe; int entries; struct urb **urbs; int count; struct completion complete; }; struct api_context { struct completion done; int status; }; struct usb_cdc_network_terminal_desc; struct usb_cdc_obex_desc; struct usb_cdc_parsed_header { struct usb_cdc_union_desc *usb_cdc_union_desc; struct usb_cdc_header_desc *usb_cdc_header_desc; struct usb_cdc_call_mgmt_descriptor *usb_cdc_call_mgmt_descriptor; struct usb_cdc_acm_descriptor *usb_cdc_acm_descriptor; struct usb_cdc_country_functional_desc *usb_cdc_country_functional_desc; struct usb_cdc_network_terminal_desc *usb_cdc_network_terminal_desc; struct usb_cdc_ether_desc *usb_cdc_ether_desc; struct usb_cdc_dmm_desc *usb_cdc_dmm_desc; struct usb_cdc_mdlm_desc *usb_cdc_mdlm_desc; struct usb_cdc_mdlm_detail_desc *usb_cdc_mdlm_detail_desc; struct usb_cdc_obex_desc *usb_cdc_obex_desc; struct usb_cdc_ncm_desc *usb_cdc_ncm_desc; struct usb_cdc_mbim_desc *usb_cdc_mbim_desc; struct usb_cdc_mbim_extended_desc *usb_cdc_mbim_extended_desc; bool phonet_magic_present; }; struct usb_cdc_network_terminal_desc { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __u8 bEntityId; __u8 iName; __u8 bChannelIndex; __u8 bPhysicalInterface; }; struct usb_cdc_obex_desc { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __le16 bcdVersion; } __attribute__((packed)); struct usb_dynid { struct list_head node; struct usb_device_id id; }; struct usb_dev_cap_header { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; }; struct usb_class_driver { char *name; char * (*devnode)(const struct device *, umode_t *); const struct file_operations *fops; int minor_base; }; struct ep_device { struct usb_endpoint_descriptor *desc; struct usb_device *udev; struct device dev; }; enum snoop_when { SUBMIT = 0, COMPLETE___2 = 1, }; struct usb_dev_state { struct list_head list; struct usb_device *dev; struct file *file; spinlock_t lock; struct list_head async_pending; struct list_head 
async_completed; struct list_head memory_list; wait_queue_head_t wait; wait_queue_head_t wait_for_resume; unsigned int discsignr; struct pid *disc_pid; const struct cred *cred; sigval_t disccontext; unsigned long ifclaimed; u32 disabled_bulk_eps; unsigned long interface_allowed_mask; int not_yet_resumed; bool suspend_allowed; bool privileges_dropped; }; struct usb_memory; struct async { struct list_head asynclist; struct usb_dev_state *ps; struct pid *pid; const struct cred *cred; unsigned int signr; unsigned int ifnum; void __attribute__((btf_type_tag("user"))) *userbuffer; void __attribute__((btf_type_tag("user"))) *userurb; sigval_t userurb_sigval; struct urb *urb; struct usb_memory *usbm; unsigned int mem_usage; int status; u8 bulk_addr; u8 bulk_status; }; struct usb_memory { struct list_head memlist; int vma_use_count; int urb_use_count; u32 size; void *mem; dma_addr_t dma_handle; unsigned long vm_start; struct usb_dev_state *ps; }; struct usbdevfs_iso_packet_desc { unsigned int length; unsigned int actual_length; unsigned int status; }; struct usbdevfs_urb { unsigned char type; unsigned char endpoint; int status; unsigned int flags; void __attribute__((btf_type_tag("user"))) *buffer; int buffer_length; int actual_length; int start_frame; union { int number_of_packets; unsigned int stream_id; }; int error_count; unsigned int signr; void __attribute__((btf_type_tag("user"))) *usercontext; struct usbdevfs_iso_packet_desc iso_frame_desc[0]; }; struct usbdevfs_urb32 { unsigned char type; unsigned char endpoint; compat_int_t status; compat_uint_t flags; compat_caddr_t buffer; compat_int_t buffer_length; compat_int_t actual_length; compat_int_t start_frame; compat_int_t number_of_packets; compat_int_t error_count; compat_uint_t signr; compat_caddr_t usercontext; struct usbdevfs_iso_packet_desc iso_frame_desc[0]; }; struct usbdevfs_setinterface { unsigned int interface; unsigned int altsetting; }; struct usbdevfs_bulktransfer32 { compat_uint_t ep; compat_uint_t len; compat_uint_t timeout; compat_caddr_t data; }; struct usbdevfs_bulktransfer { unsigned int ep; unsigned int len; unsigned int timeout; void __attribute__((btf_type_tag("user"))) *data; }; struct usbdevfs_getdriver { unsigned int interface; char driver[256]; }; struct usbdevfs_disconnect_claim { unsigned int interface; unsigned int flags; char driver[256]; }; struct usbdevfs_disconnectsignal32 { compat_int_t signr; compat_caddr_t context; }; struct usbdevfs_ctrltransfer32 { u8 bRequestType; u8 bRequest; u16 wValue; u16 wIndex; u16 wLength; u32 timeout; compat_caddr_t data; }; struct usbdevfs_ctrltransfer { __u8 bRequestType; __u8 bRequest; __u16 wValue; __u16 wIndex; __u16 wLength; __u32 timeout; void __attribute__((btf_type_tag("user"))) *data; }; struct usbdevfs_disconnectsignal { unsigned int signr; void __attribute__((btf_type_tag("user"))) *context; }; struct usbdevfs_ioctl32 { s32 ifno; s32 ioctl_code; compat_caddr_t data; }; struct usbdevfs_ioctl { int ifno; int ioctl_code; void __attribute__((btf_type_tag("user"))) *data; }; struct usbdevfs_connectinfo { unsigned int devnum; unsigned char slow; }; struct usbdevfs_conninfo_ex { __u32 size; __u32 busnum; __u32 devnum; __u32 speed; __u8 num_ports; __u8 ports[7]; }; struct usbdevfs_streams { unsigned int num_streams; unsigned int num_eps; unsigned char eps[0]; }; struct quirk_entry { u16 vid; u16 pid; u32 flags; }; struct class_info { int class; char *class_name; }; struct usb_phy_roothub { struct phy *phy; struct list_head list; }; typedef void (*companion_fn)(struct pci_dev 
*, struct usb_hcd *, struct pci_dev *, struct usb_hcd *); struct phy_devm { struct usb_phy *phy; struct notifier_block *nb; }; enum usb_phy_interface { USBPHY_INTERFACE_MODE_UNKNOWN = 0, USBPHY_INTERFACE_MODE_UTMI = 1, USBPHY_INTERFACE_MODE_UTMIW = 2, USBPHY_INTERFACE_MODE_ULPI = 3, USBPHY_INTERFACE_MODE_SERIAL = 4, USBPHY_INTERFACE_MODE_HSIC = 5, }; struct usb_ep; struct usb_request { void *buf; unsigned int length; dma_addr_t dma; struct scatterlist *sg; unsigned int num_sgs; unsigned int num_mapped_sgs; unsigned int stream_id: 16; unsigned int is_last: 1; unsigned int no_interrupt: 1; unsigned int zero: 1; unsigned int short_not_ok: 1; unsigned int dma_mapped: 1; void (*complete)(struct usb_ep *, struct usb_request *); void *context; struct list_head list; unsigned int frame_number; int status; unsigned int actual; u64 android_kabi_reserved1; }; struct dwc3_ep; struct dwc3_trb; struct dwc3_request { struct usb_request request; struct list_head list; struct dwc3_ep *dep; struct scatterlist *sg; struct scatterlist *start_sg; unsigned int num_pending_sgs; unsigned int num_queued_sgs; unsigned int remaining; unsigned int status; u8 epnum; struct dwc3_trb *trb; dma_addr_t trb_dma; unsigned int num_trbs; unsigned int needs_extra_trb: 1; unsigned int direction: 1; unsigned int mapped: 1; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct ulpi; enum dwc3_ep0_next { DWC3_EP0_UNKNOWN = 0, DWC3_EP0_COMPLETE = 1, DWC3_EP0_NRDY_DATA = 2, DWC3_EP0_NRDY_STATUS = 3, }; enum dwc3_ep0_state { EP0_UNCONNECTED = 0, EP0_SETUP_PHASE = 1, EP0_DATA_PHASE = 2, EP0_STATUS_PHASE = 3, }; enum dwc3_link_state { DWC3_LINK_STATE_U0 = 0, DWC3_LINK_STATE_U1 = 1, DWC3_LINK_STATE_U2 = 2, DWC3_LINK_STATE_U3 = 3, DWC3_LINK_STATE_SS_DIS = 4, DWC3_LINK_STATE_RX_DET = 5, DWC3_LINK_STATE_SS_INACT = 6, DWC3_LINK_STATE_POLL = 7, DWC3_LINK_STATE_RECOV = 8, DWC3_LINK_STATE_HRESET = 9, DWC3_LINK_STATE_CMPLY = 10, DWC3_LINK_STATE_LPBK = 11, DWC3_LINK_STATE_RESET = 14, DWC3_LINK_STATE_RESUME = 15, DWC3_LINK_STATE_MASK = 15, }; struct dwc3_hwparams { u32 hwparams0; u32 hwparams1; u32 hwparams2; u32 hwparams3; u32 hwparams4; u32 hwparams5; u32 hwparams6; u32 hwparams7; u32 hwparams8; u32 hwparams9; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct dwc3_event_buffer; struct usb_gadget_driver; struct usb_role_switch; struct power_supply; struct dwc3 { struct work_struct drd_work; struct dwc3_trb *ep0_trb; void *bounce; u8 *setup_buf; dma_addr_t ep0_trb_addr; dma_addr_t bounce_addr; struct dwc3_request ep0_usb_req; struct completion ep0_in_setup; spinlock_t lock; struct mutex mutex; struct device *dev; struct device *sysdev; struct platform_device *xhci; struct resource xhci_resources[2]; struct dwc3_event_buffer *ev_buf; struct dwc3_ep *eps[32]; struct usb_gadget *gadget; struct usb_gadget_driver *gadget_driver; struct clk *bus_clk; struct clk *ref_clk; struct clk *susp_clk; struct reset_control *reset; struct usb_phy *usb2_phy; struct usb_phy *usb3_phy; struct phy *usb2_generic_phy; struct phy *usb3_generic_phy; bool phys_ready; struct ulpi *ulpi; bool ulpi_ready; void *regs; size_t regs_size; enum usb_dr_mode dr_mode; u32 current_dr_role; u32 desired_dr_role; struct extcon_dev *edev; struct notifier_block edev_nb; enum usb_phy_interface hsphy_mode; struct usb_role_switch *role_sw; enum usb_dr_mode role_switch_default_mode; struct power_supply *usb_psy; u32 fladj; u32 ref_clk_per; u32 irq_gadget; u32 otg_irq; u32 current_otg_role; u32 desired_otg_role; bool otg_restart_host; u32 u1u2; u32 maximum_speed; 
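/* Illustrative aside, not part of the generated dump: a BPF program that
 * includes this header can read fields of the surrounding struct dwc3 with
 * CO-RE relocations. A minimal sketch, assuming <bpf/bpf_helpers.h>,
 * <bpf/bpf_tracing.h> and <bpf/bpf_core_read.h> are included after this file
 * and that the target kernel exposes the dwc3_event tracepoint (its BTF
 * prototype, btf_trace_dwc3_event, appears further below):
 *
 *   SEC("tp_btf/dwc3_event")
 *   int BPF_PROG(log_dwc3_event, u32 event, struct dwc3 *dwc)
 *   {
 *       u8 speed = BPF_CORE_READ(dwc, speed);
 *       enum dwc3_link_state link = BPF_CORE_READ(dwc, link_state);
 *
 *       bpf_printk("dwc3 event=0x%x speed=%u link=%d", event, speed, link);
 *       return 0;
 *   }
 */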
u32 gadget_max_speed; enum usb_ssp_rate max_ssp_rate; enum usb_ssp_rate gadget_ssp_rate; u32 ip; u32 revision; u32 version_type; enum dwc3_ep0_next ep0_next_event; enum dwc3_ep0_state ep0state; enum dwc3_link_state link_state; u16 u2sel; u16 u2pel; u8 u1sel; u8 u1pel; u8 speed; u8 num_eps; struct dwc3_hwparams hwparams; struct debugfs_regset32 *regset; u32 dbg_lsp_select; u8 test_mode; u8 test_mode_nr; u8 lpm_nyet_threshold; u8 hird_threshold; u8 rx_thr_num_pkt; u8 rx_max_burst; u8 tx_thr_num_pkt; u8 tx_max_burst; u8 rx_thr_num_pkt_prd; u8 rx_max_burst_prd; u8 tx_thr_num_pkt_prd; u8 tx_max_burst_prd; u8 tx_fifo_resize_max_num; u8 clear_stall_protocol; u16 num_hc_interrupters; const char *hsphy_interface; unsigned int connected: 1; unsigned int softconnect: 1; unsigned int delayed_status: 1; unsigned int ep0_bounced: 1; unsigned int ep0_expect_in: 1; unsigned int sysdev_is_parent: 1; unsigned int has_lpm_erratum: 1; unsigned int is_utmi_l1_suspend: 1; unsigned int is_fpga: 1; unsigned int pending_events: 1; unsigned int do_fifo_resize: 1; unsigned int pullups_connected: 1; unsigned int setup_packet_pending: 1; unsigned int three_stage_setup: 1; unsigned int dis_start_transfer_quirk: 1; unsigned int usb3_lpm_capable: 1; unsigned int usb2_lpm_disable: 1; unsigned int usb2_gadget_lpm_disable: 1; unsigned int disable_scramble_quirk: 1; unsigned int u2exit_lfps_quirk: 1; unsigned int u2ss_inp3_quirk: 1; unsigned int req_p1p2p3_quirk: 1; unsigned int del_p1p2p3_quirk: 1; unsigned int del_phy_power_chg_quirk: 1; unsigned int lfps_filter_quirk: 1; unsigned int rx_detect_poll_quirk: 1; unsigned int dis_u3_susphy_quirk: 1; unsigned int dis_u2_susphy_quirk: 1; unsigned int dis_enblslpm_quirk: 1; unsigned int dis_u1_entry_quirk: 1; unsigned int dis_u2_entry_quirk: 1; unsigned int dis_rxdet_inp3_quirk: 1; unsigned int dis_u2_freeclk_exists_quirk: 1; unsigned int dis_del_phy_power_chg_quirk: 1; unsigned int dis_tx_ipgap_linecheck_quirk: 1; unsigned int resume_hs_terminations: 1; unsigned int ulpi_ext_vbus_drv: 1; unsigned int parkmode_disable_ss_quirk: 1; unsigned int parkmode_disable_hs_quirk: 1; unsigned int gfladj_refclk_lpm_sel: 1; unsigned int tx_de_emphasis_quirk: 1; unsigned int tx_de_emphasis: 2; unsigned int dis_metastability_quirk: 1; unsigned int dis_split_quirk: 1; unsigned int async_callbacks: 1; unsigned int sys_wakeup: 1; unsigned int wakeup_configured: 1; unsigned int suspended: 1; u16 imod_interval; int max_cfg_eps; int last_fifo_depth; int num_ep_resized; struct dentry *debug_root; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct dwc3_trb { u32 bpl; u32 bph; u32 size; u32 ctrl; }; struct usb_ep_caps { unsigned int type_control: 1; unsigned int type_iso: 1; unsigned int type_bulk: 1; unsigned int type_int: 1; unsigned int dir_in: 1; unsigned int dir_out: 1; }; struct usb_ep_ops; struct usb_ep { void *driver_data; const char *name; const struct usb_ep_ops *ops; struct list_head ep_list; struct usb_ep_caps caps; bool claimed; bool enabled; unsigned int maxpacket: 16; unsigned int maxpacket_limit: 16; unsigned int max_streams: 16; unsigned int mult: 2; unsigned int maxburst: 5; u8 address; const struct usb_endpoint_descriptor *desc; const struct usb_ss_ep_comp_descriptor *comp_desc; u64 android_kabi_reserved1; }; struct usb_ep_ops { int (*enable)(struct usb_ep *, const struct usb_endpoint_descriptor *); int (*disable)(struct usb_ep *); void (*dispose)(struct usb_ep *); struct usb_request * (*alloc_request)(struct 
usb_ep *, gfp_t); void (*free_request)(struct usb_ep *, struct usb_request *); int (*queue)(struct usb_ep *, struct usb_request *, gfp_t); int (*dequeue)(struct usb_ep *, struct usb_request *); int (*set_halt)(struct usb_ep *, int); int (*set_wedge)(struct usb_ep *); int (*fifo_status)(struct usb_ep *); void (*fifo_flush)(struct usb_ep *); u64 android_kabi_reserved1; }; struct dwc3_ep { struct usb_ep endpoint; struct list_head cancelled_list; struct list_head pending_list; struct list_head started_list; void *regs; struct dwc3_trb *trb_pool; dma_addr_t trb_pool_dma; struct dwc3 *dwc; u32 saved_state; unsigned int flags; u8 trb_enqueue; u8 trb_dequeue; u8 number; u8 type; u8 resource_index; u32 frame_number; u32 interval; char name[20]; unsigned int direction: 1; unsigned int stream_capable: 1; u8 combo_num; int start_cmd_status; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct dwc3_event_buffer { void *buf; void *cache; unsigned int length; unsigned int lpos; unsigned int count; unsigned int flags; dma_addr_t dma; struct dwc3 *dwc; u64 android_kabi_reserved1; }; struct usb_udc; struct usb_gadget_ops; struct usb_gadget { struct work_struct work; struct usb_udc *udc; const struct usb_gadget_ops *ops; struct usb_ep *ep0; struct list_head ep_list; enum usb_device_speed speed; enum usb_device_speed max_speed; enum usb_ssp_rate ssp_rate; enum usb_ssp_rate max_ssp_rate; enum usb_device_state state; const char *name; struct device dev; unsigned int isoch_delay; unsigned int out_epnum; unsigned int in_epnum; unsigned int mA; struct usb_otg_caps *otg_caps; unsigned int sg_supported: 1; unsigned int is_otg: 1; unsigned int is_a_peripheral: 1; unsigned int b_hnp_enable: 1; unsigned int a_hnp_support: 1; unsigned int a_alt_hnp_support: 1; unsigned int hnp_polling_support: 1; unsigned int host_request_flag: 1; unsigned int quirk_ep_out_aligned_size: 1; unsigned int quirk_altset_not_supp: 1; unsigned int quirk_stall_not_supp: 1; unsigned int quirk_zlp_not_supp: 1; unsigned int quirk_avoids_skb_reserve: 1; unsigned int is_selfpowered: 1; unsigned int deactivated: 1; unsigned int connected: 1; unsigned int lpm_capable: 1; unsigned int wakeup_capable: 1; unsigned int wakeup_armed: 1; int irq; int id_number; }; struct usb_dcd_config_params; struct usb_gadget_ops { int (*get_frame)(struct usb_gadget *); int (*wakeup)(struct usb_gadget *); int (*func_wakeup)(struct usb_gadget *, int); int (*set_remote_wakeup)(struct usb_gadget *, int); int (*set_selfpowered)(struct usb_gadget *, int); int (*vbus_session)(struct usb_gadget *, int); int (*vbus_draw)(struct usb_gadget *, unsigned int); int (*pullup)(struct usb_gadget *, int); int (*ioctl)(struct usb_gadget *, unsigned int, unsigned long); void (*get_config_params)(struct usb_gadget *, struct usb_dcd_config_params *); int (*udc_start)(struct usb_gadget *, struct usb_gadget_driver *); int (*udc_stop)(struct usb_gadget *); void (*udc_set_speed)(struct usb_gadget *, enum usb_device_speed); void (*udc_set_ssp_rate)(struct usb_gadget *, enum usb_ssp_rate); void (*udc_async_callbacks)(struct usb_gadget *, bool); struct usb_ep * (*match_ep)(struct usb_gadget *, struct usb_endpoint_descriptor *, struct usb_ss_ep_comp_descriptor *); int (*check_config)(struct usb_gadget *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct usb_dcd_config_params { __u8 bU1devExitLat; __le16 bU2DevExitLat; __u8 besl_baseline; __u8 besl_deep; }; struct usb_gadget_driver { char *function; enum 
usb_device_speed max_speed; int (*bind)(struct usb_gadget *, struct usb_gadget_driver *); void (*unbind)(struct usb_gadget *); int (*setup)(struct usb_gadget *, const struct usb_ctrlrequest *); void (*disconnect)(struct usb_gadget *); void (*suspend)(struct usb_gadget *); void (*resume)(struct usb_gadget *); void (*reset)(struct usb_gadget *); struct device_driver driver; char *udc_name; unsigned int match_existing_only: 1; bool is_bound: 1; }; struct power_supply_desc; struct power_supply_battery_info; struct thermal_zone_device; struct power_supply { const struct power_supply_desc *desc; char **supplied_to; size_t num_supplicants; char **supplied_from; size_t num_supplies; struct device_node *of_node; void *drv_data; struct device dev; struct work_struct changed_work; struct delayed_work deferred_register_work; spinlock_t changed_lock; bool changed; bool initialized; bool removing; atomic_t use_cnt; struct power_supply_battery_info *battery_info; struct thermal_zone_device *tzd; struct thermal_cooling_device *tcd; struct led_trigger *charging_full_trig; char *charging_full_trig_name; struct led_trigger *charging_trig; char *charging_trig_name; struct led_trigger *full_trig; char *full_trig_name; struct led_trigger *online_trig; char *online_trig_name; struct led_trigger *charging_blink_full_solid_trig; char *charging_blink_full_solid_trig_name; u64 android_kabi_reserved1; }; enum power_supply_type { POWER_SUPPLY_TYPE_UNKNOWN = 0, POWER_SUPPLY_TYPE_BATTERY = 1, POWER_SUPPLY_TYPE_UPS = 2, POWER_SUPPLY_TYPE_MAINS = 3, POWER_SUPPLY_TYPE_USB = 4, POWER_SUPPLY_TYPE_USB_DCP = 5, POWER_SUPPLY_TYPE_USB_CDP = 6, POWER_SUPPLY_TYPE_USB_ACA = 7, POWER_SUPPLY_TYPE_USB_TYPE_C = 8, POWER_SUPPLY_TYPE_USB_PD = 9, POWER_SUPPLY_TYPE_USB_PD_DRP = 10, POWER_SUPPLY_TYPE_APPLE_BRICK_ID = 11, POWER_SUPPLY_TYPE_WIRELESS = 12, }; enum power_supply_usb_type { POWER_SUPPLY_USB_TYPE_UNKNOWN = 0, POWER_SUPPLY_USB_TYPE_SDP = 1, POWER_SUPPLY_USB_TYPE_DCP = 2, POWER_SUPPLY_USB_TYPE_CDP = 3, POWER_SUPPLY_USB_TYPE_ACA = 4, POWER_SUPPLY_USB_TYPE_C = 5, POWER_SUPPLY_USB_TYPE_PD = 6, POWER_SUPPLY_USB_TYPE_PD_DRP = 7, POWER_SUPPLY_USB_TYPE_PD_PPS = 8, POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID = 9, }; enum power_supply_property { POWER_SUPPLY_PROP_STATUS = 0, POWER_SUPPLY_PROP_CHARGE_TYPE = 1, POWER_SUPPLY_PROP_HEALTH = 2, POWER_SUPPLY_PROP_PRESENT = 3, POWER_SUPPLY_PROP_ONLINE = 4, POWER_SUPPLY_PROP_AUTHENTIC = 5, POWER_SUPPLY_PROP_TECHNOLOGY = 6, POWER_SUPPLY_PROP_CYCLE_COUNT = 7, POWER_SUPPLY_PROP_VOLTAGE_MAX = 8, POWER_SUPPLY_PROP_VOLTAGE_MIN = 9, POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN = 10, POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN = 11, POWER_SUPPLY_PROP_VOLTAGE_NOW = 12, POWER_SUPPLY_PROP_VOLTAGE_AVG = 13, POWER_SUPPLY_PROP_VOLTAGE_OCV = 14, POWER_SUPPLY_PROP_VOLTAGE_BOOT = 15, POWER_SUPPLY_PROP_CURRENT_MAX = 16, POWER_SUPPLY_PROP_CURRENT_NOW = 17, POWER_SUPPLY_PROP_CURRENT_AVG = 18, POWER_SUPPLY_PROP_CURRENT_BOOT = 19, POWER_SUPPLY_PROP_POWER_NOW = 20, POWER_SUPPLY_PROP_POWER_AVG = 21, POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN = 22, POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN = 23, POWER_SUPPLY_PROP_CHARGE_FULL = 24, POWER_SUPPLY_PROP_CHARGE_EMPTY = 25, POWER_SUPPLY_PROP_CHARGE_NOW = 26, POWER_SUPPLY_PROP_CHARGE_AVG = 27, POWER_SUPPLY_PROP_CHARGE_COUNTER = 28, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT = 29, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX = 30, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE = 31, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX = 32, POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT = 33, POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX 
= 34, POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD = 35, POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD = 36, POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR = 37, POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT = 38, POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT = 39, POWER_SUPPLY_PROP_INPUT_POWER_LIMIT = 40, POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN = 41, POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN = 42, POWER_SUPPLY_PROP_ENERGY_FULL = 43, POWER_SUPPLY_PROP_ENERGY_EMPTY = 44, POWER_SUPPLY_PROP_ENERGY_NOW = 45, POWER_SUPPLY_PROP_ENERGY_AVG = 46, POWER_SUPPLY_PROP_CAPACITY = 47, POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN = 48, POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX = 49, POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN = 50, POWER_SUPPLY_PROP_CAPACITY_LEVEL = 51, POWER_SUPPLY_PROP_TEMP = 52, POWER_SUPPLY_PROP_TEMP_MAX = 53, POWER_SUPPLY_PROP_TEMP_MIN = 54, POWER_SUPPLY_PROP_TEMP_ALERT_MIN = 55, POWER_SUPPLY_PROP_TEMP_ALERT_MAX = 56, POWER_SUPPLY_PROP_TEMP_AMBIENT = 57, POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN = 58, POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX = 59, POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW = 60, POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG = 61, POWER_SUPPLY_PROP_TIME_TO_FULL_NOW = 62, POWER_SUPPLY_PROP_TIME_TO_FULL_AVG = 63, POWER_SUPPLY_PROP_TYPE = 64, POWER_SUPPLY_PROP_USB_TYPE = 65, POWER_SUPPLY_PROP_SCOPE = 66, POWER_SUPPLY_PROP_PRECHARGE_CURRENT = 67, POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT = 68, POWER_SUPPLY_PROP_CALIBRATE = 69, POWER_SUPPLY_PROP_MANUFACTURE_YEAR = 70, POWER_SUPPLY_PROP_MANUFACTURE_MONTH = 71, POWER_SUPPLY_PROP_MANUFACTURE_DAY = 72, POWER_SUPPLY_PROP_MODEL_NAME = 73, POWER_SUPPLY_PROP_MANUFACTURER = 74, POWER_SUPPLY_PROP_SERIAL_NUMBER = 75, }; union power_supply_propval; struct power_supply_desc { const char *name; enum power_supply_type type; const enum power_supply_usb_type *usb_types; size_t num_usb_types; const enum power_supply_property *properties; size_t num_properties; int (*get_property)(struct power_supply *, enum power_supply_property, union power_supply_propval *); int (*set_property)(struct power_supply *, enum power_supply_property, const union power_supply_propval *); int (*property_is_writeable)(struct power_supply *, enum power_supply_property); void (*external_power_changed)(struct power_supply *); void (*set_charged)(struct power_supply *); bool no_thermal; int use_for_apm; u64 android_kabi_reserved1; }; union power_supply_propval { int intval; const char *strval; }; struct power_supply_maintenance_charge_table; struct power_supply_battery_ocv_table; struct power_supply_resistance_temp_table; struct power_supply_vbat_ri_table; struct power_supply_battery_info { unsigned int technology; int energy_full_design_uwh; int charge_full_design_uah; int voltage_min_design_uv; int voltage_max_design_uv; int tricklecharge_current_ua; int precharge_current_ua; int precharge_voltage_max_uv; int charge_term_current_ua; int charge_restart_voltage_uv; int overvoltage_limit_uv; int constant_charge_current_max_ua; int constant_charge_voltage_max_uv; struct power_supply_maintenance_charge_table *maintenance_charge; int maintenance_charge_size; int alert_low_temp_charge_current_ua; int alert_low_temp_charge_voltage_uv; int alert_high_temp_charge_current_ua; int alert_high_temp_charge_voltage_uv; int factory_internal_resistance_uohm; int factory_internal_resistance_charging_uohm; int ocv_temp[20]; int temp_ambient_alert_min; int temp_ambient_alert_max; int temp_alert_min; int temp_alert_max; int temp_min; int temp_max; struct power_supply_battery_ocv_table *ocv_table[20]; int ocv_table_size[20]; struct 
power_supply_resistance_temp_table *resist_table; int resist_table_size; struct power_supply_vbat_ri_table *vbat2ri_discharging; int vbat2ri_discharging_size; struct power_supply_vbat_ri_table *vbat2ri_charging; int vbat2ri_charging_size; int bti_resistance_ohm; int bti_resistance_tolerance; u64 android_kabi_reserved1; }; struct power_supply_maintenance_charge_table { int charge_current_max_ua; int charge_voltage_max_uv; int charge_safety_timer_minutes; }; struct power_supply_battery_ocv_table { int ocv; int capacity; }; struct power_supply_resistance_temp_table { int temp; int resistance; }; struct power_supply_vbat_ri_table { int vbat_uv; int ri_uohm; }; typedef void (*btf_trace_dwc3_readl)(void *, void *, u32, u32); typedef void (*btf_trace_dwc3_writel)(void *, void *, u32, u32); typedef void (*btf_trace_dwc3_event)(void *, u32, struct dwc3 *); typedef void (*btf_trace_dwc3_ctrl_req)(void *, struct usb_ctrlrequest *); typedef void (*btf_trace_dwc3_alloc_request)(void *, struct dwc3_request *); typedef void (*btf_trace_dwc3_free_request)(void *, struct dwc3_request *); typedef void (*btf_trace_dwc3_ep_queue)(void *, struct dwc3_request *); typedef void (*btf_trace_dwc3_ep_dequeue)(void *, struct dwc3_request *); typedef void (*btf_trace_dwc3_gadget_giveback)(void *, struct dwc3_request *); typedef void (*btf_trace_dwc3_gadget_generic_cmd)(void *, unsigned int, u32, int); struct dwc3_gadget_ep_cmd_params; typedef void (*btf_trace_dwc3_gadget_ep_cmd)(void *, struct dwc3_ep *, unsigned int, struct dwc3_gadget_ep_cmd_params *, int); struct dwc3_gadget_ep_cmd_params { u32 param2; u32 param1; u32 param0; }; typedef void (*btf_trace_dwc3_prepare_trb)(void *, struct dwc3_ep *, struct dwc3_trb *); typedef void (*btf_trace_dwc3_complete_trb)(void *, struct dwc3_ep *, struct dwc3_trb *); typedef void (*btf_trace_dwc3_gadget_ep_enable)(void *, struct dwc3_ep *); typedef void (*btf_trace_dwc3_gadget_ep_disable)(void *, struct dwc3_ep *); struct trace_event_raw_dwc3_log_io { struct trace_entry ent; void *base; u32 offset; u32 value; char __data[0]; }; struct trace_event_raw_dwc3_log_event { struct trace_entry ent; u32 event; u32 ep0state; char __data[0]; }; struct trace_event_raw_dwc3_log_ctrl { struct trace_entry ent; __u8 bRequestType; __u8 bRequest; __u16 wValue; __u16 wIndex; __u16 wLength; char __data[0]; }; struct trace_event_raw_dwc3_log_request { struct trace_entry ent; u32 __data_loc_name; struct dwc3_request *req; unsigned int actual; unsigned int length; int status; int zero; int short_not_ok; int no_interrupt; char __data[0]; }; struct trace_event_raw_dwc3_log_generic_cmd { struct trace_entry ent; unsigned int cmd; u32 param; int status; char __data[0]; }; struct trace_event_raw_dwc3_log_gadget_ep_cmd { struct trace_entry ent; u32 __data_loc_name; unsigned int cmd; u32 param0; u32 param1; u32 param2; int cmd_status; char __data[0]; }; struct trace_event_raw_dwc3_log_trb { struct trace_entry ent; u32 __data_loc_name; struct dwc3_trb *trb; u32 bpl; u32 bph; u32 size; u32 ctrl; u32 type; u32 enqueue; u32 dequeue; char __data[0]; }; struct trace_event_raw_dwc3_log_ep { struct trace_entry ent; u32 __data_loc_name; unsigned int maxpacket; unsigned int maxpacket_limit; unsigned int max_streams; unsigned int maxburst; unsigned int flags; unsigned int direction; u8 trb_enqueue; u8 trb_dequeue; char __data[0]; }; struct trace_event_data_offsets_dwc3_log_request { u32 name; }; struct trace_event_data_offsets_dwc3_log_gadget_ep_cmd { u32 name; }; struct trace_event_data_offsets_dwc3_log_trb { u32 
name; }; struct trace_event_data_offsets_dwc3_log_ep { u32 name; }; struct dwc3_event_type { u32 is_devspec: 1; u32 type: 7; u32 reserved8_31: 24; }; struct dwc3_event_depevt { u32 one_bit: 1; u32 endpoint_number: 5; u32 endpoint_event: 4; u32 reserved11_10: 2; u32 status: 4; u32 parameters: 16; }; struct dwc3_event_devt { u32 one_bit: 1; u32 device_event: 7; u32 type: 4; u32 reserved15_12: 4; u32 event_info: 9; u32 reserved31_25: 7; }; struct dwc3_event_gevt { u32 one_bit: 1; u32 device_event: 7; u32 phy_port_number: 4; u32 reserved31_12: 20; }; union dwc3_event { u32 raw; struct dwc3_event_type type; struct dwc3_event_depevt depevt; struct dwc3_event_devt devt; struct dwc3_event_gevt gevt; }; struct trace_event_data_offsets_dwc3_log_io {}; struct trace_event_data_offsets_dwc3_log_event {}; struct trace_event_data_offsets_dwc3_log_ctrl {}; struct trace_event_data_offsets_dwc3_log_generic_cmd {}; struct timing { u8 u1sel; u8 u1pel; __le16 u2sel; __le16 u2pel; }; enum usb_role { USB_ROLE_NONE = 0, USB_ROLE_HOST = 1, USB_ROLE_DEVICE = 2, }; typedef int (*usb_role_switch_set_t)(struct usb_role_switch *, enum usb_role); typedef enum usb_role (*usb_role_switch_get_t)(struct usb_role_switch *); struct usb_role_switch_desc { struct fwnode_handle *fwnode; struct device *usb2_port; struct device *usb3_port; struct device *udc; usb_role_switch_set_t set; usb_role_switch_get_t get; bool allow_userspace_control; void *driver_data; const char *name; }; struct dwc3_ep_file_map { const char name[25]; const struct file_operations * const fops; }; struct dwc3_haps { struct platform_device *dwc3; struct pci_dev *pci; }; struct dwc3_of_simple { struct device *dev; struct clk_bulk_data *clks; int num_clocks; struct reset_control *resets; bool need_reset; }; struct dwc3_acpi_pdata; struct dwc3_qcom { struct device *dev; void *qscratch_base; struct platform_device *dwc3; struct platform_device *urs_usb; struct clk **clks; int num_clocks; struct reset_control *resets; int hs_phy_irq; int dp_hs_phy_irq; int dm_hs_phy_irq; int ss_phy_irq; enum usb_device_speed usb2_speed; struct extcon_dev *edev; struct extcon_dev *host_edev; struct notifier_block vbus_nb; struct notifier_block host_nb; const struct dwc3_acpi_pdata *acpi_pdata; enum usb_dr_mode mode; bool is_suspended; bool pm_suspended; struct icc_path *icc_path_ddr; struct icc_path *icc_path_apps; }; struct dwc3_acpi_pdata { u32 qscratch_base_offset; u32 qscratch_base_size; u32 dwc3_core_base_size; int hs_phy_irq_index; int dp_hs_phy_irq_index; int dm_hs_phy_irq_index; int ss_phy_irq_index; bool is_urs; }; enum amd_chipset_gen { NOT_AMD_CHIPSET = 0, AMD_CHIPSET_SB600 = 1, AMD_CHIPSET_SB700 = 2, AMD_CHIPSET_SB800 = 3, AMD_CHIPSET_HUDSON2 = 4, AMD_CHIPSET_BOLTON = 5, AMD_CHIPSET_YANGTZE = 6, AMD_CHIPSET_TAISHAN = 7, AMD_CHIPSET_UNKNOWN = 8, }; struct amd_chipset_type { enum amd_chipset_gen gen; u8 rev; }; struct amd_chipset_info { struct pci_dev *nb_dev; struct pci_dev *smbus_dev; int nb_type; struct amd_chipset_type sb_type; int isoc_reqs; int probe_count; bool need_pll_quirk; }; enum ehci_hrtimer_event { EHCI_HRTIMER_POLL_ASS = 0, EHCI_HRTIMER_POLL_PSS = 1, EHCI_HRTIMER_POLL_DEAD = 2, EHCI_HRTIMER_UNLINK_INTR = 3, EHCI_HRTIMER_FREE_ITDS = 4, EHCI_HRTIMER_ACTIVE_UNLINK = 5, EHCI_HRTIMER_START_UNLINK_INTR = 6, EHCI_HRTIMER_ASYNC_UNLINKS = 7, EHCI_HRTIMER_IAA_WATCHDOG = 8, EHCI_HRTIMER_DISABLE_PERIODIC = 9, EHCI_HRTIMER_DISABLE_ASYNC = 10, EHCI_HRTIMER_IO_WATCHDOG = 11, EHCI_HRTIMER_NUM_EVENTS = 12, }; enum ehci_rh_state { EHCI_RH_HALTED = 0, EHCI_RH_SUSPENDED = 1, 
EHCI_RH_RUNNING = 2, EHCI_RH_STOPPING = 3, }; struct ehci_caps; struct ehci_regs; struct ehci_dbg_port; struct ehci_qh; union ehci_shadow; struct ehci_itd; struct ehci_sitd; struct ehci_hcd { enum ehci_hrtimer_event next_hrtimer_event; unsigned int enabled_hrtimer_events; ktime_t hr_timeouts[12]; struct hrtimer hrtimer; int PSS_poll_count; int ASS_poll_count; int died_poll_count; struct ehci_caps *caps; struct ehci_regs *regs; struct ehci_dbg_port *debug; __u32 hcs_params; spinlock_t lock; enum ehci_rh_state rh_state; bool scanning: 1; bool need_rescan: 1; bool intr_unlinking: 1; bool iaa_in_progress: 1; bool async_unlinking: 1; bool shutdown: 1; struct ehci_qh *qh_scan_next; struct ehci_qh *async; struct ehci_qh *dummy; struct list_head async_unlink; struct list_head async_idle; unsigned int async_unlink_cycle; unsigned int async_count; __le32 old_current; __le32 old_token; unsigned int periodic_size; __le32 *periodic; dma_addr_t periodic_dma; struct list_head intr_qh_list; unsigned int i_thresh; union ehci_shadow *pshadow; struct list_head intr_unlink_wait; struct list_head intr_unlink; unsigned int intr_unlink_wait_cycle; unsigned int intr_unlink_cycle; unsigned int now_frame; unsigned int last_iso_frame; unsigned int intr_count; unsigned int isoc_count; unsigned int periodic_count; unsigned int uframe_periodic_max; struct list_head cached_itd_list; struct ehci_itd *last_itd_to_free; struct list_head cached_sitd_list; struct ehci_sitd *last_sitd_to_free; unsigned long reset_done[15]; unsigned long bus_suspended; unsigned long companion_ports; unsigned long owned_ports; unsigned long port_c_suspend; unsigned long suspended_ports; unsigned long resuming_ports; struct dma_pool *qh_pool; struct dma_pool *qtd_pool; struct dma_pool *itd_pool; struct dma_pool *sitd_pool; unsigned int random_frame; unsigned long next_statechange; ktime_t last_periodic_enable; u32 command; unsigned int no_selective_suspend: 1; unsigned int has_fsl_port_bug: 1; unsigned int has_fsl_hs_errata: 1; unsigned int has_fsl_susp_errata: 1; unsigned int has_ci_pec_bug: 1; unsigned int big_endian_mmio: 1; unsigned int big_endian_desc: 1; unsigned int big_endian_capbase: 1; unsigned int has_amcc_usb23: 1; unsigned int need_io_watchdog: 1; unsigned int amd_pll_fix: 1; unsigned int use_dummy_qh: 1; unsigned int has_synopsys_hc_bug: 1; unsigned int frame_index_bug: 1; unsigned int need_oc_pp_cycle: 1; unsigned int imx28_write_fix: 1; unsigned int spurious_oc: 1; unsigned int is_aspeed: 1; unsigned int zx_wakeup_clear_needed: 1; __le32 *ohci_hcctrl_reg; unsigned int has_hostpc: 1; unsigned int has_tdi_phy_lpm: 1; unsigned int has_ppcd: 1; u8 sbrn; u8 bandwidth[64]; u8 tt_budget[64]; struct list_head tt_list; unsigned long priv[0]; }; struct ehci_caps { u32 hc_capbase; u32 hcs_params; u32 hcc_params; u8 portroute[8]; }; struct ehci_regs { u32 command; u32 status; u32 intr_enable; u32 frame_index; u32 segment; u32 frame_list; u32 async_next; u32 reserved1[2]; u32 txfill_tuning; u32 reserved2[6]; u32 configured_flag; union { u32 port_status[15]; struct { u32 reserved3[9]; u32 usbmode; }; }; union { struct { u32 reserved4; u32 hostpc[15]; }; u32 brcm_insnreg[4]; }; u32 reserved5[2]; u32 usbmode_ex; }; struct ehci_dbg_port { u32 control; u32 pids; u32 data03; u32 data47; u32 address; }; struct ehci_fstn; union ehci_shadow { struct ehci_qh *qh; struct ehci_itd *itd; struct ehci_sitd *sitd; struct ehci_fstn *fstn; __le32 *hw_next; void *ptr; }; struct ehci_per_sched { struct usb_device *udev; struct usb_host_endpoint *ep; struct 
list_head ps_list; u16 tt_usecs; u16 cs_mask; u16 period; u16 phase; u8 bw_phase; u8 phase_uf; u8 usecs; u8 c_usecs; u8 bw_uperiod; u8 bw_period; }; struct ehci_qh_hw; struct ehci_qtd; struct ehci_qh { struct ehci_qh_hw *hw; dma_addr_t qh_dma; union ehci_shadow qh_next; struct list_head qtd_list; struct list_head intr_node; struct ehci_qtd *dummy; struct list_head unlink_node; struct ehci_per_sched ps; unsigned int unlink_cycle; u8 qh_state; u8 xacterrs; u8 unlink_reason; u8 gap_uf; unsigned int is_out: 1; unsigned int clearing_tt: 1; unsigned int dequeue_during_giveback: 1; unsigned int should_be_inactive: 1; }; struct ehci_qh_hw { __le32 hw_next; __le32 hw_info1; __le32 hw_info2; __le32 hw_current; __le32 hw_qtd_next; __le32 hw_alt_next; __le32 hw_token; __le32 hw_buf[5]; __le32 hw_buf_hi[5]; long: 64; long: 64; long: 64; }; struct ehci_iso_stream; struct ehci_itd { __le32 hw_next; __le32 hw_transaction[8]; __le32 hw_bufp[7]; __le32 hw_bufp_hi[7]; dma_addr_t itd_dma; union ehci_shadow itd_next; struct urb *urb; struct ehci_iso_stream *stream; struct list_head itd_list; unsigned int frame; unsigned int pg; unsigned int index[8]; long: 64; }; struct ehci_iso_stream { struct ehci_qh_hw *hw; u8 bEndpointAddress; u8 highspeed; struct list_head td_list; struct list_head free_list; struct ehci_per_sched ps; unsigned int next_uframe; __le32 splits; u16 uperiod; u16 maxp; unsigned int bandwidth; __le32 buf0; __le32 buf1; __le32 buf2; __le32 address; }; struct ehci_sitd { __le32 hw_next; __le32 hw_fullspeed_ep; __le32 hw_uframe; __le32 hw_results; __le32 hw_buf[2]; __le32 hw_backpointer; __le32 hw_buf_hi[2]; dma_addr_t sitd_dma; union ehci_shadow sitd_next; struct urb *urb; struct ehci_iso_stream *stream; struct list_head sitd_list; unsigned int frame; unsigned int index; }; struct ehci_fstn { __le32 hw_next; __le32 hw_prev; dma_addr_t fstn_dma; union ehci_shadow fstn_next; long: 64; }; struct ehci_qtd { __le32 hw_next; __le32 hw_alt_next; __le32 hw_token; __le32 hw_buf[5]; __le32 hw_buf_hi[5]; dma_addr_t qtd_dma; struct list_head qtd_list; struct urb *urb; size_t length; }; struct ehci_tt { u16 bandwidth[8]; struct list_head tt_list; struct list_head ps_list; struct usb_tt *usb_tt; int tt_port; }; struct ehci_iso_packet { u64 bufp; __le32 transaction; u8 cross; u32 buf1; }; struct ehci_iso_sched { struct list_head td_list; unsigned int span; unsigned int first_packet; struct ehci_iso_packet packet[0]; }; struct ehci_driver_overrides { size_t extra_priv_size; int (*reset)(struct usb_hcd *); int (*port_power)(struct usb_hcd *, int, bool); }; struct usb_ehci_pdata { int caps_offset; unsigned int has_tt: 1; unsigned int has_synopsys_hc_bug: 1; unsigned int big_endian_desc: 1; unsigned int big_endian_mmio: 1; unsigned int no_io_watchdog: 1; unsigned int reset_on_resume: 1; unsigned int dma_mask_64: 1; unsigned int spurious_oc: 1; int (*power_on)(struct platform_device *); void (*power_off)(struct platform_device *); void (*power_suspend)(struct platform_device *); int (*pre_setup)(struct usb_hcd *); }; struct ehci_platform_priv { struct clk *clks[4]; struct reset_control *rsts; bool reset_on_resume; bool quirk_poll; struct timer_list poll_timer; struct delayed_work poll_work; }; enum xhci_ring_type { TYPE_CTRL = 0, TYPE_ISOC = 1, TYPE_BULK = 2, TYPE_INTR = 3, TYPE_STREAM = 4, TYPE_COMMAND = 5, TYPE_EVENT = 6, }; enum xhci_overhead_type { LS_OVERHEAD_TYPE = 0, FS_OVERHEAD_TYPE = 1, HS_OVERHEAD_TYPE = 2, }; enum xhci_cancelled_td_status { TD_DIRTY = 0, TD_HALTED = 1, TD_CLEARING_CACHE = 2, TD_CLEARED = 
3, }; enum xhci_setup_dev { SETUP_CONTEXT_ONLY = 0, SETUP_CONTEXT_ADDRESS = 1, }; struct s3_save { u32 command; u32 dev_nt; u64 dcbaa_ptr; u32 config_reg; }; struct xhci_bus_state { unsigned long bus_suspended; unsigned long next_statechange; u32 port_c_suspend; u32 suspended_ports; u32 port_remote_wakeup; unsigned long resuming_ports; }; struct xhci_port; struct xhci_hub { struct xhci_port **ports; unsigned int num_ports; struct usb_hcd *hcd; struct xhci_bus_state bus_state; u8 maj_rev; u8 min_rev; }; struct xhci_cap_regs; struct xhci_op_regs; struct xhci_run_regs; struct xhci_doorbell_array; struct xhci_device_context_array; struct xhci_interrupter; struct xhci_ring; struct xhci_command; struct xhci_scratchpad; struct xhci_virt_device; struct xhci_root_port_bw_info; struct xhci_port_cap; struct xhci_hcd { struct usb_hcd *main_hcd; struct usb_hcd *shared_hcd; struct xhci_cap_regs *cap_regs; struct xhci_op_regs *op_regs; struct xhci_run_regs *run_regs; struct xhci_doorbell_array *dba; __u32 hcs_params1; __u32 hcs_params2; __u32 hcs_params3; __u32 hcc_params; __u32 hcc_params2; spinlock_t lock; u8 sbrn; u16 hci_version; u8 max_slots; u16 max_interrupters; u8 max_ports; u8 isoc_threshold; u32 imod_interval; int event_ring_max; int page_size; int page_shift; int msix_count; struct clk *clk; struct clk *reg_clk; struct reset_control *reset; struct xhci_device_context_array *dcbaa; struct xhci_interrupter **interrupters; struct xhci_ring *cmd_ring; unsigned int cmd_ring_state; struct list_head cmd_list; unsigned int cmd_ring_reserved_trbs; struct delayed_work cmd_timer; struct completion cmd_ring_stop_completion; struct xhci_command *current_cmd; struct xhci_scratchpad *scratchpad; struct mutex mutex; struct xhci_virt_device *devs[256]; struct xhci_root_port_bw_info *rh_bw; struct dma_pool *device_pool; struct dma_pool *segment_pool; struct dma_pool *small_streams_pool; struct dma_pool *medium_streams_pool; unsigned int xhc_state; unsigned long run_graceperiod; struct s3_save s3; unsigned long long quirks; unsigned int num_active_eps; unsigned int limit_active_eps; struct xhci_port *hw_ports; struct xhci_hub usb2_rhub; struct xhci_hub usb3_rhub; unsigned int hw_lpm_support: 1; unsigned int broken_suspend: 1; unsigned int allow_single_roothub: 1; u32 *ext_caps; unsigned int num_ext_caps; struct xhci_port_cap *port_caps; unsigned int num_port_caps; struct timer_list comp_mode_recovery_timer; u32 port_status_u0; u16 test_mode; struct dentry *debugfs_root; struct dentry *debugfs_slots; struct list_head regset_list; void *dbc; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; unsigned long priv[0]; }; struct xhci_cap_regs { __le32 hc_capbase; __le32 hcs_params1; __le32 hcs_params2; __le32 hcs_params3; __le32 hcc_params; __le32 db_off; __le32 run_regs_off; __le32 hcc_params2; }; struct xhci_op_regs { __le32 command; __le32 status; __le32 page_size; __le32 reserved1; __le32 reserved2; __le32 dev_notification; __le64 cmd_ring; __le32 reserved3[4]; __le64 dcbaa_ptr; __le32 config_reg; __le32 reserved4[241]; __le32 port_status_base; __le32 port_power_base; __le32 port_link_base; __le32 reserved5; __le32 reserved6[1016]; }; struct xhci_intr_reg { __le32 irq_pending; __le32 irq_control; __le32 erst_size; __le32 rsvd; __le64 erst_base; __le64 erst_dequeue; }; struct xhci_run_regs { __le32 microframe_index; __le32 rsvd[7]; struct xhci_intr_reg ir_set[128]; }; struct xhci_doorbell_array { __le32 doorbell[256]; }; struct xhci_device_context_array { 
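/* The xHCI Device Context Base Address Array: dev_context_ptrs[slot_id] holds
 * the DMA address of that slot's device context (entry 0 is reserved for the
 * scratchpad buffer array when scratchpads are in use). Illustrative aside, not
 * part of the generated dump: a BPF CO-RE sketch reading it, assuming
 * <bpf/bpf_core_read.h> and an xhci_hcd pointer "xhci" resolved elsewhere
 * (hypothetical):
 *
 *   struct xhci_device_context_array *dcbaa = BPF_CORE_READ(xhci, dcbaa);
 *   __le64 slot1_ctx = BPF_CORE_READ(dcbaa, dev_context_ptrs[1]);
 *   bpf_printk("DCBAA at %llx, slot 1 ctx %llx",
 *              (u64)BPF_CORE_READ(dcbaa, dma), (u64)slot1_ctx);
 */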
__le64 dev_context_ptrs[256]; dma_addr_t dma; }; struct xhci_erst_entry; struct xhci_erst { struct xhci_erst_entry *entries; unsigned int num_entries; dma_addr_t erst_dma_addr; unsigned int erst_size; u64 android_kabi_reserved1; }; struct xhci_interrupter { struct xhci_ring *event_ring; struct xhci_erst erst; struct xhci_intr_reg *ir_set; unsigned int intr_num; bool ip_autoclear; bool skip_events; u32 isoc_bei_interval; u32 s3_irq_pending; u32 s3_irq_control; u32 s3_erst_size; u64 s3_erst_base; u64 s3_erst_dequeue; }; struct xhci_segment; union xhci_trb; struct xhci_ring { struct xhci_segment *first_seg; struct xhci_segment *last_seg; union xhci_trb *enqueue; struct xhci_segment *enq_seg; union xhci_trb *dequeue; struct xhci_segment *deq_seg; struct list_head td_list; u32 cycle_state; unsigned int stream_id; unsigned int num_segs; unsigned int num_trbs_free; unsigned int bounce_buf_len; enum xhci_ring_type type; bool last_td_was_short; struct xarray *trb_address_map; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct xhci_segment { union xhci_trb *trbs; struct xhci_segment *next; unsigned int num; dma_addr_t dma; dma_addr_t bounce_dma; void *bounce_buf; unsigned int bounce_offs; unsigned int bounce_len; u64 android_kabi_reserved1; }; struct xhci_link_trb { __le64 segment_ptr; __le32 intr_target; __le32 control; }; struct xhci_transfer_event { __le64 buffer; __le32 transfer_len; __le32 flags; }; struct xhci_event_cmd { __le64 cmd_trb; __le32 status; __le32 flags; }; struct xhci_generic_trb { __le32 field[4]; }; union xhci_trb { struct xhci_link_trb link; struct xhci_transfer_event trans_event; struct xhci_event_cmd event_cmd; struct xhci_generic_trb generic; }; struct xhci_erst_entry { __le64 seg_addr; __le32 seg_size; __le32 rsvd; }; struct xhci_container_ctx; struct xhci_command { struct xhci_container_ctx *in_ctx; u32 status; int slot_id; struct completion *completion; union xhci_trb *command_trb; struct list_head cmd_list; unsigned int timeout_ms; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct xhci_container_ctx { unsigned int type; int size; u8 *bytes; dma_addr_t dma; }; struct xhci_scratchpad { u64 *sp_array; dma_addr_t sp_dma; void **sp_buffers; }; struct xhci_bw_info { unsigned int ep_interval; unsigned int mult; unsigned int num_packets; unsigned int max_packet_size; unsigned int max_esit_payload; unsigned int type; }; struct xhci_sideband; struct xhci_stream_info; struct xhci_virt_ep { struct xhci_virt_device *vdev; unsigned int ep_index; struct xhci_ring *ring; struct xhci_stream_info *stream_info; struct xhci_ring *new_ring; unsigned int err_count; unsigned int ep_state; struct list_head cancelled_td_list; struct xhci_hcd *xhci; struct xhci_segment *queued_deq_seg; union xhci_trb *queued_deq_ptr; bool skip; struct xhci_bw_info bw_info; struct list_head bw_endpoint_list; int next_frame_id; bool use_extended_tbc; struct xhci_sideband *sideband; }; struct xhci_interval_bw_table; struct xhci_tt_bw_info; struct xhci_virt_device { int slot_id; struct usb_device *udev; struct xhci_container_ctx *out_ctx; struct xhci_container_ctx *in_ctx; struct xhci_virt_ep eps[31]; u8 fake_port; u8 real_port; struct xhci_interval_bw_table *bw_table; struct xhci_tt_bw_info *tt_info; unsigned long flags; u16 current_mel; void *debugfs_private; struct xhci_sideband *sideband; }; struct xhci_stream_ctx; struct xhci_stream_info { struct xhci_ring **stream_rings; unsigned int num_streams; struct xhci_stream_ctx *stream_ctx_array; unsigned int num_stream_ctxs; dma_addr_t 
ctx_array_dma; struct xarray trb_address_map; struct xhci_command *free_streams_command; }; struct xhci_stream_ctx { __le64 stream_ring; __le32 reserved[2]; }; struct xhci_interval_bw { unsigned int num_packets; struct list_head endpoints; unsigned int overhead[3]; }; struct xhci_interval_bw_table { unsigned int interval0_esit_payload; struct xhci_interval_bw interval_bw[16]; unsigned int bw_used; unsigned int ss_bw_in; unsigned int ss_bw_out; }; struct xhci_tt_bw_info { struct list_head tt_list; int slot_id; int ttport; struct xhci_interval_bw_table bw_table; int active_eps; }; struct xhci_root_port_bw_info { struct list_head tts; unsigned int num_active_tts; struct xhci_interval_bw_table bw_table; }; struct xhci_port { __le32 *addr; int hw_portnum; int hcd_portnum; struct xhci_hub *rhub; struct xhci_port_cap *port_cap; unsigned int lpm_incapable: 1; unsigned long resume_timestamp; bool rexit_active; struct completion rexit_done; struct completion u3exit_done; }; struct xhci_port_cap { u32 *psi; u8 psi_count; u8 psi_uid_count; u8 maj_rev; u8 min_rev; }; struct xhci_input_control_ctx { __le32 drop_flags; __le32 add_flags; __le32 rsvd2[6]; }; struct xhci_ep_ctx { __le32 ep_info; __le32 ep_info2; __le64 deq; __le32 tx_info; __le32 reserved[3]; }; struct xhci_slot_ctx { __le32 dev_info; __le32 dev_info2; __le32 tt_info; __le32 dev_state; __le32 reserved[4]; }; struct xhci_td { struct list_head td_list; struct list_head cancelled_td_list; int status; enum xhci_cancelled_td_status cancel_status; struct urb *urb; struct xhci_segment *start_seg; union xhci_trb *first_trb; union xhci_trb *last_trb; struct xhci_segment *last_trb_seg; struct xhci_segment *bounce_seg; bool urb_length_set; bool error_mid_td; unsigned int num_trbs; }; struct urb_priv { int num_tds; int num_tds_done; struct xhci_td td[0]; }; typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *); struct xhci_driver_overrides { size_t extra_priv_size; int (*reset)(struct usb_hcd *); int (*start)(struct usb_hcd *); int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); int (*update_hub_device)(struct usb_hcd *, struct usb_device *, struct usb_tt *, gfp_t); int (*hub_control)(struct usb_hcd *, u16, u16, u16, char *, u16); u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; enum xhci_ep_reset_type { EP_HARD_RESET = 0, EP_SOFT_RESET = 1, }; typedef void (*btf_trace_xhci_dbg_address)(void *, struct va_format *); typedef void (*btf_trace_xhci_dbg_context_change)(void *, struct va_format *); typedef void (*btf_trace_xhci_dbg_quirks)(void *, struct va_format *); typedef void (*btf_trace_xhci_dbg_reset_ep)(void *, struct va_format *); typedef void (*btf_trace_xhci_dbg_cancel_urb)(void *, struct va_format *); typedef void (*btf_trace_xhci_dbg_init)(void *, struct va_format *); typedef void (*btf_trace_xhci_dbg_ring_expansion)(void *, struct va_format *); typedef void (*btf_trace_xhci_address_ctx)(void *, struct xhci_hcd *, struct xhci_container_ctx *, unsigned int); typedef void (*btf_trace_xhci_handle_event)(void *, struct xhci_ring *, struct xhci_generic_trb *); typedef void (*btf_trace_xhci_handle_command)(void *, struct xhci_ring *, struct xhci_generic_trb *); typedef void (*btf_trace_xhci_handle_transfer)(void *, struct 
xhci_ring *, struct xhci_generic_trb *); typedef void (*btf_trace_xhci_queue_trb)(void *, struct xhci_ring *, struct xhci_generic_trb *); typedef void (*btf_trace_xhci_dbc_handle_event)(void *, struct xhci_ring *, struct xhci_generic_trb *); typedef void (*btf_trace_xhci_dbc_handle_transfer)(void *, struct xhci_ring *, struct xhci_generic_trb *); typedef void (*btf_trace_xhci_dbc_gadget_ep_queue)(void *, struct xhci_ring *, struct xhci_generic_trb *); typedef void (*btf_trace_xhci_free_virt_device)(void *, struct xhci_virt_device *); typedef void (*btf_trace_xhci_alloc_virt_device)(void *, struct xhci_virt_device *); typedef void (*btf_trace_xhci_setup_device)(void *, struct xhci_virt_device *); typedef void (*btf_trace_xhci_setup_addressable_virt_device)(void *, struct xhci_virt_device *); typedef void (*btf_trace_xhci_stop_device)(void *, struct xhci_virt_device *); typedef void (*btf_trace_xhci_urb_enqueue)(void *, struct urb *); typedef void (*btf_trace_xhci_urb_giveback)(void *, struct urb *); typedef void (*btf_trace_xhci_urb_dequeue)(void *, struct urb *); typedef void (*btf_trace_xhci_handle_cmd_stop_ep)(void *, struct xhci_ep_ctx *); typedef void (*btf_trace_xhci_handle_cmd_set_deq_ep)(void *, struct xhci_ep_ctx *); typedef void (*btf_trace_xhci_handle_cmd_reset_ep)(void *, struct xhci_ep_ctx *); typedef void (*btf_trace_xhci_handle_cmd_config_ep)(void *, struct xhci_ep_ctx *); typedef void (*btf_trace_xhci_add_endpoint)(void *, struct xhci_ep_ctx *); typedef void (*btf_trace_xhci_alloc_dev)(void *, struct xhci_slot_ctx *); typedef void (*btf_trace_xhci_free_dev)(void *, struct xhci_slot_ctx *); typedef void (*btf_trace_xhci_handle_cmd_disable_slot)(void *, struct xhci_slot_ctx *); typedef void (*btf_trace_xhci_discover_or_reset_device)(void *, struct xhci_slot_ctx *); typedef void (*btf_trace_xhci_setup_device_slot)(void *, struct xhci_slot_ctx *); typedef void (*btf_trace_xhci_handle_cmd_addr_dev)(void *, struct xhci_slot_ctx *); typedef void (*btf_trace_xhci_handle_cmd_reset_dev)(void *, struct xhci_slot_ctx *); typedef void (*btf_trace_xhci_handle_cmd_set_deq)(void *, struct xhci_slot_ctx *); typedef void (*btf_trace_xhci_configure_endpoint)(void *, struct xhci_slot_ctx *); typedef void (*btf_trace_xhci_address_ctrl_ctx)(void *, struct xhci_input_control_ctx *); typedef void (*btf_trace_xhci_configure_endpoint_ctrl_ctx)(void *, struct xhci_input_control_ctx *); typedef void (*btf_trace_xhci_ring_alloc)(void *, struct xhci_ring *); typedef void (*btf_trace_xhci_ring_free)(void *, struct xhci_ring *); typedef void (*btf_trace_xhci_ring_expansion)(void *, struct xhci_ring *); typedef void (*btf_trace_xhci_inc_enq)(void *, struct xhci_ring *); typedef void (*btf_trace_xhci_inc_deq)(void *, struct xhci_ring *); typedef void (*btf_trace_xhci_handle_port_status)(void *, u32, u32); typedef void (*btf_trace_xhci_get_port_status)(void *, u32, u32); typedef void (*btf_trace_xhci_hub_status_data)(void *, u32, u32); typedef void (*btf_trace_xhci_ring_ep_doorbell)(void *, u32, u32); typedef void (*btf_trace_xhci_ring_host_doorbell)(void *, u32, u32); struct dbc_request; typedef void (*btf_trace_xhci_dbc_alloc_request)(void *, struct dbc_request *); struct xhci_dbc; struct dbc_request { void *buf; unsigned int length; dma_addr_t dma; void (*complete)(struct xhci_dbc *, struct dbc_request *); struct list_head list_pool; int status; unsigned int actual; struct xhci_dbc *dbc; struct list_head list_pending; dma_addr_t trb_dma; union xhci_trb *trb; unsigned int direction: 1; }; enum dbc_state { 
DS_DISABLED = 0, DS_INITIALIZED = 1, DS_ENABLED = 2, DS_CONNECTED = 3, DS_CONFIGURED = 4, DS_STALLED = 5, }; struct dbc_ep { struct xhci_dbc *dbc; struct list_head list_pending; struct xhci_ring *ring; unsigned int direction: 1; }; struct dbc_regs; struct dbc_str_descs; struct dbc_driver; struct xhci_dbc { spinlock_t lock; struct device *dev; struct xhci_hcd *xhci; struct dbc_regs *regs; struct xhci_ring *ring_evt; struct xhci_ring *ring_in; struct xhci_ring *ring_out; struct xhci_erst erst; struct xhci_container_ctx *ctx; struct dbc_str_descs *string; dma_addr_t string_dma; size_t string_size; u16 idVendor; u16 idProduct; u16 bcdDevice; u8 bInterfaceProtocol; enum dbc_state state; struct delayed_work event_work; unsigned int resume_required: 1; struct dbc_ep eps[2]; const struct dbc_driver *driver; void *priv; }; struct dbc_regs { __le32 capability; __le32 doorbell; __le32 ersts; __le32 __reserved_0; __le64 erstba; __le64 erdp; __le32 control; __le32 status; __le32 portsc; __le32 __reserved_1; __le64 dccp; __le32 devinfo1; __le32 devinfo2; }; struct dbc_str_descs { char string0[64]; char manufacturer[64]; char product[64]; char serial[64]; }; struct dbc_driver { int (*configure)(struct xhci_dbc *); void (*disconnect)(struct xhci_dbc *); }; typedef void (*btf_trace_xhci_dbc_free_request)(void *, struct dbc_request *); typedef void (*btf_trace_xhci_dbc_queue_request)(void *, struct dbc_request *); typedef void (*btf_trace_xhci_dbc_giveback_request)(void *, struct dbc_request *); struct trace_event_raw_xhci_log_msg { struct trace_entry ent; u32 __data_loc_msg; char __data[0]; }; struct trace_event_raw_xhci_log_ctx { struct trace_entry ent; int ctx_64; unsigned int ctx_type; dma_addr_t ctx_dma; u8 *ctx_va; unsigned int ctx_ep_num; u32 __data_loc_ctx_data; char __data[0]; }; struct trace_event_raw_xhci_log_trb { struct trace_entry ent; u32 type; u32 field0; u32 field1; u32 field2; u32 field3; char __data[0]; }; struct trace_event_raw_xhci_log_free_virt_dev { struct trace_entry ent; void *vdev; unsigned long long out_ctx; unsigned long long in_ctx; u8 fake_port; u8 real_port; u16 current_mel; char __data[0]; }; struct trace_event_raw_xhci_log_virt_dev { struct trace_entry ent; void *vdev; unsigned long long out_ctx; unsigned long long in_ctx; int devnum; int state; int speed; u8 portnum; u8 level; int slot_id; char __data[0]; }; struct trace_event_raw_xhci_log_urb { struct trace_entry ent; void *urb; unsigned int pipe; unsigned int stream; int status; unsigned int flags; int num_mapped_sgs; int num_sgs; int length; int actual; int epnum; int dir_in; int type; int slot_id; char __data[0]; }; struct trace_event_raw_xhci_log_ep_ctx { struct trace_entry ent; u32 info; u32 info2; u64 deq; u32 tx_info; char __data[0]; }; struct trace_event_raw_xhci_log_slot_ctx { struct trace_entry ent; u32 info; u32 info2; u32 tt_info; u32 state; char __data[0]; }; struct trace_event_raw_xhci_log_ctrl_ctx { struct trace_entry ent; u32 drop; u32 add; char __data[0]; }; struct trace_event_raw_xhci_log_ring { struct trace_entry ent; u32 type; void *ring; dma_addr_t enq; dma_addr_t deq; dma_addr_t enq_seg; dma_addr_t deq_seg; unsigned int num_segs; unsigned int stream_id; unsigned int cycle_state; unsigned int bounce_buf_len; char __data[0]; }; struct trace_event_raw_xhci_log_portsc { struct trace_entry ent; u32 portnum; u32 portsc; char __data[0]; }; struct trace_event_raw_xhci_log_doorbell { struct trace_entry ent; u32 slot; u32 doorbell; char __data[0]; }; struct trace_event_raw_xhci_dbc_log_request { struct 
trace_entry ent; struct dbc_request *req; bool dir; unsigned int actual; unsigned int length; int status; char __data[0]; }; struct trace_event_data_offsets_xhci_log_msg { u32 msg; }; struct trace_event_data_offsets_xhci_log_ctx { u32 ctx_data; }; struct trace_event_data_offsets_xhci_log_trb {}; struct trace_event_data_offsets_xhci_log_free_virt_dev {}; struct trace_event_data_offsets_xhci_log_virt_dev {}; struct trace_event_data_offsets_xhci_log_urb {}; struct trace_event_data_offsets_xhci_log_ep_ctx {}; struct trace_event_data_offsets_xhci_log_slot_ctx {}; struct trace_event_data_offsets_xhci_log_ctrl_ctx {}; struct trace_event_data_offsets_xhci_log_ring {}; struct trace_event_data_offsets_xhci_log_portsc {}; struct trace_event_data_offsets_xhci_log_doorbell {}; struct trace_event_data_offsets_xhci_dbc_log_request {}; struct xhci_file_map { const char *name; int (*show)(struct seq_file *, void *); }; struct xhci_regset { char name[32]; struct debugfs_regset32 regset; size_t nregs; struct list_head list; }; struct xhci_ep_priv; struct xhci_slot_priv { char name[32]; struct dentry *root; struct xhci_ep_priv *eps[31]; struct xhci_virt_device *dev; }; struct xhci_ep_priv { char name[32]; struct dentry *root; struct xhci_stream_info *stream_info; struct xhci_ring *show_ring; unsigned int stream_id; }; struct xhci_driver_data { u64 quirks; const char *firmware; }; struct xhci_plat_priv { const char *firmware_name; unsigned long long quirks; void (*plat_start)(struct usb_hcd *); int (*init_quirk)(struct usb_hcd *); int (*suspend_quirk)(struct usb_hcd *); int (*resume_quirk)(struct usb_hcd *); }; enum { US_FL_SINGLE_LUN = 1, US_FL_NEED_OVERRIDE = 2, US_FL_SCM_MULT_TARG = 4, US_FL_FIX_INQUIRY = 8, US_FL_FIX_CAPACITY = 16, US_FL_IGNORE_RESIDUE = 32, US_FL_BULK32 = 64, US_FL_NOT_LOCKABLE = 128, US_FL_GO_SLOW = 256, US_FL_NO_WP_DETECT = 512, US_FL_MAX_SECTORS_64 = 1024, US_FL_IGNORE_DEVICE = 2048, US_FL_CAPACITY_HEURISTICS = 4096, US_FL_MAX_SECTORS_MIN = 8192, US_FL_BULK_IGNORE_TAG = 16384, US_FL_SANE_SENSE = 32768, US_FL_CAPACITY_OK = 65536, US_FL_BAD_SENSE = 131072, US_FL_NO_READ_DISC_INFO = 262144, US_FL_NO_READ_CAPACITY_16 = 524288, US_FL_INITIAL_READ10 = 1048576, US_FL_WRITE_CACHE = 2097152, US_FL_NEEDS_CAP16 = 4194304, US_FL_IGNORE_UAS = 8388608, US_FL_BROKEN_FUA = 16777216, US_FL_NO_ATA_1X = 33554432, US_FL_NO_REPORT_OPCODES = 67108864, US_FL_MAX_SECTORS_240 = 134217728, US_FL_NO_REPORT_LUNS = 268435456, US_FL_ALWAYS_SYNC = 536870912, US_FL_NO_SAME = 1073741824, US_FL_SENSE_AFTER_SYNC = 2147483648, }; enum { SUBMIT_STATUS_URB = 2, ALLOC_DATA_IN_URB = 4, SUBMIT_DATA_IN_URB = 8, ALLOC_DATA_OUT_URB = 16, SUBMIT_DATA_OUT_URB = 32, ALLOC_CMD_URB = 64, SUBMIT_CMD_URB = 128, COMMAND_INFLIGHT = 256, DATA_IN_URB_INFLIGHT = 512, DATA_OUT_URB_INFLIGHT = 1024, COMMAND_ABORTED = 2048, IS_IN_WORK_LIST = 4096, }; enum { IU_ID_COMMAND = 1, IU_ID_STATUS = 3, IU_ID_RESPONSE = 4, IU_ID_TASK_MGMT = 5, IU_ID_READ_READY = 6, IU_ID_WRITE_READY = 7, }; enum { RC_TMF_COMPLETE = 0, RC_INVALID_INFO_UNIT = 2, RC_TMF_NOT_SUPPORTED = 4, RC_TMF_FAILED = 5, RC_TMF_SUCCEEDED = 8, RC_INCORRECT_LUN = 9, RC_OVERLAPPED_TAG = 10, }; enum { CMD_PIPE_ID = 1, STATUS_PIPE_ID = 2, DATA_IN_PIPE_ID = 3, DATA_OUT_PIPE_ID = 4, UAS_SIMPLE_TAG = 0, UAS_HEAD_TAG = 1, UAS_ORDERED_TAG = 2, UAS_ACA = 4, }; struct uas_dev_info { struct usb_interface *intf; struct usb_device *udev; struct usb_anchor cmd_urbs; struct usb_anchor sense_urbs; struct usb_anchor data_urbs; unsigned long flags; int qdepth; int resetting; unsigned int cmd_pipe; unsigned 
int status_pipe; unsigned int data_in_pipe; unsigned int data_out_pipe; unsigned int use_streams: 1; unsigned int shutdown: 1; struct scsi_cmnd *cmnd[256]; spinlock_t lock; struct work_struct work; struct work_struct scan_work; }; struct response_iu { __u8 iu_id; __u8 rsvd1; __be16 tag; __u8 add_response_info[3]; __u8 response_code; }; struct uas_cmd_info { unsigned int state; unsigned int uas_tag; struct urb *cmd_urb; struct urb *data_in_urb; struct urb *data_out_urb; }; struct sense_iu { __u8 iu_id; __u8 rsvd1; __be16 tag; __be16 status_qual; __u8 status; __u8 rsvd7[7]; __be16 len; __u8 sense[96]; }; struct command_iu { __u8 iu_id; __u8 rsvd1; __be16 tag; __u8 prio_attr; __u8 rsvd5; __u8 len; __u8 rsvd7; struct scsi_lun lun; __u8 cdb[16]; }; struct iu { __u8 iu_id; __u8 rsvd1; __be16 tag; }; struct us_data; typedef int (*trans_cmnd)(struct scsi_cmnd *, struct us_data *); typedef int (*trans_reset)(struct us_data *); typedef void (*proto_cmnd)(struct scsi_cmnd *, struct us_data *); typedef void (*extra_data_destructor)(void *); typedef void (*pm_hook)(struct us_data *, int); struct us_unusual_dev; struct us_data { struct mutex dev_mutex; struct usb_device *pusb_dev; struct usb_interface *pusb_intf; const struct us_unusual_dev *unusual_dev; unsigned long fflags; unsigned long dflags; unsigned int send_bulk_pipe; unsigned int recv_bulk_pipe; unsigned int send_ctrl_pipe; unsigned int recv_ctrl_pipe; unsigned int recv_intr_pipe; char *transport_name; char *protocol_name; __le32 bcs_signature; u8 subclass; u8 protocol; u8 max_lun; u8 ifnum; u8 ep_bInterval; trans_cmnd transport; trans_reset transport_reset; proto_cmnd proto_handler; struct scsi_cmnd *srb; unsigned int tag; char scsi_name[32]; struct urb *current_urb; struct usb_ctrlrequest *cr; struct usb_sg_request current_sg; unsigned char *iobuf; dma_addr_t iobuf_dma; struct task_struct *ctl_thread; struct completion cmnd_ready; struct completion notify; wait_queue_head_t delay_wait; struct delayed_work scan_dwork; void *extra; extra_data_destructor extra_destructor; pm_hook suspend_resume_hook; int use_last_sector_hacks; int last_sector_retries; }; struct us_unusual_dev { const char *vendorName; const char *productName; __u8 useProtocol; __u8 useTransport; int (*initFunction)(struct us_data *); }; enum xfer_buf_dir { TO_XFER_BUF = 0, FROM_XFER_BUF = 1, }; struct bulk_cb_wrap { __le32 Signature; __u32 Tag; __le32 DataTransferLength; __u8 Flags; __u8 Lun; __u8 Length; __u8 CDB[16]; }; struct bulk_cs_wrap { __le32 Signature; __u32 Tag; __le32 Residue; __u8 Status; }; struct swoc_info { __u8 rev; __u8 reserved[8]; __u16 LinuxSKU; __u16 LinuxVer; __u8 reserved2[47]; } __attribute__((packed)); struct ignore_entry { u16 vid; u16 pid; u16 bcdmin; u16 bcdmax; }; struct usb_string; struct usb_gadget_strings { u16 language; struct usb_string *strings; }; struct usb_string { u8 id; const char *s; }; struct usb_otg20_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bmAttributes; __le16 bcdOTG; } __attribute__((packed)); struct usb_configuration; struct usb_os_desc_table; struct usb_function_instance; struct usb_function { const char *name; struct usb_gadget_strings **strings; struct usb_descriptor_header **fs_descriptors; struct usb_descriptor_header **hs_descriptors; struct usb_descriptor_header **ss_descriptors; struct usb_descriptor_header **ssp_descriptors; struct usb_configuration *config; struct usb_os_desc_table *os_desc_table; unsigned int os_desc_n; int (*bind)(struct usb_configuration *, struct usb_function *); void (*unbind)(struct 
usb_configuration *, struct usb_function *); void (*free_func)(struct usb_function *); struct module *mod; int (*set_alt)(struct usb_function *, unsigned int, unsigned int); int (*get_alt)(struct usb_function *, unsigned int); void (*disable)(struct usb_function *); int (*setup)(struct usb_function *, const struct usb_ctrlrequest *); bool (*req_match)(struct usb_function *, const struct usb_ctrlrequest *, bool); void (*suspend)(struct usb_function *); void (*resume)(struct usb_function *); int (*get_status)(struct usb_function *); int (*func_suspend)(struct usb_function *, u8); bool func_suspended; bool func_wakeup_armed; struct list_head list; unsigned long endpoints[1]; const struct usb_function_instance *fi; unsigned int bind_deactivated: 1; }; struct usb_composite_dev; struct usb_configuration { const char *label; struct usb_gadget_strings **strings; const struct usb_descriptor_header **descriptors; void (*unbind)(struct usb_configuration *); int (*setup)(struct usb_configuration *, const struct usb_ctrlrequest *); u8 bConfigurationValue; u8 iConfiguration; u8 bmAttributes; u16 MaxPower; struct usb_composite_dev *cdev; struct list_head list; struct list_head functions; u8 next_interface_id; unsigned int superspeed: 1; unsigned int highspeed: 1; unsigned int fullspeed: 1; unsigned int superspeed_plus: 1; struct usb_function *interface[16]; }; struct android_uevent_opts { struct device *dev; int device_id; bool connected; bool configured; bool sw_connected; struct work_struct work; struct ida function_ida; }; struct usb_composite_driver; struct usb_composite_dev { struct usb_gadget *gadget; struct usb_request *req; struct usb_request *os_desc_req; struct usb_configuration *config; u8 qw_sign[14]; u8 b_vendor_code; struct usb_configuration *os_desc_config; unsigned int use_os_string: 1; u16 bcd_webusb_version; u8 b_webusb_vendor_code; char landing_page[260]; unsigned int use_webusb: 1; unsigned int suspended: 1; struct usb_device_descriptor desc; struct list_head configs; struct list_head gstrings; struct usb_composite_driver *driver; u8 next_string_id; char *def_manufacturer; struct usb_string *usb_strings; unsigned int deactivations; int delayed_status; spinlock_t lock; struct android_uevent_opts android_opts; unsigned int setup_pending: 1; unsigned int os_desc_pending: 1; }; struct usb_composite_driver { const char *name; const struct usb_device_descriptor *dev; struct usb_gadget_strings **strings; enum usb_device_speed max_speed; unsigned int needs_serial: 1; int (*bind)(struct usb_composite_dev *); int (*unbind)(struct usb_composite_dev *); void (*disconnect)(struct usb_composite_dev *); void (*suspend)(struct usb_composite_dev *); void (*resume)(struct usb_composite_dev *); struct usb_gadget_driver gadget_driver; }; struct usb_os_desc; struct usb_os_desc_table { int if_id; struct usb_os_desc *os_desc; }; struct usb_os_desc { char *ext_compat_id; struct list_head ext_prop; int ext_prop_len; int ext_prop_count; struct mutex *opts_mutex; struct config_group group; struct module *owner; }; struct usb_function_driver; struct usb_function_instance { struct config_group group; struct list_head cfs_list; struct usb_function_driver *fd; int (*set_inst_name)(struct usb_function_instance *, const char *); void (*free_func_inst)(struct usb_function_instance *); }; struct usb_function_driver { const char *name; struct module *mod; struct list_head list; struct usb_function_instance * (*alloc_inst)(); struct usb_function * (*alloc_func)(struct usb_function_instance *); }; enum { 
USB_GADGET_MANUFACTURER_IDX = 0, USB_GADGET_PRODUCT_IDX = 1, USB_GADGET_SERIAL_IDX = 2, USB_GADGET_FIRST_AVAIL_IDX = 3, }; struct webusb_url_descriptor { u8 bLength; u8 bDescriptorType; u8 bScheme; u8 URL[252]; }; struct usb_gadget_string_container { struct list_head list; u8 *stash[0]; }; struct usb_webusb_cap_data { __le16 bcdVersion; u8 bVendorCode; u8 iLandingPage; }; struct usb_os_desc_ext_prop { struct list_head entry; u8 type; int name_len; char *name; int data_len; char *data; struct config_item item; }; struct usb_os_string { __u8 bLength; __u8 bDescriptorType; __u8 qwSignature[14]; __u8 bMS_VendorCode; __u8 bPad; }; struct usb_string_descriptor { __u8 bLength; __u8 bDescriptorType; union { __le16 legacy_padding; struct { struct {} __empty_wData; __le16 wData[0]; }; }; }; struct usb_plat_dev_cap_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __u8 bReserved; __u8 UUID[16]; __u8 CapabilityData[0]; }; struct usb_composite_overwrite { u16 idVendor; u16 idProduct; u16 bcdDevice; char *serial_number; char *manufacturer; char *product; }; struct gadget_info { struct config_group group; struct config_group functions_group; struct config_group configs_group; struct config_group strings_group; struct config_group os_desc_group; struct config_group webusb_group; struct mutex lock; struct usb_gadget_strings *gstrings[3]; struct list_head string_list; struct list_head available_func; struct usb_composite_driver composite; struct usb_composite_dev cdev; bool use_os_desc; char b_vendor_code; char qw_sign[14]; bool use_webusb; u16 bcd_webusb_version; u8 b_webusb_vendor_code; char landing_page[260]; spinlock_t spinlock; bool unbind; }; struct config_usb_cfg { struct config_group group; struct config_group strings_group; struct list_head string_list; struct usb_configuration c; struct list_head func_list; struct usb_gadget_strings *gstrings[3]; }; struct gadget_config_name { struct usb_gadget_strings stringtab_dev; struct usb_string strings; char *configuration; struct config_group group; struct list_head list; }; struct gadget_language { struct usb_gadget_strings stringtab_dev; struct usb_string strings[3]; char *manufacturer; char *product; char *serialnumber; struct config_group group; struct list_head list; struct list_head gadget_strings; unsigned int nstrings; }; struct gadget_string { struct config_item item; struct list_head list; char string[126]; struct usb_string usb_string; }; struct usb_udc { struct usb_gadget_driver *driver; struct usb_gadget *gadget; struct device dev; struct list_head list; bool vbus; bool started; bool allow_connect; struct work_struct vbus_work; struct mutex connect_lock; }; typedef void (*btf_trace_usb_gadget_frame_number)(void *, struct usb_gadget *, int); typedef void (*btf_trace_usb_gadget_wakeup)(void *, struct usb_gadget *, int); typedef void (*btf_trace_usb_gadget_set_remote_wakeup)(void *, struct usb_gadget *, int); typedef void (*btf_trace_usb_gadget_set_selfpowered)(void *, struct usb_gadget *, int); typedef void (*btf_trace_usb_gadget_clear_selfpowered)(void *, struct usb_gadget *, int); typedef void (*btf_trace_usb_gadget_vbus_connect)(void *, struct usb_gadget *, int); typedef void (*btf_trace_usb_gadget_vbus_draw)(void *, struct usb_gadget *, int); typedef void (*btf_trace_usb_gadget_vbus_disconnect)(void *, struct usb_gadget *, int); typedef void (*btf_trace_usb_gadget_connect)(void *, struct usb_gadget *, int); typedef void (*btf_trace_usb_gadget_disconnect)(void *, struct usb_gadget *, int); typedef void 
(*btf_trace_usb_gadget_deactivate)(void *, struct usb_gadget *, int); typedef void (*btf_trace_usb_gadget_activate)(void *, struct usb_gadget *, int); typedef void (*btf_trace_usb_ep_set_maxpacket_limit)(void *, struct usb_ep *, int); typedef void (*btf_trace_usb_ep_enable)(void *, struct usb_ep *, int); typedef void (*btf_trace_usb_ep_disable)(void *, struct usb_ep *, int); typedef void (*btf_trace_usb_ep_set_halt)(void *, struct usb_ep *, int); typedef void (*btf_trace_usb_ep_clear_halt)(void *, struct usb_ep *, int); typedef void (*btf_trace_usb_ep_set_wedge)(void *, struct usb_ep *, int); typedef void (*btf_trace_usb_ep_fifo_status)(void *, struct usb_ep *, int); typedef void (*btf_trace_usb_ep_fifo_flush)(void *, struct usb_ep *, int); typedef void (*btf_trace_usb_ep_alloc_request)(void *, struct usb_ep *, struct usb_request *, int); typedef void (*btf_trace_usb_ep_free_request)(void *, struct usb_ep *, struct usb_request *, int); typedef void (*btf_trace_usb_ep_queue)(void *, struct usb_ep *, struct usb_request *, int); typedef void (*btf_trace_usb_ep_dequeue)(void *, struct usb_ep *, struct usb_request *, int); typedef void (*btf_trace_usb_gadget_giveback_request)(void *, struct usb_ep *, struct usb_request *, int); struct trace_event_raw_udc_log_gadget { struct trace_entry ent; enum usb_device_speed speed; enum usb_device_speed max_speed; enum usb_device_state state; unsigned int mA; unsigned int sg_supported; unsigned int is_otg; unsigned int is_a_peripheral; unsigned int b_hnp_enable; unsigned int a_hnp_support; unsigned int hnp_polling_support; unsigned int host_request_flag; unsigned int quirk_ep_out_aligned_size; unsigned int quirk_altset_not_supp; unsigned int quirk_stall_not_supp; unsigned int quirk_zlp_not_supp; unsigned int is_selfpowered; unsigned int deactivated; unsigned int connected; int ret; char __data[0]; }; struct trace_event_raw_udc_log_ep { struct trace_entry ent; u32 __data_loc_name; unsigned int maxpacket; unsigned int maxpacket_limit; unsigned int max_streams; unsigned int mult; unsigned int maxburst; u8 address; bool claimed; bool enabled; int ret; char __data[0]; }; struct trace_event_raw_udc_log_req { struct trace_entry ent; u32 __data_loc_name; unsigned int length; unsigned int actual; unsigned int num_sgs; unsigned int num_mapped_sgs; unsigned int stream_id; unsigned int no_interrupt; unsigned int zero; unsigned int short_not_ok; int status; int ret; struct usb_request *req; char __data[0]; }; struct trace_event_data_offsets_udc_log_ep { u32 name; }; struct trace_event_data_offsets_udc_log_req { u32 name; }; struct trace_event_data_offsets_udc_log_gadget {}; struct f_serial_opts { struct usb_function_instance func_inst; u8 port_num; }; struct usb_cdc_line_coding { __le32 dwDTERate; __u8 bCharFormat; __u8 bParityType; __u8 bDataBits; } __attribute__((packed)); struct gs_port; struct gserial { struct usb_function func; struct gs_port *ioport; struct usb_ep *in; struct usb_ep *out; struct usb_cdc_line_coding port_line_coding; void (*connect)(struct gserial *); void (*disconnect)(struct gserial *); int (*send_break)(struct gserial *, int); }; struct f_acm { struct gserial port; u8 ctrl_id; u8 data_id; u8 port_num; u8 pending; spinlock_t lock; struct usb_ep *notify; struct usb_request *notify_req; struct usb_cdc_line_coding port_line_coding; u16 port_handshake_bits; u16 serial_state; }; struct usb_cdc_notification { __u8 bmRequestType; __u8 bNotificationType; __le16 wValue; __le16 wIndex; __le16 wLength; }; struct portmaster { struct mutex lock; struct 
gs_port *port; }; struct kfifo { union { struct __kfifo kfifo; unsigned char *type; const unsigned char *const_type; char (*rectype)[0]; void *ptr; const void *ptr_const; }; unsigned char buf[0]; }; struct gs_port { struct tty_port port; spinlock_t port_lock; struct gserial *port_usb; u8 port_num; struct list_head read_pool; int read_started; int read_allocated; struct list_head read_queue; unsigned int n_read; struct delayed_work push; struct list_head write_pool; int write_started; int write_allocated; struct kfifo port_write_buf; wait_queue_head_t drain_wait; bool write_busy; wait_queue_head_t close_wait; bool suspended; bool start_delayed; struct usb_cdc_line_coding port_line_coding; }; struct f_gser { struct gserial port; u8 data_id; u8 port_num; }; struct gether; struct eth_dev { spinlock_t lock; struct gether *port_usb; struct net_device *net; struct usb_gadget *gadget; spinlock_t req_lock; struct list_head tx_reqs; struct list_head rx_reqs; atomic_t tx_qlen; struct sk_buff_head rx_frames; unsigned int qmult; unsigned int header_len; struct sk_buff * (*wrap)(struct gether *, struct sk_buff *); int (*unwrap)(struct gether *, struct sk_buff *, struct sk_buff_head *); struct work_struct work; unsigned long todo; bool zlp; bool no_skb_reserve; bool ifname_set; u8 host_mac[6]; u8 dev_mac[6]; }; struct gether { struct usb_function func; struct eth_dev *ioport; struct usb_ep *in_ep; struct usb_ep *out_ep; bool is_zlp_ok; u16 cdc_filter; u32 header_len; bool is_fixed; u32 fixed_out_len; u32 fixed_in_len; bool supports_multi_frame; struct sk_buff * (*wrap)(struct gether *, struct sk_buff *); int (*unwrap)(struct gether *, struct sk_buff *, struct sk_buff_head *); void (*open)(struct gether *); void (*close)(struct gether *); bool is_suspend; }; struct ndp_parser_opts { u32 nth_sign; u32 ndp_sign; unsigned int nth_size; unsigned int ndp_size; unsigned int dpe_size; unsigned int ndplen_align; unsigned int dgram_item_len; unsigned int block_length; unsigned int ndp_index; unsigned int reserved1; unsigned int reserved2; unsigned int next_ndp_index; }; struct usb_cdc_ncm_ntb_parameters { __le16 wLength; __le16 bmNtbFormatsSupported; __le32 dwNtbInMaxSize; __le16 wNdpInDivisor; __le16 wNdpInPayloadRemainder; __le16 wNdpInAlignment; __le16 wPadding1; __le32 dwNtbOutMaxSize; __le16 wNdpOutDivisor; __le16 wNdpOutPayloadRemainder; __le16 wNdpOutAlignment; __le16 wNtbOutMaxDatagrams; }; enum ncm_notify_state { NCM_NOTIFY_NONE = 0, NCM_NOTIFY_CONNECT = 1, NCM_NOTIFY_SPEED = 2, }; struct f_ncm_opts { struct usb_function_instance func_inst; struct net_device *net; bool bound; struct config_group *ncm_interf_group; struct usb_os_desc ncm_os_desc; char ncm_ext_compat_id[16]; struct mutex lock; int refcnt; }; struct f_ncm { struct gether port; u8 ctrl_id; u8 data_id; char ethaddr[14]; struct usb_ep *notify; struct usb_request *notify_req; u8 notify_state; atomic_t notify_count; bool is_open; const struct ndp_parser_opts *parser_opts; bool is_crc; u32 ndp_sign; spinlock_t lock; struct net_device *netdev; struct sk_buff *skb_tx_data; struct sk_buff *skb_tx_ndp; u16 ndp_dgram_count; struct hrtimer task_timer; }; enum ecm_notify_state { ECM_NOTIFY_NONE = 0, ECM_NOTIFY_CONNECT = 1, ECM_NOTIFY_SPEED = 2, }; struct f_ecm_opts { struct usb_function_instance func_inst; struct net_device *net; bool bound; struct mutex lock; int refcnt; }; struct f_ecm { struct gether port; u8 ctrl_id; u8 data_id; char ethaddr[14]; struct usb_ep *notify; struct usb_request *notify_req; u8 notify_state; atomic_t notify_count; bool 
is_open; }; struct f_eem_opts { struct usb_function_instance func_inst; struct net_device *net; bool bound; struct mutex lock; int refcnt; }; struct f_eem { struct gether port; u8 ctrl_id; }; struct in_context { struct sk_buff *skb; struct usb_ep *ep; }; enum fsg_buffer_state { BUF_STATE_SENDING = -2, BUF_STATE_RECEIVING = -1, BUF_STATE_EMPTY = 0, BUF_STATE_FULL = 1, }; enum fsg_state { FSG_STATE_NORMAL = 0, FSG_STATE_ABORT_BULK_OUT = 1, FSG_STATE_PROTOCOL_RESET = 2, FSG_STATE_CONFIG_CHANGE = 3, FSG_STATE_EXIT = 4, FSG_STATE_TERMINATED = 5, }; enum data_direction { DATA_DIR_UNKNOWN = 0, DATA_DIR_FROM_HOST = 1, DATA_DIR_TO_HOST = 2, DATA_DIR_NONE = 3, }; enum { FSG_STRING_INTERFACE = 0, }; struct fsg_lun { struct file *filp; loff_t file_length; loff_t num_sectors; unsigned int initially_ro: 1; unsigned int ro: 1; unsigned int removable: 1; unsigned int cdrom: 1; unsigned int prevent_medium_removal: 1; unsigned int registered: 1; unsigned int info_valid: 1; unsigned int nofua: 1; u32 sense_data; u32 sense_data_info; u32 unit_attention_data; unsigned int blkbits; unsigned int blksize; struct device dev; const char *name; const char **name_pfx; char inquiry_string[29]; }; struct fsg_lun_opts { struct config_group group; struct fsg_lun *lun; int lun_id; }; struct fsg_common; struct fsg_opts { struct fsg_common *common; struct usb_function_instance func_inst; struct fsg_lun_opts lun0; struct config_group *default_groups[2]; bool no_configfs; struct mutex lock; int refcnt; }; struct fsg_dev; struct fsg_buffhd; struct fsg_common { struct usb_gadget *gadget; struct usb_composite_dev *cdev; struct fsg_dev *fsg; wait_queue_head_t io_wait; wait_queue_head_t fsg_wait; struct rw_semaphore filesem; spinlock_t lock; struct usb_ep *ep0; struct usb_request *ep0req; unsigned int ep0_req_tag; struct fsg_buffhd *next_buffhd_to_fill; struct fsg_buffhd *next_buffhd_to_drain; struct fsg_buffhd *buffhds; unsigned int fsg_num_buffers; int cmnd_size; u8 cmnd[16]; unsigned int lun; struct fsg_lun *luns[16]; struct fsg_lun *curlun; unsigned int bulk_out_maxpacket; enum fsg_state state; unsigned int exception_req_tag; void *exception_arg; enum data_direction data_dir; u32 data_size; u32 data_size_from_cmnd; u32 tag; u32 residue; u32 usb_amount_left; unsigned int can_stall: 1; unsigned int free_storage_on_release: 1; unsigned int phase_error: 1; unsigned int short_packet_received: 1; unsigned int bad_lun_okay: 1; unsigned int running: 1; unsigned int sysfs: 1; struct completion thread_notifier; struct task_struct *thread_task; void *private_data; char inquiry_string[29]; }; struct fsg_dev { struct usb_function function; struct usb_gadget *gadget; struct fsg_common *common; u16 interface_number; unsigned int bulk_in_enabled: 1; unsigned int bulk_out_enabled: 1; unsigned long atomic_bitflags; struct usb_ep *bulk_in; struct usb_ep *bulk_out; }; struct fsg_buffhd { void *buf; enum fsg_buffer_state state; struct fsg_buffhd *next; unsigned int bulk_out_intended_length; struct usb_request *inreq; struct usb_request *outreq; }; struct fsg_lun_config { const char *filename; char ro; char removable; char cdrom; char nofua; char inquiry_string[29]; }; struct fsg_operations; struct fsg_config { unsigned int nluns; struct fsg_lun_config luns[16]; const struct fsg_operations *ops; void *private_data; const char *vendor_name; const char *product_name; char can_stall; unsigned int fsg_num_buffers; }; struct fsg_module_parameters { char *file[16]; bool ro[16]; bool removable[16]; bool cdrom[16]; bool nofua[16]; unsigned int file_count; 
unsigned int ro_count; unsigned int removable_count; unsigned int cdrom_count; unsigned int nofua_count; unsigned int luns; bool stall; }; enum ffs_state { FFS_READ_DESCRIPTORS = 0, FFS_READ_STRINGS = 1, FFS_ACTIVE = 2, FFS_DEACTIVATED = 3, FFS_CLOSING = 4, }; enum ffs_setup_state { FFS_NO_SETUP = 0, FFS_SETUP_PENDING = 1, FFS_SETUP_CANCELLED = 2, }; enum { Opt_no_disconnect = 0, Opt_rmode = 1, Opt_fmode = 2, Opt_mode___6 = 3, Opt_uid___6 = 4, Opt_gid___7 = 5, }; enum usb_functionfs_event_type { FUNCTIONFS_BIND = 0, FUNCTIONFS_UNBIND = 1, FUNCTIONFS_ENABLE = 2, FUNCTIONFS_DISABLE = 3, FUNCTIONFS_SETUP = 4, FUNCTIONFS_SUSPEND = 5, FUNCTIONFS_RESUME = 6, }; enum { FUNCTIONFS_DESCRIPTORS_MAGIC = 1, FUNCTIONFS_STRINGS_MAGIC = 2, FUNCTIONFS_DESCRIPTORS_MAGIC_V2 = 3, }; enum functionfs_flags { FUNCTIONFS_HAS_FS_DESC = 1, FUNCTIONFS_HAS_HS_DESC = 2, FUNCTIONFS_HAS_SS_DESC = 4, FUNCTIONFS_HAS_MS_OS_DESC = 8, FUNCTIONFS_VIRTUAL_ADDR = 16, FUNCTIONFS_EVENTFD = 32, FUNCTIONFS_ALL_CTRL_RECIP = 64, FUNCTIONFS_CONFIG0_SETUP = 128, }; enum ffs_entity_type { FFS_DESCRIPTOR = 0, FFS_INTERFACE = 1, FFS_STRING = 2, FFS_ENDPOINT = 3, }; enum ffs_os_desc_type { FFS_OS_DESC = 0, FFS_OS_DESC_EXT_COMPAT = 1, FFS_OS_DESC_EXT_PROP = 2, }; struct ffs_data; struct f_fs_opts; struct ffs_dev { struct ffs_data *ffs_data; struct f_fs_opts *opts; struct list_head entry; char name[41]; bool mounted; bool desc_ready; bool single; int (*ffs_ready_callback)(struct ffs_data *); void (*ffs_closed_callback)(struct ffs_data *); void * (*ffs_acquire_dev_callback)(struct ffs_dev *); void (*ffs_release_dev_callback)(struct ffs_dev *); }; struct ffs_file_perms { umode_t mode; kuid_t uid; kgid_t gid; }; struct ffs_function; struct ffs_epfile; struct ffs_data { struct usb_gadget *gadget; struct mutex mutex; spinlock_t eps_lock; struct usb_request *ep0req; struct completion ep0req_completion; refcount_t ref; atomic_t opened; enum ffs_state state; enum ffs_setup_state setup_state; struct { u8 types[4]; unsigned short count; unsigned short can_stall; struct usb_ctrlrequest setup; wait_queue_head_t waitq; } ev; unsigned long flags; wait_queue_head_t wait; struct ffs_function *func; const char *dev_name; void *private_data; const void *raw_descs_data; const void *raw_descs; unsigned int raw_descs_length; unsigned int fs_descs_count; unsigned int hs_descs_count; unsigned int ss_descs_count; unsigned int ms_os_descs_count; unsigned int ms_os_descs_ext_prop_count; unsigned int ms_os_descs_ext_prop_name_len; unsigned int ms_os_descs_ext_prop_data_len; void *ms_os_descs_ext_prop_avail; void *ms_os_descs_ext_prop_name_avail; void *ms_os_descs_ext_prop_data_avail; unsigned int user_flags; u8 eps_addrmap[31]; unsigned short strings_count; unsigned short interfaces_count; unsigned short eps_count; unsigned short _pad1; const void *raw_strings; struct usb_gadget_strings **stringtabs; struct super_block *sb; struct ffs_file_perms file_perms; struct eventfd_ctx *ffs_eventfd; struct workqueue_struct *io_completion_wq; bool no_disconnect; struct work_struct reset_work; struct ffs_epfile *epfiles; }; struct ffs_ep; struct ffs_function { struct usb_configuration *conf; struct usb_gadget *gadget; struct ffs_data *ffs; struct ffs_ep *eps; u8 eps_revmap[16]; short *interfaces_nums; struct usb_function function; }; struct ffs_ep { struct usb_ep *ep; struct usb_request *req; struct usb_endpoint_descriptor *descs[3]; u8 num; }; struct ffs_buffer; struct ffs_epfile { struct mutex mutex; struct ffs_data *ffs; struct ffs_ep *ep; struct dentry *dentry; struct 
ffs_buffer *read_buffer; char name[5]; unsigned char in; unsigned char isoc; unsigned char _pad; }; struct ffs_buffer { size_t length; char *data; char storage[0]; }; struct f_fs_opts { struct usb_function_instance func_inst; struct ffs_dev *dev; unsigned int refcnt; bool no_configfs; }; struct ffs_io_data { bool aio; bool read; struct kiocb *kiocb; struct iov_iter data; const void *to_free; char *buf; struct mm_struct *mm; struct work_struct work; struct usb_ep *ep; struct usb_request *req; struct sg_table sgt; bool use_sg; struct ffs_data *ffs; int status; struct completion done; }; typedef int (*ffs_entity_callback)(enum ffs_entity_type, u8 *, struct usb_descriptor_header *, void *); struct ffs_desc_helper { struct ffs_data *ffs; unsigned int interfaces_count; unsigned int eps_count; }; struct usb_os_desc_header { __u8 interface; __le32 dwLength; __le16 bcdVersion; __le16 wIndex; union { struct { __u8 bCount; __u8 Reserved; }; __le16 wCount; }; } __attribute__((packed)); typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type, struct usb_os_desc_header *, void *, unsigned int, void *); struct usb_ext_compat_desc { __u8 bFirstInterfaceNumber; __u8 Reserved1; __u8 CompatibleID[8]; __u8 SubCompatibleID[8]; __u8 Reserved2[6]; }; struct usb_ext_prop_desc { __le32 dwSize; __le32 dwPropertyDataType; __le16 wPropertyNameLength; } __attribute__((packed)); struct ffs_sb_fill_data { struct ffs_file_perms perms; umode_t root_mode; const char *dev_name; bool no_disconnect; struct ffs_data *ffs_data; }; struct usb_functionfs_event { union { struct usb_ctrlrequest setup; } u; __u8 type; __u8 _pad[3]; }; typedef unsigned long snd_pcm_uframes_t; struct snd_pcm_substream; struct snd_pcm_hw_params; struct snd_pcm_audio_tstamp_config; struct snd_pcm_audio_tstamp_report; struct snd_pcm_ops { int (*open)(struct snd_pcm_substream *); int (*close)(struct snd_pcm_substream *); int (*ioctl)(struct snd_pcm_substream *, unsigned int, void *); int (*hw_params)(struct snd_pcm_substream *, struct snd_pcm_hw_params *); int (*hw_free)(struct snd_pcm_substream *); int (*prepare)(struct snd_pcm_substream *); int (*trigger)(struct snd_pcm_substream *, int); int (*sync_stop)(struct snd_pcm_substream *); snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *); int (*get_time_info)(struct snd_pcm_substream *, struct timespec64 *, struct timespec64 *, struct snd_pcm_audio_tstamp_config *, struct snd_pcm_audio_tstamp_report *); int (*fill_silence)(struct snd_pcm_substream *, int, unsigned long, unsigned long); int (*copy)(struct snd_pcm_substream *, int, unsigned long, struct iov_iter *, unsigned long); struct page * (*page)(struct snd_pcm_substream *, unsigned long); int (*mmap)(struct snd_pcm_substream *, struct vm_area_struct *); int (*ack)(struct snd_pcm_substream *); u64 android_kabi_reserved1; }; struct snd_dma_device { int type; enum dma_data_direction dir; bool need_sync; struct device *dev; }; struct snd_dma_buffer { struct snd_dma_device dev; unsigned char *area; dma_addr_t addr; size_t bytes; void *private_data; }; struct snd_pcm_group { spinlock_t lock; struct mutex mutex; struct list_head substreams; refcount_t refs; }; struct snd_pcm; struct snd_pcm_str; struct snd_pcm_runtime; struct snd_timer; struct snd_pcm_substream { struct snd_pcm *pcm; struct snd_pcm_str *pstr; void *private_data; int number; char name[32]; int stream; struct pm_qos_request latency_pm_qos_req; size_t buffer_bytes_max; struct snd_dma_buffer dma_buffer; size_t dma_max; const struct snd_pcm_ops *ops; struct snd_pcm_runtime *runtime; struct 
snd_timer *timer; unsigned int timer_running: 1; long wait_time; struct snd_pcm_substream *next; struct list_head link_list; struct snd_pcm_group self_group; struct snd_pcm_group *group; int ref_count; atomic_t mmap_count; unsigned int f_flags; void (*pcm_release)(struct snd_pcm_substream *); struct pid *pid; unsigned int hw_opened: 1; unsigned int managed_buffer_alloc: 1; u64 android_kabi_reserved1; }; struct snd_kcontrol; struct snd_pcm_str { int stream; struct snd_pcm *pcm; unsigned int substream_count; unsigned int substream_opened; struct snd_pcm_substream *substream; struct snd_kcontrol *chmap_kctl; struct device *dev; u64 android_kabi_reserved1; }; struct snd_card; struct snd_pcm { struct snd_card *card; struct list_head list; int device; unsigned int info_flags; unsigned short dev_class; unsigned short dev_subclass; char id[64]; char name[80]; struct snd_pcm_str streams[2]; struct mutex open_mutex; wait_queue_head_t open_wait; void *private_data; void (*private_free)(struct snd_pcm *); bool internal; bool nonatomic; bool no_device_suspend; u64 android_kabi_reserved1; }; struct snd_shutdown_f_ops; struct snd_info_entry; struct snd_card { int number; char id[16]; char driver[16]; char shortname[32]; char longname[80]; char irq_descr[32]; char mixername[80]; char components[128]; struct module *module; void *private_data; void (*private_free)(struct snd_card *); struct list_head devices; struct device *ctl_dev; unsigned int last_numid; struct rw_semaphore controls_rwsem; rwlock_t ctl_files_rwlock; int controls_count; size_t user_ctl_alloc_size; struct list_head controls; struct list_head ctl_files; struct xarray ctl_numids; struct xarray ctl_hash; bool ctl_hash_collision; struct snd_info_entry *proc_root; struct proc_dir_entry *proc_root_link; struct list_head files_list; struct snd_shutdown_f_ops *s_f_ops; spinlock_t files_lock; int shutdown; struct completion *release_completion; struct device *dev; struct device card_dev; const struct attribute_group *dev_groups[4]; bool registered; bool managed; bool releasing; int sync_irq; wait_queue_head_t remove_sleep; size_t total_pcm_alloc_bytes; struct mutex memory_mutex; unsigned int power_state; atomic_t power_ref; wait_queue_head_t power_sleep; wait_queue_head_t power_ref_sleep; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; typedef int snd_kcontrol_tlv_rw_t(struct snd_kcontrol *, int, unsigned int, unsigned int __attribute__((btf_type_tag("user"))) *); typedef int snd_ctl_elem_iface_t; struct snd_ctl_elem_id { unsigned int numid; snd_ctl_elem_iface_t iface; unsigned int device; unsigned int subdevice; unsigned char name[44]; unsigned int index; }; struct snd_ctl_elem_info; typedef int snd_kcontrol_info_t(struct snd_kcontrol *, struct snd_ctl_elem_info *); struct snd_ctl_elem_value; typedef int snd_kcontrol_get_t(struct snd_kcontrol *, struct snd_ctl_elem_value *); typedef int snd_kcontrol_put_t(struct snd_kcontrol *, struct snd_ctl_elem_value *); struct snd_ctl_file; struct snd_kcontrol_volatile { struct snd_ctl_file *owner; unsigned int access; }; struct snd_kcontrol { struct list_head list; struct snd_ctl_elem_id id; unsigned int count; snd_kcontrol_info_t *info; snd_kcontrol_get_t *get; snd_kcontrol_put_t *put; union { snd_kcontrol_tlv_rw_t *c; const unsigned int *p; } tlv; unsigned long private_value; void *private_data; void (*private_free)(struct snd_kcontrol *); struct snd_kcontrol_volatile vd[0]; }; typedef int snd_ctl_elem_type_t; struct snd_ctl_elem_info { struct snd_ctl_elem_id id; snd_ctl_elem_type_t type; 
unsigned int access; unsigned int count; __kernel_pid_t owner; union { struct { long min; long max; long step; } integer; struct { long long min; long long max; long long step; } integer64; struct { unsigned int items; unsigned int item; char name[64]; __u64 names_ptr; unsigned int names_length; } enumerated; unsigned char reserved[128]; } value; unsigned char reserved[64]; }; struct snd_aes_iec958 { unsigned char status[24]; unsigned char subcode[147]; unsigned char pad; unsigned char dig_subframe[4]; }; struct snd_ctl_elem_value { struct snd_ctl_elem_id id; unsigned int indirect: 1; union { union { long value[128]; long *value_ptr; } integer; union { long long value[64]; long long *value_ptr; } integer64; union { unsigned int item[128]; unsigned int *item_ptr; } enumerated; union { unsigned char data[512]; unsigned char *data_ptr; } bytes; struct snd_aes_iec958 iec958; } value; unsigned char reserved[128]; }; struct snd_fasync; struct snd_ctl_file { struct list_head list; struct snd_card *card; struct pid *pid; int preferred_subdevice[2]; wait_queue_head_t change_sleep; spinlock_t read_lock; struct snd_fasync *fasync; int subscribed; struct list_head events; }; typedef int snd_pcm_state_t; typedef long snd_pcm_sframes_t; typedef int snd_pcm_access_t; typedef int snd_pcm_format_t; typedef int snd_pcm_subformat_t; union snd_pcm_sync_id { unsigned char id[16]; unsigned short id16[8]; unsigned int id32[4]; }; struct snd_pcm_hardware { unsigned int info; u64 formats; unsigned int rates; unsigned int rate_min; unsigned int rate_max; unsigned int channels_min; unsigned int channels_max; size_t buffer_bytes_max; size_t period_bytes_min; size_t period_bytes_max; unsigned int periods_min; unsigned int periods_max; size_t fifo_size; }; struct snd_mask { __u32 bits[8]; }; struct snd_interval { unsigned int min; unsigned int max; unsigned int openmin: 1; unsigned int openmax: 1; unsigned int integer: 1; unsigned int empty: 1; }; struct snd_pcm_hw_rule; struct snd_pcm_hw_constraints { struct snd_mask masks[3]; struct snd_interval intervals[12]; unsigned int rules_num; unsigned int rules_all; struct snd_pcm_hw_rule *rules; }; struct snd_pcm_audio_tstamp_config { u32 type_requested: 4; u32 report_delay: 1; }; struct snd_pcm_audio_tstamp_report { u32 valid: 1; u32 actual_type: 4; u32 accuracy_report: 1; u32 accuracy; }; struct snd_pcm_mmap_status; struct snd_pcm_mmap_control; struct snd_pcm_runtime { snd_pcm_state_t state; snd_pcm_state_t suspended_state; struct snd_pcm_substream *trigger_master; struct timespec64 trigger_tstamp; bool trigger_tstamp_latched; int overrange; snd_pcm_uframes_t avail_max; snd_pcm_uframes_t hw_ptr_base; snd_pcm_uframes_t hw_ptr_interrupt; unsigned long hw_ptr_jiffies; unsigned long hw_ptr_buffer_jiffies; snd_pcm_sframes_t delay; u64 hw_ptr_wrap; snd_pcm_access_t access; snd_pcm_format_t format; snd_pcm_subformat_t subformat; unsigned int rate; unsigned int channels; snd_pcm_uframes_t period_size; unsigned int periods; snd_pcm_uframes_t buffer_size; snd_pcm_uframes_t min_align; size_t byte_align; unsigned int frame_bits; unsigned int sample_bits; unsigned int info; unsigned int rate_num; unsigned int rate_den; unsigned int no_period_wakeup: 1; int tstamp_mode; unsigned int period_step; snd_pcm_uframes_t start_threshold; snd_pcm_uframes_t stop_threshold; snd_pcm_uframes_t silence_threshold; snd_pcm_uframes_t silence_size; snd_pcm_uframes_t boundary; snd_pcm_uframes_t silence_start; snd_pcm_uframes_t silence_filled; union snd_pcm_sync_id sync; struct snd_pcm_mmap_status *status; 
struct snd_pcm_mmap_control *control; snd_pcm_uframes_t twake; wait_queue_head_t sleep; wait_queue_head_t tsleep; struct snd_fasync *fasync; bool stop_operating; struct mutex buffer_mutex; atomic_t buffer_accessing; void *private_data; void (*private_free)(struct snd_pcm_runtime *); struct snd_pcm_hardware hw; struct snd_pcm_hw_constraints hw_constraints; unsigned int timer_resolution; int tstamp_type; unsigned char *dma_area; dma_addr_t dma_addr; size_t dma_bytes; struct snd_dma_buffer *dma_buffer_p; unsigned int buffer_changed: 1; struct snd_pcm_audio_tstamp_config audio_tstamp_config; struct snd_pcm_audio_tstamp_report audio_tstamp_report; struct timespec64 driver_tstamp; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; typedef char __pad_before_uframe[0]; typedef char __pad_after_uframe[0]; struct snd_pcm_mmap_status { snd_pcm_state_t state; __u32 pad1; __pad_before_uframe __pad1; snd_pcm_uframes_t hw_ptr; __pad_after_uframe __pad2; struct __kernel_timespec tstamp; snd_pcm_state_t suspended_state; __u32 pad3; struct __kernel_timespec audio_tstamp; }; struct snd_pcm_mmap_control { __pad_before_uframe __pad1; snd_pcm_uframes_t appl_ptr; __pad_before_uframe __pad2; __pad_before_uframe __pad3; snd_pcm_uframes_t avail_min; __pad_after_uframe __pad4; }; typedef int (*snd_pcm_hw_rule_func_t)(struct snd_pcm_hw_params *, struct snd_pcm_hw_rule *); struct snd_pcm_hw_rule { unsigned int cond; int var; int deps[5]; snd_pcm_hw_rule_func_t func; void *private; }; struct snd_pcm_hw_params { unsigned int flags; struct snd_mask masks[3]; struct snd_mask mres[5]; struct snd_interval intervals[12]; struct snd_interval ires[9]; unsigned int rmask; unsigned int cmask; unsigned int info; unsigned int msbits; unsigned int rate_num; unsigned int rate_den; snd_pcm_uframes_t fifo_size; unsigned char reserved[64]; }; struct snd_kcontrol_new { snd_ctl_elem_iface_t iface; unsigned int device; unsigned int subdevice; const char *name; unsigned int index; unsigned int access; unsigned int count; snd_kcontrol_info_t *info; snd_kcontrol_get_t *get; snd_kcontrol_put_t *put; union { snd_kcontrol_tlv_rw_t *c; const unsigned int *p; } tlv; unsigned long private_value; }; enum { SNDRV_PCM_STREAM_PLAYBACK = 0, SNDRV_PCM_STREAM_CAPTURE = 1, SNDRV_PCM_STREAM_LAST = 1, }; enum { UAC_FBACK_CTRL = 0, UAC_P_PITCH_CTRL = 1, UAC_MUTE_CTRL = 2, UAC_VOLUME_CTRL = 3, UAC_RATE_CTRL = 4, }; struct snd_uac_chip; struct uac_rtd_params { struct snd_uac_chip *uac; bool ep_enabled; struct snd_pcm_substream *ss; ssize_t hw_ptr; void *rbuf; unsigned int pitch; unsigned int max_psize; struct usb_request **reqs; struct usb_request *req_fback; bool fb_ep_enabled; int fu_id; struct snd_kcontrol *snd_kctl_volume; struct snd_kcontrol *snd_kctl_mute; s16 volume_min; s16 volume_max; s16 volume_res; s16 volume; int mute; struct snd_kcontrol *snd_kctl_rate; int srate; int active; spinlock_t lock; }; struct g_audio; struct snd_uac_chip { struct g_audio *audio_dev; struct uac_rtd_params p_prm; struct uac_rtd_params c_prm; struct snd_card *card; struct snd_pcm *pcm; unsigned long long p_residue_mil; unsigned int p_interval; unsigned int p_framesize; }; struct uac_fu_params { int id; bool mute_present; bool volume_present; s16 volume_min; s16 volume_max; s16 volume_res; }; struct uac_params { int p_chmask; int p_srates[10]; int p_ssize; struct uac_fu_params p_fu; int c_chmask; int c_srates[10]; int c_ssize; struct uac_fu_params c_fu; int req_number; int fb_max; }; struct g_audio { struct usb_function func; struct usb_gadget *gadget; struct usb_ep 
*in_ep; struct usb_ep *out_ep; struct usb_ep *in_ep_fback; unsigned int in_ep_maxpsize; unsigned int out_ep_maxpsize; int (*notify)(struct g_audio *, int, int); struct snd_uac_chip *uac; struct uac_params params; }; typedef int snd_pcm_hw_param_t; struct uac2_feature_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bUnitID; __u8 bSourceID; __u8 bmaControls[0]; }; struct uac_clock_source_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bClockID; __u8 bmAttributes; __u8 bmControls; __u8 bAssocTerminal; __u8 iClockSource; }; struct uac2_input_terminal_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalID; __le16 wTerminalType; __u8 bAssocTerminal; __u8 bCSourceID; __u8 bNrChannels; __le32 bmChannelConfig; __u8 iChannelNames; __le16 bmControls; __u8 iTerminal; } __attribute__((packed)); struct uac2_output_terminal_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalID; __le16 wTerminalType; __u8 bAssocTerminal; __u8 bSourceID; __u8 bCSourceID; __le16 bmControls; __u8 iTerminal; } __attribute__((packed)); struct uac2_as_header_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalLink; __u8 bmControls; __u8 bFormatType; __le32 bmFormats; __u8 bNrChannels; __le32 bmChannelConfig; __u8 iChannelNames; } __attribute__((packed)); struct uac2_format_type_i_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bFormatType; __u8 bSubslotSize; __u8 bBitResolution; }; struct uac2_ac_header_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __le16 bcdADC; __u8 bCategory; __le16 wTotalLength; __u8 bmControls; } __attribute__((packed)); struct uac2_iso_endpoint_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bmAttributes; __u8 bmControls; __u8 bLockDelayUnits; __le16 wLockDelay; }; enum { STR_ASSOC = 0, STR_IF_CTRL = 1, STR_CLKSRC_IN = 2, STR_CLKSRC_OUT = 3, STR_USB_IT = 4, STR_IO_IT = 5, STR_USB_OT = 6, STR_IO_OT = 7, STR_FU_IN = 8, STR_FU_OUT = 9, STR_AS_OUT_ALT0 = 10, STR_AS_OUT_ALT1 = 11, STR_AS_IN_ALT0 = 12, STR_AS_IN_ALT1 = 13, }; struct f_uac2_opts { struct usb_function_instance func_inst; int p_chmask; int p_srates[10]; int p_ssize; u8 p_hs_bint; int c_chmask; int c_srates[10]; int c_ssize; int c_sync; u8 c_hs_bint; bool p_mute_present; bool p_volume_present; s16 p_volume_min; s16 p_volume_max; s16 p_volume_res; bool c_mute_present; bool c_volume_present; s16 c_volume_min; s16 c_volume_max; s16 c_volume_res; int req_number; int fb_max; bool bound; char function_name[32]; struct mutex lock; int refcnt; }; struct f_uac2 { struct g_audio g_audio; u8 ac_intf; u8 as_in_intf; u8 as_out_intf; u8 ac_alt; u8 as_in_alt; u8 as_out_alt; struct usb_ctrlrequest setup_cr; struct usb_ep *int_ep; atomic_t int_count; int clock_id; }; struct cntrl_cur_lay2 { __le16 wCUR; }; struct cntrl_cur_lay3 { __le32 dCUR; }; struct cntrl_range_lay2 { __le16 wNumSubRanges; __le16 wMIN; __le16 wMAX; __le16 wRES; }; struct cntrl_subrange_lay3 { __le32 dMIN; __le32 dMAX; __le32 dRES; }; struct cntrl_ranges_lay3_srates { __le16 wNumSubRanges; struct cntrl_subrange_lay3 r[10]; } __attribute__((packed)); struct uac2_interrupt_data_msg { __u8 bInfo; __u8 bAttribute; __le16 wValue; __le16 wIndex; }; struct uvc_control_endpoint_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __le16 wMaxTransferSize; } __attribute__((packed)); enum media_pad_signal_type { 
PAD_SIGNAL_DEFAULT = 0, PAD_SIGNAL_ANALOG = 1, PAD_SIGNAL_DV = 2, PAD_SIGNAL_AUDIO = 3, }; enum media_request_state { MEDIA_REQUEST_STATE_IDLE = 0, MEDIA_REQUEST_STATE_VALIDATING = 1, MEDIA_REQUEST_STATE_QUEUED = 2, MEDIA_REQUEST_STATE_COMPLETE = 3, MEDIA_REQUEST_STATE_CLEANING = 4, MEDIA_REQUEST_STATE_UPDATING = 5, NR_OF_MEDIA_REQUEST_STATE = 6, }; enum media_entity_type { MEDIA_ENTITY_TYPE_BASE = 0, MEDIA_ENTITY_TYPE_VIDEO_DEVICE = 1, MEDIA_ENTITY_TYPE_V4L2_SUBDEV = 2, }; enum v4l2_priority { V4L2_PRIORITY_UNSET = 0, V4L2_PRIORITY_BACKGROUND = 1, V4L2_PRIORITY_INTERACTIVE = 2, V4L2_PRIORITY_RECORD = 3, V4L2_PRIORITY_DEFAULT = 2, }; enum v4l2_subdev_ir_mode { V4L2_SUBDEV_IR_MODE_PULSE_WIDTH = 0, }; enum v4l2_mbus_frame_desc_type { V4L2_MBUS_FRAME_DESC_TYPE_UNDEFINED = 0, V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL = 1, V4L2_MBUS_FRAME_DESC_TYPE_CSI2 = 2, }; enum v4l2_mbus_frame_desc_flags { V4L2_MBUS_FRAME_DESC_FL_LEN_MAX = 1, V4L2_MBUS_FRAME_DESC_FL_BLOB = 2, }; enum v4l2_mbus_type { V4L2_MBUS_UNKNOWN = 0, V4L2_MBUS_PARALLEL = 1, V4L2_MBUS_BT656 = 2, V4L2_MBUS_CSI1 = 3, V4L2_MBUS_CCP2 = 4, V4L2_MBUS_CSI2_DPHY = 5, V4L2_MBUS_CSI2_CPHY = 6, V4L2_MBUS_DPI = 7, V4L2_MBUS_INVALID = 8, }; enum v4l2_subdev_format_whence { V4L2_SUBDEV_FORMAT_TRY = 0, V4L2_SUBDEV_FORMAT_ACTIVE = 1, }; enum v4l2_async_match_type { V4L2_ASYNC_MATCH_TYPE_I2C = 0, V4L2_ASYNC_MATCH_TYPE_FWNODE = 1, }; enum vb2_buffer_state { VB2_BUF_STATE_DEQUEUED = 0, VB2_BUF_STATE_IN_REQUEST = 1, VB2_BUF_STATE_PREPARING = 2, VB2_BUF_STATE_QUEUED = 3, VB2_BUF_STATE_ACTIVE = 4, VB2_BUF_STATE_DONE = 5, VB2_BUF_STATE_ERROR = 6, }; enum vfl_devnode_type { VFL_TYPE_VIDEO = 0, VFL_TYPE_VBI = 1, VFL_TYPE_RADIO = 2, VFL_TYPE_SUBDEV = 3, VFL_TYPE_SDR = 4, VFL_TYPE_TOUCH = 5, VFL_TYPE_MAX = 6, }; enum vfl_devnode_direction { VFL_DIR_RX = 0, VFL_DIR_TX = 1, VFL_DIR_M2M = 2, }; enum uvc_state { UVC_STATE_DISCONNECTED = 0, UVC_STATE_CONNECTED = 1, UVC_STATE_STREAMING = 2, }; enum uvc_buffer_state { UVC_BUF_STATE_IDLE = 0, UVC_BUF_STATE_QUEUED = 1, UVC_BUF_STATE_ACTIVE = 2, UVC_BUF_STATE_DONE = 3, UVC_BUF_STATE_ERROR = 4, }; struct uvc_descriptor_header { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; }; struct uvc_camera_terminal_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __u8 bTerminalID; __le16 wTerminalType; __u8 bAssocTerminal; __u8 iTerminal; __le16 wObjectiveFocalLengthMin; __le16 wObjectiveFocalLengthMax; __le16 wOcularFocalLength; __u8 bControlSize; __u8 bmControls[3]; }; struct uvc_processing_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __u8 bUnitID; __u8 bSourceID; __le16 wMaxMultiplier; __u8 bControlSize; __u8 bmControls[2]; __u8 iProcessing; __u8 bmVideoStandards; } __attribute__((packed)); struct uvc_output_terminal_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __u8 bTerminalID; __le16 wTerminalType; __u8 bAssocTerminal; __u8 bSourceID; __u8 iTerminal; } __attribute__((packed)); struct uvcg_streaming_header; struct f_uvc_opts { struct usb_function_instance func_inst; unsigned int streaming_interval; unsigned int streaming_maxpacket; unsigned int streaming_maxburst; unsigned int control_interface; unsigned int streaming_interface; char function_name[32]; unsigned int last_unit_id; bool enable_interrupt_ep; const struct uvc_descriptor_header * const *fs_control; const struct uvc_descriptor_header * const *ss_control; const struct uvc_descriptor_header * const *fs_streaming; const struct uvc_descriptor_header * const *hs_streaming; const 
struct uvc_descriptor_header * const *ss_streaming; struct uvc_camera_terminal_descriptor uvc_camera_terminal; struct uvc_processing_unit_descriptor uvc_processing; struct uvc_output_terminal_descriptor uvc_output_terminal; struct uvc_descriptor_header *uvc_fs_control_cls[5]; struct uvc_descriptor_header *uvc_ss_control_cls[5]; struct list_head extension_units; struct uvc_descriptor_header **uvc_fs_streaming_cls; struct uvc_descriptor_header **uvc_hs_streaming_cls; struct uvc_descriptor_header **uvc_ss_streaming_cls; u8 iad_index; u8 vs0_index; u8 vs1_index; struct mutex lock; int refcnt; struct uvcg_streaming_header *header; }; struct uvc_input_header_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __u8 bNumFormats; __le16 wTotalLength; __u8 bEndpointAddress; __u8 bmInfo; __u8 bTerminalLink; __u8 bStillCaptureMethod; __u8 bTriggerSupport; __u8 bTriggerUsage; __u8 bControlSize; __u8 bmaControls[0]; } __attribute__((packed)); struct uvcg_streaming_header { struct config_item item; struct uvc_input_header_descriptor desc; unsigned int linked; struct list_head formats; unsigned int num_fmt; }; struct uvcg_extension_unit_descriptor { u8 bLength; u8 bDescriptorType; u8 bDescriptorSubType; u8 bUnitID; u8 guidExtensionCode[16]; u8 bNumControls; u8 bNrInPins; u8 *baSourceID; u8 bControlSize; u8 *bmControls; u8 iExtension; } __attribute__((packed)); struct uvcg_extension { struct config_item item; struct list_head list; u8 string_descriptor_index; struct uvcg_extension_unit_descriptor desc; }; struct media_device; struct media_gobj { struct media_device *mdev; u32 id; struct list_head list; }; struct media_pad; struct media_entity_operations; struct media_entity { struct media_gobj graph_obj; const char *name; enum media_entity_type obj_type; u32 function; unsigned long flags; u16 num_pads; u16 num_links; u16 num_backlinks; int internal_idx; struct media_pad *pads; struct list_head links; const struct media_entity_operations *ops; int use_count; union { struct { u32 major; u32 minor; } dev; } info; }; struct media_pipeline { bool allocated; struct media_device *mdev; struct list_head pads; int start_count; }; typedef __u64 v4l2_std_id; struct media_intf_devnode; struct v4l2_file_operations; struct v4l2_device; struct v4l2_ctrl_handler; struct vb2_queue; struct v4l2_prio_state; struct v4l2_ioctl_ops; struct video_device { struct media_entity entity; struct media_intf_devnode *intf_devnode; struct media_pipeline pipe; const struct v4l2_file_operations *fops; u32 device_caps; struct device dev; struct cdev *cdev; struct v4l2_device *v4l2_dev; struct device *dev_parent; struct v4l2_ctrl_handler *ctrl_handler; struct vb2_queue *queue; struct v4l2_prio_state *prio; char name[32]; enum vfl_devnode_type vfl_type; enum vfl_devnode_direction vfl_dir; int minor; u16 num; unsigned long flags; int index; spinlock_t fh_lock; struct list_head fh_list; int dev_debug; v4l2_std_id tvnorms; void (*release)(struct video_device *); const struct v4l2_ioctl_ops *ioctl_ops; unsigned long valid_ioctls[3]; struct mutex *lock; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct v4l2_prio_state { atomic_t prios[4]; }; struct v4l2_subdev; struct v4l2_device { struct device *dev; struct media_device *mdev; struct list_head subdevs; spinlock_t lock; char name[36]; void (*notify)(struct v4l2_subdev *, unsigned int, void *); struct v4l2_ctrl_handler *ctrl_handler; struct v4l2_prio_state prio; struct kref ref; void (*release)(struct v4l2_device *); }; struct vb2_ops; struct vb2_mem_ops; 
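/*
 * The videobuf2 and UVC gadget types that follow (struct vb2_queue, struct
 * uvc_video_queue, struct uvc_video, struct uvc_device) are, like the rest of
 * this header, BTF-derived layouts with no documentation. A minimal sketch of
 * how such a layout is typically consumed from a BPF program via CO-RE
 * (hypothetical kprobe, not part of this header; assumes libbpf's
 * bpf_helpers.h, bpf_tracing.h and bpf_core_read.h are included alongside it):
 *
 *   SEC("kprobe/vb2_core_qbuf")
 *   int BPF_KPROBE(on_vb2_qbuf, struct vb2_queue *q)
 *   {
 *           // CO-RE-relocated read of a field defined below
 *           unsigned int queued = BPF_CORE_READ(q, queued_count);
 *
 *           bpf_printk("vb2 queue: %u buffers queued", queued);
 *           return 0;
 *   }
 */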
struct vb2_buf_ops; struct vb2_buffer; struct vb2_fileio_data; struct vb2_threadio_data; struct vb2_queue { unsigned int type; unsigned int io_modes; struct device *dev; unsigned long dma_attrs; unsigned int bidirectional: 1; unsigned int fileio_read_once: 1; unsigned int fileio_write_immediately: 1; unsigned int allow_zero_bytesused: 1; unsigned int quirk_poll_must_check_waiting_for_buffers: 1; unsigned int supports_requests: 1; unsigned int requires_requests: 1; unsigned int uses_qbuf: 1; unsigned int uses_requests: 1; unsigned int allow_cache_hints: 1; unsigned int non_coherent_mem: 1; struct mutex *lock; void *owner; const struct vb2_ops *ops; const struct vb2_mem_ops *mem_ops; const struct vb2_buf_ops *buf_ops; void *drv_priv; u32 subsystem_flags; unsigned int buf_struct_size; u32 timestamp_flags; gfp_t gfp_flags; u32 min_buffers_needed; struct device *alloc_devs[8]; struct mutex mmap_lock; unsigned int memory; enum dma_data_direction dma_dir; struct vb2_buffer *bufs[64]; unsigned int num_buffers; struct list_head queued_list; unsigned int queued_count; atomic_t owned_by_drv_count; struct list_head done_list; spinlock_t done_lock; wait_queue_head_t done_wq; unsigned int streaming: 1; unsigned int start_streaming_called: 1; unsigned int error: 1; unsigned int waiting_for_buffers: 1; unsigned int waiting_in_dqbuf: 1; unsigned int is_multiplanar: 1; unsigned int is_output: 1; unsigned int copy_timestamp: 1; unsigned int last_buffer_dequeued: 1; struct vb2_fileio_data *fileio; struct vb2_threadio_data *threadio; char name[32]; }; struct uvc_video_queue { struct vb2_queue queue; unsigned int flags; __u32 sequence; unsigned int buf_used; bool use_sg; spinlock_t irqlock; struct list_head irqqueue; }; struct uvc_device; struct uvc_buffer; struct uvc_video { struct uvc_device *uvc; struct usb_ep *ep; struct work_struct pump; struct workqueue_struct *async_wq; u8 bpp; u32 fcc; unsigned int width; unsigned int height; unsigned int imagesize; struct mutex mutex; unsigned int uvc_num_requests; bool is_enabled; unsigned int req_size; struct list_head ureqs; struct list_head req_free; struct list_head req_ready; spinlock_t req_lock; unsigned int req_int_count; void (*encode)(struct usb_request *, struct uvc_video *, struct uvc_buffer *); __u32 payload_size; __u32 max_payload_size; struct uvc_video_queue queue; unsigned int fid; }; struct uvc_device { struct video_device vdev; struct v4l2_device v4l2_dev; enum uvc_state state; struct usb_function func; struct uvc_video video; bool func_connected; wait_queue_head_t func_connected_queue; struct uvcg_streaming_header *header; struct { const struct uvc_descriptor_header * const *fs_control; const struct uvc_descriptor_header * const *ss_control; const struct uvc_descriptor_header * const *fs_streaming; const struct uvc_descriptor_header * const *hs_streaming; const struct uvc_descriptor_header * const *ss_streaming; struct list_head *extension_units; } desc; unsigned int control_intf; struct usb_ep *interrupt_ep; struct usb_request *control_req; void *control_buf; bool enable_interrupt_ep; unsigned int streaming_intf; unsigned int event_length; unsigned int event_setup_out: 1; }; struct media_entity_enum { unsigned long *bmap; int idx_max; }; struct media_graph { struct { struct media_entity *entity; struct list_head *link; } stack[16]; struct media_entity_enum ent_enum; int top; }; struct media_devnode; struct media_device_ops; struct media_device { struct device *dev; struct media_devnode *devnode; char model[32]; char driver_name[32]; char serial[40]; 
char bus_info[32]; u32 hw_revision; u64 topology_version; u32 id; struct ida entity_internal_idx; int entity_internal_idx_max; struct list_head entities; struct list_head interfaces; struct list_head pads; struct list_head links; struct list_head entity_notify; struct mutex graph_mutex; struct media_graph pm_count_walk; void *source_priv; int (*enable_source)(struct media_entity *, struct media_pipeline *); void (*disable_source)(struct media_entity *); const struct media_device_ops *ops; struct mutex req_queue_mutex; atomic_t request_id; }; struct media_file_operations; struct media_devnode { struct media_device *media_dev; const struct media_file_operations *fops; struct device dev; struct cdev cdev; struct device *parent; int minor; unsigned long flags; void (*release)(struct media_devnode *); }; struct media_file_operations { struct module *owner; ssize_t (*read)(struct file *, char __attribute__((btf_type_tag("user"))) *, size_t, loff_t *); ssize_t (*write)(struct file *, const char __attribute__((btf_type_tag("user"))) *, size_t, loff_t *); __poll_t (*poll)(struct file *, struct poll_table_struct *); long (*ioctl)(struct file *, unsigned int, unsigned long); long (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*open)(struct file *); int (*release)(struct file *); }; struct media_link; struct media_request; struct media_device_ops { int (*link_notify)(struct media_link *, u32, unsigned int); struct media_request * (*req_alloc)(struct media_device *); void (*req_free)(struct media_request *); int (*req_validate)(struct media_request *); void (*req_queue)(struct media_request *); }; struct media_interface; struct media_link { struct media_gobj graph_obj; struct list_head list; union { struct media_gobj *gobj0; struct media_pad *source; struct media_interface *intf; }; union { struct media_gobj *gobj1; struct media_pad *sink; struct media_entity *entity; }; struct media_link *reverse; unsigned long flags; bool is_backlink; }; struct media_pad { struct media_gobj graph_obj; struct media_entity *entity; u16 index; u16 num_links; enum media_pad_signal_type sig_type; unsigned long flags; struct media_pipeline *pipe; }; struct media_interface { struct media_gobj graph_obj; struct list_head links; u32 type; u32 flags; }; struct media_request { struct media_device *mdev; struct kref kref; char debug_str[27]; enum media_request_state state; unsigned int updating_count; unsigned int access_count; struct list_head objects; unsigned int num_incomplete_objects; wait_queue_head_t poll_wait; spinlock_t lock; }; struct media_entity_operations { int (*get_fwnode_pad)(struct media_entity *, struct fwnode_endpoint *); int (*link_setup)(struct media_entity *, const struct media_pad *, const struct media_pad *, u32); int (*link_validate)(struct media_link *); bool (*has_pad_interdep)(struct media_entity *, unsigned int, unsigned int); }; struct media_intf_devnode { struct media_interface intf; u32 major; u32 minor; }; struct v4l2_file_operations { struct module *owner; ssize_t (*read)(struct file *, char __attribute__((btf_type_tag("user"))) *, size_t, loff_t *); ssize_t (*write)(struct file *, const char __attribute__((btf_type_tag("user"))) *, size_t, loff_t *); __poll_t (*poll)(struct file *, struct poll_table_struct *); long (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long (*compat_ioctl32)(struct file *, unsigned int, unsigned long); unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*mmap)(struct file 
*, struct vm_area_struct *); int (*open)(struct file *); int (*release)(struct file *); u64 android_kabi_reserved1; }; struct v4l2_subdev_ops; struct v4l2_subdev_internal_ops; struct v4l2_async_notifier; struct v4l2_subdev_platform_data; struct v4l2_subdev_state; struct v4l2_subdev { struct media_entity entity; struct list_head list; struct module *owner; bool owner_v4l2_dev; u32 flags; struct v4l2_device *v4l2_dev; const struct v4l2_subdev_ops *ops; const struct v4l2_subdev_internal_ops *internal_ops; struct v4l2_ctrl_handler *ctrl_handler; char name[32]; u32 grp_id; void *dev_priv; void *host_priv; struct video_device *devnode; struct device *dev; struct fwnode_handle *fwnode; struct list_head async_list; struct list_head async_subdev_endpoint_list; struct v4l2_async_notifier *subdev_notifier; struct list_head asc_list; struct v4l2_subdev_platform_data *pdata; struct mutex *state_lock; struct led_classdev *privacy_led; struct v4l2_subdev_state *active_state; u64 enabled_streams; }; struct v4l2_subdev_core_ops; struct v4l2_subdev_tuner_ops; struct v4l2_subdev_audio_ops; struct v4l2_subdev_video_ops; struct v4l2_subdev_vbi_ops; struct v4l2_subdev_ir_ops; struct v4l2_subdev_sensor_ops; struct v4l2_subdev_pad_ops; struct v4l2_subdev_ops { const struct v4l2_subdev_core_ops *core; const struct v4l2_subdev_tuner_ops *tuner; const struct v4l2_subdev_audio_ops *audio; const struct v4l2_subdev_video_ops *video; const struct v4l2_subdev_vbi_ops *vbi; const struct v4l2_subdev_ir_ops *ir; const struct v4l2_subdev_sensor_ops *sensor; const struct v4l2_subdev_pad_ops *pad; }; struct v4l2_subdev_io_pin_config; struct v4l2_fh; struct v4l2_event_subscription; struct v4l2_subdev_core_ops { int (*log_status)(struct v4l2_subdev *); int (*s_io_pin_config)(struct v4l2_subdev *, size_t, struct v4l2_subdev_io_pin_config *); int (*init)(struct v4l2_subdev *, u32); int (*load_fw)(struct v4l2_subdev *); int (*reset)(struct v4l2_subdev *, u32); int (*s_gpio)(struct v4l2_subdev *, u32); long (*command)(struct v4l2_subdev *, unsigned int, void *); long (*ioctl)(struct v4l2_subdev *, unsigned int, void *); long (*compat_ioctl32)(struct v4l2_subdev *, unsigned int, unsigned long); int (*s_power)(struct v4l2_subdev *, int); int (*interrupt_service_routine)(struct v4l2_subdev *, u32, bool *); int (*subscribe_event)(struct v4l2_subdev *, struct v4l2_fh *, struct v4l2_event_subscription *); int (*unsubscribe_event)(struct v4l2_subdev *, struct v4l2_fh *, struct v4l2_event_subscription *); }; struct v4l2_subdev_io_pin_config { u32 flags; u8 pin; u8 function; u8 value; u8 strength; }; struct v4l2_m2m_ctx; struct v4l2_fh { struct list_head list; struct video_device *vdev; struct v4l2_ctrl_handler *ctrl_handler; enum v4l2_priority prio; wait_queue_head_t wait; struct mutex subscribe_lock; struct list_head subscribed; struct list_head available; unsigned int navailable; u32 sequence; struct v4l2_m2m_ctx *m2m_ctx; }; struct v4l2_event_subscription { __u32 type; __u32 id; __u32 flags; __u32 reserved[5]; }; struct tuner_setup; struct v4l2_frequency; struct v4l2_frequency_band; struct v4l2_tuner; struct v4l2_modulator; struct v4l2_priv_tun_config; struct v4l2_subdev_tuner_ops { int (*standby)(struct v4l2_subdev *); int (*s_radio)(struct v4l2_subdev *); int (*s_frequency)(struct v4l2_subdev *, const struct v4l2_frequency *); int (*g_frequency)(struct v4l2_subdev *, struct v4l2_frequency *); int (*enum_freq_bands)(struct v4l2_subdev *, struct v4l2_frequency_band *); int (*g_tuner)(struct v4l2_subdev *, struct v4l2_tuner *); int 
(*s_tuner)(struct v4l2_subdev *, const struct v4l2_tuner *); int (*g_modulator)(struct v4l2_subdev *, struct v4l2_modulator *); int (*s_modulator)(struct v4l2_subdev *, const struct v4l2_modulator *); int (*s_type_addr)(struct v4l2_subdev *, struct tuner_setup *); int (*s_config)(struct v4l2_subdev *, const struct v4l2_priv_tun_config *); }; struct v4l2_frequency { __u32 tuner; __u32 type; __u32 frequency; __u32 reserved[8]; }; struct v4l2_frequency_band { __u32 tuner; __u32 type; __u32 index; __u32 capability; __u32 rangelow; __u32 rangehigh; __u32 modulation; __u32 reserved[9]; }; struct v4l2_tuner { __u32 index; __u8 name[32]; __u32 type; __u32 capability; __u32 rangelow; __u32 rangehigh; __u32 rxsubchans; __u32 audmode; __s32 signal; __s32 afc; __u32 reserved[4]; }; struct v4l2_modulator { __u32 index; __u8 name[32]; __u32 capability; __u32 rangelow; __u32 rangehigh; __u32 txsubchans; __u32 type; __u32 reserved[3]; }; struct v4l2_priv_tun_config { int tuner; void *priv; }; struct v4l2_subdev_audio_ops { int (*s_clock_freq)(struct v4l2_subdev *, u32); int (*s_i2s_clock_freq)(struct v4l2_subdev *, u32); int (*s_routing)(struct v4l2_subdev *, u32, u32, u32); int (*s_stream)(struct v4l2_subdev *, int); }; struct v4l2_fract; struct v4l2_subdev_frame_interval; struct v4l2_dv_timings; struct v4l2_subdev_video_ops { int (*s_routing)(struct v4l2_subdev *, u32, u32, u32); int (*s_crystal_freq)(struct v4l2_subdev *, u32, u32); int (*g_std)(struct v4l2_subdev *, v4l2_std_id *); int (*s_std)(struct v4l2_subdev *, v4l2_std_id); int (*s_std_output)(struct v4l2_subdev *, v4l2_std_id); int (*g_std_output)(struct v4l2_subdev *, v4l2_std_id *); int (*querystd)(struct v4l2_subdev *, v4l2_std_id *); int (*g_tvnorms)(struct v4l2_subdev *, v4l2_std_id *); int (*g_tvnorms_output)(struct v4l2_subdev *, v4l2_std_id *); int (*g_input_status)(struct v4l2_subdev *, u32 *); int (*s_stream)(struct v4l2_subdev *, int); int (*g_pixelaspect)(struct v4l2_subdev *, struct v4l2_fract *); int (*g_frame_interval)(struct v4l2_subdev *, struct v4l2_subdev_frame_interval *); int (*s_frame_interval)(struct v4l2_subdev *, struct v4l2_subdev_frame_interval *); int (*s_dv_timings)(struct v4l2_subdev *, struct v4l2_dv_timings *); int (*g_dv_timings)(struct v4l2_subdev *, struct v4l2_dv_timings *); int (*query_dv_timings)(struct v4l2_subdev *, struct v4l2_dv_timings *); int (*s_rx_buffer)(struct v4l2_subdev *, void *, unsigned int *); int (*pre_streamon)(struct v4l2_subdev *, u32); int (*post_streamoff)(struct v4l2_subdev *); }; struct v4l2_fract { __u32 numerator; __u32 denominator; }; struct v4l2_subdev_frame_interval { __u32 pad; struct v4l2_fract interval; __u32 stream; __u32 reserved[8]; }; struct v4l2_bt_timings { __u32 width; __u32 height; __u32 interlaced; __u32 polarities; __u64 pixelclock; __u32 hfrontporch; __u32 hsync; __u32 hbackporch; __u32 vfrontporch; __u32 vsync; __u32 vbackporch; __u32 il_vfrontporch; __u32 il_vsync; __u32 il_vbackporch; __u32 standards; __u32 flags; struct v4l2_fract picture_aspect; __u8 cea861_vic; __u8 hdmi_vic; __u8 reserved[46]; } __attribute__((packed)); struct v4l2_dv_timings { __u32 type; union { struct v4l2_bt_timings bt; __u32 reserved[32]; }; }; struct v4l2_decode_vbi_line; struct v4l2_sliced_vbi_data; struct v4l2_sliced_vbi_cap; struct v4l2_vbi_format; struct v4l2_sliced_vbi_format; struct v4l2_subdev_vbi_ops { int (*decode_vbi_line)(struct v4l2_subdev *, struct v4l2_decode_vbi_line *); int (*s_vbi_data)(struct v4l2_subdev *, const struct v4l2_sliced_vbi_data *); int 
(*g_vbi_data)(struct v4l2_subdev *, struct v4l2_sliced_vbi_data *); int (*g_sliced_vbi_cap)(struct v4l2_subdev *, struct v4l2_sliced_vbi_cap *); int (*s_raw_fmt)(struct v4l2_subdev *, struct v4l2_vbi_format *); int (*g_sliced_fmt)(struct v4l2_subdev *, struct v4l2_sliced_vbi_format *); int (*s_sliced_fmt)(struct v4l2_subdev *, struct v4l2_sliced_vbi_format *); }; struct v4l2_decode_vbi_line { u32 is_second_field; u8 *p; u32 line; u32 type; }; struct v4l2_sliced_vbi_data { __u32 id; __u32 field; __u32 line; __u32 reserved; __u8 data[48]; }; struct v4l2_sliced_vbi_cap { __u16 service_set; __u16 service_lines[48]; __u32 type; __u32 reserved[3]; }; struct v4l2_vbi_format { __u32 sampling_rate; __u32 offset; __u32 samples_per_line; __u32 sample_format; __s32 start[2]; __u32 count[2]; __u32 flags; __u32 reserved[2]; }; struct v4l2_sliced_vbi_format { __u16 service_set; __u16 service_lines[48]; __u32 io_size; __u32 reserved[2]; }; struct v4l2_subdev_ir_parameters; struct v4l2_subdev_ir_ops { int (*rx_read)(struct v4l2_subdev *, u8 *, size_t, ssize_t *); int (*rx_g_parameters)(struct v4l2_subdev *, struct v4l2_subdev_ir_parameters *); int (*rx_s_parameters)(struct v4l2_subdev *, struct v4l2_subdev_ir_parameters *); int (*tx_write)(struct v4l2_subdev *, u8 *, size_t, ssize_t *); int (*tx_g_parameters)(struct v4l2_subdev *, struct v4l2_subdev_ir_parameters *); int (*tx_s_parameters)(struct v4l2_subdev *, struct v4l2_subdev_ir_parameters *); }; struct v4l2_subdev_ir_parameters { unsigned int bytes_per_data_element; enum v4l2_subdev_ir_mode mode; bool enable; bool interrupt_enable; bool shutdown; bool modulation; u32 max_pulse_width; unsigned int carrier_freq; unsigned int duty_cycle; bool invert_level; bool invert_carrier_sense; u32 noise_filter_min_width; unsigned int carrier_range_lower; unsigned int carrier_range_upper; u32 resolution; }; struct v4l2_subdev_sensor_ops { int (*g_skip_top_lines)(struct v4l2_subdev *, u32 *); int (*g_skip_frames)(struct v4l2_subdev *, u32 *); }; struct v4l2_subdev_mbus_code_enum; struct v4l2_subdev_frame_size_enum; struct v4l2_subdev_frame_interval_enum; struct v4l2_subdev_format; struct v4l2_subdev_selection; struct v4l2_edid; struct v4l2_dv_timings_cap; struct v4l2_enum_dv_timings; struct v4l2_mbus_frame_desc; struct v4l2_mbus_config; struct v4l2_subdev_krouting; struct v4l2_subdev_pad_ops { int (*init_cfg)(struct v4l2_subdev *, struct v4l2_subdev_state *); int (*enum_mbus_code)(struct v4l2_subdev *, struct v4l2_subdev_state *, struct v4l2_subdev_mbus_code_enum *); int (*enum_frame_size)(struct v4l2_subdev *, struct v4l2_subdev_state *, struct v4l2_subdev_frame_size_enum *); int (*enum_frame_interval)(struct v4l2_subdev *, struct v4l2_subdev_state *, struct v4l2_subdev_frame_interval_enum *); int (*get_fmt)(struct v4l2_subdev *, struct v4l2_subdev_state *, struct v4l2_subdev_format *); int (*set_fmt)(struct v4l2_subdev *, struct v4l2_subdev_state *, struct v4l2_subdev_format *); int (*get_selection)(struct v4l2_subdev *, struct v4l2_subdev_state *, struct v4l2_subdev_selection *); int (*set_selection)(struct v4l2_subdev *, struct v4l2_subdev_state *, struct v4l2_subdev_selection *); int (*get_edid)(struct v4l2_subdev *, struct v4l2_edid *); int (*set_edid)(struct v4l2_subdev *, struct v4l2_edid *); int (*dv_timings_cap)(struct v4l2_subdev *, struct v4l2_dv_timings_cap *); int (*enum_dv_timings)(struct v4l2_subdev *, struct v4l2_enum_dv_timings *); int (*link_validate)(struct v4l2_subdev *, struct media_link *, struct v4l2_subdev_format *, struct v4l2_subdev_format 
*); int (*get_frame_desc)(struct v4l2_subdev *, unsigned int, struct v4l2_mbus_frame_desc *); int (*set_frame_desc)(struct v4l2_subdev *, unsigned int, struct v4l2_mbus_frame_desc *); int (*get_mbus_config)(struct v4l2_subdev *, unsigned int, struct v4l2_mbus_config *); int (*set_routing)(struct v4l2_subdev *, struct v4l2_subdev_state *, enum v4l2_subdev_format_whence, struct v4l2_subdev_krouting *); int (*enable_streams)(struct v4l2_subdev *, struct v4l2_subdev_state *, u32, u64); int (*disable_streams)(struct v4l2_subdev *, struct v4l2_subdev_state *, u32, u64); }; struct v4l2_subdev_route; struct v4l2_subdev_krouting { unsigned int num_routes; struct v4l2_subdev_route *routes; }; struct v4l2_subdev_stream_config; struct v4l2_subdev_stream_configs { u32 num_configs; struct v4l2_subdev_stream_config *configs; }; struct v4l2_subdev_pad_config; struct v4l2_subdev_state { struct mutex _lock; struct mutex *lock; struct v4l2_subdev_pad_config *pads; struct v4l2_subdev_krouting routing; struct v4l2_subdev_stream_configs stream_configs; }; struct v4l2_mbus_framefmt { __u32 width; __u32 height; __u32 code; __u32 field; __u32 colorspace; union { __u16 ycbcr_enc; __u16 hsv_enc; }; __u16 quantization; __u16 xfer_func; __u16 flags; __u16 reserved[10]; }; struct v4l2_rect { __s32 left; __s32 top; __u32 width; __u32 height; }; struct v4l2_subdev_pad_config { struct v4l2_mbus_framefmt try_fmt; struct v4l2_rect try_crop; struct v4l2_rect try_compose; }; struct v4l2_subdev_route { __u32 sink_pad; __u32 sink_stream; __u32 source_pad; __u32 source_stream; __u32 flags; __u32 reserved[5]; }; struct v4l2_subdev_stream_config { u32 pad; u32 stream; bool enabled; struct v4l2_mbus_framefmt fmt; struct v4l2_rect crop; struct v4l2_rect compose; }; struct v4l2_subdev_mbus_code_enum { __u32 pad; __u32 index; __u32 code; __u32 which; __u32 flags; __u32 stream; __u32 reserved[6]; }; struct v4l2_subdev_frame_size_enum { __u32 index; __u32 pad; __u32 code; __u32 min_width; __u32 max_width; __u32 min_height; __u32 max_height; __u32 which; __u32 stream; __u32 reserved[7]; }; struct v4l2_subdev_frame_interval_enum { __u32 index; __u32 pad; __u32 code; __u32 width; __u32 height; struct v4l2_fract interval; __u32 which; __u32 stream; __u32 reserved[7]; }; struct v4l2_subdev_format { __u32 which; __u32 pad; struct v4l2_mbus_framefmt format; __u32 stream; __u32 reserved[7]; }; struct v4l2_subdev_selection { __u32 which; __u32 pad; __u32 target; __u32 flags; struct v4l2_rect r; __u32 stream; __u32 reserved[7]; }; struct v4l2_edid { __u32 pad; __u32 start_block; __u32 blocks; __u32 reserved[5]; __u8 *edid; }; struct v4l2_bt_timings_cap { __u32 min_width; __u32 max_width; __u32 min_height; __u32 max_height; __u64 min_pixelclock; __u64 max_pixelclock; __u32 standards; __u32 capabilities; __u32 reserved[16]; }; struct v4l2_dv_timings_cap { __u32 type; __u32 pad; __u32 reserved[2]; union { struct v4l2_bt_timings_cap bt; __u32 raw_data[32]; }; }; struct v4l2_enum_dv_timings { __u32 index; __u32 pad; __u32 reserved[2]; struct v4l2_dv_timings timings; }; struct v4l2_mbus_frame_desc_entry_csi2 { u8 vc; u8 dt; }; struct v4l2_mbus_frame_desc_entry { enum v4l2_mbus_frame_desc_flags flags; u32 stream; u32 pixelcode; u32 length; union { struct v4l2_mbus_frame_desc_entry_csi2 csi2; } bus; }; struct v4l2_mbus_frame_desc { enum v4l2_mbus_frame_desc_type type; struct v4l2_mbus_frame_desc_entry entry[8]; unsigned short num_entries; }; struct v4l2_mbus_config_parallel { unsigned int flags; unsigned char bus_width; unsigned char data_shift; }; 
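/*
 * Usage note (added for documentation; not part of the generated type dump):
 * struct v4l2_subdev_format above couples a struct v4l2_mbus_framefmt with a
 * `which` selector, so the same get_fmt/set_fmt pad operations serve both the
 * TRY (per-file-handle) and ACTIVE (device) configurations, as chosen by
 * enum v4l2_subdev_format_whence. A minimal in-kernel sketch of a bridge
 * driver requesting an active format on pad 0 of an already-bound sub-device
 * `sd`; it assumes the v4l2_subdev_call() macro and MEDIA_BUS_FMT_UYVY8_2X8
 * from the V4L2 framework headers, which are not part of this dump:
 *
 *   struct v4l2_subdev_format fmt = {
 *       .which = V4L2_SUBDEV_FORMAT_ACTIVE,
 *       .pad   = 0,
 *       .format = {
 *           .width  = 1280,
 *           .height = 720,
 *           .code   = MEDIA_BUS_FMT_UYVY8_2X8,
 *       },
 *   };
 *   int ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &fmt);
 *   // On success the sub-device may have adjusted width/height/code to the
 *   // closest configuration it supports, so the caller re-reads fmt.format.
 */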
struct v4l2_mbus_config_mipi_csi1 { unsigned char clock_inv: 1; unsigned char strobe: 1; bool lane_polarity[2]; unsigned char data_lane; unsigned char clock_lane; }; struct v4l2_mbus_config_mipi_csi2 { unsigned int flags; unsigned char data_lanes[8]; unsigned char clock_lane; unsigned char num_data_lanes; bool lane_polarities[9]; }; struct v4l2_mbus_config { enum v4l2_mbus_type type; union { struct v4l2_mbus_config_parallel parallel; struct v4l2_mbus_config_mipi_csi1 mipi_csi1; struct v4l2_mbus_config_mipi_csi2 mipi_csi2; } bus; }; struct v4l2_subdev_fh; struct v4l2_subdev_internal_ops { int (*registered)(struct v4l2_subdev *); void (*unregistered)(struct v4l2_subdev *); int (*open)(struct v4l2_subdev *, struct v4l2_subdev_fh *); int (*close)(struct v4l2_subdev *, struct v4l2_subdev_fh *); void (*release)(struct v4l2_subdev *); }; struct v4l2_subdev_fh { struct v4l2_fh vfh; struct module *owner; struct v4l2_subdev_state *state; u64 client_caps; }; struct v4l2_async_notifier_operations; struct v4l2_async_notifier { const struct v4l2_async_notifier_operations *ops; struct v4l2_device *v4l2_dev; struct v4l2_subdev *sd; struct v4l2_async_notifier *parent; struct list_head waiting_list; struct list_head done_list; struct list_head notifier_entry; }; struct v4l2_async_connection; struct v4l2_async_notifier_operations { int (*bound)(struct v4l2_async_notifier *, struct v4l2_subdev *, struct v4l2_async_connection *); int (*complete)(struct v4l2_async_notifier *); void (*unbind)(struct v4l2_async_notifier *, struct v4l2_subdev *, struct v4l2_async_connection *); void (*destroy)(struct v4l2_async_connection *); }; struct v4l2_async_match_desc { enum v4l2_async_match_type type; union { struct fwnode_handle *fwnode; struct { int adapter_id; unsigned short address; } i2c; }; }; struct v4l2_async_connection { struct v4l2_async_match_desc match; struct v4l2_async_notifier *notifier; struct list_head asc_entry; struct list_head asc_subdev_entry; struct v4l2_subdev *sd; }; struct v4l2_subdev_platform_data { struct regulator_bulk_data *regulators; int num_regulators; void *host_priv; }; struct vb2_ops { int (*queue_setup)(struct vb2_queue *, unsigned int *, unsigned int *, unsigned int *, struct device **); void (*wait_prepare)(struct vb2_queue *); void (*wait_finish)(struct vb2_queue *); int (*buf_out_validate)(struct vb2_buffer *); int (*buf_init)(struct vb2_buffer *); int (*buf_prepare)(struct vb2_buffer *); void (*buf_finish)(struct vb2_buffer *); void (*buf_cleanup)(struct vb2_buffer *); int (*prepare_streaming)(struct vb2_queue *); int (*start_streaming)(struct vb2_queue *, unsigned int); void (*stop_streaming)(struct vb2_queue *); void (*unprepare_streaming)(struct vb2_queue *); void (*buf_queue)(struct vb2_buffer *); void (*buf_request_complete)(struct vb2_buffer *); }; struct media_request_object_ops; struct media_request_object { const struct media_request_object_ops *ops; void *priv; struct media_request *req; struct list_head list; struct kref kref; bool completed; }; struct vb2_plane { void *mem_priv; struct dma_buf *dbuf; unsigned int dbuf_mapped; unsigned int bytesused; unsigned int length; unsigned int min_length; union { unsigned int offset; unsigned long userptr; int fd; } m; unsigned int data_offset; }; struct vb2_buffer { struct vb2_queue *vb2_queue; unsigned int index; unsigned int type; unsigned int memory; unsigned int num_planes; u64 timestamp; struct media_request *request; struct media_request_object req_obj; enum vb2_buffer_state state; unsigned int synced: 1; unsigned int 
prepared: 1; unsigned int copied_timestamp: 1; unsigned int skip_cache_sync_on_prepare: 1; unsigned int skip_cache_sync_on_finish: 1; struct vb2_plane planes[8]; struct list_head queued_entry; struct list_head done_entry; }; struct media_request_object_ops { int (*prepare)(struct media_request_object *); void (*unprepare)(struct media_request_object *); void (*queue)(struct media_request_object *); void (*unbind)(struct media_request_object *); void (*release)(struct media_request_object *); }; struct vb2_mem_ops { void * (*alloc)(struct vb2_buffer *, struct device *, unsigned long); void (*put)(void *); struct dma_buf * (*get_dmabuf)(struct vb2_buffer *, void *, unsigned long); void * (*get_userptr)(struct vb2_buffer *, struct device *, unsigned long, unsigned long); void (*put_userptr)(void *); void (*prepare)(void *); void (*finish)(void *); void * (*attach_dmabuf)(struct vb2_buffer *, struct device *, struct dma_buf *, unsigned long); void (*detach_dmabuf)(void *); int (*map_dmabuf)(void *); void (*unmap_dmabuf)(void *); void * (*vaddr)(struct vb2_buffer *, void *); void * (*cookie)(struct vb2_buffer *, void *); unsigned int (*num_users)(void *); int (*mmap)(void *, struct vm_area_struct *); }; struct vb2_buf_ops { int (*verify_planes_array)(struct vb2_buffer *, const void *); void (*init_buffer)(struct vb2_buffer *); void (*fill_user_buffer)(struct vb2_buffer *, void *); int (*fill_vb2_buffer)(struct vb2_buffer *, struct vb2_plane *); void (*copy_timestamp)(struct vb2_buffer *, const void *); }; enum v4l2_buf_type { V4L2_BUF_TYPE_VIDEO_CAPTURE = 1, V4L2_BUF_TYPE_VIDEO_OUTPUT = 2, V4L2_BUF_TYPE_VIDEO_OVERLAY = 3, V4L2_BUF_TYPE_VBI_CAPTURE = 4, V4L2_BUF_TYPE_VBI_OUTPUT = 5, V4L2_BUF_TYPE_SLICED_VBI_CAPTURE = 6, V4L2_BUF_TYPE_SLICED_VBI_OUTPUT = 7, V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY = 8, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE = 9, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE = 10, V4L2_BUF_TYPE_SDR_CAPTURE = 11, V4L2_BUF_TYPE_SDR_OUTPUT = 12, V4L2_BUF_TYPE_META_CAPTURE = 13, V4L2_BUF_TYPE_META_OUTPUT = 14, V4L2_BUF_TYPE_PRIVATE = 128, }; struct v4l2_capability; struct v4l2_fmtdesc; struct v4l2_format; struct v4l2_requestbuffers; struct v4l2_buffer; struct v4l2_exportbuffer; struct v4l2_create_buffers; struct v4l2_framebuffer; struct v4l2_input; struct v4l2_output; struct v4l2_queryctrl; struct v4l2_query_ext_ctrl; struct v4l2_control; struct v4l2_ext_controls; struct v4l2_querymenu; struct v4l2_audio; struct v4l2_audioout; struct v4l2_selection; struct v4l2_jpegcompression; struct v4l2_enc_idx; struct v4l2_encoder_cmd; struct v4l2_decoder_cmd; struct v4l2_streamparm; struct v4l2_hw_freq_seek; struct v4l2_frmsizeenum; struct v4l2_frmivalenum; struct v4l2_ioctl_ops { int (*vidioc_querycap)(struct file *, void *, struct v4l2_capability *); int (*vidioc_enum_fmt_vid_cap)(struct file *, void *, struct v4l2_fmtdesc *); int (*vidioc_enum_fmt_vid_overlay)(struct file *, void *, struct v4l2_fmtdesc *); int (*vidioc_enum_fmt_vid_out)(struct file *, void *, struct v4l2_fmtdesc *); int (*vidioc_enum_fmt_sdr_cap)(struct file *, void *, struct v4l2_fmtdesc *); int (*vidioc_enum_fmt_sdr_out)(struct file *, void *, struct v4l2_fmtdesc *); int (*vidioc_enum_fmt_meta_cap)(struct file *, void *, struct v4l2_fmtdesc *); int (*vidioc_enum_fmt_meta_out)(struct file *, void *, struct v4l2_fmtdesc *); int (*vidioc_g_fmt_vid_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_vid_overlay)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_vid_out)(struct file *, void *, struct v4l2_format *); 
int (*vidioc_g_fmt_vid_out_overlay)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_vbi_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_vbi_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_sliced_vbi_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_sliced_vbi_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_vid_cap_mplane)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_vid_out_mplane)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_sdr_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_sdr_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_meta_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_g_fmt_meta_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vid_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vid_overlay)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vid_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vid_out_overlay)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vbi_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vbi_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_sliced_vbi_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_sliced_vbi_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vid_cap_mplane)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_vid_out_mplane)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_sdr_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_sdr_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_meta_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_s_fmt_meta_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vid_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vid_overlay)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vid_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vid_out_overlay)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vbi_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vbi_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_sliced_vbi_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_sliced_vbi_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vid_cap_mplane)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_vid_out_mplane)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_sdr_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_sdr_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_meta_cap)(struct file *, void *, struct v4l2_format *); int (*vidioc_try_fmt_meta_out)(struct file *, void *, struct v4l2_format *); int (*vidioc_reqbufs)(struct file *, void *, struct v4l2_requestbuffers *); int (*vidioc_querybuf)(struct file *, void *, struct v4l2_buffer *); int (*vidioc_qbuf)(struct file *, void *, struct v4l2_buffer *); int (*vidioc_expbuf)(struct file *, void *, struct v4l2_exportbuffer *); int (*vidioc_dqbuf)(struct file *, void *, struct v4l2_buffer *); int (*vidioc_create_bufs)(struct file *, void *, struct v4l2_create_buffers *); int (*vidioc_prepare_buf)(struct file *, void *, struct v4l2_buffer *); int 
(*vidioc_overlay)(struct file *, void *, unsigned int); int (*vidioc_g_fbuf)(struct file *, void *, struct v4l2_framebuffer *); int (*vidioc_s_fbuf)(struct file *, void *, const struct v4l2_framebuffer *); int (*vidioc_streamon)(struct file *, void *, enum v4l2_buf_type); int (*vidioc_streamoff)(struct file *, void *, enum v4l2_buf_type); int (*vidioc_g_std)(struct file *, void *, v4l2_std_id *); int (*vidioc_s_std)(struct file *, void *, v4l2_std_id); int (*vidioc_querystd)(struct file *, void *, v4l2_std_id *); int (*vidioc_enum_input)(struct file *, void *, struct v4l2_input *); int (*vidioc_g_input)(struct file *, void *, unsigned int *); int (*vidioc_s_input)(struct file *, void *, unsigned int); int (*vidioc_enum_output)(struct file *, void *, struct v4l2_output *); int (*vidioc_g_output)(struct file *, void *, unsigned int *); int (*vidioc_s_output)(struct file *, void *, unsigned int); int (*vidioc_queryctrl)(struct file *, void *, struct v4l2_queryctrl *); int (*vidioc_query_ext_ctrl)(struct file *, void *, struct v4l2_query_ext_ctrl *); int (*vidioc_g_ctrl)(struct file *, void *, struct v4l2_control *); int (*vidioc_s_ctrl)(struct file *, void *, struct v4l2_control *); int (*vidioc_g_ext_ctrls)(struct file *, void *, struct v4l2_ext_controls *); int (*vidioc_s_ext_ctrls)(struct file *, void *, struct v4l2_ext_controls *); int (*vidioc_try_ext_ctrls)(struct file *, void *, struct v4l2_ext_controls *); int (*vidioc_querymenu)(struct file *, void *, struct v4l2_querymenu *); int (*vidioc_enumaudio)(struct file *, void *, struct v4l2_audio *); int (*vidioc_g_audio)(struct file *, void *, struct v4l2_audio *); int (*vidioc_s_audio)(struct file *, void *, const struct v4l2_audio *); int (*vidioc_enumaudout)(struct file *, void *, struct v4l2_audioout *); int (*vidioc_g_audout)(struct file *, void *, struct v4l2_audioout *); int (*vidioc_s_audout)(struct file *, void *, const struct v4l2_audioout *); int (*vidioc_g_modulator)(struct file *, void *, struct v4l2_modulator *); int (*vidioc_s_modulator)(struct file *, void *, const struct v4l2_modulator *); int (*vidioc_g_pixelaspect)(struct file *, void *, int, struct v4l2_fract *); int (*vidioc_g_selection)(struct file *, void *, struct v4l2_selection *); int (*vidioc_s_selection)(struct file *, void *, struct v4l2_selection *); int (*vidioc_g_jpegcomp)(struct file *, void *, struct v4l2_jpegcompression *); int (*vidioc_s_jpegcomp)(struct file *, void *, const struct v4l2_jpegcompression *); int (*vidioc_g_enc_index)(struct file *, void *, struct v4l2_enc_idx *); int (*vidioc_encoder_cmd)(struct file *, void *, struct v4l2_encoder_cmd *); int (*vidioc_try_encoder_cmd)(struct file *, void *, struct v4l2_encoder_cmd *); int (*vidioc_decoder_cmd)(struct file *, void *, struct v4l2_decoder_cmd *); int (*vidioc_try_decoder_cmd)(struct file *, void *, struct v4l2_decoder_cmd *); int (*vidioc_g_parm)(struct file *, void *, struct v4l2_streamparm *); int (*vidioc_s_parm)(struct file *, void *, struct v4l2_streamparm *); int (*vidioc_g_tuner)(struct file *, void *, struct v4l2_tuner *); int (*vidioc_s_tuner)(struct file *, void *, const struct v4l2_tuner *); int (*vidioc_g_frequency)(struct file *, void *, struct v4l2_frequency *); int (*vidioc_s_frequency)(struct file *, void *, const struct v4l2_frequency *); int (*vidioc_enum_freq_bands)(struct file *, void *, struct v4l2_frequency_band *); int (*vidioc_g_sliced_vbi_cap)(struct file *, void *, struct v4l2_sliced_vbi_cap *); int (*vidioc_log_status)(struct file *, void *); int 
(*vidioc_s_hw_freq_seek)(struct file *, void *, const struct v4l2_hw_freq_seek *); int (*vidioc_enum_framesizes)(struct file *, void *, struct v4l2_frmsizeenum *); int (*vidioc_enum_frameintervals)(struct file *, void *, struct v4l2_frmivalenum *); int (*vidioc_s_dv_timings)(struct file *, void *, struct v4l2_dv_timings *); int (*vidioc_g_dv_timings)(struct file *, void *, struct v4l2_dv_timings *); int (*vidioc_query_dv_timings)(struct file *, void *, struct v4l2_dv_timings *); int (*vidioc_enum_dv_timings)(struct file *, void *, struct v4l2_enum_dv_timings *); int (*vidioc_dv_timings_cap)(struct file *, void *, struct v4l2_dv_timings_cap *); int (*vidioc_g_edid)(struct file *, void *, struct v4l2_edid *); int (*vidioc_s_edid)(struct file *, void *, struct v4l2_edid *); int (*vidioc_subscribe_event)(struct v4l2_fh *, const struct v4l2_event_subscription *); int (*vidioc_unsubscribe_event)(struct v4l2_fh *, const struct v4l2_event_subscription *); long (*vidioc_default)(struct file *, void *, bool, unsigned int, void *); }; struct v4l2_timecode { __u32 type; __u32 flags; __u8 frames; __u8 seconds; __u8 minutes; __u8 hours; __u8 userbits[4]; }; struct vb2_v4l2_buffer { struct vb2_buffer vb2_buf; __u32 flags; __u32 field; struct v4l2_timecode timecode; __u32 sequence; __s32 request_fd; bool is_held; struct vb2_plane planes[8]; u64 android_kabi_reserved1; }; struct uvc_buffer { struct vb2_v4l2_buffer buf; struct list_head queue; enum uvc_buffer_state state; void *mem; struct sg_table *sgt; struct scatterlist *sg; unsigned int offset; unsigned int length; unsigned int bytesused; }; struct v4l2_event_vsync { __u8 field; }; struct v4l2_event_ctrl { __u32 changes; __u32 type; union { __s32 value; __s64 value64; }; __u32 flags; __s32 minimum; __s32 maximum; __s32 step; __s32 default_value; }; struct v4l2_event_frame_sync { __u32 frame_sequence; }; struct v4l2_event_src_change { __u32 changes; }; struct v4l2_event_motion_det { __u32 flags; __u32 frame_sequence; __u32 region_mask; }; struct v4l2_event { __u32 type; union { struct v4l2_event_vsync vsync; struct v4l2_event_ctrl ctrl; struct v4l2_event_frame_sync frame_sync; struct v4l2_event_src_change src_change; struct v4l2_event_motion_det motion_det; __u8 data[64]; } u; __u32 pending; __u32 sequence; struct __kernel_timespec timestamp; __u32 id; __u32 reserved[8]; }; struct uvc_request_data { __s32 length; __u8 data[60]; }; struct uvc_event { union { enum usb_device_speed speed; struct usb_ctrlrequest req; struct uvc_request_data data; }; }; struct uvc_header_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __le16 bcdUVC; __le16 wTotalLength; __le32 dwClockFrequency; __u8 bInCollection; __u8 baInterfaceNr[0]; } __attribute__((packed)); enum vb2_io_modes { VB2_MMAP = 1, VB2_USERPTR = 2, VB2_READ = 4, VB2_WRITE = 8, VB2_DMABUF = 16, }; enum v4l2_field { V4L2_FIELD_ANY = 0, V4L2_FIELD_NONE = 1, V4L2_FIELD_TOP = 2, V4L2_FIELD_BOTTOM = 3, V4L2_FIELD_INTERLACED = 4, V4L2_FIELD_SEQ_TB = 5, V4L2_FIELD_SEQ_BT = 6, V4L2_FIELD_ALTERNATE = 7, V4L2_FIELD_INTERLACED_TB = 8, V4L2_FIELD_INTERLACED_BT = 9, }; struct v4l2_requestbuffers { __u32 count; __u32 type; __u32 memory; __u32 capabilities; __u8 flags; __u8 reserved[3]; }; struct __kernel_v4l2_timeval { long long tv_sec; long long tv_usec; }; struct v4l2_plane; struct v4l2_buffer { __u32 index; __u32 type; __u32 bytesused; __u32 flags; __u32 field; struct __kernel_v4l2_timeval timestamp; struct v4l2_timecode timecode; __u32 sequence; __u32 memory; union { __u32 offset; unsigned long 
userptr; struct v4l2_plane *planes; __s32 fd; } m; __u32 length; __u32 reserved2; union { __s32 request_fd; __u32 reserved; }; }; struct v4l2_plane { __u32 bytesused; __u32 length; union { __u32 mem_offset; unsigned long userptr; __s32 fd; } m; __u32 data_offset; __u32 reserved[11]; }; struct v4l2_capability { __u8 driver[16]; __u8 card[32]; __u8 bus_info[32]; __u32 version; __u32 capabilities; __u32 device_caps; __u32 reserved[3]; }; struct v4l2_fmtdesc { __u32 index; __u32 type; __u32 flags; __u8 description[32]; __u32 pixelformat; __u32 mbus_code; __u32 reserved[3]; }; struct v4l2_pix_format { __u32 width; __u32 height; __u32 pixelformat; __u32 field; __u32 bytesperline; __u32 sizeimage; __u32 colorspace; __u32 priv; __u32 flags; union { __u32 ycbcr_enc; __u32 hsv_enc; }; __u32 quantization; __u32 xfer_func; }; struct v4l2_plane_pix_format { __u32 sizeimage; __u32 bytesperline; __u16 reserved[6]; }; struct v4l2_pix_format_mplane { __u32 width; __u32 height; __u32 pixelformat; __u32 field; __u32 colorspace; struct v4l2_plane_pix_format plane_fmt[8]; __u8 num_planes; __u8 flags; union { __u8 ycbcr_enc; __u8 hsv_enc; }; __u8 quantization; __u8 xfer_func; __u8 reserved[7]; }; struct v4l2_clip; struct v4l2_window { struct v4l2_rect w; __u32 field; __u32 chromakey; struct v4l2_clip *clips; __u32 clipcount; void __attribute__((btf_type_tag("user"))) *bitmap; __u8 global_alpha; }; struct v4l2_sdr_format { __u32 pixelformat; __u32 buffersize; __u8 reserved[24]; }; struct v4l2_meta_format { __u32 dataformat; __u32 buffersize; }; struct v4l2_format { __u32 type; union { struct v4l2_pix_format pix; struct v4l2_pix_format_mplane pix_mp; struct v4l2_window win; struct v4l2_vbi_format vbi; struct v4l2_sliced_vbi_format sliced; struct v4l2_sdr_format sdr; struct v4l2_meta_format meta; __u8 raw_data[200]; } fmt; }; struct v4l2_clip { struct v4l2_rect c; struct v4l2_clip __attribute__((btf_type_tag("user"))) *next; }; struct v4l2_exportbuffer { __u32 type; __u32 index; __u32 plane; __u32 flags; __s32 fd; __u32 reserved[11]; }; struct v4l2_create_buffers { __u32 index; __u32 count; __u32 memory; struct v4l2_format format; __u32 capabilities; __u32 flags; __u32 reserved[6]; }; struct v4l2_framebuffer { __u32 capability; __u32 flags; void *base; struct { __u32 width; __u32 height; __u32 pixelformat; __u32 field; __u32 bytesperline; __u32 sizeimage; __u32 colorspace; __u32 priv; } fmt; }; struct v4l2_input { __u32 index; __u8 name[32]; __u32 type; __u32 audioset; __u32 tuner; v4l2_std_id std; __u32 status; __u32 capabilities; __u32 reserved[3]; }; struct v4l2_output { __u32 index; __u8 name[32]; __u32 type; __u32 audioset; __u32 modulator; v4l2_std_id std; __u32 capabilities; __u32 reserved[3]; }; struct v4l2_queryctrl { __u32 id; __u32 type; __u8 name[32]; __s32 minimum; __s32 maximum; __s32 step; __s32 default_value; __u32 flags; __u32 reserved[2]; }; struct v4l2_query_ext_ctrl { __u32 id; __u32 type; char name[32]; __s64 minimum; __s64 maximum; __u64 step; __s64 default_value; __u32 flags; __u32 elem_size; __u32 elems; __u32 nr_of_dims; __u32 dims[4]; __u32 reserved[32]; }; struct v4l2_control { __u32 id; __s32 value; }; struct v4l2_ext_control; struct v4l2_ext_controls { union { __u32 which; }; __u32 count; __u32 error_idx; __s32 request_fd; __u32 reserved[1]; struct v4l2_ext_control *controls; }; struct v4l2_area; struct v4l2_ctrl_h264_sps; struct v4l2_ctrl_h264_pps; struct v4l2_ctrl_h264_scaling_matrix; struct v4l2_ctrl_h264_pred_weights; struct v4l2_ctrl_h264_slice_params; struct 
v4l2_ctrl_h264_decode_params; struct v4l2_ctrl_fwht_params; struct v4l2_ctrl_vp8_frame; struct v4l2_ctrl_mpeg2_sequence; struct v4l2_ctrl_mpeg2_picture; struct v4l2_ctrl_mpeg2_quantisation; struct v4l2_ctrl_vp9_compressed_hdr; struct v4l2_ctrl_vp9_frame; struct v4l2_ctrl_hevc_sps; struct v4l2_ctrl_hevc_pps; struct v4l2_ctrl_hevc_slice_params; struct v4l2_ctrl_hevc_scaling_matrix; struct v4l2_ctrl_hevc_decode_params; struct v4l2_ctrl_av1_sequence; struct v4l2_ctrl_av1_tile_group_entry; struct v4l2_ctrl_av1_frame; struct v4l2_ctrl_av1_film_grain; struct v4l2_ext_control { __u32 id; __u32 size; __u32 reserved2[1]; union { __s32 value; __s64 value64; char __attribute__((btf_type_tag("user"))) *string; __u8 __attribute__((btf_type_tag("user"))) *p_u8; __u16 __attribute__((btf_type_tag("user"))) *p_u16; __u32 __attribute__((btf_type_tag("user"))) *p_u32; __s32 __attribute__((btf_type_tag("user"))) *p_s32; __s64 __attribute__((btf_type_tag("user"))) *p_s64; struct v4l2_area __attribute__((btf_type_tag("user"))) *p_area; struct v4l2_ctrl_h264_sps __attribute__((btf_type_tag("user"))) *p_h264_sps; struct v4l2_ctrl_h264_pps *p_h264_pps; struct v4l2_ctrl_h264_scaling_matrix __attribute__((btf_type_tag("user"))) *p_h264_scaling_matrix; struct v4l2_ctrl_h264_pred_weights __attribute__((btf_type_tag("user"))) *p_h264_pred_weights; struct v4l2_ctrl_h264_slice_params __attribute__((btf_type_tag("user"))) *p_h264_slice_params; struct v4l2_ctrl_h264_decode_params __attribute__((btf_type_tag("user"))) *p_h264_decode_params; struct v4l2_ctrl_fwht_params __attribute__((btf_type_tag("user"))) *p_fwht_params; struct v4l2_ctrl_vp8_frame __attribute__((btf_type_tag("user"))) *p_vp8_frame; struct v4l2_ctrl_mpeg2_sequence __attribute__((btf_type_tag("user"))) *p_mpeg2_sequence; struct v4l2_ctrl_mpeg2_picture __attribute__((btf_type_tag("user"))) *p_mpeg2_picture; struct v4l2_ctrl_mpeg2_quantisation __attribute__((btf_type_tag("user"))) *p_mpeg2_quantisation; struct v4l2_ctrl_vp9_compressed_hdr __attribute__((btf_type_tag("user"))) *p_vp9_compressed_hdr_probs; struct v4l2_ctrl_vp9_frame __attribute__((btf_type_tag("user"))) *p_vp9_frame; struct v4l2_ctrl_hevc_sps __attribute__((btf_type_tag("user"))) *p_hevc_sps; struct v4l2_ctrl_hevc_pps __attribute__((btf_type_tag("user"))) *p_hevc_pps; struct v4l2_ctrl_hevc_slice_params __attribute__((btf_type_tag("user"))) *p_hevc_slice_params; struct v4l2_ctrl_hevc_scaling_matrix __attribute__((btf_type_tag("user"))) *p_hevc_scaling_matrix; struct v4l2_ctrl_hevc_decode_params __attribute__((btf_type_tag("user"))) *p_hevc_decode_params; struct v4l2_ctrl_av1_sequence __attribute__((btf_type_tag("user"))) *p_av1_sequence; struct v4l2_ctrl_av1_tile_group_entry __attribute__((btf_type_tag("user"))) *p_av1_tile_group_entry; struct v4l2_ctrl_av1_frame __attribute__((btf_type_tag("user"))) *p_av1_frame; struct v4l2_ctrl_av1_film_grain __attribute__((btf_type_tag("user"))) *p_av1_film_grain; void __attribute__((btf_type_tag("user"))) *ptr; }; } __attribute__((packed)); struct v4l2_area { __u32 width; __u32 height; }; struct v4l2_ctrl_h264_sps { __u8 profile_idc; __u8 constraint_set_flags; __u8 level_idc; __u8 seq_parameter_set_id; __u8 chroma_format_idc; __u8 bit_depth_luma_minus8; __u8 bit_depth_chroma_minus8; __u8 log2_max_frame_num_minus4; __u8 pic_order_cnt_type; __u8 log2_max_pic_order_cnt_lsb_minus4; __u8 max_num_ref_frames; __u8 num_ref_frames_in_pic_order_cnt_cycle; __s32 offset_for_ref_frame[255]; __s32 offset_for_non_ref_pic; __s32 offset_for_top_to_bottom_field; __u16 
pic_width_in_mbs_minus1; __u16 pic_height_in_map_units_minus1; __u32 flags; }; struct v4l2_ctrl_h264_pps { __u8 pic_parameter_set_id; __u8 seq_parameter_set_id; __u8 num_slice_groups_minus1; __u8 num_ref_idx_l0_default_active_minus1; __u8 num_ref_idx_l1_default_active_minus1; __u8 weighted_bipred_idc; __s8 pic_init_qp_minus26; __s8 pic_init_qs_minus26; __s8 chroma_qp_index_offset; __s8 second_chroma_qp_index_offset; __u16 flags; }; struct v4l2_ctrl_h264_scaling_matrix { __u8 scaling_list_4x4[96]; __u8 scaling_list_8x8[384]; }; struct v4l2_h264_weight_factors { __s16 luma_weight[32]; __s16 luma_offset[32]; __s16 chroma_weight[64]; __s16 chroma_offset[64]; }; struct v4l2_ctrl_h264_pred_weights { __u16 luma_log2_weight_denom; __u16 chroma_log2_weight_denom; struct v4l2_h264_weight_factors weight_factors[2]; }; struct v4l2_h264_reference { __u8 fields; __u8 index; }; struct v4l2_ctrl_h264_slice_params { __u32 header_bit_size; __u32 first_mb_in_slice; __u8 slice_type; __u8 colour_plane_id; __u8 redundant_pic_cnt; __u8 cabac_init_idc; __s8 slice_qp_delta; __s8 slice_qs_delta; __u8 disable_deblocking_filter_idc; __s8 slice_alpha_c0_offset_div2; __s8 slice_beta_offset_div2; __u8 num_ref_idx_l0_active_minus1; __u8 num_ref_idx_l1_active_minus1; __u8 reserved; struct v4l2_h264_reference ref_pic_list0[32]; struct v4l2_h264_reference ref_pic_list1[32]; __u32 flags; }; struct v4l2_h264_dpb_entry { __u64 reference_ts; __u32 pic_num; __u16 frame_num; __u8 fields; __u8 reserved[5]; __s32 top_field_order_cnt; __s32 bottom_field_order_cnt; __u32 flags; }; struct v4l2_ctrl_h264_decode_params { struct v4l2_h264_dpb_entry dpb[16]; __u16 nal_ref_idc; __u16 frame_num; __s32 top_field_order_cnt; __s32 bottom_field_order_cnt; __u16 idr_pic_id; __u16 pic_order_cnt_lsb; __s32 delta_pic_order_cnt_bottom; __s32 delta_pic_order_cnt0; __s32 delta_pic_order_cnt1; __u32 dec_ref_pic_marking_bit_size; __u32 pic_order_cnt_bit_size; __u32 slice_group_change_cycle; __u32 reserved; __u32 flags; }; struct v4l2_ctrl_fwht_params { __u64 backward_ref_ts; __u32 version; __u32 width; __u32 height; __u32 flags; __u32 colorspace; __u32 xfer_func; __u32 ycbcr_enc; __u32 quantization; }; struct v4l2_vp8_segment { __s8 quant_update[4]; __s8 lf_update[4]; __u8 segment_probs[3]; __u8 padding; __u32 flags; }; struct v4l2_vp8_loop_filter { __s8 ref_frm_delta[4]; __s8 mb_mode_delta[4]; __u8 sharpness_level; __u8 level; __u16 padding; __u32 flags; }; struct v4l2_vp8_quantization { __u8 y_ac_qi; __s8 y_dc_delta; __s8 y2_dc_delta; __s8 y2_ac_delta; __s8 uv_dc_delta; __s8 uv_ac_delta; __u16 padding; }; struct v4l2_vp8_entropy { __u8 coeff_probs[1056]; __u8 y_mode_probs[4]; __u8 uv_mode_probs[3]; __u8 mv_probs[38]; __u8 padding[3]; }; struct v4l2_vp8_entropy_coder_state { __u8 range; __u8 value; __u8 bit_count; __u8 padding; }; struct v4l2_ctrl_vp8_frame { struct v4l2_vp8_segment segment; struct v4l2_vp8_loop_filter lf; struct v4l2_vp8_quantization quant; struct v4l2_vp8_entropy entropy; struct v4l2_vp8_entropy_coder_state coder_state; __u16 width; __u16 height; __u8 horizontal_scale; __u8 vertical_scale; __u8 version; __u8 prob_skip_false; __u8 prob_intra; __u8 prob_last; __u8 prob_gf; __u8 num_dct_parts; __u32 first_part_size; __u32 first_part_header_bits; __u32 dct_part_sizes[8]; __u64 last_frame_ts; __u64 golden_frame_ts; __u64 alt_frame_ts; __u64 flags; }; struct v4l2_ctrl_mpeg2_sequence { __u16 horizontal_size; __u16 vertical_size; __u32 vbv_buffer_size; __u16 profile_and_level_indication; __u8 chroma_format; __u8 flags; }; struct 
v4l2_ctrl_mpeg2_picture { __u64 backward_ref_ts; __u64 forward_ref_ts; __u32 flags; __u8 f_code[4]; __u8 picture_coding_type; __u8 picture_structure; __u8 intra_dc_precision; __u8 reserved[5]; }; struct v4l2_ctrl_mpeg2_quantisation { __u8 intra_quantiser_matrix[64]; __u8 non_intra_quantiser_matrix[64]; __u8 chroma_intra_quantiser_matrix[64]; __u8 chroma_non_intra_quantiser_matrix[64]; }; struct v4l2_vp9_mv_probs { __u8 joint[3]; __u8 sign[2]; __u8 classes[20]; __u8 class0_bit[2]; __u8 bits[20]; __u8 class0_fr[12]; __u8 fr[6]; __u8 class0_hp[2]; __u8 hp[2]; }; struct v4l2_ctrl_vp9_compressed_hdr { __u8 tx_mode; __u8 tx8[2]; __u8 tx16[4]; __u8 tx32[6]; __u8 coef[1728]; __u8 skip[3]; __u8 inter_mode[21]; __u8 interp_filter[8]; __u8 is_inter[4]; __u8 comp_mode[5]; __u8 single_ref[10]; __u8 comp_ref[5]; __u8 y_mode[36]; __u8 uv_mode[90]; __u8 partition[48]; struct v4l2_vp9_mv_probs mv; }; struct v4l2_vp9_loop_filter { __s8 ref_deltas[4]; __s8 mode_deltas[2]; __u8 level; __u8 sharpness; __u8 flags; __u8 reserved[7]; }; struct v4l2_vp9_quantization { __u8 base_q_idx; __s8 delta_q_y_dc; __s8 delta_q_uv_dc; __s8 delta_q_uv_ac; __u8 reserved[4]; }; struct v4l2_vp9_segmentation { __s16 feature_data[32]; __u8 feature_enabled[8]; __u8 tree_probs[7]; __u8 pred_probs[3]; __u8 flags; __u8 reserved[5]; }; struct v4l2_ctrl_vp9_frame { struct v4l2_vp9_loop_filter lf; struct v4l2_vp9_quantization quant; struct v4l2_vp9_segmentation seg; __u32 flags; __u16 compressed_header_size; __u16 uncompressed_header_size; __u16 frame_width_minus_1; __u16 frame_height_minus_1; __u16 render_width_minus_1; __u16 render_height_minus_1; __u64 last_frame_ts; __u64 golden_frame_ts; __u64 alt_frame_ts; __u8 ref_frame_sign_bias; __u8 reset_frame_context; __u8 frame_context_idx; __u8 profile; __u8 bit_depth; __u8 interpolation_filter; __u8 tile_cols_log2; __u8 tile_rows_log2; __u8 reference_mode; __u8 reserved[7]; }; struct v4l2_ctrl_hevc_sps { __u8 video_parameter_set_id; __u8 seq_parameter_set_id; __u16 pic_width_in_luma_samples; __u16 pic_height_in_luma_samples; __u8 bit_depth_luma_minus8; __u8 bit_depth_chroma_minus8; __u8 log2_max_pic_order_cnt_lsb_minus4; __u8 sps_max_dec_pic_buffering_minus1; __u8 sps_max_num_reorder_pics; __u8 sps_max_latency_increase_plus1; __u8 log2_min_luma_coding_block_size_minus3; __u8 log2_diff_max_min_luma_coding_block_size; __u8 log2_min_luma_transform_block_size_minus2; __u8 log2_diff_max_min_luma_transform_block_size; __u8 max_transform_hierarchy_depth_inter; __u8 max_transform_hierarchy_depth_intra; __u8 pcm_sample_bit_depth_luma_minus1; __u8 pcm_sample_bit_depth_chroma_minus1; __u8 log2_min_pcm_luma_coding_block_size_minus3; __u8 log2_diff_max_min_pcm_luma_coding_block_size; __u8 num_short_term_ref_pic_sets; __u8 num_long_term_ref_pics_sps; __u8 chroma_format_idc; __u8 sps_max_sub_layers_minus1; __u8 reserved[6]; __u64 flags; }; struct v4l2_ctrl_hevc_pps { __u8 pic_parameter_set_id; __u8 num_extra_slice_header_bits; __u8 num_ref_idx_l0_default_active_minus1; __u8 num_ref_idx_l1_default_active_minus1; __s8 init_qp_minus26; __u8 diff_cu_qp_delta_depth; __s8 pps_cb_qp_offset; __s8 pps_cr_qp_offset; __u8 num_tile_columns_minus1; __u8 num_tile_rows_minus1; __u8 column_width_minus1[20]; __u8 row_height_minus1[22]; __s8 pps_beta_offset_div2; __s8 pps_tc_offset_div2; __u8 log2_parallel_merge_level_minus2; __u8 reserved; __u64 flags; }; struct v4l2_hevc_pred_weight_table { __s8 delta_luma_weight_l0[16]; __s8 luma_offset_l0[16]; __s8 delta_chroma_weight_l0[32]; __s8 chroma_offset_l0[32]; __s8 
delta_luma_weight_l1[16]; __s8 luma_offset_l1[16]; __s8 delta_chroma_weight_l1[32]; __s8 chroma_offset_l1[32]; __u8 luma_log2_weight_denom; __s8 delta_chroma_log2_weight_denom; }; struct v4l2_ctrl_hevc_slice_params { __u32 bit_size; __u32 data_byte_offset; __u32 num_entry_point_offsets; __u8 nal_unit_type; __u8 nuh_temporal_id_plus1; __u8 slice_type; __u8 colour_plane_id; __s32 slice_pic_order_cnt; __u8 num_ref_idx_l0_active_minus1; __u8 num_ref_idx_l1_active_minus1; __u8 collocated_ref_idx; __u8 five_minus_max_num_merge_cand; __s8 slice_qp_delta; __s8 slice_cb_qp_offset; __s8 slice_cr_qp_offset; __s8 slice_act_y_qp_offset; __s8 slice_act_cb_qp_offset; __s8 slice_act_cr_qp_offset; __s8 slice_beta_offset_div2; __s8 slice_tc_offset_div2; __u8 pic_struct; __u8 reserved0[3]; __u32 slice_segment_addr; __u8 ref_idx_l0[16]; __u8 ref_idx_l1[16]; __u16 short_term_ref_pic_set_size; __u16 long_term_ref_pic_set_size; struct v4l2_hevc_pred_weight_table pred_weight_table; __u8 reserved1[2]; __u64 flags; }; struct v4l2_ctrl_hevc_scaling_matrix { __u8 scaling_list_4x4[96]; __u8 scaling_list_8x8[384]; __u8 scaling_list_16x16[384]; __u8 scaling_list_32x32[128]; __u8 scaling_list_dc_coef_16x16[6]; __u8 scaling_list_dc_coef_32x32[2]; }; struct v4l2_hevc_dpb_entry { __u64 timestamp; __u8 flags; __u8 field_pic; __u16 reserved; __s32 pic_order_cnt_val; }; struct v4l2_ctrl_hevc_decode_params { __s32 pic_order_cnt_val; __u16 short_term_ref_pic_set_size; __u16 long_term_ref_pic_set_size; __u8 num_active_dpb_entries; __u8 num_poc_st_curr_before; __u8 num_poc_st_curr_after; __u8 num_poc_lt_curr; __u8 poc_st_curr_before[16]; __u8 poc_st_curr_after[16]; __u8 poc_lt_curr[16]; __u8 num_delta_pocs_of_ref_rps_idx; __u8 reserved[3]; struct v4l2_hevc_dpb_entry dpb[16]; __u64 flags; }; struct v4l2_ctrl_av1_sequence { __u32 flags; __u8 seq_profile; __u8 order_hint_bits; __u8 bit_depth; __u8 reserved; __u16 max_frame_width_minus_1; __u16 max_frame_height_minus_1; }; struct v4l2_ctrl_av1_tile_group_entry { __u32 tile_offset; __u32 tile_size; __u32 tile_row; __u32 tile_col; }; struct v4l2_av1_tile_info { __u8 flags; __u8 context_update_tile_id; __u8 tile_cols; __u8 tile_rows; __u32 mi_col_starts[65]; __u32 mi_row_starts[65]; __u32 width_in_sbs_minus_1[64]; __u32 height_in_sbs_minus_1[64]; __u8 tile_size_bytes; __u8 reserved[3]; }; struct v4l2_av1_quantization { __u8 flags; __u8 base_q_idx; __s8 delta_q_y_dc; __s8 delta_q_u_dc; __s8 delta_q_u_ac; __s8 delta_q_v_dc; __s8 delta_q_v_ac; __u8 qm_y; __u8 qm_u; __u8 qm_v; __u8 delta_q_res; }; struct v4l2_av1_segmentation { __u8 flags; __u8 last_active_seg_id; __u8 feature_enabled[8]; __s16 feature_data[64]; }; struct v4l2_av1_loop_filter { __u8 flags; __u8 level[4]; __u8 sharpness; __s8 ref_deltas[8]; __s8 mode_deltas[2]; __u8 delta_lf_res; }; struct v4l2_av1_cdef { __u8 damping_minus_3; __u8 bits; __u8 y_pri_strength[8]; __u8 y_sec_strength[8]; __u8 uv_pri_strength[8]; __u8 uv_sec_strength[8]; }; enum v4l2_av1_frame_restoration_type { V4L2_AV1_FRAME_RESTORE_NONE = 0, V4L2_AV1_FRAME_RESTORE_WIENER = 1, V4L2_AV1_FRAME_RESTORE_SGRPROJ = 2, V4L2_AV1_FRAME_RESTORE_SWITCHABLE = 3, }; struct v4l2_av1_loop_restoration { __u8 flags; __u8 lr_unit_shift; __u8 lr_uv_shift; __u8 reserved; enum v4l2_av1_frame_restoration_type frame_restoration_type[3]; __u32 loop_restoration_size[3]; }; enum v4l2_av1_warp_model { V4L2_AV1_WARP_MODEL_IDENTITY = 0, V4L2_AV1_WARP_MODEL_TRANSLATION = 1, V4L2_AV1_WARP_MODEL_ROTZOOM = 2, V4L2_AV1_WARP_MODEL_AFFINE = 3, }; struct v4l2_av1_global_motion { __u8 flags[8]; 
enum v4l2_av1_warp_model type[8]; __s32 params[48]; __u8 invalid; __u8 reserved[3]; }; enum v4l2_av1_frame_type { V4L2_AV1_KEY_FRAME = 0, V4L2_AV1_INTER_FRAME = 1, V4L2_AV1_INTRA_ONLY_FRAME = 2, V4L2_AV1_SWITCH_FRAME = 3, }; enum v4l2_av1_interpolation_filter { V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP = 0, V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SMOOTH = 1, V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SHARP = 2, V4L2_AV1_INTERPOLATION_FILTER_BILINEAR = 3, V4L2_AV1_INTERPOLATION_FILTER_SWITCHABLE = 4, }; enum v4l2_av1_tx_mode { V4L2_AV1_TX_MODE_ONLY_4X4 = 0, V4L2_AV1_TX_MODE_LARGEST = 1, V4L2_AV1_TX_MODE_SELECT = 2, }; struct v4l2_ctrl_av1_frame { struct v4l2_av1_tile_info tile_info; struct v4l2_av1_quantization quantization; __u8 superres_denom; struct v4l2_av1_segmentation segmentation; struct v4l2_av1_loop_filter loop_filter; struct v4l2_av1_cdef cdef; __u8 skip_mode_frame[2]; __u8 primary_ref_frame; struct v4l2_av1_loop_restoration loop_restoration; struct v4l2_av1_global_motion global_motion; __u32 flags; enum v4l2_av1_frame_type frame_type; __u32 order_hint; __u32 upscaled_width; enum v4l2_av1_interpolation_filter interpolation_filter; enum v4l2_av1_tx_mode tx_mode; __u32 frame_width_minus_1; __u32 frame_height_minus_1; __u16 render_width_minus_1; __u16 render_height_minus_1; __u32 current_frame_id; __u32 buffer_removal_time[32]; __u8 reserved[4]; __u32 order_hints[8]; __u64 reference_frame_ts[8]; __s8 ref_frame_idx[7]; __u8 refresh_frame_flags; }; struct v4l2_ctrl_av1_film_grain { __u8 flags; __u8 cr_mult; __u16 grain_seed; __u8 film_grain_params_ref_idx; __u8 num_y_points; __u8 point_y_value[16]; __u8 point_y_scaling[16]; __u8 num_cb_points; __u8 point_cb_value[16]; __u8 point_cb_scaling[16]; __u8 num_cr_points; __u8 point_cr_value[16]; __u8 point_cr_scaling[16]; __u8 grain_scaling_minus_8; __u8 ar_coeff_lag; __u8 ar_coeffs_y_plus_128[25]; __u8 ar_coeffs_cb_plus_128[25]; __u8 ar_coeffs_cr_plus_128[25]; __u8 ar_coeff_shift_minus_6; __u8 grain_scale_shift; __u8 cb_mult; __u8 cb_luma_mult; __u8 cr_luma_mult; __u16 cb_offset; __u16 cr_offset; __u8 reserved[4]; }; struct v4l2_querymenu { __u32 id; __u32 index; union { __u8 name[32]; __s64 value; }; __u32 reserved; } __attribute__((packed)); struct v4l2_audio { __u32 index; __u8 name[32]; __u32 capability; __u32 mode; __u32 reserved[2]; }; struct v4l2_audioout { __u32 index; __u8 name[32]; __u32 capability; __u32 mode; __u32 reserved[2]; }; struct v4l2_selection { __u32 type; __u32 target; __u32 flags; struct v4l2_rect r; __u32 reserved[9]; }; struct v4l2_jpegcompression { int quality; int APPn; int APP_len; char APP_data[60]; int COM_len; char COM_data[60]; __u32 jpeg_markers; }; struct v4l2_enc_idx_entry { __u64 offset; __u64 pts; __u32 length; __u32 flags; __u32 reserved[2]; }; struct v4l2_enc_idx { __u32 entries; __u32 entries_cap; __u32 reserved[4]; struct v4l2_enc_idx_entry entry[64]; }; struct v4l2_encoder_cmd { __u32 cmd; __u32 flags; union { struct { __u32 data[8]; } raw; }; }; struct v4l2_decoder_cmd { __u32 cmd; __u32 flags; union { struct { __u64 pts; } stop; struct { __s32 speed; __u32 format; } start; struct { __u32 data[16]; } raw; }; }; struct v4l2_captureparm { __u32 capability; __u32 capturemode; struct v4l2_fract timeperframe; __u32 extendedmode; __u32 readbuffers; __u32 reserved[4]; }; struct v4l2_outputparm { __u32 capability; __u32 outputmode; struct v4l2_fract timeperframe; __u32 extendedmode; __u32 writebuffers; __u32 reserved[4]; }; struct v4l2_streamparm { __u32 type; union { struct v4l2_captureparm capture; struct 
v4l2_outputparm output; __u8 raw_data[200]; } parm; }; struct v4l2_hw_freq_seek { __u32 tuner; __u32 type; __u32 seek_upward; __u32 wrap_around; __u32 spacing; __u32 rangelow; __u32 rangehigh; __u32 reserved[5]; }; struct v4l2_frmsize_discrete { __u32 width; __u32 height; }; struct v4l2_frmsize_stepwise { __u32 min_width; __u32 max_width; __u32 step_width; __u32 min_height; __u32 max_height; __u32 step_height; }; struct v4l2_frmsizeenum { __u32 index; __u32 pixel_format; __u32 type; union { struct v4l2_frmsize_discrete discrete; struct v4l2_frmsize_stepwise stepwise; }; __u32 reserved[2]; }; struct v4l2_frmival_stepwise { struct v4l2_fract min; struct v4l2_fract max; struct v4l2_fract step; }; struct v4l2_frmivalenum { __u32 index; __u32 pixel_format; __u32 width; __u32 height; __u32 type; union { struct v4l2_fract discrete; struct v4l2_frmival_stepwise stepwise; }; __u32 reserved[2]; }; enum uvcg_format_type { UVCG_UNCOMPRESSED = 0, UVCG_MJPEG = 1, }; enum v4l2_colorspace { V4L2_COLORSPACE_DEFAULT = 0, V4L2_COLORSPACE_SMPTE170M = 1, V4L2_COLORSPACE_SMPTE240M = 2, V4L2_COLORSPACE_REC709 = 3, V4L2_COLORSPACE_BT878 = 4, V4L2_COLORSPACE_470_SYSTEM_M = 5, V4L2_COLORSPACE_470_SYSTEM_BG = 6, V4L2_COLORSPACE_JPEG = 7, V4L2_COLORSPACE_SRGB = 8, V4L2_COLORSPACE_OPRGB = 9, V4L2_COLORSPACE_BT2020 = 10, V4L2_COLORSPACE_RAW = 11, V4L2_COLORSPACE_DCI_P3 = 12, V4L2_COLORSPACE_LAST = 13, }; enum v4l2_frmsizetypes { V4L2_FRMSIZE_TYPE_DISCRETE = 1, V4L2_FRMSIZE_TYPE_CONTINUOUS = 2, V4L2_FRMSIZE_TYPE_STEPWISE = 3, }; enum v4l2_frmivaltypes { V4L2_FRMIVAL_TYPE_DISCRETE = 1, V4L2_FRMIVAL_TYPE_CONTINUOUS = 2, V4L2_FRMIVAL_TYPE_STEPWISE = 3, }; struct uvcg_format; struct uvcg_format_ptr { struct uvcg_format *fmt; struct list_head entry; }; struct uvcg_color_matching; struct uvcg_format { struct config_group group; enum uvcg_format_type type; unsigned int linked; struct list_head frames; unsigned int num_frames; __u8 bmaControls[1]; struct uvcg_color_matching *color_matching; }; struct uvc_color_matching_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __u8 bColorPrimaries; __u8 bTransferCharacteristics; __u8 bMatrixCoefficients; }; struct uvcg_color_matching { struct config_group group; struct uvc_color_matching_descriptor desc; unsigned int refcnt; }; struct uvc_format_uncompressed { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __u8 bFormatIndex; __u8 bNumFrameDescriptors; __u8 guidFormat[16]; __u8 bBitsPerPixel; __u8 bDefaultFrameIndex; __u8 bAspectRatioX; __u8 bAspectRatioY; __u8 bmInterlaceFlags; __u8 bCopyProtect; }; struct uvcg_uncompressed { struct uvcg_format fmt; struct uvc_format_uncompressed desc; }; struct uvcg_frame; struct uvcg_frame_ptr { struct uvcg_frame *frm; struct list_head entry; }; struct uvcg_frame { struct config_item item; enum uvcg_format_type fmt_type; struct { u8 b_length; u8 b_descriptor_type; u8 b_descriptor_subtype; u8 b_frame_index; u8 bm_capabilities; u16 w_width; u16 w_height; u32 dw_min_bit_rate; u32 dw_max_bit_rate; u32 dw_max_video_frame_buffer_size; u32 dw_default_frame_interval; u8 b_frame_interval_type; } __attribute__((packed)) frame; u32 *dw_frame_interval; }; struct uvc_file_handle { struct v4l2_fh vfh; struct uvc_video *device; bool is_uvc_app_handle; }; struct uvc_format_desc { u8 guid[16]; u32 fcc; }; struct v4l2_subscribed_event; struct v4l2_subscribed_event_ops { int (*add)(struct v4l2_subscribed_event *, unsigned int); void (*del)(struct v4l2_subscribed_event *); void (*replace)(struct v4l2_event *, const struct v4l2_event 
*); void (*merge)(const struct v4l2_event *, struct v4l2_event *); }; struct v4l2_kevent { struct list_head list; struct v4l2_subscribed_event *sev; struct v4l2_event event; u64 ts; }; struct v4l2_subscribed_event { struct list_head list; u32 type; u32 id; u32 flags; struct v4l2_fh *fh; struct list_head node; const struct v4l2_subscribed_event_ops *ops; unsigned int elems; unsigned int first; unsigned int in_use; struct v4l2_kevent events[0]; }; struct uvc_request { struct usb_request *req; u8 *req_buffer; struct uvc_video *video; struct sg_table sgt; u8 header[12]; struct uvc_buffer *last_buf; struct list_head list; }; struct uvcg_config_group_type { struct config_item_type type; const char *name; const struct uvcg_config_group_type **children; int (*create_children)(struct config_group *); }; enum uvc_color_primaries_values { UVC_COLOR_PRIMARIES_UNSPECIFIED = 0, UVC_COLOR_PRIMARIES_BT_709_SRGB = 1, UVC_COLOR_PRIMARIES_BT_470_2_M = 2, UVC_COLOR_PRIMARIES_BT_470_2_B_G = 3, UVC_COLOR_PRIMARIES_SMPTE_170M = 4, UVC_COLOR_PRIMARIES_SMPTE_240M = 5, }; enum uvc_transfer_characteristics_values { UVC_TRANSFER_CHARACTERISTICS_UNSPECIFIED = 0, UVC_TRANSFER_CHARACTERISTICS_BT_709 = 1, UVC_TRANSFER_CHARACTERISTICS_BT_470_2_M = 2, UVC_TRANSFER_CHARACTERISTICS_BT_470_2_B_G = 3, UVC_TRANSFER_CHARACTERISTICS_SMPTE_170M = 4, UVC_TRANSFER_CHARACTERISTICS_SMPTE_240M = 5, UVC_TRANSFER_CHARACTERISTICS_LINEAR = 6, UVC_TRANSFER_CHARACTERISTICS_SRGB = 7, }; enum uvc_matrix_coefficients { UVC_MATRIX_COEFFICIENTS_UNSPECIFIED = 0, UVC_MATRIX_COEFFICIENTS_BT_709 = 1, UVC_MATRIX_COEFFICIENTS_FCC = 2, UVC_MATRIX_COEFFICIENTS_BT_470_2_B_G = 3, UVC_MATRIX_COEFFICIENTS_SMPTE_170M = 4, UVC_MATRIX_COEFFICIENTS_SMPTE_240M = 5, }; enum uvcg_strm_type { UVCG_HEADER = 0, UVCG_FORMAT = 1, UVCG_FRAME = 2, UVCG_COLOR_MATCHING = 3, }; struct uvc_header_descriptor_1 { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __le16 bcdUVC; __le16 wTotalLength; __le32 dwClockFrequency; __u8 bInCollection; __u8 baInterfaceNr[1]; } __attribute__((packed)); struct uvcg_control_header { struct config_item item; struct uvc_header_descriptor_1 desc; unsigned int linked; }; struct uvcg_control_class_group { struct config_group group; const char *name; }; struct uvc_format_mjpeg { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubType; __u8 bFormatIndex; __u8 bNumFrameDescriptors; __u8 bmFlags; __u8 bDefaultFrameIndex; __u8 bAspectRatioX; __u8 bAspectRatioY; __u8 bmInterlaceFlags; __u8 bCopyProtect; }; struct uvcg_mjpeg { struct uvcg_format fmt; struct uvc_format_mjpeg desc; }; struct uvcg_streaming_class_group { struct config_group group; const char *name; }; struct snd_device; struct snd_device_ops { int (*dev_free)(struct snd_device *); int (*dev_register)(struct snd_device *); int (*dev_disconnect)(struct snd_device *); u64 android_kabi_reserved1; }; enum snd_device_state { SNDRV_DEV_BUILD = 0, SNDRV_DEV_REGISTERED = 1, SNDRV_DEV_DISCONNECTED = 2, }; enum snd_device_type { SNDRV_DEV_LOWLEVEL = 0, SNDRV_DEV_INFO = 1, SNDRV_DEV_BUS = 2, SNDRV_DEV_CODEC = 3, SNDRV_DEV_PCM = 4, SNDRV_DEV_COMPRESS = 5, SNDRV_DEV_RAWMIDI = 6, SNDRV_DEV_TIMER = 7, SNDRV_DEV_SEQUENCER = 8, SNDRV_DEV_HWDEP = 9, SNDRV_DEV_JACK = 10, SNDRV_DEV_CONTROL = 11, }; struct snd_device { struct list_head list; struct snd_card *card; enum snd_device_state state; enum snd_device_type type; void *device_data; const struct snd_device_ops *ops; u64 android_kabi_reserved1; }; struct snd_info_buffer; struct snd_info_entry_text { void (*read)(struct snd_info_entry *, 
struct snd_info_buffer *); void (*write)(struct snd_info_entry *, struct snd_info_buffer *); }; struct snd_info_entry_ops; struct snd_info_entry { const char *name; umode_t mode; long size; unsigned short content; union { struct snd_info_entry_text text; const struct snd_info_entry_ops *ops; } c; struct snd_info_entry *parent; struct module *module; void *private_data; void (*private_free)(struct snd_info_entry *); struct proc_dir_entry *p; struct mutex access; struct list_head children; struct list_head list; u64 android_kabi_reserved1; }; struct snd_info_buffer { char *buffer; unsigned int curr; unsigned int size; unsigned int len; int stop; int error; }; struct snd_info_entry_ops { int (*open)(struct snd_info_entry *, unsigned short, void **); int (*release)(struct snd_info_entry *, unsigned short, void *); ssize_t (*read)(struct snd_info_entry *, void *, struct file *, char __attribute__((btf_type_tag("user"))) *, size_t, loff_t); ssize_t (*write)(struct snd_info_entry *, void *, struct file *, const char __attribute__((btf_type_tag("user"))) *, size_t, loff_t); loff_t (*llseek)(struct snd_info_entry *, void *, struct file *, loff_t, int); __poll_t (*poll)(struct snd_info_entry *, void *, struct file *, poll_table *); int (*ioctl)(struct snd_info_entry *, void *, struct file *, unsigned int, unsigned long); int (*mmap)(struct snd_info_entry *, void *, struct inode *, struct file *, struct vm_area_struct *); u64 android_kabi_reserved1; }; struct snd_rawmidi_substream; struct snd_rawmidi_ops { int (*open)(struct snd_rawmidi_substream *); int (*close)(struct snd_rawmidi_substream *); void (*trigger)(struct snd_rawmidi_substream *, int); void (*drain)(struct snd_rawmidi_substream *); }; struct snd_rawmidi; struct snd_rawmidi_str; struct snd_rawmidi_runtime; struct snd_rawmidi_substream { struct list_head list; int stream; int number; bool opened; bool append; bool active_sensing; unsigned int framing; unsigned int clock_type; int use_count; size_t bytes; spinlock_t lock; struct snd_rawmidi *rmidi; struct snd_rawmidi_str *pstr; char name[32]; struct snd_rawmidi_runtime *runtime; struct pid *pid; const struct snd_rawmidi_ops *ops; }; struct snd_rawmidi_str { unsigned int substream_count; unsigned int substream_opened; struct list_head substreams; }; struct snd_rawmidi_global_ops; struct snd_rawmidi { struct snd_card *card; struct list_head list; unsigned int device; unsigned int info_flags; char id[64]; char name[80]; const struct snd_rawmidi_global_ops *ops; struct snd_rawmidi_str streams[2]; void *private_data; void (*private_free)(struct snd_rawmidi *); struct mutex open_mutex; wait_queue_head_t open_wait; struct device *dev; struct snd_info_entry *proc_entry; }; struct snd_seq_port_info; struct snd_rawmidi_global_ops { int (*dev_register)(struct snd_rawmidi *); int (*dev_unregister)(struct snd_rawmidi *); void (*get_port_info)(struct snd_rawmidi *, int, struct snd_seq_port_info *); long (*ioctl)(struct snd_rawmidi *, unsigned int, void __attribute__((btf_type_tag("user"))) *); void (*proc_read)(struct snd_info_entry *, struct snd_info_buffer *); }; struct snd_rawmidi_runtime { struct snd_rawmidi_substream *substream; unsigned int drain: 1; unsigned int oss: 1; unsigned char *buffer; size_t buffer_size; size_t appl_ptr; size_t hw_ptr; size_t avail_min; size_t avail; size_t xruns; size_t align; int buffer_ref; wait_queue_head_t sleep; void (*event)(struct snd_rawmidi_substream *); struct work_struct event_work; void *private_data; void (*private_free)(struct snd_rawmidi_substream *); }; 
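/*
 * Illustrative sketch only, not part of the generated type dump: one way a
 * BPF program that includes this header could consume the snd_rawmidi types
 * defined above through CO-RE field reads. The attach point
 * (kprobe on snd_rawmidi_receive) and the libbpf helper headers
 * (<bpf/bpf_helpers.h>, <bpf/bpf_core_read.h>, <bpf/bpf_tracing.h>) are
 * assumptions about a separate consuming .bpf.c file, not requirements of
 * this header; the snippet is kept out of any build with #if 0.
 */
#if 0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kprobe/snd_rawmidi_receive")
int BPF_KPROBE(on_rawmidi_receive, struct snd_rawmidi_substream *sub)
{
	char name[32];
	int stream;

	/* BPF_CORE_READ emits CO-RE relocations, so these field offsets are
	 * resolved against the running kernel's BTF rather than this header. */
	stream = BPF_CORE_READ(sub, stream);
	BPF_CORE_READ_STR_INTO(&name, sub, name);
	bpf_printk("rawmidi substream %d (%s)", stream, name);
	return 0;
}
#endif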
struct uac1_ac_header_descriptor_1 { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __le16 bcdADC; __le16 wTotalLength; __u8 bInCollection; __u8 baInterfaceNr[1]; } __attribute__((packed)); struct usb_ms_header_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __le16 bcdMSC; __le16 wTotalLength; } __attribute__((packed)); struct usb_ms_endpoint_descriptor_16 { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bNumEmbMIDIJack; __u8 baAssocJackID[16]; }; enum { STATE_INITIAL = 0, STATE_1PARAM = 1, STATE_2PARAM_1 = 2, STATE_2PARAM_2 = 3, STATE_SYSEX_0 = 4, STATE_SYSEX_1 = 5, STATE_SYSEX_2 = 6, STATE_REAL_TIME = 7, STATE_FINISHED = 8, }; enum { SNDRV_RAWMIDI_STREAM_OUTPUT = 0, SNDRV_RAWMIDI_STREAM_INPUT = 1, SNDRV_RAWMIDI_STREAM_LAST = 1, }; struct f_midi_info { struct device *dev; int card_number; u32 rmidi_device; bool configured; spinlock_t lock; }; struct f_midi_opts { struct usb_function_instance func_inst; int index; char *id; bool id_allocated; unsigned int in_ports; unsigned int out_ports; unsigned int buflen; unsigned int qlen; struct mutex lock; int refcnt; struct f_midi_info android_midi_info; }; struct gmidi_in_port { struct snd_rawmidi_substream *substream; int active; uint8_t cable; uint8_t state; uint8_t data[2]; }; struct f_midi { struct usb_function func; struct usb_gadget *gadget; struct usb_ep *in_ep; struct usb_ep *out_ep; struct snd_card *card; struct snd_rawmidi *rmidi; u8 ms_id; struct snd_rawmidi_substream *out_substream[16]; unsigned long out_triggered; struct work_struct work; unsigned int in_ports; unsigned int out_ports; int index; char *id; unsigned int buflen; unsigned int qlen; struct { union { struct __kfifo kfifo; struct usb_request **type; const struct usb_request **const_type; char (*rectype)[0]; struct usb_request **ptr; struct usb_request * const *ptr_const; }; struct usb_request *buf[0]; } in_req_fifo; spinlock_t transmit_lock; unsigned int in_last_port; unsigned char free_ref; struct gmidi_in_port in_ports_array[0]; }; struct usb_midi_in_jack_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bJackType; __u8 bJackID; __u8 iJack; }; struct usb_midi_source_pin { __u8 baSourceID; __u8 baSourcePin; }; struct usb_midi_out_jack_descriptor_1 { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bJackType; __u8 bJackID; __u8 bNrInputPins; struct usb_midi_source_pin pins[1]; __u8 iJack; }; struct hid_class_descriptor { __u8 bDescriptorType; __le16 wDescriptorLength; } __attribute__((packed)); struct hid_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 bcdHID; __u8 bCountryCode; __u8 bNumDescriptors; struct hid_class_descriptor desc[1]; } __attribute__((packed)); struct f_hid_opts { struct usb_function_instance func_inst; int minor; unsigned char subclass; unsigned char protocol; unsigned char no_out_endpoint; unsigned short report_length; unsigned short report_desc_length; unsigned char *report_desc; bool report_desc_alloc; struct mutex lock; int refcnt; }; struct f_hidg { unsigned char bInterfaceSubClass; unsigned char bInterfaceProtocol; unsigned char protocol; unsigned char idle; unsigned short report_desc_length; char *report_desc; unsigned short report_length; bool use_out_ep; spinlock_t read_spinlock; wait_queue_head_t read_queue; struct list_head completed_out_req; unsigned int qlen; char *set_report_buf; unsigned int set_report_length; spinlock_t write_spinlock; bool write_pending; wait_queue_head_t write_queue; struct usb_request *req; struct device dev; 
struct cdev cdev; struct usb_function func; struct usb_ep *in_ep; struct usb_ep *out_ep; }; struct f_hidg_req_list { struct usb_request *req; unsigned int pos; struct list_head list; }; struct acc_dev { struct usb_function function; struct usb_composite_dev *cdev; spinlock_t lock; struct kref kref; struct usb_ep *ep_in; struct usb_ep *ep_out; bool online; bool disconnected; char manufacturer[256]; char model[256]; char description[256]; char version[256]; char uri[256]; char serial[256]; int string_index; int start_requested; int audio_mode; struct list_head tx_idle; wait_queue_head_t read_wq; wait_queue_head_t write_wq; struct usb_request *rx_req[2]; int rx_done; struct delayed_work start_work; struct work_struct getprotocol_work; struct work_struct sendstring_work; struct work_struct hid_work; struct list_head hid_list; struct list_head new_hid_list; struct list_head dead_hid_list; }; struct hid_device; struct hid_report; struct hid_ll_driver { int (*start)(struct hid_device *); void (*stop)(struct hid_device *); int (*open)(struct hid_device *); void (*close)(struct hid_device *); int (*power)(struct hid_device *, int); int (*parse)(struct hid_device *); void (*request)(struct hid_device *, struct hid_report *, int); int (*wait)(struct hid_device *); int (*raw_request)(struct hid_device *, unsigned char, __u8 *, size_t, unsigned char, int); int (*output_report)(struct hid_device *, __u8 *, size_t); int (*idle)(struct hid_device *, int, int, int); bool (*may_wakeup)(struct hid_device *); unsigned int max_buffer_size; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; enum hid_type { HID_TYPE_OTHER = 0, HID_TYPE_USBMOUSE = 1, HID_TYPE_USBNONE = 2, }; struct hid_report_enum { unsigned int numbered; struct list_head report_list; struct hid_report *report_id_hash[256]; }; enum hid_battery_status { HID_BATTERY_UNKNOWN = 0, HID_BATTERY_QUERIED = 1, HID_BATTERY_REPORTED = 2, }; struct hid_bpf_prog_list; struct hid_bpf { u8 *device_data; u32 allocated_data; struct hid_bpf_prog_list __attribute__((btf_type_tag("rcu"))) *progs[2]; bool destroyed; spinlock_t progs_lock; }; struct hid_collection; struct hid_driver; struct hid_field; struct hid_usage; struct hid_device { __u8 *dev_rdesc; unsigned int dev_rsize; __u8 *rdesc; unsigned int rsize; struct hid_collection *collection; unsigned int collection_size; unsigned int maxcollection; unsigned int maxapplication; __u16 bus; __u16 group; __u32 vendor; __u32 product; __u32 version; enum hid_type type; unsigned int country; struct hid_report_enum report_enum[3]; struct work_struct led_work; struct semaphore driver_input_lock; struct device dev; struct hid_driver *driver; void *devres_group_id; const struct hid_ll_driver *ll_driver; struct mutex ll_open_lock; unsigned int ll_open_count; struct power_supply *battery; __s32 battery_capacity; __s32 battery_min; __s32 battery_max; __s32 battery_report_type; __s32 battery_report_id; __s32 battery_charge_status; enum hid_battery_status battery_status; bool battery_avoid_query; ktime_t battery_ratelimit_time; unsigned long status; unsigned int claimed; unsigned int quirks; unsigned int initial_quirks; bool io_started; struct list_head inputs; void *hiddev; void *hidraw; char name[128]; char phys[64]; char uniq[64]; void *driver_data; int (*ff_init)(struct hid_device *); int (*hiddev_connect)(struct hid_device *, unsigned int); void (*hiddev_disconnect)(struct hid_device *); void (*hiddev_hid_event)(struct hid_device *, struct hid_field *, struct hid_usage *, __s32); void (*hiddev_report_event)(struct 
hid_device *, struct hid_report *); unsigned short debug; struct dentry *debug_dir; struct dentry *debug_rdesc; struct dentry *debug_events; struct list_head debug_list; spinlock_t debug_list_lock; wait_queue_head_t debug_wait; struct kref ref; unsigned int id; struct hid_bpf bpf; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct hid_collection { int parent_idx; unsigned int type; unsigned int usage; unsigned int level; }; enum hid_report_type { HID_INPUT_REPORT = 0, HID_OUTPUT_REPORT = 1, HID_FEATURE_REPORT = 2, HID_REPORT_TYPES = 3, }; struct hid_field_entry; struct hid_report { struct list_head list; struct list_head hidinput_list; struct list_head field_entry_list; unsigned int id; enum hid_report_type type; unsigned int application; struct hid_field *field[256]; struct hid_field_entry *field_entries; unsigned int maxfield; unsigned int size; struct hid_device *device; bool tool_active; unsigned int tool; u64 android_kabi_reserved1; }; struct hid_input; struct hid_field { unsigned int physical; unsigned int logical; unsigned int application; struct hid_usage *usage; unsigned int maxusage; unsigned int flags; unsigned int report_offset; unsigned int report_size; unsigned int report_count; unsigned int report_type; __s32 *value; __s32 *new_value; __s32 *usages_priorities; __s32 logical_minimum; __s32 logical_maximum; __s32 physical_minimum; __s32 physical_maximum; __s32 unit_exponent; unsigned int unit; bool ignored; struct hid_report *report; unsigned int index; struct hid_input *hidinput; __u16 dpad; unsigned int slot_idx; }; struct hid_usage { unsigned int hid; unsigned int collection_index; unsigned int usage_index; __s8 resolution_multiplier; __s8 wheel_factor; __u16 code; __u8 type; __s8 hat_min; __s8 hat_max; __s8 hat_dir; __s16 wheel_accumulated; }; struct hid_input { struct list_head list; struct hid_report *report; struct input_dev *input; const char *name; struct list_head reports; unsigned int application; bool registered; u64 android_kabi_reserved1; }; struct hid_field_entry { struct list_head list; struct hid_field *field; unsigned int index; __s32 priority; }; struct hid_device_id; struct hid_report_id; struct hid_usage_id; struct hid_driver { char *name; const struct hid_device_id *id_table; struct list_head dyn_list; spinlock_t dyn_lock; bool (*match)(struct hid_device *, bool); int (*probe)(struct hid_device *, const struct hid_device_id *); void (*remove)(struct hid_device *); const struct hid_report_id *report_table; int (*raw_event)(struct hid_device *, struct hid_report *, u8 *, int); const struct hid_usage_id *usage_table; int (*event)(struct hid_device *, struct hid_field *, struct hid_usage *, __s32); void (*report)(struct hid_device *, struct hid_report *); __u8 * (*report_fixup)(struct hid_device *, __u8 *, unsigned int *); int (*input_mapping)(struct hid_device *, struct hid_input *, struct hid_field *, struct hid_usage *, unsigned long **, int *); int (*input_mapped)(struct hid_device *, struct hid_input *, struct hid_field *, struct hid_usage *, unsigned long **, int *); int (*input_configured)(struct hid_device *, struct hid_input *); void (*feature_mapping)(struct hid_device *, struct hid_field *, struct hid_usage *); int (*suspend)(struct hid_device *, pm_message_t); int (*resume)(struct hid_device *); int (*reset_resume)(struct hid_device *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; struct device_driver driver; }; struct hid_device_id { __u16 bus; __u16 group; __u32 vendor; __u32 product; 
kernel_ulong_t driver_data; }; struct hid_report_id { __u32 report_type; }; struct hid_usage_id { __u32 usage_hid; __u32 usage_type; __u32 usage_code; }; struct hid_bpf_prog_list { u16 prog_idx[64]; u8 prog_cnt; }; struct acc_hid_dev { struct list_head list; struct hid_device *hid; struct acc_dev *dev; int id; u8 *report_desc; int report_desc_len; int report_desc_offset; }; struct acc_instance { struct usb_function_instance func_inst; const char *name; }; enum typec_data_role { TYPEC_DEVICE = 0, TYPEC_HOST = 1, }; enum typec_role { TYPEC_SINK = 0, TYPEC_SOURCE = 1, }; enum typec_pwr_opmode { TYPEC_PWR_MODE_USB = 0, TYPEC_PWR_MODE_1_5A = 1, TYPEC_PWR_MODE_3_0A = 2, TYPEC_PWR_MODE_PD = 3, }; enum typec_port_type { TYPEC_PORT_SRC = 0, TYPEC_PORT_SNK = 1, TYPEC_PORT_DRP = 2, }; enum typec_orientation { TYPEC_ORIENTATION_NONE = 0, TYPEC_ORIENTATION_NORMAL = 1, TYPEC_ORIENTATION_REVERSE = 2, }; enum typec_port_data { TYPEC_PORT_DFP = 0, TYPEC_PORT_UFP = 1, TYPEC_PORT_DRD = 2, }; enum usb_pd_svdm_ver { SVDM_VER_1_0 = 0, SVDM_VER_2_0 = 1, SVDM_VER_MAX = 1, }; enum typec_accessory { TYPEC_ACCESSORY_NONE = 0, TYPEC_ACCESSORY_AUDIO = 1, TYPEC_ACCESSORY_DEBUG = 2, }; enum typec_plug_index { TYPEC_PLUG_SOP_P = 0, TYPEC_PLUG_SOP_PP = 1, }; enum typec_plug_type { USB_PLUG_NONE = 0, USB_PLUG_TYPE_A = 1, USB_PLUG_TYPE_B = 2, USB_PLUG_TYPE_C = 3, USB_PLUG_CAPTIVE = 4, }; struct usb_power_delivery; struct typec_switch; struct typec_mux; struct typec_retimer; struct typec_capability; struct typec_operations; struct typec_port { unsigned int id; struct device dev; struct ida mode_ids; struct usb_power_delivery *pd; int prefer_role; enum typec_data_role data_role; enum typec_role pwr_role; enum typec_role vconn_role; enum typec_pwr_opmode pwr_opmode; enum typec_port_type port_type; struct mutex port_type_lock; enum typec_orientation orientation; struct typec_switch *sw; struct typec_mux *mux; struct typec_retimer *retimer; const struct typec_capability *cap; const struct typec_operations *ops; u64 android_kabi_reserved1; }; struct usb_power_delivery { struct device dev; int id; u16 revision; u16 version; }; struct typec_capability { enum typec_port_type type; enum typec_port_data data; u16 revision; u16 pd_revision; enum usb_pd_svdm_ver svdm_version; int prefer_role; enum typec_accessory accessory[3]; unsigned int orientation_aware: 1; struct fwnode_handle *fwnode; void *driver_data; struct usb_power_delivery *pd; const struct typec_operations *ops; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct typec_operations { int (*try_role)(struct typec_port *, int); int (*dr_set)(struct typec_port *, enum typec_data_role); int (*pr_set)(struct typec_port *, enum typec_role); int (*vconn_set)(struct typec_port *, enum typec_role); int (*port_type_set)(struct typec_port *, enum typec_port_type); struct usb_power_delivery ** (*pd_get)(struct typec_port *); int (*pd_set)(struct typec_port *, struct usb_power_delivery *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct typec_altmode_ops; struct typec_altmode { struct device dev; u16 svid; int mode; u32 vdo; unsigned int active: 1; char *desc; const struct typec_altmode_ops *ops; u64 android_kabi_reserved1; }; struct altmode { unsigned int id; struct typec_altmode adev; struct typec_mux *mux; struct typec_retimer *retimer; enum typec_port_data roles; struct attribute *attrs[5]; char group_name[8]; struct attribute_group group; const struct attribute_group *groups[2]; struct altmode *partner; struct altmode *plug[2]; u64 
android_kabi_reserved1; }; struct typec_altmode_ops { int (*enter)(struct typec_altmode *, u32 *); int (*exit)(struct typec_altmode *); void (*attention)(struct typec_altmode *, u32); int (*vdm)(struct typec_altmode *, const u32, const u32 *, int); int (*notify)(struct typec_altmode *, unsigned long, void *); int (*activate)(struct typec_altmode *, int); u64 android_kabi_reserved1; }; struct usb_pd_identity; struct typec_cable { struct device dev; enum typec_plug_type type; struct usb_pd_identity *identity; unsigned int active: 1; u16 pd_revision; u64 android_kabi_reserved1; }; struct usb_pd_identity { u32 id_header; u32 cert_stat; u32 product; u32 vdo[3]; }; struct typec_partner { struct device dev; unsigned int usb_pd: 1; struct usb_pd_identity *identity; enum typec_accessory accessory; struct ida mode_ids; int num_altmodes; u16 pd_revision; enum usb_pd_svdm_ver svdm_version; struct usb_power_delivery *pd; u64 android_kabi_reserved1; }; struct typec_plug { struct device dev; enum typec_plug_index index; struct ida mode_ids; int num_altmodes; u64 android_kabi_reserved1; }; struct typec_device_id { __u16 svid; __u8 mode; kernel_ulong_t driver_data; }; struct usb_power_delivery_desc { u16 revision; u16 version; }; struct typec_mux_state { struct typec_altmode *alt; unsigned long mode; void *data; }; struct typec_altmode_desc { u16 svid; u8 mode; u32 vdo; enum typec_port_data roles; }; struct typec_partner_desc { unsigned int usb_pd: 1; enum typec_accessory accessory; struct usb_pd_identity *identity; u16 pd_revision; }; struct typec_plug_desc { enum typec_plug_index index; }; struct typec_cable_desc { enum typec_plug_type type; unsigned int active: 1; struct usb_pd_identity *identity; u16 pd_revision; }; struct typec_switch_dev; typedef int (*typec_switch_set_fn_t)(struct typec_switch_dev *, enum typec_orientation); struct typec_switch_dev { struct device dev; typec_switch_set_fn_t set; u64 android_kabi_reserved1; }; struct typec_mux_dev; typedef int (*typec_mux_set_fn_t)(struct typec_mux_dev *, struct typec_mux_state *); struct typec_mux_dev { struct device dev; typec_mux_set_fn_t set; u64 android_kabi_reserved1; }; struct typec_switch { struct typec_switch_dev *sw_devs[3]; unsigned int num_sw_devs; }; struct typec_mux { struct typec_mux_dev *mux_devs[3]; unsigned int num_mux_devs; }; struct typec_switch_desc { struct fwnode_handle *fwnode; typec_switch_set_fn_t set; const char *name; void *drvdata; }; struct typec_mux_desc { struct fwnode_handle *fwnode; typec_mux_set_fn_t set; const char *name; void *drvdata; }; enum { TYPEC_STATE_SAFE = 0, TYPEC_STATE_USB = 1, TYPEC_STATE_MODAL = 2, }; struct typec_retimer_state; typedef int (*typec_retimer_set_fn_t)(struct typec_retimer *, struct typec_retimer_state *); struct typec_retimer { struct device dev; typec_retimer_set_fn_t set; }; struct typec_retimer_state { struct typec_altmode *alt; unsigned long mode; void *data; }; struct typec_altmode_driver { const struct typec_device_id *id_table; int (*probe)(struct typec_altmode *); void (*remove)(struct typec_altmode *); struct device_driver driver; }; enum pd_pdo_type { PDO_TYPE_FIXED = 0, PDO_TYPE_BATT = 1, PDO_TYPE_VAR = 2, PDO_TYPE_APDO = 3, }; enum pd_apdo_type { APDO_TYPE_PPS = 0, }; struct usb_power_delivery_capabilities { struct device dev; struct usb_power_delivery *pd; enum typec_role role; }; struct pdo { struct device dev; int object_position; u32 pdo; }; struct usb_power_delivery_capabilities_desc { u32 pdo[7]; enum typec_role role; }; struct typec_retimer_desc { struct fwnode_handle 
*fwnode; typec_retimer_set_fn_t set; const char *name; void *drvdata; }; enum dp_state { DP_STATE_IDLE = 0, DP_STATE_ENTER = 1, DP_STATE_UPDATE = 2, DP_STATE_CONFIGURE = 3, DP_STATE_EXIT = 4, }; enum { DP_PIN_ASSIGN_A = 0, DP_PIN_ASSIGN_B = 1, DP_PIN_ASSIGN_C = 2, DP_PIN_ASSIGN_D = 3, DP_PIN_ASSIGN_E = 4, DP_PIN_ASSIGN_F = 5, }; enum { DP_CONF_USB = 0, DP_CONF_DFP_D = 1, DP_CONF_UFP_D = 2, DP_CONF_DUAL_D = 3, }; struct typec_displayport_data { u32 status; u32 conf; }; struct dp_altmode { struct typec_displayport_data data; enum dp_state state; bool hpd; bool pending_hpd; struct mutex lock; struct work_struct work; struct typec_altmode *alt; const struct typec_altmode *port; struct fwnode_handle *connector_fwnode; }; enum typec_cc_status { TYPEC_CC_OPEN = 0, TYPEC_CC_RA = 1, TYPEC_CC_RD = 2, TYPEC_CC_RP_DEF = 3, TYPEC_CC_RP_1_5 = 4, TYPEC_CC_RP_3_0 = 5, }; enum typec_cc_polarity { TYPEC_POLARITY_CC1 = 0, TYPEC_POLARITY_CC2 = 1, }; enum tcpm_transmit_type { TCPC_TX_SOP = 0, TCPC_TX_SOP_PRIME = 1, TCPC_TX_SOP_PRIME_PRIME = 2, TCPC_TX_SOP_DEBUG_PRIME = 3, TCPC_TX_SOP_DEBUG_PRIME_PRIME = 4, TCPC_TX_HARD_RESET = 5, TCPC_TX_CABLE_RESET = 6, TCPC_TX_BIST_MODE_2 = 7, }; enum pd_msg_request { PD_MSG_NONE = 0, PD_MSG_CTRL_REJECT = 1, PD_MSG_CTRL_WAIT = 2, PD_MSG_CTRL_NOT_SUPP = 3, PD_MSG_DATA_SINK_CAP = 4, PD_MSG_DATA_SOURCE_CAP = 5, }; enum tcpm_state { INVALID_STATE = 0, TOGGLING = 1, CHECK_CONTAMINANT = 2, SRC_UNATTACHED = 3, SRC_ATTACH_WAIT = 4, SRC_ATTACHED = 5, SRC_STARTUP = 6, SRC_SEND_CAPABILITIES = 7, SRC_SEND_CAPABILITIES_TIMEOUT = 8, SRC_NEGOTIATE_CAPABILITIES = 9, SRC_TRANSITION_SUPPLY = 10, SRC_READY = 11, SRC_WAIT_NEW_CAPABILITIES = 12, SNK_UNATTACHED = 13, SNK_ATTACH_WAIT = 14, SNK_DEBOUNCED = 15, SNK_ATTACHED = 16, SNK_STARTUP = 17, SNK_DISCOVERY = 18, SNK_DISCOVERY_DEBOUNCE = 19, SNK_DISCOVERY_DEBOUNCE_DONE = 20, SNK_WAIT_CAPABILITIES = 21, SNK_NEGOTIATE_CAPABILITIES = 22, SNK_NEGOTIATE_PPS_CAPABILITIES = 23, SNK_TRANSITION_SINK = 24, SNK_TRANSITION_SINK_VBUS = 25, SNK_READY = 26, ACC_UNATTACHED = 27, DEBUG_ACC_ATTACHED = 28, AUDIO_ACC_ATTACHED = 29, AUDIO_ACC_DEBOUNCE = 30, HARD_RESET_SEND = 31, HARD_RESET_START = 32, SRC_HARD_RESET_VBUS_OFF = 33, SRC_HARD_RESET_VBUS_ON = 34, SNK_HARD_RESET_SINK_OFF = 35, SNK_HARD_RESET_WAIT_VBUS = 36, SNK_HARD_RESET_SINK_ON = 37, SOFT_RESET = 38, SRC_SOFT_RESET_WAIT_SNK_TX = 39, SNK_SOFT_RESET = 40, SOFT_RESET_SEND = 41, DR_SWAP_ACCEPT = 42, DR_SWAP_SEND = 43, DR_SWAP_SEND_TIMEOUT = 44, DR_SWAP_CANCEL = 45, DR_SWAP_CHANGE_DR = 46, PR_SWAP_ACCEPT = 47, PR_SWAP_SEND = 48, PR_SWAP_SEND_TIMEOUT = 49, PR_SWAP_CANCEL = 50, PR_SWAP_START = 51, PR_SWAP_SRC_SNK_TRANSITION_OFF = 52, PR_SWAP_SRC_SNK_SOURCE_OFF = 53, PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED = 54, PR_SWAP_SRC_SNK_SINK_ON = 55, PR_SWAP_SNK_SRC_SINK_OFF = 56, PR_SWAP_SNK_SRC_SOURCE_ON = 57, PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP = 58, VCONN_SWAP_ACCEPT = 59, VCONN_SWAP_SEND = 60, VCONN_SWAP_SEND_TIMEOUT = 61, VCONN_SWAP_CANCEL = 62, VCONN_SWAP_START = 63, VCONN_SWAP_WAIT_FOR_VCONN = 64, VCONN_SWAP_TURN_ON_VCONN = 65, VCONN_SWAP_TURN_OFF_VCONN = 66, FR_SWAP_SEND = 67, FR_SWAP_SEND_TIMEOUT = 68, FR_SWAP_SNK_SRC_TRANSITION_TO_OFF = 69, FR_SWAP_SNK_SRC_NEW_SINK_READY = 70, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED = 71, FR_SWAP_CANCEL = 72, SNK_TRY = 73, SNK_TRY_WAIT = 74, SNK_TRY_WAIT_DEBOUNCE = 75, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS = 76, SRC_TRYWAIT = 77, SRC_TRYWAIT_DEBOUNCE = 78, SRC_TRYWAIT_UNATTACHED = 79, SRC_TRY = 80, SRC_TRY_WAIT = 81, SRC_TRY_DEBOUNCE = 82, SNK_TRYWAIT = 83, SNK_TRYWAIT_DEBOUNCE 
= 84, SNK_TRYWAIT_VBUS = 85, BIST_RX = 86, GET_STATUS_SEND = 87, GET_STATUS_SEND_TIMEOUT = 88, GET_PPS_STATUS_SEND = 89, GET_PPS_STATUS_SEND_TIMEOUT = 90, GET_SINK_CAP = 91, GET_SINK_CAP_TIMEOUT = 92, ERROR_RECOVERY = 93, PORT_RESET = 94, PORT_RESET_WAIT_OFF = 95, AMS_START = 96, CHUNK_NOT_SUPP = 97, }; enum tcpm_transmit_status { TCPC_TX_SUCCESS = 0, TCPC_TX_DISCARDED = 1, TCPC_TX_FAILED = 2, }; enum vdm_states { VDM_STATE_ERR_BUSY = -3, VDM_STATE_ERR_SEND = -2, VDM_STATE_ERR_TMOUT = -1, VDM_STATE_DONE = 0, VDM_STATE_READY = 1, VDM_STATE_BUSY = 2, VDM_STATE_WAIT_RSP_BUSY = 3, VDM_STATE_SEND_MESSAGE = 4, }; enum frs_typec_current { FRS_NOT_SUPPORTED = 0, FRS_DEFAULT_POWER = 1, FRS_5V_1P5A = 2, FRS_5V_3A = 3, }; enum tcpm_ams { NONE_AMS = 0, POWER_NEGOTIATION = 1, GOTOMIN = 2, SOFT_RESET_AMS = 3, HARD_RESET = 4, CABLE_RESET = 5, GET_SOURCE_CAPABILITIES = 6, GET_SINK_CAPABILITIES = 7, POWER_ROLE_SWAP = 8, FAST_ROLE_SWAP = 9, DATA_ROLE_SWAP = 10, VCONN_SWAP = 11, SOURCE_ALERT = 12, GETTING_SOURCE_EXTENDED_CAPABILITIES = 13, GETTING_SOURCE_SINK_STATUS = 14, GETTING_BATTERY_CAPABILITIES = 15, GETTING_BATTERY_STATUS = 16, GETTING_MANUFACTURER_INFORMATION = 17, SECURITY = 18, FIRMWARE_UPDATE = 19, DISCOVER_IDENTITY = 20, SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY = 21, DISCOVER_SVIDS = 22, DISCOVER_MODES = 23, DFP_TO_UFP_ENTER_MODE = 24, DFP_TO_UFP_EXIT_MODE = 25, DFP_TO_CABLE_PLUG_ENTER_MODE = 26, DFP_TO_CABLE_PLUG_EXIT_MODE = 27, ATTENTION = 28, BIST = 29, UNSTRUCTURED_VDMS = 30, STRUCTURED_VDMS = 31, COUNTRY_INFO = 32, COUNTRY_CODES = 33, }; enum pd_ctrl_msg_type { PD_CTRL_GOOD_CRC = 1, PD_CTRL_GOTO_MIN = 2, PD_CTRL_ACCEPT = 3, PD_CTRL_REJECT = 4, PD_CTRL_PING = 5, PD_CTRL_PS_RDY = 6, PD_CTRL_GET_SOURCE_CAP = 7, PD_CTRL_GET_SINK_CAP = 8, PD_CTRL_DR_SWAP = 9, PD_CTRL_PR_SWAP = 10, PD_CTRL_VCONN_SWAP = 11, PD_CTRL_WAIT = 12, PD_CTRL_SOFT_RESET = 13, PD_CTRL_NOT_SUPP = 16, PD_CTRL_GET_SOURCE_CAP_EXT = 17, PD_CTRL_GET_STATUS = 18, PD_CTRL_FR_SWAP = 19, PD_CTRL_GET_PPS_STATUS = 20, PD_CTRL_GET_COUNTRY_CODES = 21, }; enum pd_ext_msg_type { PD_EXT_SOURCE_CAP_EXT = 1, PD_EXT_STATUS = 2, PD_EXT_GET_BATT_CAP = 3, PD_EXT_GET_BATT_STATUS = 4, PD_EXT_BATT_CAP = 5, PD_EXT_GET_MANUFACTURER_INFO = 6, PD_EXT_MANUFACTURER_INFO = 7, PD_EXT_SECURITY_REQUEST = 8, PD_EXT_SECURITY_RESPONSE = 9, PD_EXT_FW_UPDATE_REQUEST = 10, PD_EXT_FW_UPDATE_RESPONSE = 11, PD_EXT_PPS_STATUS = 12, PD_EXT_COUNTRY_INFO = 13, PD_EXT_COUNTRY_CODES = 14, }; enum pd_data_msg_type { PD_DATA_SOURCE_CAP = 1, PD_DATA_REQUEST = 2, PD_DATA_BIST = 3, PD_DATA_SINK_CAP = 4, PD_DATA_BATT_STATUS = 5, PD_DATA_ALERT = 6, PD_DATA_GET_COUNTRY_INFO = 7, PD_DATA_ENTER_USB = 8, PD_DATA_VENDOR_DEF = 15, }; enum pdo_err { PDO_NO_ERR = 0, PDO_ERR_NO_VSAFE5V = 1, PDO_ERR_VSAFE5V_NOT_FIRST = 2, PDO_ERR_PDO_TYPE_NOT_IN_ORDER = 3, PDO_ERR_FIXED_NOT_SORTED = 4, PDO_ERR_VARIABLE_BATT_NOT_SORTED = 5, PDO_ERR_DUPE_PDO = 6, PDO_ERR_PPS_APDO_NOT_SORTED = 7, PDO_ERR_DUPE_PPS_APDO = 8, }; enum adev_actions { ADEV_NONE = 0, ADEV_NOTIFY_USB_AND_QUEUE_VDM = 1, ADEV_QUEUE_VDM = 2, ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL = 3, ADEV_ATTENTION = 4, }; enum tcpm_psy_online_states { TCPM_PSY_OFFLINE = 0, TCPM_PSY_FIXED_ONLINE = 1, TCPM_PSY_PROG_ONLINE = 2, }; struct pd_chunked_ext_message_data { __le16 header; u8 data[26]; }; struct pd_message { __le16 header; union { __le32 payload[7]; struct pd_chunked_ext_message_data ext_msg; }; } __attribute__((packed)); struct tcpm_port; struct pd_rx_event { struct kthread_work work; struct tcpm_port *port; struct pd_message msg; }; struct 
pd_pps_data { u32 min_volt; u32 req_min_volt; u32 max_volt; u32 req_max_volt; u32 max_curr; u32 req_max_curr; u32 req_out_volt; u32 req_op_curr; bool supported; bool active; }; struct pd_mode_data { int svid_index; int nsvids; u16 svids[16]; int altmodes; struct typec_altmode_desc altmode_desc[96]; }; struct tcpc_dev; struct pd_data; struct tcpm_port { struct device *dev; struct mutex lock; struct kthread_worker *wq; struct typec_capability typec_caps; struct typec_port *typec_port; struct tcpc_dev *tcpc; struct usb_role_switch *role_sw; enum typec_role vconn_role; enum typec_role pwr_role; enum typec_data_role data_role; enum typec_pwr_opmode pwr_opmode; struct usb_pd_identity partner_ident; struct typec_partner_desc partner_desc; struct typec_partner *partner; enum typec_cc_status cc_req; enum typec_cc_status src_rp; enum typec_cc_status cc1; enum typec_cc_status cc2; enum typec_cc_polarity polarity; bool attached; bool connected; bool registered; bool pd_supported; enum typec_port_type port_type; bool vbus_present; bool vbus_vsafe0v; bool vbus_never_low; bool vbus_source; bool vbus_charge; bool send_discover; bool op_vsafe5v; int try_role; int try_snk_count; int try_src_count; enum pd_msg_request queued_message; enum tcpm_state enter_state; enum tcpm_state prev_state; enum tcpm_state state; enum tcpm_state delayed_state; ktime_t delayed_runtime; unsigned long delay_ms; spinlock_t pd_event_lock; u32 pd_events; struct kthread_work event_work; struct hrtimer state_machine_timer; struct kthread_work state_machine; struct hrtimer vdm_state_machine_timer; struct kthread_work vdm_state_machine; struct hrtimer enable_frs_timer; struct kthread_work enable_frs; struct hrtimer send_discover_timer; struct kthread_work send_discover_work; bool state_machine_running; bool vdm_sm_running; struct completion tx_complete; enum tcpm_transmit_status tx_status; struct mutex swap_lock; bool swap_pending; bool non_pd_role_swap; struct completion swap_complete; int swap_status; unsigned int negotiated_rev; unsigned int message_id; unsigned int caps_count; unsigned int hard_reset_count; bool pd_capable; bool explicit_contract; unsigned int rx_msgid; struct usb_power_delivery **pds; struct pd_data **pd_list; struct usb_power_delivery_capabilities *port_source_caps; struct usb_power_delivery_capabilities *port_sink_caps; struct usb_power_delivery *partner_pd; struct usb_power_delivery_capabilities *partner_source_caps; struct usb_power_delivery_capabilities *partner_sink_caps; struct usb_power_delivery *selected_pd; u32 sink_request; u32 source_caps[7]; unsigned int nr_source_caps; u32 sink_caps[7]; unsigned int nr_sink_caps; unsigned int pd_count; u32 src_pdo[7]; unsigned int nr_src_pdo; u32 snk_pdo[7]; unsigned int nr_snk_pdo; u32 snk_vdo_v1[6]; unsigned int nr_snk_vdo_v1; u32 snk_vdo[6]; unsigned int nr_snk_vdo; unsigned int operating_snk_mw; bool update_sink_caps; u32 req_current_limit; u32 req_supply_voltage; u32 current_limit; u32 supply_voltage; struct power_supply *psy; struct power_supply_desc psy_desc; enum power_supply_usb_type usb_type; u32 bist_request; enum vdm_states vdm_state; u32 vdm_retries; u32 vdo_data[7]; u8 vdo_count; u32 vdo_retry; struct pd_pps_data pps_data; struct completion pps_complete; bool pps_pending; int pps_status; struct pd_mode_data mode_data; struct typec_altmode *partner_altmode[96]; struct typec_altmode *port_altmode[96]; unsigned long max_wait; bool self_powered; enum frs_typec_current new_source_frs_current; bool sink_cap_done; enum tcpm_state upcoming_state; enum tcpm_ams 
ams; enum tcpm_ams next_ams; bool in_ams; bool auto_vbus_discharge_enabled; bool slow_charger_loop; bool potential_contaminant; struct dentry *dentry; struct mutex logbuffer_lock; int logbuffer_head; int logbuffer_tail; u8 *logbuffer[1024]; }; struct tcpc_dev { struct fwnode_handle *fwnode; int (*init)(struct tcpc_dev *); int (*get_vbus)(struct tcpc_dev *); int (*get_current_limit)(struct tcpc_dev *); int (*set_cc)(struct tcpc_dev *, enum typec_cc_status); int (*apply_rc)(struct tcpc_dev *, enum typec_cc_status, enum typec_cc_polarity); int (*get_cc)(struct tcpc_dev *, enum typec_cc_status *, enum typec_cc_status *); int (*set_polarity)(struct tcpc_dev *, enum typec_cc_polarity); int (*set_vconn)(struct tcpc_dev *, bool); int (*set_vbus)(struct tcpc_dev *, bool, bool); int (*set_current_limit)(struct tcpc_dev *, u32, u32); int (*set_pd_rx)(struct tcpc_dev *, bool); int (*set_roles)(struct tcpc_dev *, bool, enum typec_role, enum typec_data_role); int (*start_toggling)(struct tcpc_dev *, enum typec_port_type, enum typec_cc_status); int (*try_role)(struct tcpc_dev *, int); int (*pd_transmit)(struct tcpc_dev *, enum tcpm_transmit_type, const struct pd_message *, unsigned int); int (*set_bist_data)(struct tcpc_dev *, bool); int (*enable_frs)(struct tcpc_dev *, bool); void (*frs_sourcing_vbus)(struct tcpc_dev *); int (*enable_auto_vbus_discharge)(struct tcpc_dev *, bool); int (*set_auto_vbus_discharge_threshold)(struct tcpc_dev *, enum typec_pwr_opmode, bool, u32); bool (*is_vbus_vsafe0v)(struct tcpc_dev *); void (*set_partner_usb_comm_capable)(struct tcpc_dev *, bool); void (*check_contaminant)(struct tcpc_dev *); }; struct pd_data { struct usb_power_delivery *pd; struct usb_power_delivery_capabilities *source_cap; struct usb_power_delivery_capabilities_desc source_desc; struct usb_power_delivery_capabilities *sink_cap; struct usb_power_delivery_capabilities_desc sink_desc; unsigned int operating_snk_mw; }; struct power_supply_config { struct device_node *of_node; struct fwnode_handle *fwnode; void *drv_data; const struct attribute_group **attr_grp; char **supplied_to; size_t num_supplicants; u64 android_kabi_reserved1; }; struct tcpci_data; struct tcpci { struct device *dev; struct tcpm_port *port; struct regmap *regmap; unsigned int alert_mask; bool controls_vbus; struct tcpc_dev tcpc; struct tcpci_data *data; }; struct tcpci_data { struct regmap *regmap; unsigned char TX_BUF_BYTE_x_hidden: 1; unsigned char auto_discharge_disconnect: 1; unsigned char vbus_vsafe0v: 1; int (*init)(struct tcpci *, struct tcpci_data *); int (*set_vconn)(struct tcpci *, struct tcpci_data *, bool); int (*start_drp_toggling)(struct tcpci *, struct tcpci_data *, enum typec_cc_status); int (*set_vbus)(struct tcpci *, struct tcpci_data *, bool, bool); void (*frs_sourcing_vbus)(struct tcpci *, struct tcpci_data *); void (*set_partner_usb_comm_capable)(struct tcpci *, struct tcpci_data *, bool); void (*check_contaminant)(struct tcpci *, struct tcpci_data *); }; struct tcpci_chip { struct tcpci *tcpci; struct tcpci_data data; }; enum { TYPEC_MODE_USB2 = 2, TYPEC_MODE_USB3 = 3, TYPEC_MODE_USB4 = 4, TYPEC_MODE_AUDIO = 5, TYPEC_MODE_DEBUG = 6, }; struct ucsi_connector; struct ucsi_work { struct delayed_work work; struct list_head node; unsigned long delay; unsigned int count; struct ucsi_connector *con; int (*cb)(struct ucsi_connector *); }; struct ucsi_connector_status { u16 change; u16 flags; u32 request_data_obj; u8 pwr_status; } __attribute__((packed)); struct ucsi_connector_capability { u8 op_mode; u8 flags; }; struct 
ucsi; struct ucsi_connector { int num; struct ucsi *ucsi; struct mutex lock; struct work_struct work; struct completion complete; struct workqueue_struct *wq; struct list_head partner_tasks; struct typec_port *port; struct typec_partner *partner; struct typec_altmode *port_altmode[30]; struct typec_altmode *partner_altmode[30]; struct typec_capability typec_cap; struct ucsi_connector_status status; struct ucsi_connector_capability cap; struct power_supply *psy; struct power_supply_desc psy_desc; u32 rdo; u32 src_pdos[7]; int num_pdos; struct usb_power_delivery *pd; struct usb_power_delivery_capabilities *port_source_caps; struct usb_power_delivery_capabilities *port_sink_caps; struct usb_power_delivery *partner_pd; struct usb_power_delivery_capabilities *partner_source_caps; struct usb_power_delivery_capabilities *partner_sink_caps; struct usb_role_switch *usb_role_sw; }; struct driver_data; struct ucsi_capability { u32 attributes; u8 num_connectors; u8 features; u16 reserved_1; u8 num_alt_modes; u8 reserved_2; u16 bc_version; u16 pd_version; u16 typec_version; }; struct ucsi_operations; struct ucsi_debugfs_entry; struct ucsi { u16 version; struct device *dev; struct driver_data *driver_data; const struct ucsi_operations *ops; struct ucsi_capability cap; struct ucsi_connector *connector; struct ucsi_debugfs_entry *debugfs; struct work_struct resume_work; struct delayed_work work; int work_count; struct mutex ppm_lock; u64 ntfy; unsigned long flags; }; struct ucsi_altmode; struct ucsi_operations { int (*read)(struct ucsi *, unsigned int, void *, size_t); int (*sync_write)(struct ucsi *, unsigned int, const void *, size_t); int (*async_write)(struct ucsi *, unsigned int, const void *, size_t); bool (*update_altmodes)(struct ucsi *, struct ucsi_altmode *, struct ucsi_altmode *); }; struct ucsi_altmode { u16 svid; u32 mid; } __attribute__((packed)); struct ucsi_data { u64 low; u64 high; }; struct ucsi_debugfs_entry { u64 command; struct ucsi_data response; u32 status; struct dentry *dentry; }; typedef void (*btf_trace_ucsi_run_command)(void *, u64, int); typedef void (*btf_trace_ucsi_reset_ppm)(void *, u64, int); typedef void (*btf_trace_ucsi_connector_change)(void *, int, struct ucsi_connector_status *); typedef void (*btf_trace_ucsi_register_port)(void *, int, struct ucsi_connector_status *); typedef void (*btf_trace_ucsi_register_altmode)(void *, u8, struct typec_altmode *); struct trace_event_raw_ucsi_log_command { struct trace_entry ent; u64 ctrl; int ret; char __data[0]; }; struct trace_event_raw_ucsi_log_connector_status { struct trace_entry ent; int port; u16 change; u8 opmode; u8 connected; u8 pwr_dir; u8 partner_flags; u8 partner_type; u32 request_data_obj; u8 bc_status; char __data[0]; }; struct trace_event_raw_ucsi_log_register_altmode { struct trace_entry ent; u8 recipient; u16 svid; u8 mode; u32 vdo; char __data[0]; }; struct trace_event_data_offsets_ucsi_log_command {}; struct trace_event_data_offsets_ucsi_log_connector_status {}; struct trace_event_data_offsets_ucsi_log_register_altmode {}; enum ucsi_psy_online_states { UCSI_PSY_OFFLINE = 0, UCSI_PSY_FIXED_ONLINE = 1, UCSI_PSY_PROG_ONLINE = 2, }; enum { POWER_SUPPLY_SCOPE_UNKNOWN = 0, POWER_SUPPLY_SCOPE_SYSTEM = 1, POWER_SUPPLY_SCOPE_DEVICE = 2, }; struct ucsi_dp { struct typec_displayport_data data; struct ucsi_connector *con; struct typec_altmode *alt; struct work_struct work; int offset; bool override; bool initialized; u32 header; u32 *vdo_data; u8 vdo_size; }; struct usb_role_switch { struct device dev; struct mutex lock; 
struct module *module; enum usb_role role; bool registered; struct device *usb2_port; struct device *usb3_port; struct device *udc; usb_role_switch_set_t set; usb_role_switch_get_t get; bool allow_userspace_control; }; enum serio_event_type { SERIO_RESCAN_PORT = 0, SERIO_RECONNECT_PORT = 1, SERIO_RECONNECT_SUBTREE = 2, SERIO_REGISTER_PORT = 3, SERIO_ATTACH_DRIVER = 4, }; struct serio_device_id { __u8 type; __u8 extra; __u8 id; __u8 proto; }; struct serio_driver; struct serio { void *port_data; char name[32]; char phys[32]; char firmware_id[128]; bool manual_bind; struct serio_device_id id; spinlock_t lock; int (*write)(struct serio *, unsigned char); int (*open)(struct serio *); void (*close)(struct serio *); int (*start)(struct serio *); void (*stop)(struct serio *); struct serio *parent; struct list_head child_node; struct list_head children; unsigned int depth; struct serio_driver *drv; struct mutex drv_mutex; struct device dev; struct list_head node; struct mutex *ps2_cmd_mutex; u64 android_kabi_reserved1; }; struct serio_driver { const char *description; const struct serio_device_id *id_table; bool manual_bind; void (*write_wakeup)(struct serio *); irqreturn_t (*interrupt)(struct serio *, unsigned char, unsigned int); int (*connect)(struct serio *, struct serio_driver *); int (*reconnect)(struct serio *); int (*fast_reconnect)(struct serio *); void (*disconnect)(struct serio *); void (*cleanup)(struct serio *); struct device_driver driver; u64 android_kabi_reserved1; }; struct serio_event { enum serio_event_type type; void *object; struct module *owner; struct list_head node; }; struct serport___2 { struct tty_struct *tty; wait_queue_head_t wait; struct serio *serio; struct serio_device_id id; spinlock_t lock; unsigned long flags; }; enum ps2_disposition { PS2_PROCESS = 0, PS2_IGNORE = 1, PS2_ERROR = 2, }; struct ps2dev; typedef enum ps2_disposition (*ps2_pre_receive_handler_t)(struct ps2dev *, u8, unsigned int); typedef void (*ps2_receive_handler_t)(struct ps2dev *, u8); struct ps2dev { struct serio *serio; struct mutex cmd_mutex; wait_queue_head_t wait; unsigned long flags; u8 cmdbuf[8]; u8 cmdcnt; u8 nak; ps2_pre_receive_handler_t pre_receive_handler; ps2_receive_handler_t receive_handler; }; enum input_clock_type { INPUT_CLK_REAL = 0, INPUT_CLK_MONO = 1, INPUT_CLK_BOOT = 2, INPUT_CLK_MAX = 3, }; struct input_mt_slot { int abs[14]; unsigned int frame; unsigned int key; }; struct input_mt { int trkid; int num_slots; int slot; unsigned int flags; unsigned int frame; int *red; struct input_mt_slot slots[0]; }; union input_seq_state { struct { unsigned short pos; bool mutex_acquired; }; void *p; }; struct input_devres { struct input_dev *input; }; struct ff_periodic_effect_compat { __u16 waveform; __u16 period; __s16 magnitude; __s16 offset; __u16 phase; struct ff_envelope envelope; __u32 custom_len; compat_uptr_t custom_data; }; struct ff_effect_compat { __u16 type; __s16 id; __u16 direction; struct ff_trigger trigger; struct ff_replay replay; union { struct ff_constant_effect constant; struct ff_ramp_effect ramp; struct ff_periodic_effect_compat periodic; struct ff_condition_effect condition[2]; struct ff_rumble_effect rumble; } u; }; struct input_event_compat { compat_ulong_t sec; compat_ulong_t usec; __u16 type; __u16 code; __s32 value; }; struct input_event { __kernel_ulong_t __sec; __kernel_ulong_t __usec; __u16 type; __u16 code; __s32 value; }; struct input_mt_pos { s16 x; s16 y; }; struct input_dev_poller { void (*poll)(struct input_dev *); unsigned int poll_interval; unsigned 
int poll_interval_max; unsigned int poll_interval_min; struct input_dev *input; struct delayed_work work; }; struct touchscreen_properties { unsigned int max_x; unsigned int max_y; bool invert_x; bool invert_y; bool swap_x_y; }; struct ml_effect_state { struct ff_effect *effect; unsigned long flags; int count; unsigned long play_at; unsigned long stop_at; unsigned long adj_at; }; struct ml_device { void *private; struct ml_effect_state states[16]; int gain; struct timer_list timer; struct input_dev *dev; int (*play_effect)(struct input_dev *, void *, struct ff_effect *); }; struct vivaldi_data { u32 function_row_physmap[24]; unsigned int num_function_row_keys; }; struct input_led { struct led_classdev cdev; struct input_handle *handle; unsigned int code; }; struct input_leds { struct input_handle handle; unsigned int num_leds; struct input_led leds[0]; }; struct evdev; struct evdev_client { unsigned int head; unsigned int tail; unsigned int packet_head; spinlock_t buffer_lock; wait_queue_head_t wait; struct fasync_struct *fasync; struct evdev *evdev; struct list_head node; enum input_clock_type clk_type; bool revoked; unsigned long *evmasks[32]; unsigned int bufsize; struct input_event buffer[0]; }; struct evdev { int open; struct input_handle handle; struct evdev_client __attribute__((btf_type_tag("rcu"))) *grab; struct list_head client_list; spinlock_t client_lock; struct mutex mutex; struct device dev; struct cdev cdev; bool exist; }; struct input_mask { __u32 type; __u32 codes_size; __u64 codes_ptr; }; struct atkbd { struct ps2dev ps2dev; struct input_dev *dev; char name[64]; char phys[32]; unsigned short id; unsigned short keycode[512]; unsigned long force_release_mask[8]; unsigned char set; bool translated; bool extra; bool write; bool softrepeat; bool softraw; bool scroll; bool enabled; unsigned char emul; bool resend; bool release; unsigned long xl_bit; unsigned int last; unsigned long time; unsigned long err_count; struct delayed_work event_work; unsigned long event_jiffies; unsigned long event_mask; struct mutex mutex; struct vivaldi_data vdata; }; struct gpio_keys_button { unsigned int code; int gpio; int active_low; const char *desc; unsigned int type; int wakeup; int wakeup_event_action; int debounce_interval; bool can_disable; int value; unsigned int irq; }; struct gpio_button_data { const struct gpio_keys_button *button; struct input_dev *input; struct gpio_desc *gpiod; unsigned short *code; struct hrtimer release_timer; unsigned int release_delay; struct delayed_work work; struct hrtimer debounce_timer; unsigned int software_debounce; unsigned int irq; unsigned int wakeup_trigger_type; spinlock_t lock; bool disabled; bool key_pressed; bool suspended; bool debounce_use_hrtimer; }; struct gpio_keys_platform_data; struct gpio_keys_drvdata { const struct gpio_keys_platform_data *pdata; struct input_dev *input; struct mutex disable_lock; unsigned short *keymap; struct gpio_button_data data[0]; }; struct gpio_keys_platform_data { const struct gpio_keys_button *buttons; int nbuttons; unsigned int poll_interval; unsigned int rep: 1; int (*enable)(struct device *); void (*disable)(struct device *); const char *name; }; struct xpad_device { u16 idVendor; u16 idProduct; char *name; u8 mapping; u8 xtype; }; struct xboxone_init_packet { u16 idVendor; u16 idProduct; const u8 *data; u8 len; }; struct xpad_output_packet { u8 data[64]; u8 len; bool pending; }; struct usb_xpad { struct input_dev *dev; struct input_dev __attribute__((btf_type_tag("rcu"))) *x360w_dev; struct usb_device *udev; 
struct usb_interface *intf; bool pad_present; bool input_created; struct urb *irq_in; unsigned char *idata; dma_addr_t idata_dma; struct urb *irq_out; struct usb_anchor irq_out_anchor; bool irq_out_active; u8 odata_serial; unsigned char *odata; dma_addr_t odata_dma; spinlock_t odata_lock; struct xpad_output_packet out_packets[1]; int last_out_packet; int init_seq; char phys[64]; int mapping; int xtype; int packet_type; int pad_nr; const char *name; struct work_struct work; time64_t mode_btn_down_ts; }; enum uinput_state { UIST_NEW_DEVICE = 0, UIST_SETUP_COMPLETE = 1, UIST_CREATED = 2, }; struct uinput_request; struct uinput_device { struct input_dev *dev; struct mutex mutex; enum uinput_state state; wait_queue_head_t waitq; unsigned char ready; unsigned char head; unsigned char tail; struct input_event buff[16]; unsigned int ff_effects_max; struct uinput_request *requests[16]; wait_queue_head_t requests_waitq; spinlock_t requests_lock; }; struct uinput_request { unsigned int id; unsigned int code; int retval; struct completion done; union { unsigned int effect_id; struct { struct ff_effect *effect; struct ff_effect *old; } upload; } u; }; struct uinput_user_dev { char name[80]; struct input_id id; __u32 ff_effects_max; __s32 absmax[64]; __s32 absmin[64]; __s32 absfuzz[64]; __s32 absflat[64]; }; struct uinput_ff_upload { __u32 request_id; __s32 retval; struct ff_effect effect; struct ff_effect old; }; struct uinput_ff_erase { __u32 request_id; __s32 retval; __u32 effect_id; }; struct uinput_setup { struct input_id id; char name[80]; __u32 ff_effects_max; }; struct uinput_ff_upload_compat { __u32 request_id; __s32 retval; struct ff_effect_compat effect; struct ff_effect_compat old; }; struct uinput_abs_setup { __u16 code; struct input_absinfo absinfo; }; typedef void (*btf_trace_rtc_set_time)(void *, time64_t, int); typedef void (*btf_trace_rtc_read_time)(void *, time64_t, int); typedef void (*btf_trace_rtc_set_alarm)(void *, time64_t, int); typedef void (*btf_trace_rtc_read_alarm)(void *, time64_t, int); typedef void (*btf_trace_rtc_irq_set_freq)(void *, int, int); typedef void (*btf_trace_rtc_irq_set_state)(void *, int, int); typedef void (*btf_trace_rtc_alarm_irq_enable)(void *, unsigned int, int); typedef void (*btf_trace_rtc_set_offset)(void *, long, int); typedef void (*btf_trace_rtc_read_offset)(void *, long, int); typedef void (*btf_trace_rtc_timer_enqueue)(void *, struct rtc_timer *); typedef void (*btf_trace_rtc_timer_dequeue)(void *, struct rtc_timer *); typedef void (*btf_trace_rtc_timer_fired)(void *, struct rtc_timer *); enum { none = 0, day = 1, month = 2, year = 3, }; struct trace_event_raw_rtc_time_alarm_class { struct trace_entry ent; time64_t secs; int err; char __data[0]; }; struct trace_event_raw_rtc_irq_set_freq { struct trace_entry ent; int freq; int err; char __data[0]; }; struct trace_event_raw_rtc_irq_set_state { struct trace_entry ent; int enabled; int err; char __data[0]; }; struct trace_event_raw_rtc_alarm_irq_enable { struct trace_entry ent; unsigned int enabled; int err; char __data[0]; }; struct trace_event_raw_rtc_offset_class { struct trace_entry ent; long offset; int err; char __data[0]; }; struct trace_event_raw_rtc_timer_class { struct trace_entry ent; struct rtc_timer *timer; ktime_t expires; ktime_t period; char __data[0]; }; struct trace_event_data_offsets_rtc_time_alarm_class {}; struct trace_event_data_offsets_rtc_irq_set_freq {}; struct trace_event_data_offsets_rtc_irq_set_state {}; struct trace_event_data_offsets_rtc_alarm_irq_enable {}; struct 
trace_event_data_offsets_rtc_offset_class {}; struct trace_event_data_offsets_rtc_timer_class {}; struct pl030_rtc { struct rtc_device *rtc; void *base; }; struct pl031_vendor_data { struct rtc_class_ops ops; bool clockwatch; bool st_weekday; unsigned long irqflags; time64_t range_min; timeu64_t range_max; }; struct pl031_local { struct pl031_vendor_data *vendor; struct rtc_device *rtc; void *base; }; struct i2c_devinfo { struct list_head list; int busnum; struct i2c_board_info board_info; }; typedef void (*btf_trace_i2c_write)(void *, const struct i2c_adapter *, const struct i2c_msg *, int); typedef void (*btf_trace_i2c_read)(void *, const struct i2c_adapter *, const struct i2c_msg *, int); typedef void (*btf_trace_i2c_reply)(void *, const struct i2c_adapter *, const struct i2c_msg *, int); typedef void (*btf_trace_i2c_result)(void *, const struct i2c_adapter *, int, int); struct trace_event_raw_i2c_write { struct trace_entry ent; int adapter_nr; __u16 msg_nr; __u16 addr; __u16 flags; __u16 len; u32 __data_loc_buf; char __data[0]; }; struct trace_event_raw_i2c_read { struct trace_entry ent; int adapter_nr; __u16 msg_nr; __u16 addr; __u16 flags; __u16 len; char __data[0]; }; struct trace_event_raw_i2c_reply { struct trace_entry ent; int adapter_nr; __u16 msg_nr; __u16 addr; __u16 flags; __u16 len; u32 __data_loc_buf; char __data[0]; }; struct trace_event_raw_i2c_result { struct trace_entry ent; int adapter_nr; __u16 nr_msgs; __s16 ret; char __data[0]; }; struct trace_event_data_offsets_i2c_write { u32 buf; }; struct trace_event_data_offsets_i2c_reply { u32 buf; }; struct trace_event_data_offsets_i2c_read {}; struct trace_event_data_offsets_i2c_result {}; struct i2c_timings { u32 bus_freq_hz; u32 scl_rise_ns; u32 scl_fall_ns; u32 scl_int_delay_ns; u32 sda_fall_ns; u32 sda_hold_ns; u32 digital_filter_width_ns; u32 analog_filter_cutoff_freq_hz; }; struct i2c_cmd_arg { unsigned int cmd; void *arg; }; struct i2c_device_identity { u16 manufacturer_id; u16 part_id; u8 die_revision; }; typedef void (*btf_trace_smbus_write)(void *, const struct i2c_adapter *, u16, unsigned short, char, u8, int, const union i2c_smbus_data *); typedef void (*btf_trace_smbus_read)(void *, const struct i2c_adapter *, u16, unsigned short, char, u8, int); typedef void (*btf_trace_smbus_reply)(void *, const struct i2c_adapter *, u16, unsigned short, char, u8, int, const union i2c_smbus_data *, int); typedef void (*btf_trace_smbus_result)(void *, const struct i2c_adapter *, u16, unsigned short, char, u8, int, int); struct trace_event_raw_smbus_write { struct trace_entry ent; int adapter_nr; __u16 addr; __u16 flags; __u8 command; __u8 len; __u32 protocol; __u8 buf[34]; char __data[0]; }; struct trace_event_raw_smbus_read { struct trace_entry ent; int adapter_nr; __u16 flags; __u16 addr; __u8 command; __u32 protocol; __u8 buf[34]; char __data[0]; }; struct trace_event_raw_smbus_reply { struct trace_entry ent; int adapter_nr; __u16 addr; __u16 flags; __u8 command; __u8 len; __u32 protocol; __u8 buf[34]; char __data[0]; }; struct trace_event_raw_smbus_result { struct trace_entry ent; int adapter_nr; __u16 addr; __u16 flags; __u8 read_write; __u8 command; __s16 res; __u32 protocol; char __data[0]; }; struct trace_event_data_offsets_smbus_write {}; struct trace_event_data_offsets_smbus_read {}; struct trace_event_data_offsets_smbus_reply {}; struct trace_event_data_offsets_smbus_result {}; struct i2c_smbus_alert_setup { int irq; }; enum i3c_error_code { I3C_ERROR_UNKNOWN = 0, I3C_ERROR_M0 = 1, I3C_ERROR_M1 = 2, I3C_ERROR_M2 = 3, 
}; enum i3c_bus_mode { I3C_BUS_MODE_PURE = 0, I3C_BUS_MODE_MIXED_FAST = 1, I3C_BUS_MODE_MIXED_LIMITED = 2, I3C_BUS_MODE_MIXED_SLOW = 3, }; struct i3c_dev_desc; struct i3c_bus { struct i3c_dev_desc *cur_master; int id; unsigned long addrslots[4]; enum i3c_bus_mode mode; struct { unsigned long i3c; unsigned long i2c; } scl_rate; struct { struct list_head i3c; struct list_head i2c; } devs; struct rw_semaphore lock; }; struct i3c_master_controller; struct i3c_i2c_dev_desc { struct list_head node; struct i3c_master_controller *master; void *master_priv; }; struct i3c_device_info { u64 pid; u8 bcr; u8 dcr; u8 static_addr; u8 dyn_addr; u8 hdr_cap; u8 max_read_ds; u8 max_write_ds; u8 max_ibi_len; u32 max_read_turnaround; u16 max_read_len; u16 max_write_len; }; struct i3c_device_ibi_info; struct i3c_device; struct i3c_dev_boardinfo; struct i3c_dev_desc { struct i3c_i2c_dev_desc common; struct i3c_device_info info; struct mutex ibi_lock; struct i3c_device_ibi_info *ibi; struct i3c_device *dev; const struct i3c_dev_boardinfo *boardinfo; }; struct i3c_master_controller_ops; struct i3c_master_controller { struct device dev; struct i3c_dev_desc *this; struct i2c_adapter i2c; const struct i3c_master_controller_ops *ops; unsigned int secondary: 1; unsigned int init_done: 1; struct { struct list_head i3c; struct list_head i2c; } boardinfo; struct i3c_bus bus; struct workqueue_struct *wq; }; struct i3c_ccc_cmd; struct i3c_priv_xfer; struct i2c_dev_desc; struct i3c_ibi_setup; struct i3c_ibi_slot; struct i3c_master_controller_ops { int (*bus_init)(struct i3c_master_controller *); void (*bus_cleanup)(struct i3c_master_controller *); int (*attach_i3c_dev)(struct i3c_dev_desc *); int (*reattach_i3c_dev)(struct i3c_dev_desc *, u8); void (*detach_i3c_dev)(struct i3c_dev_desc *); int (*do_daa)(struct i3c_master_controller *); bool (*supports_ccc_cmd)(struct i3c_master_controller *, const struct i3c_ccc_cmd *); int (*send_ccc_cmd)(struct i3c_master_controller *, struct i3c_ccc_cmd *); int (*priv_xfers)(struct i3c_dev_desc *, struct i3c_priv_xfer *, int); int (*attach_i2c_dev)(struct i2c_dev_desc *); void (*detach_i2c_dev)(struct i2c_dev_desc *); int (*i2c_xfers)(struct i2c_dev_desc *, const struct i2c_msg *, int); int (*request_ibi)(struct i3c_dev_desc *, const struct i3c_ibi_setup *); void (*free_ibi)(struct i3c_dev_desc *); int (*enable_ibi)(struct i3c_dev_desc *); int (*disable_ibi)(struct i3c_dev_desc *); void (*recycle_ibi_slot)(struct i3c_dev_desc *, struct i3c_ibi_slot *); }; struct i3c_ccc_cmd_dest; struct i3c_ccc_cmd { u8 rnw; u8 id; unsigned int ndests; struct i3c_ccc_cmd_dest *dests; enum i3c_error_code err; }; struct i3c_ccc_cmd_payload { u16 len; void *data; }; struct i3c_ccc_cmd_dest { u8 addr; struct i3c_ccc_cmd_payload payload; }; struct i3c_priv_xfer { u8 rnw; u16 len; union { void *in; const void *out; } data; enum i3c_error_code err; }; struct i2c_dev_desc { struct i3c_i2c_dev_desc common; struct i2c_client *dev; u16 addr; u8 lvr; }; struct i3c_ibi_payload; struct i3c_ibi_setup { unsigned int max_payload_len; unsigned int num_slots; void (*handler)(struct i3c_device *, const struct i3c_ibi_payload *); }; struct i3c_device { struct device dev; struct i3c_dev_desc *desc; struct i3c_bus *bus; }; struct i3c_ibi_payload { unsigned int len; const void *data; }; struct i3c_ibi_slot { struct work_struct work; struct i3c_dev_desc *dev; unsigned int len; void *data; }; struct i3c_device_ibi_info { struct completion all_ibis_handled; atomic_t pending_ibis; unsigned int max_payload_len; unsigned int 
num_slots; unsigned int enabled; void (*handler)(struct i3c_device *, const struct i3c_ibi_payload *); }; struct i3c_dev_boardinfo { struct list_head node; u8 init_dyn_addr; u8 static_addr; u64 pid; struct device_node *of_node; }; struct i3c_device_id { __u8 match_flags; __u8 dcr; __u16 manuf_id; __u16 part_id; __u16 extra_info; const void *data; }; struct i3c_driver { struct device_driver driver; int (*probe)(struct i3c_device *); void (*remove)(struct i3c_device *); const struct i3c_device_id *id_table; }; enum i3c_addr_slot_status { I3C_ADDR_SLOT_FREE = 0, I3C_ADDR_SLOT_RSVD = 1, I3C_ADDR_SLOT_I2C_DEV = 2, I3C_ADDR_SLOT_I3C_DEV = 3, I3C_ADDR_SLOT_STATUS_MASK = 3, }; struct i3c_generic_ibi_slot { struct list_head node; struct i3c_ibi_slot base; }; struct i2c_dev_boardinfo { struct list_head node; struct i2c_board_info base; u8 lvr; }; struct i3c_ccc_getpid { u8 pid[6]; }; struct i3c_ccc_getbcr { u8 bcr; }; struct i3c_ccc_getdcr { u8 dcr; }; struct i3c_generic_ibi_pool { spinlock_t lock; unsigned int num_slots; struct i3c_generic_ibi_slot *slots; void *payload_buf; struct list_head free_slots; struct list_head pending; }; struct i3c_ccc_events { u8 events; }; struct i3c_ccc_dev_desc { u8 dyn_addr; union { u8 dcr; u8 lvr; }; u8 bcr; u8 static_addr; }; struct i3c_ccc_defslvs { u8 count; struct i3c_ccc_dev_desc master; struct i3c_ccc_dev_desc slaves[0]; }; struct i3c_ccc_getmxds { u8 maxwr; u8 maxrd; u8 maxrdturn[3]; }; struct i3c_ccc_mrl { __be16 read_len; u8 ibi_len; } __attribute__((packed)); struct i3c_ccc_mwl { __be16 len; }; struct i3c_ccc_gethdrcap { u8 modes; }; struct i3c_ccc_setda { u8 addr; }; enum rc_proto { RC_PROTO_UNKNOWN = 0, RC_PROTO_OTHER = 1, RC_PROTO_RC5 = 2, RC_PROTO_RC5X_20 = 3, RC_PROTO_RC5_SZ = 4, RC_PROTO_JVC = 5, RC_PROTO_SONY12 = 6, RC_PROTO_SONY15 = 7, RC_PROTO_SONY20 = 8, RC_PROTO_NEC = 9, RC_PROTO_NECX = 10, RC_PROTO_NEC32 = 11, RC_PROTO_SANYO = 12, RC_PROTO_MCIR2_KBD = 13, RC_PROTO_MCIR2_MSE = 14, RC_PROTO_RC6_0 = 15, RC_PROTO_RC6_6A_20 = 16, RC_PROTO_RC6_6A_24 = 17, RC_PROTO_RC6_6A_32 = 18, RC_PROTO_RC6_MCE = 19, RC_PROTO_SHARP = 20, RC_PROTO_XMP = 21, RC_PROTO_CEC = 22, RC_PROTO_IMON = 23, RC_PROTO_RCMM12 = 24, RC_PROTO_RCMM24 = 25, RC_PROTO_RCMM32 = 26, RC_PROTO_XBOX_DVD = 27, RC_PROTO_MAX = 27, }; enum rc_driver_type { RC_DRIVER_SCANCODE = 0, RC_DRIVER_IR_RAW = 1, RC_DRIVER_IR_RAW_TX = 2, }; enum ir_kbd_get_key_fn { IR_KBD_GET_KEY_CUSTOM = 0, IR_KBD_GET_KEY_PIXELVIEW = 1, IR_KBD_GET_KEY_HAUP = 2, IR_KBD_GET_KEY_KNC1 = 3, IR_KBD_GET_KEY_GENIATECH = 4, IR_KBD_GET_KEY_FUSIONHDTV = 5, IR_KBD_GET_KEY_HAUP_XVR = 6, IR_KBD_GET_KEY_AVERMEDIA_CARDBUS = 7, }; struct rc_dev; struct IR_i2c { char *ir_codes; struct i2c_client *c; struct rc_dev *rc; unsigned char old; u32 polling_interval; struct delayed_work work; char phys[32]; int (*get_key)(struct IR_i2c *, enum rc_proto *, u32 *, u8 *); struct i2c_client *tx_c; struct mutex lock; unsigned int carrier; unsigned int duty_cycle; }; struct rc_map_table; struct rc_map { struct rc_map_table *scan; unsigned int size; unsigned int len; unsigned int alloc; enum rc_proto rc_proto; const char *name; spinlock_t lock; }; struct rc_scancode_filter { u32 data; u32 mask; }; struct ir_raw_event_ctrl; struct rc_dev { struct device dev; bool managed_alloc; const struct attribute_group *sysfs_groups[5]; const char *device_name; const char *input_phys; struct input_id input_id; const char *driver_name; const char *map_name; struct rc_map rc_map; struct mutex lock; unsigned int minor; struct ir_raw_event_ctrl *raw; struct input_dev 
*input_dev; enum rc_driver_type driver_type; bool idle; bool encode_wakeup; u64 allowed_protocols; u64 enabled_protocols; u64 allowed_wakeup_protocols; enum rc_proto wakeup_protocol; struct rc_scancode_filter scancode_filter; struct rc_scancode_filter scancode_wakeup_filter; u32 scancode_mask; u32 users; void *priv; spinlock_t keylock; bool keypressed; unsigned long keyup_jiffies; struct timer_list timer_keyup; struct timer_list timer_repeat; u32 last_keycode; enum rc_proto last_protocol; u64 last_scancode; u8 last_toggle; u32 timeout; u32 min_timeout; u32 max_timeout; u32 rx_resolution; u32 tx_resolution; struct device lirc_dev; struct cdev lirc_cdev; ktime_t gap_start; spinlock_t lirc_fh_lock; struct list_head lirc_fh; bool registered; int (*change_protocol)(struct rc_dev *, u64 *); int (*open)(struct rc_dev *); void (*close)(struct rc_dev *); int (*s_tx_mask)(struct rc_dev *, u32); int (*s_tx_carrier)(struct rc_dev *, u32); int (*s_tx_duty_cycle)(struct rc_dev *, u32); int (*s_rx_carrier_range)(struct rc_dev *, u32, u32); int (*tx_ir)(struct rc_dev *, unsigned int *, unsigned int); void (*s_idle)(struct rc_dev *, bool); int (*s_wideband_receiver)(struct rc_dev *, int); int (*s_carrier_report)(struct rc_dev *, int); int (*s_filter)(struct rc_dev *, struct rc_scancode_filter *); int (*s_wakeup_filter)(struct rc_dev *, struct rc_scancode_filter *); int (*s_timeout)(struct rc_dev *, unsigned int); }; struct rc_map_table { u64 scancode; u32 keycode; }; struct code_block { u8 length; u16 pulse[7]; u8 carrier_pulse; u8 carrier_space; u16 space[8]; u8 codes[61]; u8 csum[2]; } __attribute__((packed)); struct IR_i2c_init_data { char *ir_codes; const char *name; u64 type; u32 polling_interval; int (*get_key)(struct IR_i2c *, enum rc_proto *, u32 *, u8 *); enum ir_kbd_get_key_fn internal_get_key_func; struct rc_dev *rc_dev; }; struct media_ioctl_info { unsigned int cmd; unsigned short flags; long (*fn)(struct media_device *, void *); long (*arg_from_user)(void *, void __attribute__((btf_type_tag("user"))) *, unsigned int); long (*arg_to_user)(void __attribute__((btf_type_tag("user"))) *, void *, unsigned int); }; enum media_gobj_type { MEDIA_GRAPH_ENTITY = 0, MEDIA_GRAPH_PAD = 1, MEDIA_GRAPH_LINK = 2, MEDIA_GRAPH_INTF_DEVNODE = 3, }; struct media_entity_notify { struct list_head list; void *notify_data; void (*notify)(struct media_entity *, void *); }; struct media_links_enum32 { __u32 entity; compat_uptr_t pads; compat_uptr_t links; __u32 reserved[4]; }; struct media_pad_desc; struct media_link_desc; struct media_links_enum { __u32 entity; struct media_pad_desc __attribute__((btf_type_tag("user"))) *pads; struct media_link_desc __attribute__((btf_type_tag("user"))) *links; __u32 reserved[4]; }; struct media_pad_desc { __u32 entity; __u16 index; __u32 flags; __u32 reserved[2]; }; struct media_link_desc { struct media_pad_desc source; struct media_pad_desc sink; __u32 flags; __u32 reserved[2]; }; struct media_device_info { char driver[16]; char model[32]; char serial[40]; char bus_info[32]; __u32 media_version; __u32 hw_revision; __u32 driver_version; __u32 reserved[31]; }; struct media_entity_desc { __u32 id; char name[32]; __u32 type; __u32 revision; __u32 flags; __u32 group_id; __u16 pads; __u16 links; __u32 reserved[4]; union { struct { __u32 major; __u32 minor; } dev; __u8 raw[184]; }; }; struct media_v2_entity { __u32 id; char name[64]; __u32 function; __u32 flags; __u32 reserved[5]; }; struct media_v2_intf_devnode { __u32 major; __u32 minor; }; struct media_v2_interface { __u32 id; __u32 
intf_type; __u32 flags; __u32 reserved[9]; union { struct media_v2_intf_devnode devnode; __u32 raw[16]; }; }; struct media_v2_pad { __u32 id; __u32 entity_id; __u32 flags; __u32 index; __u32 reserved[4]; }; struct media_v2_link { __u32 id; __u32 source_id; __u32 sink_id; __u32 flags; __u32 reserved[6]; }; struct media_v2_topology { __u64 topology_version; __u32 num_entities; __u32 reserved1; __u64 ptr_entities; __u32 num_interfaces; __u32 reserved2; __u64 ptr_interfaces; __u32 num_pads; __u32 reserved3; __u64 ptr_pads; __u32 num_links; __u32 reserved4; __u64 ptr_links; }; struct media_pipeline_pad { struct list_head list; struct media_pipeline *pipe; struct media_pad *pad; }; struct media_pipeline_walk_entry; struct media_pipeline_walk { struct media_device *mdev; struct { unsigned int size; int top; struct media_pipeline_walk_entry *entries; } stack; }; struct media_pipeline_walk_entry { struct media_pad *pad; struct list_head *links; }; struct media_pipeline_pad_iter { struct list_head *cursor; }; struct media_pipeline_entity_iter { struct media_entity_enum ent_enum; struct list_head *cursor; }; struct media_device_instance { struct media_device mdev; struct module *owner; struct list_head list; struct kref refcount; }; struct v4l2_async_subdev_endpoint { struct list_head async_subdev_endpoint_entry; struct fwnode_handle *endpoint; }; enum v4l2_fwnode_bus_type { V4L2_FWNODE_BUS_TYPE_GUESS = 0, V4L2_FWNODE_BUS_TYPE_CSI2_CPHY = 1, V4L2_FWNODE_BUS_TYPE_CSI1 = 2, V4L2_FWNODE_BUS_TYPE_CCP2 = 3, V4L2_FWNODE_BUS_TYPE_CSI2_DPHY = 4, V4L2_FWNODE_BUS_TYPE_PARALLEL = 5, V4L2_FWNODE_BUS_TYPE_BT656 = 6, V4L2_FWNODE_BUS_TYPE_DPI = 7, NR_OF_V4L2_FWNODE_BUS_TYPE = 8, }; struct v4l2_fwnode_bus_conv { enum v4l2_fwnode_bus_type fwnode_bus_type; enum v4l2_mbus_type mbus_type; const char *name; }; enum v4l2_connector_type { V4L2_CONN_UNKNOWN = 0, V4L2_CONN_COMPOSITE = 1, V4L2_CONN_SVIDEO = 2, }; struct v4l2_fwnode_connector_conv { enum v4l2_connector_type type; const char *compatible; }; struct v4l2_fwnode_int_props { const char *name; const char * const *props; unsigned int nprops; }; enum v4l2_fwnode_orientation { V4L2_FWNODE_ORIENTATION_FRONT = 0, V4L2_FWNODE_ORIENTATION_BACK = 1, V4L2_FWNODE_ORIENTATION_EXTERNAL = 2, }; struct v4l2_fwnode_link { struct fwnode_handle *local_node; unsigned int local_port; unsigned int local_id; struct fwnode_handle *remote_node; unsigned int remote_port; unsigned int remote_id; }; struct v4l2_connector_link { struct list_head head; struct v4l2_fwnode_link fwnode_link; }; struct v4l2_fwnode_endpoint { struct fwnode_endpoint base; enum v4l2_mbus_type bus_type; struct { struct v4l2_mbus_config_parallel parallel; struct v4l2_mbus_config_mipi_csi1 mipi_csi1; struct v4l2_mbus_config_mipi_csi2 mipi_csi2; } bus; u64 *link_frequencies; unsigned int nr_of_link_frequencies; }; struct v4l2_fwnode_connector_analog { v4l2_std_id sdtv_stds; }; struct v4l2_fwnode_connector { const char *name; const char *label; enum v4l2_connector_type type; struct list_head links; unsigned int nr_of_links; union { struct v4l2_fwnode_connector_analog analog; } connector; }; struct v4l2_fwnode_device_properties { enum v4l2_fwnode_orientation orientation; unsigned int rotation; }; enum v4l2_video_device_flags { V4L2_FL_REGISTERED = 0, V4L2_FL_USES_V4L2_FH = 1, V4L2_FL_QUIRK_INVERTED_CROP = 2, V4L2_FL_SUBDEV_RO_DEVNODE = 3, }; enum v4l2_m2m_entity_type { MEM2MEM_ENT_TYPE_SOURCE = 0, MEM2MEM_ENT_TYPE_SINK = 1, MEM2MEM_ENT_TYPE_PROC = 2, }; enum v4l2_memory { V4L2_MEMORY_MMAP = 1, V4L2_MEMORY_USERPTR = 2, 
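/*
 * Illustrative note, not part of the generated dump: struct media_v2_topology
 * above mirrors the UAPI layout consumed by the MEDIA_IOC_G_TOPOLOGY ioctl
 * from <linux/media.h>. Userspace usually issues the ioctl twice: once with
 * the ptr_* fields zeroed to learn the element counts, then again with
 * buffers attached. A minimal sketch, assuming an already-open /dev/mediaN
 * file descriptor in "fd":
 *
 *   struct media_v2_topology topo = {0};
 *   ioctl(fd, MEDIA_IOC_G_TOPOLOGY, &topo);      // first pass: counts only
 *
 *   struct media_v2_entity *ents = calloc(topo.num_entities, sizeof(*ents));
 *   topo.ptr_entities = (__u64)(uintptr_t)ents;
 *   ioctl(fd, MEDIA_IOC_G_TOPOLOGY, &topo);      // second pass: fill entities
 */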
V4L2_MEMORY_OVERLAY = 3, V4L2_MEMORY_DMABUF = 4, }; struct v4l2_m2m_buffer { struct vb2_v4l2_buffer vb; struct list_head list; }; struct v4l2_m2m_queue_ctx { struct vb2_queue q; struct list_head rdy_queue; spinlock_t rdy_spinlock; u8 num_rdy; bool buffered; }; struct v4l2_m2m_dev; struct v4l2_m2m_ctx { struct mutex *q_lock; bool new_frame; bool is_draining; struct vb2_v4l2_buffer *last_src_buf; bool next_buf_last; bool has_stopped; struct v4l2_m2m_dev *m2m_dev; struct v4l2_m2m_queue_ctx cap_q_ctx; struct v4l2_m2m_queue_ctx out_q_ctx; struct list_head queue; unsigned long job_flags; wait_queue_head_t finished; void *priv; }; struct v4l2_m2m_ops; struct v4l2_m2m_dev { struct v4l2_m2m_ctx *curr_ctx; struct media_entity *source; struct media_pad source_pad; struct media_entity sink; struct media_pad sink_pad; struct media_entity proc; struct media_pad proc_pads[2]; struct media_intf_devnode *intf_devnode; struct list_head job_queue; spinlock_t job_spinlock; struct work_struct job_work; unsigned long job_queue_flags; const struct v4l2_m2m_ops *m2m_ops; }; struct v4l2_m2m_ops { void (*device_run)(void *); int (*job_ready)(void *); void (*job_abort)(void *); }; enum v4l2_ycbcr_encoding { V4L2_YCBCR_ENC_DEFAULT = 0, V4L2_YCBCR_ENC_601 = 1, V4L2_YCBCR_ENC_709 = 2, V4L2_YCBCR_ENC_XV601 = 3, V4L2_YCBCR_ENC_XV709 = 4, V4L2_YCBCR_ENC_BT2020 = 6, V4L2_YCBCR_ENC_BT2020_CONST_LUM = 7, V4L2_YCBCR_ENC_SMPTE240M = 8, V4L2_YCBCR_ENC_LAST = 9, }; enum v4l2_quantization { V4L2_QUANTIZATION_DEFAULT = 0, V4L2_QUANTIZATION_FULL_RANGE = 1, V4L2_QUANTIZATION_LIM_RANGE = 2, }; enum v4l2_xfer_func { V4L2_XFER_FUNC_DEFAULT = 0, V4L2_XFER_FUNC_709 = 1, V4L2_XFER_FUNC_SRGB = 2, V4L2_XFER_FUNC_OPRGB = 3, V4L2_XFER_FUNC_SMPTE240M = 4, V4L2_XFER_FUNC_NONE = 5, V4L2_XFER_FUNC_DCI_P3 = 6, V4L2_XFER_FUNC_SMPTE2084 = 7, V4L2_XFER_FUNC_LAST = 8, }; struct v4l2_hdmi_colorimetry { enum v4l2_colorspace colorspace; enum v4l2_ycbcr_encoding ycbcr_enc; enum v4l2_quantization quantization; enum v4l2_xfer_func xfer_func; }; typedef bool v4l2_check_dv_timings_fnc(const struct v4l2_dv_timings *, void *); struct std_descr { v4l2_std_id std; const char *descr; }; struct v4l2_ioctl_info { unsigned int ioctl; u32 flags; const char * const name; int (*func)(const struct v4l2_ioctl_ops *, struct file *, void *, void *); void (*debug)(const void *, bool); }; struct v4l2_ctrl; typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *, void *); struct v4l2_ctrl_ref; struct v4l2_ctrl_handler { struct mutex _lock; struct mutex *lock; struct list_head ctrls; struct list_head ctrl_refs; struct v4l2_ctrl_ref *cached; struct v4l2_ctrl_ref **buckets; v4l2_ctrl_notify_fnc notify; void *notify_priv; u16 nr_of_buckets; int error; bool request_is_queued; struct list_head requests; struct list_head requests_queued; struct media_request_object req_obj; u64 android_kabi_reserved1; }; struct v4l2_ctrl_hdr10_cll_info; struct v4l2_ctrl_hdr10_mastering_display; union v4l2_ctrl_ptr { s32 *p_s32; s64 *p_s64; u8 *p_u8; u16 *p_u16; u32 *p_u32; char *p_char; struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence; struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture; struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quantisation; struct v4l2_ctrl_fwht_params *p_fwht_params; struct v4l2_ctrl_h264_sps *p_h264_sps; struct v4l2_ctrl_h264_pps *p_h264_pps; struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix; struct v4l2_ctrl_h264_slice_params *p_h264_slice_params; struct v4l2_ctrl_h264_decode_params *p_h264_decode_params; struct v4l2_ctrl_h264_pred_weights *p_h264_pred_weights; struct 
v4l2_ctrl_vp8_frame *p_vp8_frame; struct v4l2_ctrl_hevc_sps *p_hevc_sps; struct v4l2_ctrl_hevc_pps *p_hevc_pps; struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params; struct v4l2_ctrl_vp9_compressed_hdr *p_vp9_compressed_hdr_probs; struct v4l2_ctrl_vp9_frame *p_vp9_frame; struct v4l2_ctrl_hdr10_cll_info *p_hdr10_cll; struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering; struct v4l2_area *p_area; struct v4l2_ctrl_av1_sequence *p_av1_sequence; struct v4l2_ctrl_av1_tile_group_entry *p_av1_tile_group_entry; struct v4l2_ctrl_av1_frame *p_av1_frame; struct v4l2_ctrl_av1_film_grain *p_av1_film_grain; void *p; const void *p_const; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct v4l2_ctrl_helper; struct v4l2_ctrl_ref { struct list_head node; struct v4l2_ctrl_ref *next; struct v4l2_ctrl *ctrl; struct v4l2_ctrl_helper *helper; bool from_other_dev; bool req_done; bool p_req_valid; bool p_req_array_enomem; u32 p_req_array_alloc_elems; u32 p_req_elems; union v4l2_ctrl_ptr p_req; u64 android_kabi_reserved1; }; enum v4l2_ctrl_type { V4L2_CTRL_TYPE_INTEGER = 1, V4L2_CTRL_TYPE_BOOLEAN = 2, V4L2_CTRL_TYPE_MENU = 3, V4L2_CTRL_TYPE_BUTTON = 4, V4L2_CTRL_TYPE_INTEGER64 = 5, V4L2_CTRL_TYPE_CTRL_CLASS = 6, V4L2_CTRL_TYPE_STRING = 7, V4L2_CTRL_TYPE_BITMASK = 8, V4L2_CTRL_TYPE_INTEGER_MENU = 9, V4L2_CTRL_COMPOUND_TYPES = 256, V4L2_CTRL_TYPE_U8 = 256, V4L2_CTRL_TYPE_U16 = 257, V4L2_CTRL_TYPE_U32 = 258, V4L2_CTRL_TYPE_AREA = 262, V4L2_CTRL_TYPE_HDR10_CLL_INFO = 272, V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY = 273, V4L2_CTRL_TYPE_H264_SPS = 512, V4L2_CTRL_TYPE_H264_PPS = 513, V4L2_CTRL_TYPE_H264_SCALING_MATRIX = 514, V4L2_CTRL_TYPE_H264_SLICE_PARAMS = 515, V4L2_CTRL_TYPE_H264_DECODE_PARAMS = 516, V4L2_CTRL_TYPE_H264_PRED_WEIGHTS = 517, V4L2_CTRL_TYPE_FWHT_PARAMS = 544, V4L2_CTRL_TYPE_VP8_FRAME = 576, V4L2_CTRL_TYPE_MPEG2_QUANTISATION = 592, V4L2_CTRL_TYPE_MPEG2_SEQUENCE = 593, V4L2_CTRL_TYPE_MPEG2_PICTURE = 594, V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR = 608, V4L2_CTRL_TYPE_VP9_FRAME = 609, V4L2_CTRL_TYPE_HEVC_SPS = 624, V4L2_CTRL_TYPE_HEVC_PPS = 625, V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS = 626, V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX = 627, V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS = 628, V4L2_CTRL_TYPE_AV1_SEQUENCE = 640, V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY = 641, V4L2_CTRL_TYPE_AV1_FRAME = 642, V4L2_CTRL_TYPE_AV1_FILM_GRAIN = 643, }; struct v4l2_ctrl_ops; struct v4l2_ctrl_type_ops; struct v4l2_ctrl { struct list_head node; struct list_head ev_subs; struct v4l2_ctrl_handler *handler; struct v4l2_ctrl **cluster; unsigned int ncontrols; unsigned int done: 1; unsigned int is_new: 1; unsigned int has_changed: 1; unsigned int is_private: 1; unsigned int is_auto: 1; unsigned int is_int: 1; unsigned int is_string: 1; unsigned int is_ptr: 1; unsigned int is_array: 1; unsigned int is_dyn_array: 1; unsigned int has_volatiles: 1; unsigned int call_notify: 1; unsigned int manual_mode_value: 8; const struct v4l2_ctrl_ops *ops; const struct v4l2_ctrl_type_ops *type_ops; u32 id; const char *name; enum v4l2_ctrl_type type; s64 minimum; s64 maximum; s64 default_value; u32 elems; u32 elem_size; u32 new_elems; u32 dims[4]; u32 nr_of_dims; union { u64 step; u64 menu_skip_mask; }; union { const char * const *qmenu; const s64 *qmenu_int; }; unsigned long flags; void *priv; void *p_array; u32 p_array_alloc_elems; s32 val; struct { s32 val; } cur; union v4l2_ctrl_ptr p_def; union v4l2_ctrl_ptr p_new; union v4l2_ctrl_ptr p_cur; u64 android_kabi_reserved1; }; struct v4l2_ctrl_ops { int (*g_volatile_ctrl)(struct v4l2_ctrl *); int 
(*try_ctrl)(struct v4l2_ctrl *); int (*s_ctrl)(struct v4l2_ctrl *); u64 android_kabi_reserved1; }; struct v4l2_ctrl_type_ops { bool (*equal)(const struct v4l2_ctrl *, union v4l2_ctrl_ptr, union v4l2_ctrl_ptr); void (*init)(const struct v4l2_ctrl *, u32, union v4l2_ctrl_ptr); void (*log)(const struct v4l2_ctrl *); int (*validate)(const struct v4l2_ctrl *, union v4l2_ctrl_ptr); u64 android_kabi_reserved1; }; struct v4l2_ctrl_hdr10_cll_info { __u16 max_content_light_level; __u16 max_pic_average_light_level; }; struct v4l2_ctrl_hdr10_mastering_display { __u16 display_primaries_x[3]; __u16 display_primaries_y[3]; __u16 white_point_x; __u16 white_point_y; __u32 max_display_mastering_luminance; __u32 min_display_mastering_luminance; }; enum v4l2_hsv_encoding { V4L2_HSV_ENC_180 = 128, V4L2_HSV_ENC_256 = 129, }; enum v4l2_tuner_type { V4L2_TUNER_RADIO = 1, V4L2_TUNER_ANALOG_TV = 2, V4L2_TUNER_DIGITAL_TV = 3, V4L2_TUNER_SDR = 4, V4L2_TUNER_RF = 5, }; struct v4l2_subdev_routing { __u32 which; __u32 num_routes; __u64 routes; __u32 reserved[6]; }; struct v4l2_standard { __u32 index; v4l2_std_id id; __u8 name[24]; struct v4l2_fract frameperiod; __u32 framelines; __u32 reserved[4]; }; typedef long (*v4l2_kioctl)(struct file *, unsigned int, void *); struct v4l2_cropcap { __u32 type; struct v4l2_rect bounds; struct v4l2_rect defrect; struct v4l2_fract pixelaspect; }; struct v4l2_crop { __u32 type; struct v4l2_rect c; }; struct v4l2_dbg_match { __u32 type; union { __u32 addr; char name[32]; }; }; struct v4l2_dbg_register { struct v4l2_dbg_match match; __u32 size; __u64 reg; __u64 val; }; struct v4l2_dbg_chip_info { struct v4l2_dbg_match match; char name[32]; __u32 flags; __u32 reserved[32]; }; typedef bool (*v4l2_ctrl_filter)(const struct v4l2_ctrl *); enum v4l2_subdev_routing_restriction { V4L2_SUBDEV_ROUTING_NO_1_TO_N = 1, V4L2_SUBDEV_ROUTING_NO_N_TO_1 = 2, V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX = 4, V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX = 8, V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING = 16, V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING = 32, V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 = 3, V4L2_SUBDEV_ROUTING_NO_STREAM_MIX = 12, V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING = 48, }; struct v4l2_subdev_crop { __u32 which; __u32 pad; struct v4l2_rect rect; __u32 stream; __u32 reserved[7]; }; struct v4l2_subdev_client_capability { __u64 capabilities; }; struct v4l2_subdev_capability { __u32 version; __u32 capabilities; __u32 reserved[14]; }; struct v4l2_format_info { u32 format; u8 pixel_enc; u8 mem_planes; u8 comp_planes; u8 bpp[4]; u8 bpp_div[4]; u8 hdiv; u8 vdiv; u8 block_w[4]; u8 block_h[4]; }; struct v4l2_ctrl_config { const struct v4l2_ctrl_ops *ops; const struct v4l2_ctrl_type_ops *type_ops; u32 id; const char *name; enum v4l2_ctrl_type type; s64 min; s64 max; u64 step; s64 def; union v4l2_ctrl_ptr p_def; u32 dims[4]; u32 elem_size; u32 flags; u64 menu_skip_mask; const char * const *qmenu; const s64 *qmenu_int; unsigned int is_private: 1; u64 android_kabi_reserved1; }; struct v4l2_ctrl_helper { struct v4l2_ctrl_ref *mref; struct v4l2_ctrl_ref *ref; u32 next; }; struct v4l2_ext_controls32 { __u32 which; __u32 count; __u32 error_idx; __s32 request_fd; __u32 reserved[1]; compat_caddr_t controls; }; struct v4l2_input32 { __u32 index; __u8 name[32]; __u32 type; __u32 audioset; __u32 tuner; compat_u64 std; __u32 status; __u32 capabilities; __u32 reserved[3]; }; struct v4l2_standard32 { __u32 index; compat_u64 id; __u8 name[24]; struct v4l2_fract frameperiod; __u32 framelines; __u32 reserved[4]; }; struct v4l2_buffer32_time32 { 
__u32 index; __u32 type; __u32 bytesused; __u32 flags; __u32 field; struct old_timeval32 timestamp; struct v4l2_timecode timecode; __u32 sequence; __u32 memory; union { __u32 offset; compat_long_t userptr; compat_caddr_t planes; __s32 fd; } m; __u32 length; __u32 reserved2; __s32 request_fd; }; struct v4l2_framebuffer32 { __u32 capability; __u32 flags; compat_caddr_t base; struct { __u32 width; __u32 height; __u32 pixelformat; __u32 field; __u32 bytesperline; __u32 sizeimage; __u32 colorspace; __u32 priv; } fmt; }; struct v4l2_edid32 { __u32 pad; __u32 start_block; __u32 blocks; __u32 reserved[5]; compat_caddr_t edid; }; struct v4l2_buffer32 { __u32 index; __u32 type; __u32 bytesused; __u32 flags; __u32 field; struct { compat_s64 tv_sec; compat_s64 tv_usec; } timestamp; struct v4l2_timecode timecode; __u32 sequence; __u32 memory; union { __u32 offset; compat_long_t userptr; compat_caddr_t planes; __s32 fd; } m; __u32 length; __u32 reserved2; __s32 request_fd; }; struct v4l2_window32 { struct v4l2_rect w; __u32 field; __u32 chromakey; compat_caddr_t clips; __u32 clipcount; compat_caddr_t bitmap; __u8 global_alpha; }; struct v4l2_format32 { __u32 type; union { struct v4l2_pix_format pix; struct v4l2_pix_format_mplane pix_mp; struct v4l2_window32 win; struct v4l2_vbi_format vbi; struct v4l2_sliced_vbi_format sliced; struct v4l2_sdr_format sdr; struct v4l2_meta_format meta; __u8 raw_data[200]; } fmt; }; struct v4l2_create_buffers32 { __u32 index; __u32 count; __u32 memory; struct v4l2_format32 format; __u32 capabilities; __u32 flags; __u32 reserved[6]; }; struct v4l2_event32_time32 { __u32 type; union { compat_s64 value64; __u8 data[64]; } u; __u32 pending; __u32 sequence; struct old_timespec32 timestamp; __u32 id; __u32 reserved[8]; }; struct v4l2_plane32 { __u32 bytesused; __u32 length; union { __u32 mem_offset; compat_long_t userptr; __s32 fd; } m; __u32 data_offset; __u32 reserved[11]; }; struct v4l2_ext_control32 { __u32 id; __u32 size; __u32 reserved2[1]; union { __s32 value; __s64 value64; compat_caddr_t string; }; } __attribute__((packed)); typedef void (*btf_trace_v4l2_dqbuf)(void *, int, struct v4l2_buffer *); typedef void (*btf_trace_v4l2_qbuf)(void *, int, struct v4l2_buffer *); typedef void (*btf_trace_vb2_v4l2_buf_done)(void *, struct vb2_queue *, struct vb2_buffer *); typedef void (*btf_trace_vb2_v4l2_buf_queue)(void *, struct vb2_queue *, struct vb2_buffer *); typedef void (*btf_trace_vb2_v4l2_dqbuf)(void *, struct vb2_queue *, struct vb2_buffer *); typedef void (*btf_trace_vb2_v4l2_qbuf)(void *, struct vb2_queue *, struct vb2_buffer *); struct trace_event_raw_v4l2_event_class { struct trace_entry ent; int minor; u32 index; u32 type; u32 bytesused; u32 flags; u32 field; s64 timestamp; u32 timecode_type; u32 timecode_flags; u8 timecode_frames; u8 timecode_seconds; u8 timecode_minutes; u8 timecode_hours; u8 timecode_userbits0; u8 timecode_userbits1; u8 timecode_userbits2; u8 timecode_userbits3; u32 sequence; char __data[0]; }; struct trace_event_raw_vb2_v4l2_event_class { struct trace_entry ent; int minor; u32 flags; u32 field; u64 timestamp; u32 timecode_type; u32 timecode_flags; u8 timecode_frames; u8 timecode_seconds; u8 timecode_minutes; u8 timecode_hours; u8 timecode_userbits0; u8 timecode_userbits1; u8 timecode_userbits2; u8 timecode_userbits3; u32 sequence; char __data[0]; }; struct trace_event_data_offsets_v4l2_event_class {}; struct trace_event_data_offsets_vb2_v4l2_event_class {}; enum v4l2_i2c_tuner_type { ADDRS_RADIO = 0, ADDRS_DEMOD = 1, ADDRS_TV = 2, 
ADDRS_TV_WITH_DEMOD = 3, }; enum rc_filter_type { RC_FILTER_NORMAL = 0, RC_FILTER_WAKEUP = 1, RC_FILTER_MAX = 2, }; struct rc_filter_attribute { struct device_attribute attr; enum rc_filter_type type; bool mask; }; struct rc_map_list { struct list_head list; struct rc_map map; }; struct ir_raw_event { union { u32 duration; u32 carrier; }; u8 duty_cycle; unsigned int pulse: 1; unsigned int overflow: 1; unsigned int timeout: 1; unsigned int carrier_report: 1; }; struct ir_raw_event_ctrl { struct list_head list; struct task_struct *thread; struct { union { struct __kfifo kfifo; struct ir_raw_event *type; const struct ir_raw_event *const_type; char (*rectype)[0]; struct ir_raw_event *ptr; const struct ir_raw_event *ptr_const; }; struct ir_raw_event buf[512]; } kfifo; ktime_t last_event; struct rc_dev *dev; spinlock_t edge_spinlock; struct timer_list edge_handle; struct ir_raw_event prev_ev; struct ir_raw_event this_ev; u32 bpf_sample; struct bpf_prog_array __attribute__((btf_type_tag("rcu"))) *progs; }; struct lirc_scancode { __u64 timestamp; __u16 flags; __u16 rc_proto; __u32 keycode; __u64 scancode; }; struct ir_raw_handler { struct list_head list; u64 protocols; int (*decode)(struct rc_dev *, struct ir_raw_event); int (*encode)(enum rc_proto, u32, struct ir_raw_event *, unsigned int); u32 carrier; u32 min_timeout; int (*raw_register)(struct rc_dev *); int (*raw_unregister)(struct rc_dev *); }; struct ir_raw_timings_manchester { unsigned int leader_pulse; unsigned int leader_space; unsigned int clock; unsigned int invert: 1; unsigned int trailer_space; }; struct ir_raw_timings_pd { unsigned int header_pulse; unsigned int header_space; unsigned int bit_pulse; unsigned int bit_space[2]; unsigned int trailer_pulse; unsigned int trailer_space; unsigned int msb_first: 1; }; struct ir_raw_timings_pl { unsigned int header_pulse; unsigned int bit_space; unsigned int bit_pulse[2]; unsigned int trailer_space; unsigned int msb_first: 1; }; struct lirc_fh { struct list_head list; struct rc_dev *rc; int carrier_low; struct { union { struct __kfifo kfifo; unsigned int *type; const unsigned int *const_type; char (*rectype)[0]; unsigned int *ptr; const unsigned int *ptr_const; }; unsigned int buf[0]; } rawir; struct { union { struct __kfifo kfifo; struct lirc_scancode *type; const struct lirc_scancode *const_type; char (*rectype)[0]; struct lirc_scancode *ptr; const struct lirc_scancode *ptr_const; }; struct lirc_scancode buf[0]; } scancodes; wait_queue_head_t wait_poll; u8 send_mode; u8 rec_mode; }; typedef u64 (*btf_bpf_rc_repeat)(u32 *); typedef u64 (*btf_bpf_rc_keydown)(u32 *, u32, u64, u32); typedef u64 (*btf_bpf_rc_pointer_rel)(u32 *, s32, s32); struct cec_devnode { struct device dev; struct cdev cdev; int minor; struct mutex lock; bool registered; bool unregistered; struct mutex lock_fhs; struct list_head fhs; }; struct cec_log_addrs { __u8 log_addr[4]; __u16 log_addr_mask; __u8 cec_version; __u8 num_log_addrs; __u32 vendor_id; __u32 flags; char osd_name[15]; __u8 primary_device_type[4]; __u8 log_addr_type[4]; __u8 all_device_types[4]; __u8 features[48]; }; struct cec_drm_connector_info { __u32 card_no; __u32 connector_id; }; struct cec_connector_info { __u32 type; union { struct cec_drm_connector_info drm; __u32 raw[16]; }; }; struct cec_data; struct cec_adap_ops; struct cec_fh; struct cec_notifier; struct cec_pin; struct cec_adapter { struct module *owner; char name[32]; struct cec_devnode devnode; struct mutex lock; struct rc_dev *rc; struct list_head transmit_queue; unsigned int 
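/*
 * Illustrative sketch, not part of the generated dump: the btf_bpf_rc_*
 * typedefs above describe the bpf_rc_repeat()/bpf_rc_keydown()/
 * bpf_rc_pointer_rel() helpers available to BPF_PROG_TYPE_LIRC_MODE2
 * programs, whose context is a pointer to a single raw IR sample. A
 * skeleton decoder (LIRC_IS_PULSE/LIRC_VALUE come from <linux/lirc.h>;
 * the pulse threshold and scancode below are invented):
 *
 *   SEC("lirc_mode2")
 *   int ir_decoder(unsigned int *sample)
 *   {
 *       if (LIRC_IS_PULSE(*sample)) {
 *           unsigned int usecs = LIRC_VALUE(*sample);
 *           // report a made-up scancode when a long pulse is seen
 *           if (usecs > 20000)
 *               bpf_rc_keydown(sample, RC_PROTO_OTHER, 0x42, 0);
 *       }
 *       return 0;
 *   }
 */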
transmit_queue_sz; struct list_head wait_queue; struct cec_data *transmitting; bool transmit_in_progress; bool transmit_in_progress_aborted; unsigned int xfer_timeout_ms; struct task_struct *kthread_config; struct completion config_completion; struct task_struct *kthread; wait_queue_head_t kthread_waitq; const struct cec_adap_ops *ops; void *priv; u32 capabilities; u8 available_log_addrs; u16 phys_addr; bool needs_hpd; bool is_enabled; bool is_configuring; bool must_reconfigure; bool is_configured; bool cec_pin_is_high; bool adap_controls_phys_addr; u8 last_initiator; u32 monitor_all_cnt; u32 monitor_pin_cnt; u32 follower_cnt; struct cec_fh *cec_follower; struct cec_fh *cec_initiator; bool passthrough; struct cec_log_addrs log_addrs; struct cec_connector_info conn_info; u32 tx_timeouts; struct cec_notifier *notifier; struct cec_pin *pin; struct dentry *cec_dir; u32 sequence; char input_phys[32]; }; struct cec_msg { __u64 tx_ts; __u64 rx_ts; __u32 len; __u32 timeout; __u32 sequence; __u32 flags; __u8 msg[16]; __u8 reply; __u8 rx_status; __u8 tx_status; __u8 tx_arb_lost_cnt; __u8 tx_nack_cnt; __u8 tx_low_drive_cnt; __u8 tx_error_cnt; }; struct cec_data { struct list_head list; struct list_head xfer_list; struct cec_adapter *adap; struct cec_msg msg; struct cec_fh *fh; struct delayed_work work; struct completion c; u8 attempts; bool blocking; bool completed; }; struct cec_event_state_change { __u16 phys_addr; __u16 log_addr_mask; __u16 have_conn_info; }; struct cec_event_lost_msgs { __u32 lost_msgs; }; struct cec_event { __u64 ts; __u32 event; __u32 flags; union { struct cec_event_state_change state_change; struct cec_event_lost_msgs lost_msgs; __u32 raw[16]; }; }; struct cec_event_entry { struct list_head list; struct cec_event ev; }; struct cec_fh { struct list_head list; struct list_head xfer_list; struct cec_adapter *adap; u8 mode_initiator; u8 mode_follower; wait_queue_head_t wait; struct mutex lock; struct list_head events[8]; u16 queued_events[8]; unsigned int total_queued_events; struct cec_event_entry core_events[2]; struct list_head msgs; unsigned int queued_msgs; }; struct cec_adap_ops { int (*adap_enable)(struct cec_adapter *, bool); int (*adap_monitor_all_enable)(struct cec_adapter *, bool); int (*adap_monitor_pin_enable)(struct cec_adapter *, bool); int (*adap_log_addr)(struct cec_adapter *, u8); void (*adap_unconfigured)(struct cec_adapter *); int (*adap_transmit)(struct cec_adapter *, u8, u32, struct cec_msg *); void (*adap_nb_transmit_canceled)(struct cec_adapter *, const struct cec_msg *); void (*adap_status)(struct cec_adapter *, struct seq_file *); void (*adap_free)(struct cec_adapter *); int (*error_inj_show)(struct cec_adapter *, struct seq_file *); bool (*error_inj_parse_line)(struct cec_adapter *, char *); void (*configured)(struct cec_adapter *); int (*received)(struct cec_adapter *, struct cec_msg *); }; struct cec_msg_entry { struct list_head list; struct cec_msg msg; }; enum cec_pin_state { CEC_ST_OFF = 0, CEC_ST_IDLE = 1, CEC_ST_TX_WAIT = 2, CEC_ST_TX_WAIT_FOR_HIGH = 3, CEC_ST_TX_START_BIT_LOW = 4, CEC_ST_TX_START_BIT_HIGH = 5, CEC_ST_TX_START_BIT_HIGH_SHORT = 6, CEC_ST_TX_START_BIT_HIGH_LONG = 7, CEC_ST_TX_START_BIT_LOW_CUSTOM = 8, CEC_ST_TX_START_BIT_HIGH_CUSTOM = 9, CEC_ST_TX_DATA_BIT_0_LOW = 10, CEC_ST_TX_DATA_BIT_0_HIGH = 11, CEC_ST_TX_DATA_BIT_0_HIGH_SHORT = 12, CEC_ST_TX_DATA_BIT_0_HIGH_LONG = 13, CEC_ST_TX_DATA_BIT_1_LOW = 14, CEC_ST_TX_DATA_BIT_1_HIGH = 15, CEC_ST_TX_DATA_BIT_1_HIGH_SHORT = 16, CEC_ST_TX_DATA_BIT_1_HIGH_LONG = 17, 
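/*
 * Illustrative sketch, not part of the generated dump: struct cec_adapter and
 * struct cec_msg above are enough for a CO-RE fentry probe on the CEC core's
 * transmit path. This assumes the target kernel exposes cec_transmit_msg()
 * under that name; the probe name and output format are made up:
 *
 *   SEC("fentry/cec_transmit_msg")
 *   int BPF_PROG(on_cec_tx, struct cec_adapter *adap, struct cec_msg *msg,
 *                bool block)
 *   {
 *       // log message length and the initiator/destination header byte
 *       bpf_printk("cec tx len=%u hdr=0x%x",
 *                  BPF_CORE_READ(msg, len), BPF_CORE_READ(msg, msg[0]));
 *       return 0;
 *   }
 */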
CEC_ST_TX_DATA_BIT_1_HIGH_PRE_SAMPLE = 18, CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE = 19, CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE_SHORT = 20, CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE_LONG = 21, CEC_ST_TX_DATA_BIT_LOW_CUSTOM = 22, CEC_ST_TX_DATA_BIT_HIGH_CUSTOM = 23, CEC_ST_TX_PULSE_LOW_CUSTOM = 24, CEC_ST_TX_PULSE_HIGH_CUSTOM = 25, CEC_ST_TX_LOW_DRIVE = 26, CEC_ST_RX_START_BIT_LOW = 27, CEC_ST_RX_START_BIT_HIGH = 28, CEC_ST_RX_DATA_SAMPLE = 29, CEC_ST_RX_DATA_POST_SAMPLE = 30, CEC_ST_RX_DATA_WAIT_FOR_LOW = 31, CEC_ST_RX_ACK_LOW = 32, CEC_ST_RX_ACK_LOW_POST = 33, CEC_ST_RX_ACK_HIGH_POST = 34, CEC_ST_RX_ACK_FINISH = 35, CEC_ST_RX_LOW_DRIVE = 36, CEC_ST_RX_IRQ = 37, CEC_PIN_STATES = 38, }; struct cec_pin_ops; struct cec_pin { struct cec_adapter *adap; const struct cec_pin_ops *ops; struct task_struct *kthread; wait_queue_head_t kthread_waitq; struct hrtimer timer; ktime_t ts; unsigned int wait_usecs; u16 la_mask; bool monitor_all; bool rx_eom; bool enabled_irq; bool enable_irq_failed; enum cec_pin_state state; struct cec_msg tx_msg; u32 tx_bit; bool tx_nacked; u32 tx_signal_free_time; bool tx_toggle; struct cec_msg rx_msg; u32 rx_bit; bool rx_toggle; u32 rx_start_bit_low_too_short_cnt; u64 rx_start_bit_low_too_short_ts; u32 rx_start_bit_low_too_short_delta; u32 rx_start_bit_too_short_cnt; u64 rx_start_bit_too_short_ts; u32 rx_start_bit_too_short_delta; u32 rx_start_bit_too_long_cnt; u32 rx_data_bit_too_short_cnt; u64 rx_data_bit_too_short_ts; u32 rx_data_bit_too_short_delta; u32 rx_data_bit_too_long_cnt; u32 rx_low_drive_cnt; struct cec_msg work_rx_msg; u8 work_tx_status; ktime_t work_tx_ts; atomic_t work_irq_change; atomic_t work_pin_num_events; unsigned int work_pin_events_wr; unsigned int work_pin_events_rd; ktime_t work_pin_ts[128]; u8 work_pin_events[128]; bool work_pin_events_dropped; u32 work_pin_events_dropped_cnt; ktime_t timer_ts; u32 timer_cnt; u32 timer_100us_overruns; u32 timer_300us_overruns; u32 timer_max_overrun; u32 timer_sum_overrun; u32 tx_custom_low_usecs; u32 tx_custom_high_usecs; bool tx_ignore_nack_until_eom; bool tx_custom_pulse; bool tx_generated_poll; bool tx_post_eom; u8 tx_extra_bytes; u32 tx_low_drive_cnt; }; struct cec_pin_ops { int (*read)(struct cec_adapter *); void (*low)(struct cec_adapter *); void (*high)(struct cec_adapter *); bool (*enable_irq)(struct cec_adapter *); void (*disable_irq)(struct cec_adapter *); void (*free)(struct cec_adapter *); void (*status)(struct cec_adapter *, struct seq_file *); int (*read_hpd)(struct cec_adapter *); int (*read_5v)(struct cec_adapter *); int (*received)(struct cec_adapter *, struct cec_msg *); }; struct cec_caps { char driver[32]; char name[32]; __u32 available_log_addrs; __u32 capabilities; __u32 version; }; struct cec_notifier { struct mutex lock; struct list_head head; struct kref kref; struct device *hdmi_dev; struct cec_connector_info conn_info; const char *port_name; struct cec_adapter *cec_adap; u16 phys_addr; }; struct cec_state { const char * const name; unsigned int usecs; }; enum vb2_memory { VB2_MEMORY_UNKNOWN = 0, VB2_MEMORY_MMAP = 1, VB2_MEMORY_USERPTR = 2, VB2_MEMORY_DMABUF = 4, }; struct vb2_fileio_buf { void *vaddr; unsigned int size; unsigned int pos; unsigned int queued: 1; }; struct vb2_fileio_data { unsigned int count; unsigned int type; unsigned int memory; struct vb2_fileio_buf bufs[64]; unsigned int cur_index; unsigned int initial_index; unsigned int q_count; unsigned int dq_count; unsigned int read_once: 1; unsigned int write_immediately: 1; }; typedef int (*vb2_thread_fnc)(struct vb2_buffer *, void 
*); struct vb2_threadio_data { struct task_struct *thread; vb2_thread_fnc fnc; void *priv; bool stop; }; struct frame_vector { unsigned int nr_allocated; unsigned int nr_frames; bool got_ref; bool is_pfns; void *ptrs[0]; }; typedef void (*btf_trace_vb2_buf_done)(void *, struct vb2_queue *, struct vb2_buffer *); typedef void (*btf_trace_vb2_buf_queue)(void *, struct vb2_queue *, struct vb2_buffer *); typedef void (*btf_trace_vb2_dqbuf)(void *, struct vb2_queue *, struct vb2_buffer *); typedef void (*btf_trace_vb2_qbuf)(void *, struct vb2_queue *, struct vb2_buffer *); struct trace_event_raw_vb2_event_class { struct trace_entry ent; void *owner; u32 queued_count; int owned_by_drv_count; u32 index; u32 type; u32 bytesused; u64 timestamp; char __data[0]; }; struct trace_event_data_offsets_vb2_event_class {}; struct vb2_vmarea_handler { refcount_t *refcount; void (*put)(void *); void *arg; }; struct vb2_dc_buf { struct device *dev; void *vaddr; unsigned long size; void *cookie; dma_addr_t dma_addr; unsigned long attrs; enum dma_data_direction dma_dir; struct sg_table *dma_sgt; struct frame_vector *vec; struct vb2_vmarea_handler handler; refcount_t refcount; struct sg_table *sgt_base; struct dma_buf_attachment *db_attach; struct vb2_buffer *vb; bool non_coherent_mem; }; struct vb2_dc_attachment { struct sg_table sgt; enum dma_data_direction dma_dir; }; struct vb2_dma_sg_buf { struct device *dev; void *vaddr; struct page **pages; struct frame_vector *vec; int offset; enum dma_data_direction dma_dir; struct sg_table sg_table; struct sg_table *dma_sgt; size_t size; unsigned int num_pages; refcount_t refcount; struct vb2_vmarea_handler handler; struct dma_buf_attachment *db_attach; struct vb2_buffer *vb; }; struct vb2_dma_sg_attachment { struct sg_table sgt; enum dma_data_direction dma_dir; }; struct vb2_vmalloc_buf { void *vaddr; struct frame_vector *vec; enum dma_data_direction dma_dir; unsigned long size; refcount_t refcount; struct vb2_vmarea_handler handler; struct dma_buf *dbuf; }; struct vb2_vmalloc_attachment { struct sg_table sgt; enum dma_data_direction dma_dir; }; enum gspca_packet_type { DISCARD_PACKET = 0, FIRST_PACKET = 1, INTER_PACKET = 2, LAST_PACKET = 3, }; struct gspca_buffer { struct vb2_v4l2_buffer vb; struct list_head list; }; struct framerates; struct cam { const struct v4l2_pix_format *cam_mode; const struct framerates *mode_framerates; u32 bulk_size; u32 input_flags; u8 nmodes; u8 no_urb_create; u8 bulk_nurbs; u8 bulk; u8 npkt; u8 needs_full_bandwidth; }; struct sd_desc; struct gspca_dev { struct video_device vdev; struct module *module; struct v4l2_device v4l2_dev; struct usb_device *dev; struct input_dev *input_dev; char phys[64]; struct cam cam; const struct sd_desc *sd_desc; struct v4l2_ctrl_handler ctrl_handler; struct { struct v4l2_ctrl *autogain; struct v4l2_ctrl *exposure; struct v4l2_ctrl *gain; int exp_too_low_cnt; int exp_too_high_cnt; }; __u8 *usb_buf; struct urb *urb[4]; struct urb *int_urb; u8 *image; u32 image_len; __u8 last_packet_type; __s8 empty_packet; bool streaming; __u8 curr_mode; struct v4l2_pix_format pixfmt; __u32 sequence; struct vb2_queue queue; spinlock_t qlock; struct list_head buf_list; wait_queue_head_t wq; struct mutex usb_lock; int usb_err; u16 pkt_size; char frozen; bool present; char memory; __u8 iface; __u8 alt; int xfer_ep; u8 audio; }; struct framerates { const u8 *rates; int nrates; }; typedef int (*cam_cf_op)(struct gspca_dev *, const struct usb_device_id *); typedef int (*cam_op)(struct gspca_dev *); typedef void (*cam_v_op)(struct 
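/*
 * Illustrative sketch, not part of the generated dump: btf_trace_vb2_buf_done
 * above gives the argument list of the vb2_buf_done tracepoint, so buffer
 * completions in videobuf2 can be observed with a CO-RE program such as the
 * following (same libbpf includes as the i2c_write sketch earlier; names
 * invented):
 *
 *   SEC("tp_btf/vb2_buf_done")
 *   int BPF_PROG(on_vb2_done, struct vb2_queue *q, struct vb2_buffer *vb)
 *   {
 *       // buffer index plus how many buffers the queue currently holds
 *       bpf_printk("vb2 done: index=%u queued=%u",
 *                  BPF_CORE_READ(vb, index), BPF_CORE_READ(q, queued_count));
 *       return 0;
 *   }
 */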
gspca_dev *); typedef void (*cam_pkt_op)(struct gspca_dev *, u8 *, int); typedef int (*cam_get_jpg_op)(struct gspca_dev *, struct v4l2_jpegcompression *); typedef int (*cam_set_jpg_op)(struct gspca_dev *, const struct v4l2_jpegcompression *); typedef void (*cam_streamparm_op)(struct gspca_dev *, struct v4l2_streamparm *); typedef void (*cam_format_op)(struct gspca_dev *, struct v4l2_format *); typedef int (*cam_frmsize_op)(struct gspca_dev *, struct v4l2_frmsizeenum *); typedef int (*cam_int_pkt_op)(struct gspca_dev *, u8 *, int); struct sd_desc { const char *name; cam_cf_op config; cam_op init; cam_op init_controls; cam_v_op probe_error; cam_op start; cam_pkt_op pkt_scan; cam_op isoc_init; cam_op isoc_nego; cam_v_op stopN; cam_v_op stop0; cam_v_op dq_callback; cam_get_jpg_op get_jcomp; cam_set_jpg_op set_jcomp; cam_streamparm_op get_streamparm; cam_streamparm_op set_streamparm; cam_format_op try_fmt; cam_frmsize_op enum_framesizes; cam_int_pkt_op int_pkt_scan; u8 other_input; }; struct ep_tb_s { u32 alt; u32 bandwidth; }; struct uvc_driver { struct usb_driver driver; }; struct uvc_control_mapping; struct uvc_device_info { u32 quirks; u32 meta_format; u16 uvc_version; const struct uvc_control_mapping **mappings; }; struct uvc_control_mapping { struct list_head list; struct list_head ev_subs; u32 id; char *name; u8 entity[16]; u8 selector; u8 size; u8 offset; enum v4l2_ctrl_type v4l2_type; u32 data_type; const u32 *menu_mapping; const char (*menu_names)[32]; unsigned long menu_mask; u32 master_id; s32 master_manual; u32 slave_ids[2]; s32 (*get)(struct uvc_control_mapping *, u8, const u8 *); void (*set)(struct uvc_control_mapping *, s32, u8 *); }; enum uvc_buffer_state___2 { UVC_BUF_STATE_IDLE___2 = 0, UVC_BUF_STATE_QUEUED___2 = 1, UVC_BUF_STATE_ACTIVE___2 = 2, UVC_BUF_STATE_READY = 3, UVC_BUF_STATE_DONE___2 = 4, UVC_BUF_STATE_ERROR___2 = 5, }; enum uvc_handle_state { UVC_HANDLE_PASSIVE = 0, UVC_HANDLE_ACTIVE = 1, }; struct uvc_device___2; struct uvc_control; struct uvc_entity { struct list_head list; struct list_head chain; unsigned int flags; u16 id; u16 type; char name[64]; u8 guid[16]; struct video_device *vdev; struct v4l2_subdev subdev; unsigned int num_pads; unsigned int num_links; struct media_pad *pads; union { struct { u16 wObjectiveFocalLengthMin; u16 wObjectiveFocalLengthMax; u16 wOcularFocalLength; u8 bControlSize; u8 *bmControls; } camera; struct { u8 bControlSize; u8 *bmControls; u8 bTransportModeSize; u8 *bmTransportModes; } media; struct {} output; struct { u16 wMaxMultiplier; u8 bControlSize; u8 *bmControls; u8 bmVideoStandards; } processing; struct {} selector; struct { u8 bNumControls; u8 bControlSize; u8 *bmControls; u8 *bmControlsType; } extension; struct { u8 bControlSize; u8 *bmControls; struct gpio_desc *gpio_privacy; int irq; } gpio; }; u8 bNrInPins; u8 *baSourceID; int (*get_info)(struct uvc_device___2 *, struct uvc_entity *, u8, u8 *); int (*get_cur)(struct uvc_device___2 *, struct uvc_entity *, u8, void *, u16); unsigned int ncontrols; struct uvc_control *controls; }; struct uvc_video_chain; struct uvc_ctrl_work { struct work_struct work; struct urb *urb; struct uvc_video_chain *chain; struct uvc_control *ctrl; const void *data; }; struct uvc_status; struct uvc_device___2 { struct usb_device *udev; struct usb_interface *intf; unsigned long warnings; u32 quirks; int intfnum; char name[32]; const struct uvc_device_info *info; struct mutex lock; unsigned int users; atomic_t nmappings; struct media_device mdev; struct v4l2_device vdev; u16 uvc_version; u32 
clock_frequency; struct list_head entities; struct list_head chains; struct list_head streams; struct kref ref; struct usb_host_endpoint *int_ep; struct urb *int_urb; struct uvc_status *status; bool flush_status; struct input_dev *input; char input_phys[64]; struct uvc_ctrl_work async_ctrl; struct uvc_entity *gpio_unit; }; struct uvc_status_control { u8 bSelector; u8 bAttribute; u8 bValue[11]; }; struct uvc_status_streaming { u8 button; }; struct uvc_status { u8 bStatusType; u8 bOriginator; u8 bEvent; union { struct uvc_status_control control; struct uvc_status_streaming streaming; }; }; struct uvc_video_chain { struct uvc_device___2 *dev; struct list_head list; struct list_head entities; struct uvc_entity *processing; struct uvc_entity *selector; struct mutex ctrl_mutex; struct v4l2_prio_state prio; u32 caps; u8 ctrl_class_bitmap; }; struct uvc_control_info { struct list_head mappings; u8 entity[16]; u8 index; u8 selector; u16 size; u32 flags; }; struct uvc_fh; struct uvc_control { struct uvc_entity *entity; struct uvc_control_info info; u8 index; u8 dirty: 1; u8 loaded: 1; u8 modified: 1; u8 cached: 1; u8 initialized: 1; u8 *uvc_data; struct uvc_fh *handle; }; struct uvc_streaming; struct uvc_fh { struct v4l2_fh vfh; struct uvc_video_chain *chain; struct uvc_streaming *stream; enum uvc_handle_state state; }; struct uvc_video_queue___2 { struct vb2_queue queue; struct mutex mutex; unsigned int flags; unsigned int buf_used; spinlock_t irqlock; struct list_head irqqueue; }; struct uvc_stats_frame { unsigned int size; unsigned int first_data; unsigned int nb_packets; unsigned int nb_empty; unsigned int nb_invalid; unsigned int nb_errors; unsigned int nb_pts; unsigned int nb_pts_diffs; unsigned int last_pts_diff; bool has_initial_pts; bool has_early_pts; u32 pts; unsigned int nb_scr; unsigned int nb_scr_diffs; u16 scr_sof; u32 scr_stc; }; struct uvc_stats_stream { ktime_t start_ts; ktime_t stop_ts; unsigned int nb_frames; unsigned int nb_packets; unsigned int nb_empty; unsigned int nb_invalid; unsigned int nb_errors; unsigned int nb_pts_constant; unsigned int nb_pts_early; unsigned int nb_pts_initial; unsigned int nb_scr_count_ok; unsigned int nb_scr_diffs_ok; unsigned int scr_sof_count; unsigned int scr_sof; unsigned int min_sof; unsigned int max_sof; }; struct uvc_streaming_header { u8 bNumFormats; u8 bEndpointAddress; u8 bTerminalLink; u8 bControlSize; u8 *bmaControls; u8 bmInfo; u8 bStillCaptureMethod; u8 bTriggerSupport; u8 bTriggerUsage; }; struct uvc_streaming_control { __u16 bmHint; __u8 bFormatIndex; __u8 bFrameIndex; __u32 dwFrameInterval; __u16 wKeyFrameRate; __u16 wPFrameRate; __u16 wCompQuality; __u16 wCompWindowSize; __u16 wDelay; __u32 dwMaxVideoFrameSize; __u32 dwMaxPayloadTransferSize; __u32 dwClockFrequency; __u8 bmFramingInfo; __u8 bPreferedVersion; __u8 bMinVersion; __u8 bMaxVersion; } __attribute__((packed)); struct uvc_buffer___2; struct uvc_copy_op { struct uvc_buffer___2 *buf; void *dst; const __u8 *src; size_t len; }; struct uvc_urb { struct urb *urb; struct uvc_streaming *stream; char *buffer; dma_addr_t dma; struct sg_table *sgt; unsigned int async_operations; struct uvc_copy_op copy_operations[32]; struct work_struct work; }; struct uvc_clock_sample; struct uvc_clock { struct uvc_clock_sample *samples; unsigned int head; unsigned int count; unsigned int size; u16 last_sof; u16 sof_offset; u8 last_scr[6]; spinlock_t lock; }; struct uvc_format; struct uvc_frame; struct uvc_streaming { struct list_head list; struct uvc_device___2 *dev; struct video_device vdev; struct 
uvc_video_chain *chain; atomic_t active; struct usb_interface *intf; int intfnum; u16 maxpsize; struct uvc_streaming_header header; enum v4l2_buf_type type; unsigned int nformats; const struct uvc_format *formats; struct uvc_streaming_control ctrl; const struct uvc_format *def_format; const struct uvc_format *cur_format; const struct uvc_frame *cur_frame; struct mutex mutex; unsigned int frozen: 1; struct uvc_video_queue___2 queue; struct workqueue_struct *async_wq; void (*decode)(struct uvc_urb *, struct uvc_buffer___2 *, struct uvc_buffer___2 *); struct { struct video_device vdev; struct uvc_video_queue___2 queue; u32 format; } meta; struct { u8 header[256]; unsigned int header_size; int skip_payload; u32 payload_size; u32 max_payload_size; } bulk; struct uvc_urb uvc_urb[5]; unsigned int urb_size; u32 sequence; u8 last_fid; struct dentry *debugfs_dir; struct { struct uvc_stats_frame frame; struct uvc_stats_stream stream; } stats; struct uvc_clock clock; }; struct uvc_format { u8 type; u8 index; u8 bpp; enum v4l2_colorspace colorspace; enum v4l2_xfer_func xfer_func; enum v4l2_ycbcr_encoding ycbcr_enc; u32 fcc; u32 flags; unsigned int nframes; const struct uvc_frame *frames; }; struct uvc_frame { u8 bFrameIndex; u8 bmCapabilities; u16 wWidth; u16 wHeight; u32 dwMinBitRate; u32 dwMaxBitRate; u32 dwMaxVideoFrameBufferSize; u8 bFrameIntervalType; u32 dwDefaultFrameInterval; const u32 *dwFrameInterval; }; struct uvc_buffer___2 { struct vb2_v4l2_buffer buf; struct list_head queue; enum uvc_buffer_state___2 state; unsigned int error; void *mem; unsigned int length; unsigned int bytesused; u32 pts; struct kref ref; }; struct uvc_clock_sample { u32 dev_stc; u16 dev_sof; u16 host_sof; ktime_t host_time; }; struct uvc_xu_control_query { __u8 unit; __u8 selector; __u8 query; __u16 size; __u8 __attribute__((btf_type_tag("user"))) *data; }; struct uvc_menu_info; struct uvc_xu_control_mapping { __u32 id; __u8 name[32]; __u8 entity[16]; __u8 selector; __u8 size; __u8 offset; __u32 v4l2_type; __u32 data_type; struct uvc_menu_info __attribute__((btf_type_tag("user"))) *menu_info; __u32 menu_count; __u32 reserved[4]; }; struct uvc_menu_info { __u32 value; __u8 name[32]; }; struct uvc_xu_control_mapping32 { u32 id; u8 name[32]; u8 entity[16]; u8 selector; u8 size; u8 offset; u32 v4l2_type; u32 data_type; compat_caddr_t menu_info; u32 menu_count; u32 reserved[4]; }; struct uvc_xu_control_query32 { u8 unit; u8 selector; u8 query; u16 size; compat_caddr_t data; }; struct uvc_meta_buf { __u64 ns; __u16 sof; __u8 length; __u8 flags; __u8 buf[0]; } __attribute__((packed)); struct uvc_ctrl_fixup { struct usb_device_id id; u8 entity; u8 selector; u8 flags; }; struct uvc_ctrl_blacklist { struct usb_device_id id; u8 index; }; struct uvc_debugfs_buffer { size_t count; char data[1024]; }; struct syscon_reboot_context { struct regmap *map; u32 offset; u32 value; u32 mask; struct notifier_block restart_handler; }; enum thermal_device_mode { THERMAL_DEVICE_DISABLED = 0, THERMAL_DEVICE_ENABLED = 1, }; enum thermal_trend { THERMAL_TREND_STABLE = 0, THERMAL_TREND_RAISING = 1, THERMAL_TREND_DROPPING = 2, }; struct thermal_trip; struct thermal_zone_device_ops { int (*bind)(struct thermal_zone_device *, struct thermal_cooling_device *); int (*unbind)(struct thermal_zone_device *, struct thermal_cooling_device *); int (*get_temp)(struct thermal_zone_device *, int *); int (*set_trips)(struct thermal_zone_device *, int, int); int (*change_mode)(struct thermal_zone_device *, enum thermal_device_mode); int (*set_trip_temp)(struct 
thermal_zone_device *, int, int); int (*set_trip_hyst)(struct thermal_zone_device *, int, int); int (*get_crit_temp)(struct thermal_zone_device *, int *); int (*set_emul_temp)(struct thermal_zone_device *, int); int (*get_trend)(struct thermal_zone_device *, const struct thermal_trip *, enum thermal_trend *); void (*hot)(struct thermal_zone_device *); void (*critical)(struct thermal_zone_device *); u64 android_kabi_reserved1; }; enum thermal_notify_event { THERMAL_EVENT_UNSPECIFIED = 0, THERMAL_EVENT_TEMP_SAMPLE = 1, THERMAL_TRIP_VIOLATED = 2, THERMAL_TRIP_CHANGED = 3, THERMAL_DEVICE_DOWN = 4, THERMAL_DEVICE_UP = 5, THERMAL_DEVICE_POWER_CAPABILITY_CHANGED = 6, THERMAL_TABLE_CHANGED = 7, THERMAL_EVENT_KEEP_ALIVE = 8, }; struct thermal_attr; struct thermal_zone_params; struct thermal_governor; struct thermal_zone_device { int id; char type[20]; struct device device; struct attribute_group trips_attribute_group; struct thermal_attr *trip_temp_attrs; struct thermal_attr *trip_type_attrs; struct thermal_attr *trip_hyst_attrs; enum thermal_device_mode mode; void *devdata; struct thermal_trip *trips; int num_trips; unsigned long trips_disabled; unsigned long passive_delay_jiffies; unsigned long polling_delay_jiffies; int temperature; int last_temperature; int emul_temperature; int passive; int prev_low_trip; int prev_high_trip; atomic_t need_update; struct thermal_zone_device_ops *ops; struct thermal_zone_params *tzp; struct thermal_governor *governor; void *governor_data; struct list_head thermal_instances; struct ida ida; struct mutex lock; struct list_head node; struct delayed_work poll_queue; enum thermal_notify_event notify_event; bool suspended; u64 android_kabi_reserved1; }; enum thermal_trip_type { THERMAL_TRIP_ACTIVE = 0, THERMAL_TRIP_PASSIVE = 1, THERMAL_TRIP_HOT = 2, THERMAL_TRIP_CRITICAL = 3, }; struct thermal_trip { int temperature; int hysteresis; enum thermal_trip_type type; void *priv; }; struct thermal_zone_params { char governor_name[20]; bool no_hwmon; u32 sustainable_power; s32 k_po; s32 k_pu; s32 k_i; s32 k_d; s32 integral_cutoff; int slope; int offset; u64 android_kabi_reserved1; }; struct thermal_governor { char name[20]; int (*bind_to_tz)(struct thermal_zone_device *); void (*unbind_from_tz)(struct thermal_zone_device *); int (*throttle)(struct thermal_zone_device *, int); struct list_head governor_list; u64 android_kabi_reserved1; }; struct thermal_cooling_device_ops; struct thermal_cooling_device { int id; char *type; unsigned long max_state; struct device device; struct device_node *np; void *devdata; void *stats; const struct thermal_cooling_device_ops *ops; bool updated; struct mutex lock; struct list_head thermal_instances; struct list_head node; u64 android_kabi_reserved1; }; struct thermal_cooling_device_ops { int (*get_max_state)(struct thermal_cooling_device *, unsigned long *); int (*get_cur_state)(struct thermal_cooling_device *, unsigned long *); int (*set_cur_state)(struct thermal_cooling_device *, unsigned long); int (*get_requested_power)(struct thermal_cooling_device *, u32 *); int (*state2power)(struct thermal_cooling_device *, unsigned long, u32 *); int (*power2state)(struct thermal_cooling_device *, u32, unsigned long *); u64 android_kabi_reserved1; }; enum { POWER_SUPPLY_TECHNOLOGY_UNKNOWN = 0, POWER_SUPPLY_TECHNOLOGY_NiMH = 1, POWER_SUPPLY_TECHNOLOGY_LION = 2, POWER_SUPPLY_TECHNOLOGY_LIPO = 3, POWER_SUPPLY_TECHNOLOGY_LiFe = 4, POWER_SUPPLY_TECHNOLOGY_NiCd = 5, POWER_SUPPLY_TECHNOLOGY_LiMn = 6, }; enum power_supply_notifier_events { 
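/*
 * Illustrative sketch, not part of the generated dump: together with the
 * btf_trace_thermal_temperature typedef declared a little further below,
 * struct thermal_zone_device above lets a CO-RE program sample zone
 * temperatures as the thermal core records them (names invented; the
 * temperature field is in millidegrees Celsius):
 *
 *   SEC("tp_btf/thermal_temperature")
 *   int BPF_PROG(on_thermal_temp, struct thermal_zone_device *tz)
 *   {
 *       bpf_printk("thermal zone %d temp=%d mC",
 *                  BPF_CORE_READ(tz, id), BPF_CORE_READ(tz, temperature));
 *       return 0;
 *   }
 */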
PSY_EVENT_PROP_CHANGED = 0, }; struct match_device_node_array_param { struct device_node *parent_of_node; struct power_supply **psy; ssize_t psy_size; ssize_t psy_count; }; struct psy_am_i_supplied_data { struct power_supply *psy; unsigned int count; }; struct psy_get_supplier_prop_data { struct power_supply *psy; enum power_supply_property psp; union power_supply_propval *val; }; struct power_supply_attr { const char *prop_name; char attr_name[31]; struct device_attribute dev_attr; const char * const *text_values; int text_values_len; }; enum power_supply_charge_behaviour { POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO = 0, POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE = 1, POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE = 2, }; enum { POWER_SUPPLY_STATUS_UNKNOWN = 0, POWER_SUPPLY_STATUS_CHARGING = 1, POWER_SUPPLY_STATUS_DISCHARGING = 2, POWER_SUPPLY_STATUS_NOT_CHARGING = 3, POWER_SUPPLY_STATUS_FULL = 4, }; typedef void (*btf_trace_thermal_temperature)(void *, struct thermal_zone_device *); struct thermal_attr { struct device_attribute attr; char name[20]; }; typedef void (*btf_trace_cdev_update)(void *, struct thermal_cooling_device *, unsigned long); typedef void (*btf_trace_thermal_zone_trip)(void *, struct thermal_zone_device *, int, enum thermal_trip_type); typedef void (*btf_trace_thermal_power_cpu_get_power_simple)(void *, int, u32); typedef void (*btf_trace_thermal_power_cpu_limit)(void *, const struct cpumask *, unsigned int, unsigned long, u32); struct devfreq_dev_status; typedef void (*btf_trace_thermal_power_devfreq_get_power)(void *, struct thermal_cooling_device *, struct devfreq_dev_status *, unsigned long, u32); struct devfreq_dev_status { unsigned long total_time; unsigned long busy_time; unsigned long current_frequency; void *private_data; }; typedef void (*btf_trace_thermal_power_devfreq_limit)(void *, struct thermal_cooling_device *, unsigned long, unsigned long, u32); struct thermal_instance { int id; char name[20]; struct thermal_zone_device *tz; struct thermal_cooling_device *cdev; const struct thermal_trip *trip; bool initialized; unsigned long upper; unsigned long lower; unsigned long target; char attr_name[20]; struct device_attribute attr; char weight_attr_name[20]; struct device_attribute weight_attr; struct list_head tz_node; struct list_head cdev_node; unsigned int weight; bool upper_no_limit; }; struct trace_event_raw_thermal_temperature { struct trace_entry ent; u32 __data_loc_thermal_zone; int id; int temp_prev; int temp; char __data[0]; }; struct trace_event_raw_cdev_update { struct trace_entry ent; u32 __data_loc_type; unsigned long target; char __data[0]; }; struct trace_event_raw_thermal_zone_trip { struct trace_entry ent; u32 __data_loc_thermal_zone; int id; int trip; enum thermal_trip_type trip_type; char __data[0]; }; struct trace_event_raw_thermal_power_cpu_get_power_simple { struct trace_entry ent; int cpu; u32 power; char __data[0]; }; struct trace_event_raw_thermal_power_cpu_limit { struct trace_entry ent; u32 __data_loc_cpumask; unsigned int freq; unsigned long cdev_state; u32 power; char __data[0]; }; struct trace_event_raw_thermal_power_devfreq_get_power { struct trace_entry ent; u32 __data_loc_type; unsigned long freq; u32 busy_time; u32 total_time; u32 power; char __data[0]; }; struct trace_event_raw_thermal_power_devfreq_limit { struct trace_entry ent; u32 __data_loc_type; unsigned int freq; unsigned long cdev_state; u32 power; char __data[0]; }; struct trace_event_data_offsets_thermal_temperature { u32 thermal_zone; }; struct 
trace_event_data_offsets_cdev_update { u32 type; }; struct trace_event_data_offsets_thermal_zone_trip { u32 thermal_zone; }; struct trace_event_data_offsets_thermal_power_cpu_limit { u32 cpumask; }; struct trace_event_data_offsets_thermal_power_devfreq_get_power { u32 type; }; struct trace_event_data_offsets_thermal_power_devfreq_limit { u32 type; }; struct trace_event_data_offsets_thermal_power_cpu_get_power_simple {}; struct cooling_dev_stats { spinlock_t lock; unsigned int total_trans; unsigned long state; ktime_t last_time; ktime_t *time_in_state; unsigned int *trans_table; }; struct param; typedef int (*cb_t)(struct param *); struct thermal_genl_cpu_caps; struct param { struct nlattr **attrs; struct sk_buff *msg; const char *name; int tz_id; int cdev_id; int trip_id; int trip_temp; int trip_type; int trip_hyst; int temp; int cdev_state; int cdev_max_state; struct thermal_genl_cpu_caps *cpu_capabilities; int cpu_capabilities_count; }; struct thermal_genl_cpu_caps { int cpu; int performance; int efficiency; }; enum thermal_genl_sampling { THERMAL_GENL_SAMPLING_TEMP = 0, __THERMAL_GENL_SAMPLING_MAX = 1, }; enum thermal_genl_attr { THERMAL_GENL_ATTR_UNSPEC = 0, THERMAL_GENL_ATTR_TZ = 1, THERMAL_GENL_ATTR_TZ_ID = 2, THERMAL_GENL_ATTR_TZ_TEMP = 3, THERMAL_GENL_ATTR_TZ_TRIP = 4, THERMAL_GENL_ATTR_TZ_TRIP_ID = 5, THERMAL_GENL_ATTR_TZ_TRIP_TYPE = 6, THERMAL_GENL_ATTR_TZ_TRIP_TEMP = 7, THERMAL_GENL_ATTR_TZ_TRIP_HYST = 8, THERMAL_GENL_ATTR_TZ_MODE = 9, THERMAL_GENL_ATTR_TZ_NAME = 10, THERMAL_GENL_ATTR_TZ_CDEV_WEIGHT = 11, THERMAL_GENL_ATTR_TZ_GOV = 12, THERMAL_GENL_ATTR_TZ_GOV_NAME = 13, THERMAL_GENL_ATTR_CDEV = 14, THERMAL_GENL_ATTR_CDEV_ID = 15, THERMAL_GENL_ATTR_CDEV_CUR_STATE = 16, THERMAL_GENL_ATTR_CDEV_MAX_STATE = 17, THERMAL_GENL_ATTR_CDEV_NAME = 18, THERMAL_GENL_ATTR_GOV_NAME = 19, THERMAL_GENL_ATTR_CPU_CAPABILITY = 20, THERMAL_GENL_ATTR_CPU_CAPABILITY_ID = 21, THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE = 22, THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY = 23, __THERMAL_GENL_ATTR_MAX = 24, }; enum thermal_genl_event { THERMAL_GENL_EVENT_UNSPEC = 0, THERMAL_GENL_EVENT_TZ_CREATE = 1, THERMAL_GENL_EVENT_TZ_DELETE = 2, THERMAL_GENL_EVENT_TZ_DISABLE = 3, THERMAL_GENL_EVENT_TZ_ENABLE = 4, THERMAL_GENL_EVENT_TZ_TRIP_UP = 5, THERMAL_GENL_EVENT_TZ_TRIP_DOWN = 6, THERMAL_GENL_EVENT_TZ_TRIP_CHANGE = 7, THERMAL_GENL_EVENT_TZ_TRIP_ADD = 8, THERMAL_GENL_EVENT_TZ_TRIP_DELETE = 9, THERMAL_GENL_EVENT_CDEV_ADD = 10, THERMAL_GENL_EVENT_CDEV_DELETE = 11, THERMAL_GENL_EVENT_CDEV_STATE_UPDATE = 12, THERMAL_GENL_EVENT_TZ_GOV_CHANGE = 13, THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE = 14, __THERMAL_GENL_EVENT_MAX = 15, }; typedef void (*btf_trace_thermal_power_allocator)(void *, struct thermal_zone_device *, u32 *, u32, u32 *, u32, size_t, u32, u32, int, s32); typedef void (*btf_trace_thermal_power_allocator_pid)(void *, struct thermal_zone_device *, s32, s32, s64, s64, s64, s32); struct trace_event_raw_thermal_power_allocator { struct trace_entry ent; int tz_id; u32 __data_loc_req_power; u32 total_req_power; u32 __data_loc_granted_power; u32 total_granted_power; size_t num_actors; u32 power_range; u32 max_allocatable_power; int current_temp; s32 delta_temp; char __data[0]; }; struct trace_event_raw_thermal_power_allocator_pid { struct trace_entry ent; int tz_id; s32 err; s32 err_integral; s64 p; s64 i; s64 d; s32 output; char __data[0]; }; struct trace_event_data_offsets_thermal_power_allocator { u32 req_power; u32 granted_power; }; struct power_allocator_params { bool allocated_tzp; s64 err_integral; s32 
prev_err; int trip_switch_on; int trip_max_desired_temperature; u32 sustainable_power; }; struct trace_event_data_offsets_thermal_power_allocator_pid {}; struct cpufreq_cooling_device { u32 last_load; unsigned int cpufreq_state; unsigned int max_level; struct em_perf_domain *em; struct cpufreq_policy *policy; struct thermal_cooling_device_ops cooling_ops; struct freq_qos_request qos_req; }; struct idle_inject_device; struct cpuidle_cooling_device { struct idle_inject_device *ii_dev; unsigned long state; }; enum devfreq_timer { DEVFREQ_TIMER_DEFERRABLE = 0, DEVFREQ_TIMER_DELAYED = 1, DEVFREQ_TIMER_NUM = 2, }; struct devfreq; struct devfreq_cooling_power; struct devfreq_cooling_device { struct thermal_cooling_device *cdev; struct thermal_cooling_device_ops cooling_ops; struct devfreq *devfreq; unsigned long cooling_state; u32 *freq_table; size_t max_state; struct devfreq_cooling_power *power_ops; u32 res_util; int capped_state; struct dev_pm_qos_request req_max_freq; struct em_perf_domain *em_pd; }; struct devfreq_stats { unsigned int total_trans; unsigned int *trans_table; u64 *time_in_state; u64 last_update; }; struct devfreq_dev_profile; struct devfreq_governor; struct devfreq { struct list_head node; struct mutex lock; struct device dev; struct devfreq_dev_profile *profile; const struct devfreq_governor *governor; struct opp_table *opp_table; struct notifier_block nb; struct delayed_work work; unsigned long *freq_table; unsigned int max_state; unsigned long previous_freq; struct devfreq_dev_status last_status; void *data; void *governor_data; struct dev_pm_qos_request user_min_freq_req; struct dev_pm_qos_request user_max_freq_req; unsigned long scaling_min_freq; unsigned long scaling_max_freq; bool stop_polling; unsigned long suspend_freq; unsigned long resume_freq; atomic_t suspend_count; struct devfreq_stats stats; struct srcu_notifier_head transition_notifier_list; struct thermal_cooling_device *cdev; struct notifier_block nb_min; struct notifier_block nb_max; }; struct devfreq_dev_profile { unsigned long initial_freq; unsigned int polling_ms; enum devfreq_timer timer; int (*target)(struct device *, unsigned long *, u32); int (*get_dev_status)(struct device *, struct devfreq_dev_status *); int (*get_cur_freq)(struct device *, unsigned long *); void (*exit)(struct device *); unsigned long *freq_table; unsigned int max_state; bool is_cooling_device; }; struct devfreq_governor { struct list_head node; const char name[16]; const u64 attrs; const u64 flags; int (*get_target_freq)(struct devfreq *, unsigned long *); int (*event_handler)(struct devfreq *, unsigned int, void *); }; struct devfreq_cooling_power { int (*get_real_power)(struct devfreq *, u32 *, unsigned long, unsigned long); }; struct hisi_thermal_sensor; struct hisi_thermal_data; struct hisi_thermal_ops { int (*get_temp)(struct hisi_thermal_sensor *); int (*enable_sensor)(struct hisi_thermal_sensor *); int (*disable_sensor)(struct hisi_thermal_sensor *); int (*irq_handler)(struct hisi_thermal_sensor *); int (*probe)(struct hisi_thermal_data *); }; struct hisi_thermal_sensor { struct hisi_thermal_data *data; struct thermal_zone_device *tzd; const char *irq_name; uint32_t id; uint32_t thres_temp; }; struct hisi_thermal_data { const struct hisi_thermal_ops *ops; struct hisi_thermal_sensor *sensor; struct platform_device *pdev; struct clk *clk; void *regs; int nr_sensors; }; struct watchdog_device; typedef void (*btf_trace_watchdog_start)(void *, struct watchdog_device *, int); struct watchdog_info; struct watchdog_ops; struct 
watchdog_governor; struct watchdog_core_data; struct watchdog_device { int id; struct device *parent; const struct attribute_group **groups; const struct watchdog_info *info; const struct watchdog_ops *ops; const struct watchdog_governor *gov; unsigned int bootstatus; unsigned int timeout; unsigned int pretimeout; unsigned int min_timeout; unsigned int max_timeout; unsigned int min_hw_heartbeat_ms; unsigned int max_hw_heartbeat_ms; struct notifier_block reboot_nb; struct notifier_block restart_nb; struct notifier_block pm_nb; void *driver_data; struct watchdog_core_data *wd_data; unsigned long status; struct list_head deferred; }; struct watchdog_info { __u32 options; __u32 firmware_version; __u8 identity[32]; }; struct watchdog_ops { struct module *owner; int (*start)(struct watchdog_device *); int (*stop)(struct watchdog_device *); int (*ping)(struct watchdog_device *); unsigned int (*status)(struct watchdog_device *); int (*set_timeout)(struct watchdog_device *, unsigned int); int (*set_pretimeout)(struct watchdog_device *, unsigned int); unsigned int (*get_timeleft)(struct watchdog_device *); int (*restart)(struct watchdog_device *, unsigned long, void *); long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long); }; struct watchdog_governor { const char name[20]; void (*pretimeout)(struct watchdog_device *); }; struct watchdog_core_data { struct device dev; struct cdev cdev; struct watchdog_device *wdd; struct mutex lock; ktime_t last_keepalive; ktime_t last_hw_keepalive; ktime_t open_deadline; struct hrtimer timer; struct kthread_work work; unsigned long status; }; typedef void (*btf_trace_watchdog_ping)(void *, struct watchdog_device *, int); typedef void (*btf_trace_watchdog_stop)(void *, struct watchdog_device *, int); typedef void (*btf_trace_watchdog_set_timeout)(void *, struct watchdog_device *, unsigned int, int); struct trace_event_raw_watchdog_template { struct trace_entry ent; int id; int err; char __data[0]; }; struct trace_event_raw_watchdog_set_timeout { struct trace_entry ent; int id; unsigned int timeout; int err; char __data[0]; }; struct trace_event_data_offsets_watchdog_template {}; struct trace_event_data_offsets_watchdog_set_timeout {}; enum dm_uevent_type { DM_UEVENT_PATH_FAILED = 0, DM_UEVENT_PATH_REINSTATED = 1, }; struct mapped_device; struct dm_uevent { struct mapped_device *md; enum kobject_action action; struct kobj_uevent_env ku_env; struct list_head elist; char name[128]; char uuid[129]; }; struct dm_table; struct target_type; struct dm_target { struct dm_table *table; struct target_type *type; sector_t begin; sector_t len; uint32_t max_io_len; unsigned int num_flush_bios; unsigned int num_discard_bios; unsigned int num_secure_erase_bios; unsigned int num_write_zeroes_bios; unsigned int per_io_data_size; void *private; char *error; bool flush_supported: 1; bool discards_supported: 1; bool max_discard_granularity: 1; bool max_secure_erase_granularity: 1; bool max_write_zeroes_granularity: 1; bool limit_swap_bios: 1; bool emulate_zone_append: 1; bool accounts_remapped_io: 1; bool needs_bio_set_dev: 1; bool flush_bypasses_map: 1; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; typedef int (*dm_ctr_fn)(struct dm_target *, unsigned int, char **); typedef void (*dm_dtr_fn)(struct dm_target *); typedef int (*dm_map_fn)(struct dm_target *, struct bio *); union map_info___2; typedef int (*dm_clone_and_map_request_fn)(struct dm_target *, struct request *, union map_info___2 *, struct request **); typedef void 
(*dm_release_clone_request_fn)(struct request *, union map_info___2 *); typedef int (*dm_endio_fn)(struct dm_target *, struct bio *, blk_status_t *); typedef int (*dm_request_endio_fn)(struct dm_target *, struct request *, blk_status_t, union map_info___2 *); typedef void (*dm_presuspend_fn)(struct dm_target *); typedef void (*dm_presuspend_undo_fn)(struct dm_target *); typedef void (*dm_postsuspend_fn)(struct dm_target *); typedef int (*dm_preresume_fn)(struct dm_target *); typedef void (*dm_resume_fn)(struct dm_target *); typedef enum { STATUSTYPE_INFO = 0, STATUSTYPE_TABLE = 1, STATUSTYPE_IMA = 2, } status_type_t; typedef void (*dm_status_fn)(struct dm_target *, status_type_t, unsigned int, char *, unsigned int); typedef int (*dm_message_fn)(struct dm_target *, unsigned int, char **, char *, unsigned int); typedef int (*dm_prepare_ioctl_fn)(struct dm_target *, struct block_device **); struct dm_report_zones_args; typedef int (*dm_report_zones_fn)(struct dm_target *, struct dm_report_zones_args *, unsigned int); typedef int (*dm_busy_fn)(struct dm_target *); struct dm_dev; typedef int (*iterate_devices_callout_fn)(struct dm_target *, struct dm_dev *, sector_t, sector_t, void *); typedef int (*dm_iterate_devices_fn)(struct dm_target *, iterate_devices_callout_fn, void *); typedef void (*dm_io_hints_fn)(struct dm_target *, struct queue_limits *); typedef long (*dm_dax_direct_access_fn)(struct dm_target *, unsigned long, long, enum dax_access_mode, void **, pfn_t *); typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *, unsigned long, size_t); typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *, unsigned long, void *, size_t, struct iov_iter *); struct target_type { uint64_t features; const char *name; struct module *module; unsigned int version[3]; dm_ctr_fn ctr; dm_dtr_fn dtr; dm_map_fn map; dm_clone_and_map_request_fn clone_and_map_rq; dm_release_clone_request_fn release_clone_rq; dm_endio_fn end_io; dm_request_endio_fn rq_end_io; dm_presuspend_fn presuspend; dm_presuspend_undo_fn presuspend_undo; dm_postsuspend_fn postsuspend; dm_preresume_fn preresume; dm_resume_fn resume; dm_status_fn status; dm_message_fn message; dm_prepare_ioctl_fn prepare_ioctl; dm_report_zones_fn report_zones; dm_busy_fn busy; dm_iterate_devices_fn iterate_devices; dm_io_hints_fn io_hints; dm_dax_direct_access_fn direct_access; dm_dax_zero_page_range_fn dax_zero_page_range; dm_dax_recovery_write_fn dax_recovery_write; u64 android_kabi_reserved1; u64 android_kabi_reserved2; struct list_head list; }; union map_info___2 { void *ptr; }; struct dm_report_zones_args { struct dm_target *tgt; sector_t next_sector; void *orig_data; report_zones_cb orig_cb; unsigned int zone_idx; sector_t start; }; struct dm_dev { struct block_device *bdev; struct dax_device *dax_dev; blk_mode_t mode; char name[16]; }; enum dm_queue_mode { DM_TYPE_NONE = 0, DM_TYPE_BIO_BASED = 1, DM_TYPE_REQUEST_BASED = 2, DM_TYPE_DAX_BIO_BASED = 3, }; struct dm_stats_last_position; struct dm_stats { struct mutex mutex; struct list_head list; struct dm_stats_last_position __attribute__((btf_type_tag("percpu"))) *last; bool precise_timestamps; }; struct dm_kobject_holder { struct kobject kobj; struct completion completion; }; struct dm_io; struct dm_md_mempools; struct mapped_device { struct mutex suspend_lock; struct mutex table_devices_lock; struct list_head table_devices; void __attribute__((btf_type_tag("rcu"))) *map; unsigned long flags; struct mutex type_lock; enum dm_queue_mode type; int numa_node_id; struct request_queue *queue; 
atomic_t holders; atomic_t open_count; struct dm_target *immutable_target; struct target_type *immutable_target_type; char name[16]; struct gendisk *disk; struct dax_device *dax_dev; wait_queue_head_t wait; unsigned long __attribute__((btf_type_tag("percpu"))) *pending_io; struct hd_geometry geometry; struct workqueue_struct *wq; struct work_struct work; spinlock_t deferred_lock; struct bio_list deferred; struct work_struct requeue_work; struct dm_io *requeue_list; void *interface_ptr; wait_queue_head_t eventq; atomic_t event_nr; atomic_t uevent_seq; struct list_head uevent_list; spinlock_t uevent_lock; bool init_tio_pdu: 1; struct blk_mq_tag_set *tag_set; struct dm_stats stats; unsigned int internal_suspend_count; int swap_bios; struct semaphore swap_bios_semaphore; struct mutex swap_bios_lock; struct dm_md_mempools *mempools; struct dm_kobject_holder kobj_holder; struct srcu_struct io_barrier; unsigned int nr_zones; unsigned int *zwp_offset; }; struct dm_table { struct mapped_device *md; enum dm_queue_mode type; unsigned int depth; unsigned int counts[16]; sector_t *index[16]; unsigned int num_targets; unsigned int num_allocated; sector_t *highs; struct dm_target *targets; struct target_type *immutable_target_type; bool integrity_supported: 1; bool singleton: 1; bool integrity_added: 1; bool flush_bypasses_map: 1; blk_mode_t mode; struct list_head devices; struct rw_semaphore devices_lock; void (*event_fn)(void *); void *event_context; struct dm_md_mempools *mempools; struct blk_crypto_profile *crypto_profile; }; struct dm_md_mempools { struct bio_set bs; struct bio_set io_bs; }; typedef u16 blk_short_t; struct dm_stats_aux { bool merged; unsigned long long duration_ns; }; struct dm_target_io { unsigned short magic; blk_short_t flags; unsigned int target_bio_nr; struct dm_io *io; struct dm_target *ti; unsigned int *len_ptr; sector_t old_sector; struct bio clone; }; struct dm_io { unsigned short magic; blk_short_t flags; spinlock_t lock; unsigned long start_time; void *data; struct dm_io *next; struct dm_stats_aux stats_aux; blk_status_t status; atomic_t io_count; struct mapped_device *md; struct bio *orig_bio; unsigned int sector_offset; unsigned int sectors; struct dm_target_io tio; }; struct dm_stats_last_position { sector_t last_sector; unsigned int last_rw; }; struct orig_bio_details { enum req_op op; unsigned int nr_sectors; }; enum { DM_TIO_INSIDE_DM_IO = 0, DM_TIO_IS_DUPLICATE_BIO = 1, }; enum { DM_IO_ACCOUNTED = 0, DM_IO_WAS_SPLIT = 1, DM_IO_BLK_STAT = 2, }; struct table_device { struct list_head list; refcount_t count; struct dm_dev dm_dev; }; struct dm_dev_internal { struct list_head list; refcount_t count; struct dm_dev *dm_dev; }; struct clone_info { struct dm_table *map; struct bio *bio; struct dm_io *io; sector_t sector; unsigned int sector_count; bool is_abnormal_io: 1; bool submit_as_polled: 1; }; struct dm_pr { u64 old_key; u64 new_key; u32 flags; bool abort; bool fail_early; int ret; enum pr_type type; struct pr_keys *read_keys; struct pr_held_reservation *rsv; }; enum suspend_mode { PRESUSPEND = 0, PRESUSPEND_UNDO = 1, POSTSUSPEND = 2, }; struct dm_crypto_profile { struct blk_crypto_profile profile; struct mapped_device *md; }; struct dm_arg_set { unsigned int argc; char **argv; }; struct dm_arg { unsigned int min; unsigned int max; char *error; }; struct dm_derive_sw_secret_args { const u8 *eph_key; size_t eph_key_size; u8 *sw_secret; int err; }; struct linear_c { struct dm_dev *dev; sector_t start; }; struct stripe { struct dm_dev *dev; sector_t physical_start; 
atomic_t error_count; }; struct stripe_c { uint32_t stripes; int stripes_shift; sector_t stripe_width; uint32_t chunk_size; int chunk_size_shift; struct dm_target *ti; struct work_struct trigger_event; struct stripe stripe[0]; }; struct dm_ioctl; typedef int (*ioctl_fn___2)(struct file *, struct dm_ioctl *, size_t); struct dm_ioctl { __u32 version[3]; __u32 data_size; __u32 data_start; __u32 target_count; __s32 open_count; __u32 flags; __u32 event_nr; __u32 padding; __u64 dev; char name[128]; char uuid[129]; char data[7]; }; enum { DM_VERSION_CMD = 0, DM_REMOVE_ALL_CMD = 1, DM_LIST_DEVICES_CMD = 2, DM_DEV_CREATE_CMD = 3, DM_DEV_REMOVE_CMD = 4, DM_DEV_RENAME_CMD = 5, DM_DEV_SUSPEND_CMD = 6, DM_DEV_STATUS_CMD = 7, DM_DEV_WAIT_CMD = 8, DM_TABLE_LOAD_CMD = 9, DM_TABLE_CLEAR_CMD = 10, DM_TABLE_DEPS_CMD = 11, DM_TABLE_STATUS_CMD = 12, DM_LIST_VERSIONS_CMD = 13, DM_TARGET_MSG_CMD = 14, DM_DEV_SET_GEOMETRY_CMD = 15, DM_DEV_ARM_POLL_CMD = 16, DM_GET_TARGET_VERSION_CMD = 17, }; struct hash_cell { struct rb_node name_node; struct rb_node uuid_node; bool name_set; bool uuid_set; char *name; char *uuid; struct mapped_device *md; struct dm_table *new_map; }; struct dm_target_spec { __u64 sector_start; __u64 length; __s32 status; __u32 next; char target_type[16]; }; struct dm_target_msg { __u64 sector; char message[0]; }; struct dm_target_deps { __u32 count; __u32 padding; __u64 dev[0]; }; struct dm_target_versions; struct vers_iter { size_t param_size; struct dm_target_versions *vers; struct dm_target_versions *old_vers; char *end; uint32_t flags; }; struct dm_target_versions { __u32 next; __u32 version[3]; char name[0]; }; struct dm_file { volatile unsigned int global_event_nr; }; struct dm_name_list { __u64 dev; __u32 next; char name[0]; }; enum dm_io_mem_type { DM_IO_PAGE_LIST = 0, DM_IO_BIO = 1, DM_IO_VMA = 2, DM_IO_KMEM = 3, }; struct page_list; struct dm_io_memory { enum dm_io_mem_type type; unsigned int offset; union { struct page_list *pl; struct bio *bio; void *vma; void *addr; } ptr; }; typedef void (*io_notify_fn)(unsigned long, void *); struct dm_io_notify { io_notify_fn fn; void *context; }; struct dm_io_client; struct dm_io_request { blk_opf_t bi_opf; struct dm_io_memory mem; struct dm_io_notify notify; struct dm_io_client *client; }; struct page_list { struct page_list *next; struct page *page; }; struct dm_io_client { mempool_t pool; struct bio_set bios; }; struct dpages { void (*get_page)(struct dpages *, struct page **, unsigned long *, unsigned int *); void (*next_page)(struct dpages *); union { unsigned int context_u; struct bvec_iter context_bi; }; void *context_ptr; void *vma_invalidate_address; unsigned long vma_invalidate_size; }; struct dm_io_region { struct block_device *bdev; sector_t sector; sector_t count; }; struct io { unsigned long error_bits; atomic_t count; struct dm_io_client *client; io_notify_fn callback; void *context; void *vma_invalidate_address; unsigned long vma_invalidate_size; long: 64; }; struct sync_io { unsigned long error_bits; struct completion wait; }; struct dm_kcopyd_throttle; struct dm_kcopyd_client { struct page_list *pages; unsigned int nr_reserved_pages; unsigned int nr_free_pages; unsigned int sub_job_size; struct dm_io_client *io_client; wait_queue_head_t destroyq; mempool_t job_pool; struct workqueue_struct *kcopyd_wq; struct work_struct kcopyd_work; struct dm_kcopyd_throttle *throttle; atomic_t nr_jobs; spinlock_t job_lock; struct list_head callback_jobs; struct list_head complete_jobs; struct list_head io_jobs; struct list_head pages_jobs; }; 
struct dm_kcopyd_throttle { unsigned int throttle; unsigned int num_io_jobs; unsigned int io_period; unsigned int total_period; unsigned int last_jiffies; }; typedef void (*dm_kcopyd_notify_fn)(int, unsigned long, void *); struct kcopyd_job { struct dm_kcopyd_client *kc; struct list_head list; unsigned int flags; int read_err; unsigned long write_err; enum req_op op; struct dm_io_region source; unsigned int num_dests; struct dm_io_region dests[8]; struct page_list *pages; dm_kcopyd_notify_fn fn; void *context; struct mutex lock; atomic_t sub_jobs; sector_t progress; sector_t write_offset; struct kcopyd_job *master_job; }; struct dm_sysfs_attr { struct attribute attr; ssize_t (*show)(struct mapped_device *, char *); ssize_t (*store)(struct mapped_device *, const char *, size_t); }; struct dm_stat_percpu { unsigned long long sectors[2]; unsigned long long ios[2]; unsigned long long merges[2]; unsigned long long ticks[2]; unsigned long long io_ticks[2]; unsigned long long io_ticks_total; unsigned long long time_in_queue; unsigned long long *histogram; }; struct dm_stat_shared { atomic_t in_flight[2]; unsigned long long stamp; struct dm_stat_percpu tmp; }; struct dm_stat { struct list_head list_entry; int id; unsigned int stat_flags; size_t n_entries; sector_t start; sector_t end; sector_t step; unsigned int n_histogram_entries; unsigned long long *histogram_boundaries; const char *program_id; const char *aux_data; struct callback_head callback_head; size_t shared_alloc_size; size_t percpu_alloc_size; size_t histogram_alloc_size; struct dm_stat_percpu *stat_percpu[32]; struct dm_stat_shared stat_shared[0]; }; struct dm_rq_target_io; struct dm_rq_clone_bio_info { struct bio *orig; struct dm_rq_target_io *tio; struct bio clone; }; struct dm_rq_target_io { struct mapped_device *md; struct dm_target *ti; struct request *orig; struct request *clone; struct kthread_work work; blk_status_t error; union map_info___2 info; struct dm_stats_aux stats_aux; unsigned long duration_jiffies; unsigned int n_sectors; unsigned int completed; }; enum new_flag { NF_FRESH = 0, NF_READ = 1, NF_GET = 2, NF_PREFETCH = 3, }; enum evict_result { ER_EVICT = 0, ER_DONT_EVICT = 1, ER_STOP = 2, }; enum data_mode { DATA_MODE_SLAB = 0, DATA_MODE_GET_FREE_PAGES = 1, DATA_MODE_VMALLOC = 2, DATA_MODE_LIMIT = 3, }; enum it_action { IT_NEXT = 0, IT_COMPLETE = 1, }; struct lru_entry { struct list_head list; atomic_t referenced; }; struct dm_bufio_client; struct dm_buffer { struct rb_node node; sector_t block; void *data; unsigned char data_mode; atomic_t hold_count; unsigned long last_accessed; unsigned long state; struct lru_entry lru; unsigned char list_mode; blk_status_t read_error; blk_status_t write_error; unsigned int dirty_start; unsigned int dirty_end; unsigned int write_start; unsigned int write_end; struct list_head write_list; struct dm_bufio_client *c; void (*end_io)(struct dm_buffer *, blk_status_t); }; struct lru { struct list_head *cursor; unsigned long count; struct list_head iterators; }; struct buffer_tree { union { struct rw_semaphore lock; rwlock_t spinlock; } u; struct rb_root root; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct dm_buffer_cache { struct lru lru[2]; unsigned int num_locks; bool no_sleep; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct buffer_tree trees[0]; }; struct dm_bufio_client { struct block_device *bdev; unsigned int block_size; s8 sectors_per_block_bits; bool no_sleep; struct mutex lock; spinlock_t spinlock; int 
async_write_error; void (*alloc_callback)(struct dm_buffer *); void (*write_callback)(struct dm_buffer *); struct kmem_cache *slab_buffer; struct kmem_cache *slab_cache; struct dm_io_client *dm_io; struct list_head reserved_buffers; unsigned int need_reserved_buffers; unsigned int minimum_buffers; sector_t start; struct shrinker shrinker; struct work_struct shrink_work; atomic_long_t need_shrink; wait_queue_head_t free_buffer_wait; struct list_head client_list; unsigned long oldest_buffer; long: 64; struct dm_buffer_cache cache; }; struct lru_iter { struct lru *lru; struct list_head list; struct lru_entry *stop; struct lru_entry *e; }; struct lock_history { struct dm_buffer_cache *cache; bool write; unsigned int previous; unsigned int no_previous; }; typedef enum evict_result (*b_predicate)(struct dm_buffer *, void *); struct evict_wrapper { struct lock_history *lh; b_predicate pred; void *context; }; typedef bool (*iter_predicate)(struct lru_entry *, void *); typedef void (*b_release)(struct dm_buffer *); typedef enum it_action (*iter_fn)(struct dm_buffer *, void *); struct evict_params { gfp_t gfp; unsigned long age_hz; unsigned long last_accessed; }; struct write_context { int no_wait; struct list_head *write_list; }; typedef enum evict_result (*le_predicate)(struct lru_entry *, void *); struct crypt_config; struct dm_crypt_request; struct crypt_iv_operations { int (*ctr)(struct crypt_config *, struct dm_target *, const char *); void (*dtr)(struct crypt_config *); int (*init)(struct crypt_config *); int (*wipe)(struct crypt_config *); int (*generator)(struct crypt_config *, u8 *, struct dm_crypt_request *); int (*post)(struct crypt_config *, u8 *, struct dm_crypt_request *); }; struct iv_benbi_private { int shift; }; struct iv_lmk_private { struct crypto_shash *hash_tfm; u8 *seed; }; struct iv_tcw_private { struct crypto_shash *crc32_tfm; u8 *iv_seed; u8 *whitening; }; struct iv_elephant_private { struct crypto_skcipher *tfm; }; struct crypt_config { struct dm_dev *dev; sector_t start; struct percpu_counter n_allocated_pages; struct workqueue_struct *io_queue; struct workqueue_struct *crypt_queue; spinlock_t write_thread_lock; struct task_struct *write_thread; struct rb_root write_tree; char *cipher_string; char *cipher_auth; char *key_string; const struct crypt_iv_operations *iv_gen_ops; union { struct iv_benbi_private benbi; struct iv_lmk_private lmk; struct iv_tcw_private tcw; struct iv_elephant_private elephant; } iv_gen_private; u64 iv_offset; unsigned int iv_size; unsigned short sector_size; unsigned char sector_shift; union { struct crypto_skcipher **tfms; struct crypto_aead **tfms_aead; } cipher_tfm; unsigned int tfms_count; unsigned long cipher_flags; unsigned int dmreq_start; unsigned int per_bio_data_size; unsigned long flags; unsigned int key_size; unsigned int key_parts; unsigned int key_extra_size; unsigned int key_mac_size; unsigned int integrity_tag_size; unsigned int integrity_iv_size; unsigned int on_disk_tag_size; unsigned int tag_pool_max_sectors; mempool_t tag_pool; mempool_t req_pool; mempool_t page_pool; struct bio_set bs; struct mutex bio_alloc_lock; u8 *authenc_key; u8 key[0]; }; struct convert_context; struct dm_crypt_request { struct convert_context *ctx; struct scatterlist sg_in[4]; struct scatterlist sg_out[4]; u64 iv_sector; }; struct convert_context { struct completion restart; struct bio *bio_in; struct bvec_iter iter_in; struct bio *bio_out; struct bvec_iter iter_out; atomic_t cc_pending; u64 cc_sector; union { struct skcipher_request *req; struct 
aead_request *req_aead; } r; bool aead_recheck; bool aead_failed; }; enum flags { DM_CRYPT_SUSPENDED = 0, DM_CRYPT_KEY_VALID = 1, DM_CRYPT_SAME_CPU = 2, DM_CRYPT_NO_OFFLOAD = 3, DM_CRYPT_NO_READ_WORKQUEUE = 4, DM_CRYPT_NO_WRITE_WORKQUEUE = 5, DM_CRYPT_WRITE_INLINE = 6, }; enum cipher_flags { CRYPT_MODE_INTEGRITY_AEAD = 0, CRYPT_IV_LARGE_SECTORS = 1, CRYPT_ENCRYPT_PREPROCESS = 2, }; struct dm_crypt_io { struct crypt_config *cc; struct bio *base_bio; u8 *integrity_metadata; bool integrity_metadata_from_pool: 1; struct work_struct work; struct convert_context ctx; atomic_t io_pending; blk_status_t error; sector_t sector; struct bvec_iter saved_bi_iter; struct rb_node rb_node; }; struct dm_default_key_cipher { const char *name; enum blk_crypto_mode_num mode_num; int key_size; }; struct default_key_c { struct dm_dev *dev; sector_t start; const char *cipher_string; u64 iv_offset; unsigned int sector_size; unsigned int sector_bits; struct blk_crypto_key key; enum blk_crypto_key_type key_type; u64 max_dun; }; struct dm_exception_table { uint32_t hash_mask; unsigned int hash_shift; struct hlist_bl_head *table; }; typedef sector_t chunk_t; struct dm_exception_store; struct dm_snapshot { struct rw_semaphore lock; struct dm_dev *origin; struct dm_dev *cow; struct dm_target *ti; struct list_head list; int valid; int snapshot_overflowed; int active; atomic_t pending_exceptions_count; spinlock_t pe_allocation_lock; sector_t exception_start_sequence; sector_t exception_complete_sequence; struct rb_root out_of_order_tree; mempool_t pending_pool; struct dm_exception_table pending; struct dm_exception_table complete; spinlock_t pe_lock; spinlock_t tracked_chunk_lock; struct hlist_head tracked_chunk_hash[16]; struct dm_exception_store *store; unsigned int in_progress; struct wait_queue_head in_progress_wait; struct dm_kcopyd_client *kcopyd_client; unsigned long state_bits; chunk_t first_merging_chunk; int num_merging_chunks; bool merge_failed: 1; bool discard_zeroes_cow: 1; bool discard_passdown_origin: 1; struct bio_list bios_queued_during_merge; }; struct dm_exception_store_type; struct dm_exception_store { struct dm_exception_store_type *type; struct dm_snapshot *snap; unsigned int chunk_size; unsigned int chunk_mask; unsigned int chunk_shift; void *context; bool userspace_supports_overflow; }; struct dm_exception; struct dm_exception_store_type { const char *name; struct module *module; int (*ctr)(struct dm_exception_store *, char *); void (*dtr)(struct dm_exception_store *); int (*read_metadata)(struct dm_exception_store *, int (*)(void *, chunk_t, chunk_t), void *); int (*prepare_exception)(struct dm_exception_store *, struct dm_exception *); void (*commit_exception)(struct dm_exception_store *, struct dm_exception *, int, void (*)(void *, int), void *); int (*prepare_merge)(struct dm_exception_store *, chunk_t *, chunk_t *); int (*commit_merge)(struct dm_exception_store *, int); void (*drop_snapshot)(struct dm_exception_store *); unsigned int (*status)(struct dm_exception_store *, status_type_t, char *, unsigned int); void (*usage)(struct dm_exception_store *, sector_t *, sector_t *, sector_t *); struct list_head list; }; struct dm_exception { struct hlist_bl_node hash_list; chunk_t old_chunk; chunk_t new_chunk; }; struct origin { struct block_device *bdev; struct list_head hash_list; struct list_head snapshots; }; struct dm_snap_pending_exception { struct dm_exception e; struct bio_list origin_bios; struct bio_list snapshot_bios; struct dm_snapshot *snap; int started; int copy_error; sector_t 
exception_sequence; struct rb_node out_of_order_node; struct bio *full_bio; bio_end_io_t *full_bio_end_io; }; struct dm_snap_tracked_chunk { struct hlist_node node; chunk_t chunk; }; struct dm_origin { struct dm_dev *dev; struct dm_target *ti; unsigned int split_boundary; struct list_head hash_list; }; struct dm_exception_table_lock { struct hlist_bl_head *complete_slot; struct hlist_bl_head *pending_slot; }; struct transient_c { sector_t next_free; }; struct mdata_req { struct dm_io_region *where; struct dm_io_request *io_req; struct work_struct work; int result; }; struct disk_exception { __le64 old_chunk; __le64 new_chunk; }; struct commit_callback; struct pstore { struct dm_exception_store *store; int version; int valid; uint32_t exceptions_per_area; void *area; void *zero_area; void *header_area; chunk_t current_area; chunk_t next_free; uint32_t current_committed; atomic_t pending_count; uint32_t callback_count; struct commit_callback *callbacks; struct dm_io_client *io_client; struct workqueue_struct *metadata_wq; }; struct commit_callback { void (*callback)(void *, int); void *context; }; struct disk_header { __le32 magic; __le32 valid; __le32 version; __le32 chunk_size; }; struct core_exception { uint64_t old_chunk; uint64_t new_chunk; }; enum verity_mode { DM_VERITY_MODE_EIO = 0, DM_VERITY_MODE_LOGGING = 1, DM_VERITY_MODE_RESTART = 2, DM_VERITY_MODE_PANIC = 3, }; enum verity_block_type { DM_VERITY_BLOCK_TYPE_DATA = 0, DM_VERITY_BLOCK_TYPE_METADATA = 1, }; struct dm_verity_fec_io { struct rs_control *rs; int erasures[253]; u8 *bufs[256]; unsigned int nbufs; u8 *output; unsigned int level; }; struct dm_verity_fec; struct dm_verity { struct dm_dev *data_dev; struct dm_dev *hash_dev; struct dm_target *ti; struct dm_bufio_client *bufio; char *alg_name; struct crypto_ahash *ahash_tfm; struct crypto_shash *shash_tfm; u8 *root_digest; u8 *salt; u8 *initial_hashstate; u8 *zero_digest; unsigned int salt_size; sector_t data_start; sector_t hash_start; sector_t data_blocks; sector_t hash_blocks; unsigned char data_dev_block_bits; unsigned char hash_dev_block_bits; unsigned char hash_per_block_bits; unsigned char levels; unsigned char version; bool hash_failed: 1; bool use_tasklet: 1; unsigned char mb_max_msgs; unsigned int digest_size; unsigned int hash_reqsize; enum verity_mode mode; unsigned int corrupted_errs; struct workqueue_struct *verify_wq; sector_t hash_level_block[63]; struct dm_verity_fec *fec; unsigned long *validated_blocks; char *signature_key_desc; struct dm_io_client *io; mempool_t recheck_pool; }; struct dm_verity_fec { struct dm_dev *dev; struct dm_bufio_client *data_bufio; struct dm_bufio_client *bufio; size_t io_size; sector_t start; sector_t blocks; sector_t rounds; sector_t hash_blocks; unsigned char roots; unsigned char rsn; mempool_t rs_pool; mempool_t prealloc_pool; mempool_t extra_pool; mempool_t output_pool; struct kmem_cache *cache; }; struct pending_block { void *data; sector_t blkno; u8 want_digest[64]; u8 real_digest[64]; }; struct dm_verity_io { struct dm_verity *v; bio_end_io_t *orig_bi_end_io; struct bvec_iter iter; sector_t block; unsigned int n_blocks; bool in_tasklet; struct work_struct work; u8 tmp_digest[64]; int num_pending; struct pending_block pending_blocks[2]; }; struct dm_verity_prefetch_work { struct work_struct work; struct dm_verity *v; unsigned short ioprio; sector_t block; unsigned int n_blocks; }; struct buffer_aux { int hash_verified; }; struct dm_verity_sig_opts { unsigned int sig_size; u8 *sig; }; struct dm_user_message { __u64 seq; __u64 
type; __u64 flags; __u64 sector; __u64 len; __u8 buf[0]; }; struct target; struct message { struct dm_user_message msg; struct bio *bio; size_t posn_to_user; size_t total_to_user; size_t posn_from_user; size_t total_from_user; struct list_head from_user; struct list_head to_user; u64 return_type; u64 return_flags; struct delayed_work work; bool delayed; struct target *t; }; struct target { struct mutex lock; struct wait_queue_head wq; mempool_t message_pool; u64 next_seq_to_map; u64 next_seq_to_user; struct list_head to_user; struct miscdevice miscdev; struct kref references; int dm_destroyed; bool daemon_terminated; }; struct channel { struct target *target; struct mutex lock; struct message *cur_to_user; struct message *cur_from_user; ssize_t to_user_error; ssize_t from_user_error; struct list_head from_user; struct message scratch_message_from_user; }; enum scrub_type { SCRUB_UNKNOWN = 0, SCRUB_NONE = 1, SCRUB_SW_PROG = 2, SCRUB_SW_SRC = 3, SCRUB_SW_PROG_SRC = 4, SCRUB_SW_TUNABLE = 5, SCRUB_HW_PROG = 6, SCRUB_HW_SRC = 7, SCRUB_HW_PROG_SRC = 8, SCRUB_HW_TUNABLE = 9, }; enum edac_mc_layer_type { EDAC_MC_LAYER_BRANCH = 0, EDAC_MC_LAYER_CHANNEL = 1, EDAC_MC_LAYER_SLOT = 2, EDAC_MC_LAYER_CHIP_SELECT = 3, EDAC_MC_LAYER_ALL_MEM = 4, }; enum hw_event_mc_err_type { HW_EVENT_ERR_CORRECTED = 0, HW_EVENT_ERR_UNCORRECTED = 1, HW_EVENT_ERR_DEFERRED = 2, HW_EVENT_ERR_FATAL = 3, HW_EVENT_ERR_INFO = 4, }; enum dev_type { DEV_UNKNOWN = 0, DEV_X1 = 1, DEV_X2 = 2, DEV_X4 = 3, DEV_X8 = 4, DEV_X16 = 5, DEV_X32 = 6, DEV_X64 = 7, }; enum mem_type___2 { MEM_EMPTY = 0, MEM_RESERVED = 1, MEM_UNKNOWN = 2, MEM_FPM = 3, MEM_EDO = 4, MEM_BEDO = 5, MEM_SDR = 6, MEM_RDR = 7, MEM_DDR = 8, MEM_RDDR = 9, MEM_RMBS = 10, MEM_DDR2 = 11, MEM_FB_DDR2 = 12, MEM_RDDR2 = 13, MEM_XDR = 14, MEM_DDR3 = 15, MEM_RDDR3 = 16, MEM_LRDDR3 = 17, MEM_LPDDR3 = 18, MEM_DDR4 = 19, MEM_RDDR4 = 20, MEM_LRDDR4 = 21, MEM_LPDDR4 = 22, MEM_DDR5 = 23, MEM_RDDR5 = 24, MEM_LRDDR5 = 25, MEM_NVDIMM = 26, MEM_WIO2 = 27, MEM_HBM2 = 28, }; enum edac_type { EDAC_UNKNOWN = 0, EDAC_NONE = 1, EDAC_RESERVED = 2, EDAC_PARITY = 3, EDAC_EC = 4, EDAC_SECDED = 5, EDAC_S2ECD2ED = 6, EDAC_S4ECD4ED = 7, EDAC_S8ECD8ED = 8, EDAC_S16ECD16ED = 9, }; struct mcidev_sysfs_attribute; struct edac_raw_error_desc { char location[256]; char label[296]; long grain; u16 error_count; enum hw_event_mc_err_type type; int top_layer; int mid_layer; int low_layer; unsigned long page_frame_number; unsigned long offset_in_page; unsigned long syndrome; const char *msg; const char *other_detail; }; struct csrow_info; struct edac_mc_layer; struct dimm_info; struct mem_ctl_info { struct device dev; struct bus_type *bus; struct list_head link; struct module *owner; unsigned long mtype_cap; unsigned long edac_ctl_cap; unsigned long edac_cap; unsigned long scrub_cap; enum scrub_type scrub_mode; int (*set_sdram_scrub_rate)(struct mem_ctl_info *, u32); int (*get_sdram_scrub_rate)(struct mem_ctl_info *); void (*edac_check)(struct mem_ctl_info *); unsigned long (*ctl_page_to_phys)(struct mem_ctl_info *, unsigned long); int mc_idx; struct csrow_info **csrows; unsigned int nr_csrows; unsigned int num_cschannel; unsigned int n_layers; struct edac_mc_layer *layers; bool csbased; unsigned int tot_dimms; struct dimm_info **dimms; struct device *pdev; const char *mod_name; const char *ctl_name; const char *dev_name; void *pvt_info; unsigned long start_time; u32 ce_noinfo_count; u32 ue_noinfo_count; u32 ue_mc; u32 ce_mc; struct completion complete; const struct mcidev_sysfs_attribute 
*mc_driver_sysfs_attributes; struct delayed_work work; struct edac_raw_error_desc error_desc; int op_state; struct dentry *debugfs; u8 fake_inject_layer[3]; bool fake_inject_ue; u16 fake_inject_count; }; struct rank_info; struct csrow_info { struct device dev; unsigned long first_page; unsigned long last_page; unsigned long page_mask; int csrow_idx; u32 ue_count; u32 ce_count; struct mem_ctl_info *mci; u32 nr_channels; struct rank_info **channels; }; struct rank_info { int chan_idx; struct csrow_info *csrow; struct dimm_info *dimm; u32 ce_count; }; struct dimm_info { struct device dev; char label[32]; unsigned int location[3]; struct mem_ctl_info *mci; unsigned int idx; u32 grain; enum dev_type dtype; enum mem_type___2 mtype; enum edac_type edac_mode; u32 nr_pages; unsigned int csrow; unsigned int cschannel; u16 smbios_handle; u32 ce_count; u32 ue_count; }; struct edac_mc_layer { enum edac_mc_layer_type type; unsigned int size; bool is_virt_csrow; }; struct edac_device_counter { u32 ue_count; u32 ce_count; }; struct edac_dev_sysfs_attribute; struct edac_device_instance; struct edac_device_block; struct edac_dev_sysfs_block_attribute; struct edac_device_ctl_info { struct list_head link; struct module *owner; int dev_idx; int log_ue; int log_ce; int panic_on_ue; unsigned int poll_msec; unsigned long delay; struct edac_dev_sysfs_attribute *sysfs_attributes; struct bus_type *edac_subsys; int op_state; struct delayed_work work; void (*edac_check)(struct edac_device_ctl_info *); struct device *dev; const char *mod_name; const char *ctl_name; const char *dev_name; void *pvt_info; unsigned long start_time; struct completion removal_complete; char name[32]; u32 nr_instances; struct edac_device_instance *instances; struct edac_device_block *blocks; struct edac_dev_sysfs_block_attribute *attribs; struct edac_device_counter counters; struct kobject kobj; }; struct edac_dev_sysfs_attribute { struct attribute attr; ssize_t (*show)(struct edac_device_ctl_info *, char *); ssize_t (*store)(struct edac_device_ctl_info *, const char *, size_t); }; struct edac_device_instance { struct edac_device_ctl_info *ctl; char name[35]; struct edac_device_counter counters; u32 nr_blocks; struct edac_device_block *blocks; struct kobject kobj; }; struct edac_device_block { struct edac_device_instance *instance; char name[32]; struct edac_device_counter counters; int nr_attribs; struct edac_dev_sysfs_block_attribute *block_attributes; struct kobject kobj; }; struct edac_dev_sysfs_block_attribute { struct attribute attr; ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); struct edac_device_block *block; unsigned int value; }; struct dev_ch_attribute { struct device_attribute attr; unsigned int channel; }; struct ctl_info_attribute { struct attribute attr; ssize_t (*show)(struct edac_device_ctl_info *, char *); ssize_t (*store)(struct edac_device_ctl_info *, const char *, size_t); }; struct instance_attribute { struct attribute attr; ssize_t (*show)(struct edac_device_instance *, char *); ssize_t (*store)(struct edac_device_instance *, const char *, size_t); }; struct edac_pci_counter { atomic_t pe_count; atomic_t npe_count; }; struct edac_pci_ctl_info { struct list_head link; int pci_idx; struct bus_type *edac_subsys; int op_state; struct delayed_work work; void (*edac_check)(struct edac_pci_ctl_info *); struct device *dev; const char *mod_name; const char *ctl_name; const char *dev_name; void *pvt_info; unsigned long start_time; 
struct completion complete; char name[32]; struct edac_pci_counter counters; struct kobject kobj; }; struct edac_pci_gen_data { int edac_idx; }; struct edac_pci_dev_attribute { struct attribute attr; void *value; ssize_t (*show)(void *, char *); ssize_t (*store)(void *, const char *, size_t); }; struct instance_attribute___2 { struct attribute attr; ssize_t (*show)(struct edac_pci_ctl_info *, char *); ssize_t (*store)(struct edac_pci_ctl_info *, const char *, size_t); }; typedef void (*pci_parity_check_fn_t)(struct pci_dev *); enum opp_table_access { OPP_TABLE_ACCESS_UNKNOWN = 0, OPP_TABLE_ACCESS_EXCLUSIVE = 1, OPP_TABLE_ACCESS_SHARED = 2, }; enum dev_pm_opp_event { OPP_EVENT_ADD = 0, OPP_EVENT_REMOVE = 1, OPP_EVENT_ENABLE = 2, OPP_EVENT_DISABLE = 3, OPP_EVENT_ADJUST_VOLTAGE = 4, }; struct dev_pm_opp_supply; struct dev_pm_opp_icc_bw; struct dev_pm_opp { struct list_head node; struct kref kref; bool available; bool dynamic; bool turbo; bool suspend; bool removed; unsigned long *rates; unsigned int level; struct dev_pm_opp_supply *supplies; struct dev_pm_opp_icc_bw *bandwidth; unsigned long clock_latency_ns; struct dev_pm_opp **required_opps; struct opp_table *opp_table; struct device_node *np; struct dentry *dentry; const char *of_name; }; struct dev_pm_opp_supply { unsigned long u_volt; unsigned long u_volt_min; unsigned long u_volt_max; unsigned long u_amp; unsigned long u_watt; }; struct dev_pm_opp_icc_bw { u32 avg; u32 peak; }; struct opp_table { struct list_head node; struct list_head lazy; struct blocking_notifier_head head; struct list_head dev_list; struct list_head opp_list; struct kref kref; struct mutex lock; struct device_node *np; unsigned long clock_latency_ns_max; unsigned int voltage_tolerance_v1; unsigned int parsed_static_opps; enum opp_table_access shared_opp; unsigned long rate_clk_single; struct dev_pm_opp *current_opp; struct dev_pm_opp *suspend_opp; struct mutex genpd_virt_dev_lock; struct device **genpd_virt_devs; struct opp_table **required_opp_tables; unsigned int required_opp_count; unsigned int *supported_hw; unsigned int supported_hw_count; const char *prop_name; config_clks_t config_clks; struct clk **clks; struct clk *clk; int clk_count; config_regulators_t config_regulators; struct regulator **regulators; int regulator_count; struct icc_path **paths; unsigned int path_count; bool enabled; bool is_genpd; int (*set_required_opps)(struct device *, struct opp_table *, struct dev_pm_opp *, bool); struct dentry *dentry; char dentry_name[255]; }; struct opp_device { struct list_head node; const struct device *dev; struct dentry *dentry; }; struct opp_config_data { struct opp_table *opp_table; unsigned int flags; }; struct cpufreq_policy_data; struct freq_attr; struct cpufreq_driver { char name[16]; u16 flags; void *driver_data; int (*init)(struct cpufreq_policy *); int (*verify)(struct cpufreq_policy_data *); int (*setpolicy)(struct cpufreq_policy *); int (*target)(struct cpufreq_policy *, unsigned int, unsigned int); int (*target_index)(struct cpufreq_policy *, unsigned int); unsigned int (*fast_switch)(struct cpufreq_policy *, unsigned int); void (*adjust_perf)(unsigned int, unsigned long, unsigned long, unsigned long); unsigned int (*get_intermediate)(struct cpufreq_policy *, unsigned int); int (*target_intermediate)(struct cpufreq_policy *, unsigned int); unsigned int (*get)(unsigned int); void (*update_limits)(unsigned int); int (*bios_limit)(int, unsigned int *); int (*online)(struct cpufreq_policy *); int (*offline)(struct cpufreq_policy *); int 
(*exit)(struct cpufreq_policy *); int (*suspend)(struct cpufreq_policy *); int (*resume)(struct cpufreq_policy *); void (*ready)(struct cpufreq_policy *); struct freq_attr **attr; bool boost_enabled; int (*set_boost)(struct cpufreq_policy *, int); void (*register_em)(struct cpufreq_policy *); }; struct cpufreq_policy_data { struct cpufreq_cpuinfo cpuinfo; struct cpufreq_frequency_table *freq_table; unsigned int cpu; unsigned int min; unsigned int max; }; struct freq_attr { struct attribute attr; ssize_t (*show)(struct cpufreq_policy *, char *); ssize_t (*store)(struct cpufreq_policy *, const char *, size_t); }; struct cpufreq_freqs { struct cpufreq_policy *policy; unsigned int old; unsigned int new; u8 flags; }; struct cpufreq_stats { unsigned int total_trans; unsigned long long last_time; unsigned int max_state; unsigned int state_num; unsigned int last_index; u64 *time_in_state; unsigned int *freq_table; unsigned int *trans_table; unsigned int reset_pending; unsigned long long reset_time; }; struct cpu_freqs { unsigned int offset; unsigned int max_state; unsigned int last_index; unsigned int freq_table[0]; }; struct dbs_data; struct policy_dbs_info; struct dbs_governor { struct cpufreq_governor gov; struct kobj_type kobj_type; struct dbs_data *gdbs_data; unsigned int (*gov_dbs_update)(struct cpufreq_policy *); struct policy_dbs_info * (*alloc)(); void (*free)(struct policy_dbs_info *); int (*init)(struct dbs_data *); void (*exit)(struct dbs_data *); void (*start)(struct cpufreq_policy *); }; struct dbs_data { struct gov_attr_set attr_set; struct dbs_governor *gov; void *tuners; unsigned int ignore_nice_load; unsigned int sampling_rate; unsigned int sampling_down_factor; unsigned int up_threshold; unsigned int io_is_busy; }; struct policy_dbs_info { struct cpufreq_policy *policy; struct mutex update_mutex; u64 last_sample_time; s64 sample_delay_ns; atomic_t work_count; struct irq_work irq_work; struct work_struct work; struct dbs_data *dbs_data; struct list_head list; unsigned int rate_mult; unsigned int idle_periods; bool is_shared; bool work_in_progress; }; struct cs_policy_dbs_info { struct policy_dbs_info policy_dbs; unsigned int down_skip; unsigned int requested_freq; }; struct cs_dbs_tuners { unsigned int down_threshold; unsigned int freq_step; }; struct cpu_dbs_info { u64 prev_cpu_idle; u64 prev_update_time; u64 prev_cpu_nice; unsigned int prev_load; struct update_util_data update_util; struct policy_dbs_info *policy_dbs; }; struct scmi_data { int domain_id; int nr_opp; struct device *cpu_dev; cpumask_var_t opp_shared_cpus; }; struct scpi_data { struct clk *clk; struct device *cpu_dev; }; struct cpuidle_governor { char name[16]; struct list_head governor_list; unsigned int rating; int (*enable)(struct cpuidle_driver *, struct cpuidle_device *); void (*disable)(struct cpuidle_driver *, struct cpuidle_device *); int (*select)(struct cpuidle_driver *, struct cpuidle_device *, bool *); void (*reflect)(struct cpuidle_device *, int); }; struct cpuidle_state_attr { struct attribute attr; ssize_t (*show)(struct cpuidle_state *, struct cpuidle_state_usage *, char *); ssize_t (*store)(struct cpuidle_state *, struct cpuidle_state_usage *, const char *, size_t); }; struct cpuidle_state_kobj { struct cpuidle_state *state; struct cpuidle_state_usage *state_usage; struct completion kobj_unregister; struct kobject kobj; struct cpuidle_device *device; }; struct cpuidle_driver_kobj { struct cpuidle_driver *drv; struct completion kobj_unregister; struct kobject kobj; }; struct cpuidle_device_kobj { 
struct cpuidle_device *dev; struct completion kobj_unregister; struct kobject kobj; }; struct cpuidle_driver_attr { struct attribute attr; ssize_t (*show)(struct cpuidle_driver *, char *); ssize_t (*store)(struct cpuidle_driver *, const char *, size_t); }; struct cpuidle_attr { struct attribute attr; ssize_t (*show)(struct cpuidle_device *, char *); ssize_t (*store)(struct cpuidle_device *, const char *, size_t); }; struct menu_device { int needs_update; int tick_wakeup; u64 next_timer_ns; unsigned int bucket; unsigned int correction_factor[12]; unsigned int intervals[8]; int interval_ptr; }; struct teo_bin { unsigned int intercepts; unsigned int hits; unsigned int recent; }; struct teo_cpu { s64 time_span_ns; s64 sleep_length_ns; struct teo_bin state_bins[10]; unsigned int total; int next_recent_idx; int recent_idx[9]; unsigned int tick_hits; unsigned long util_threshold; }; struct psci_cpuidle_data { u32 *psci_states; struct device *dev; }; struct psci_pd_provider { struct list_head link; struct device_node *node; }; struct mmc_host; struct mmc_request; typedef void (*btf_trace_mmc_request_start)(void *, struct mmc_host *, struct mmc_request *); typedef unsigned int mmc_pm_flag_t; struct mmc_ios { unsigned int clock; unsigned short vdd; unsigned int power_delay_ms; unsigned char bus_mode; unsigned char chip_select; unsigned char power_mode; unsigned char bus_width; unsigned char timing; unsigned char signal_voltage; unsigned char drv_type; bool enhanced_strobe; }; struct mmc_ctx { struct task_struct *task; }; struct mmc_slot { int cd_irq; bool cd_wake_enabled; void *handler_priv; }; struct mmc_supply { struct regulator *vmmc; struct regulator *vqmmc; }; struct mmc_host_ops; struct mmc_pwrseq; struct mmc_card; struct mmc_bus_ops; struct mmc_cqe_ops; struct mmc_host { struct device *parent; struct device class_dev; int index; const struct mmc_host_ops *ops; struct mmc_pwrseq *pwrseq; unsigned int f_min; unsigned int f_max; unsigned int f_init; u32 ocr_avail; u32 ocr_avail_sdio; u32 ocr_avail_sd; u32 ocr_avail_mmc; struct wakeup_source *ws; u32 max_current_330; u32 max_current_300; u32 max_current_180; u32 caps; u32 caps2; int fixed_drv_type; mmc_pm_flag_t pm_caps; unsigned int max_seg_size; unsigned short max_segs; unsigned short unused; unsigned int max_req_size; unsigned int max_blk_size; unsigned int max_blk_count; unsigned int max_busy_timeout; spinlock_t lock; struct mmc_ios ios; unsigned int use_spi_crc: 1; unsigned int claimed: 1; unsigned int doing_init_tune: 1; unsigned int can_retune: 1; unsigned int doing_retune: 1; unsigned int retune_now: 1; unsigned int retune_paused: 1; unsigned int retune_crc_disable: 1; unsigned int can_dma_map_merge: 1; unsigned int vqmmc_enabled: 1; int rescan_disable; int rescan_entered; int need_retune; int hold_retune; unsigned int retune_period; struct timer_list retune_timer; bool trigger_card_event; struct mmc_card *card; wait_queue_head_t wq; struct mmc_ctx *claimer; int claim_cnt; struct mmc_ctx default_ctx; struct delayed_work detect; int detect_change; struct mmc_slot slot; const struct mmc_bus_ops *bus_ops; unsigned int sdio_irqs; struct task_struct *sdio_irq_thread; struct work_struct sdio_irq_work; bool sdio_irq_pending; atomic_t sdio_irq_thread_abort; mmc_pm_flag_t pm_flags; struct led_trigger *led; bool regulator_enabled; struct mmc_supply supply; struct dentry *debugfs_root; struct mmc_request *ongoing_mrq; unsigned int actual_clock; unsigned int slotno; int dsr_req; u32 dsr; const struct mmc_cqe_ops *cqe_ops; void *cqe_private; int 
cqe_qdepth; bool cqe_enabled; bool cqe_on; bool cqe_recovery_reset_always; struct blk_crypto_profile crypto_profile; bool hsq_enabled; int hsq_depth; u32 err_stats[15]; u64 android_kabi_reserved1; u64 android_kabi_reserved2; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; unsigned long private[0]; }; struct mmc_host_ops { void (*post_req)(struct mmc_host *, struct mmc_request *, int); void (*pre_req)(struct mmc_host *, struct mmc_request *); void (*request)(struct mmc_host *, struct mmc_request *); int (*request_atomic)(struct mmc_host *, struct mmc_request *); void (*set_ios)(struct mmc_host *, struct mmc_ios *); int (*get_ro)(struct mmc_host *); int (*get_cd)(struct mmc_host *); void (*enable_sdio_irq)(struct mmc_host *, int); void (*ack_sdio_irq)(struct mmc_host *); void (*init_card)(struct mmc_host *, struct mmc_card *); int (*start_signal_voltage_switch)(struct mmc_host *, struct mmc_ios *); int (*card_busy)(struct mmc_host *); int (*execute_tuning)(struct mmc_host *, u32); int (*prepare_hs400_tuning)(struct mmc_host *, struct mmc_ios *); int (*execute_hs400_tuning)(struct mmc_host *, struct mmc_card *); int (*prepare_sd_hs_tuning)(struct mmc_host *, struct mmc_card *); int (*execute_sd_hs_tuning)(struct mmc_host *, struct mmc_card *); int (*hs400_prepare_ddr)(struct mmc_host *); void (*hs400_downgrade)(struct mmc_host *); void (*hs400_complete)(struct mmc_host *); void (*hs400_enhanced_strobe)(struct mmc_host *, struct mmc_ios *); int (*select_drive_strength)(struct mmc_card *, unsigned int, int, int, int *); void (*card_hw_reset)(struct mmc_host *); void (*card_event)(struct mmc_host *); int (*multi_io_quirk)(struct mmc_card *, unsigned int, int); int (*init_sd_express)(struct mmc_host *, struct mmc_ios *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct mmc_command; struct mmc_data; struct mmc_request { struct mmc_command *sbc; struct mmc_command *cmd; struct mmc_data *data; struct mmc_command *stop; struct completion completion; struct completion cmd_completion; void (*done)(struct mmc_request *); void (*recovery_notifier)(struct mmc_request *); struct mmc_host *host; bool cap_cmd_during_tfr; int tag; const struct bio_crypt_ctx *crypto_ctx; int crypto_key_slot; }; struct mmc_command { u32 opcode; u32 arg; u32 resp[4]; unsigned int flags; unsigned int retries; int error; unsigned int busy_timeout; struct mmc_data *data; struct mmc_request *mrq; }; struct mmc_data { unsigned int timeout_ns; unsigned int timeout_clks; unsigned int blksz; unsigned int blocks; unsigned int blk_addr; int error; unsigned int flags; unsigned int bytes_xfered; struct mmc_command *stop; struct mmc_request *mrq; unsigned int sg_len; int sg_count; struct scatterlist *sg; s32 host_cookie; }; struct mmc_cid { unsigned int manfid; char prod_name[8]; unsigned char prv; unsigned int serial; unsigned short oemid; unsigned short year; unsigned char hwrev; unsigned char fwrev; unsigned char month; }; struct mmc_csd { unsigned char structure; unsigned char mmca_vsn; unsigned short cmdclass; unsigned short taac_clks; unsigned int taac_ns; unsigned int c_size; unsigned int r2w_factor; unsigned int max_dtr; unsigned int erase_size; unsigned int read_blkbits; unsigned int write_blkbits; unsigned int capacity; unsigned int read_partial: 1; unsigned int read_misalign: 1; unsigned int write_partial: 1; unsigned int write_misalign: 1; unsigned int dsr_imp: 1; }; struct mmc_ext_csd { u8 rev; u8 erase_group_def; u8 sec_feature_support; u8 rel_sectors; u8 rel_param; bool 
enhanced_rpmb_supported; u8 part_config; u8 cache_ctrl; u8 rst_n_function; u8 max_packed_writes; u8 max_packed_reads; u8 packed_event_en; unsigned int part_time; unsigned int sa_timeout; unsigned int generic_cmd6_time; unsigned int power_off_longtime; u8 power_off_notification; unsigned int hs_max_dtr; unsigned int hs200_max_dtr; unsigned int sectors; unsigned int hc_erase_size; unsigned int hc_erase_timeout; unsigned int sec_trim_mult; unsigned int sec_erase_mult; unsigned int trim_timeout; bool partition_setting_completed; unsigned long long enhanced_area_offset; unsigned int enhanced_area_size; unsigned int cache_size; bool hpi_en; bool hpi; unsigned int hpi_cmd; bool bkops; bool man_bkops_en; bool auto_bkops_en; unsigned int data_sector_size; unsigned int data_tag_unit_size; unsigned int boot_ro_lock; bool boot_ro_lockable; bool ffu_capable; bool cmdq_en; bool cmdq_support; unsigned int cmdq_depth; u8 fwrev[8]; u8 raw_exception_status; u8 raw_partition_support; u8 raw_rpmb_size_mult; u8 raw_erased_mem_count; u8 strobe_support; u8 raw_ext_csd_structure; u8 raw_card_type; u8 raw_driver_strength; u8 out_of_int_time; u8 raw_pwr_cl_52_195; u8 raw_pwr_cl_26_195; u8 raw_pwr_cl_52_360; u8 raw_pwr_cl_26_360; u8 raw_s_a_timeout; u8 raw_hc_erase_gap_size; u8 raw_erase_timeout_mult; u8 raw_hc_erase_grp_size; u8 raw_boot_mult; u8 raw_sec_trim_mult; u8 raw_sec_erase_mult; u8 raw_sec_feature_support; u8 raw_trim_mult; u8 raw_pwr_cl_200_195; u8 raw_pwr_cl_200_360; u8 raw_pwr_cl_ddr_52_195; u8 raw_pwr_cl_ddr_52_360; u8 raw_pwr_cl_ddr_200_360; u8 raw_bkops_status; u8 raw_sectors[4]; u8 pre_eol_info; u8 device_life_time_est_typ_a; u8 device_life_time_est_typ_b; unsigned int feature_support; }; struct sd_scr { unsigned char sda_vsn; unsigned char sda_spec3; unsigned char sda_spec4; unsigned char sda_specx; unsigned char bus_widths; unsigned char cmds; }; struct sd_ssr { unsigned int au; unsigned int erase_timeout; unsigned int erase_offset; }; struct sd_switch_caps { unsigned int hs_max_dtr; unsigned int uhs_max_dtr; unsigned int sd3_bus_mode; unsigned int sd3_drv_type; unsigned int sd3_curr_limit; }; struct sd_ext_reg { u8 fno; u8 page; u16 offset; u8 rev; u8 feature_enabled; u8 feature_support; }; struct sdio_cccr { unsigned int sdio_vsn; unsigned int sd_vsn; unsigned int multi_block: 1; unsigned int low_speed: 1; unsigned int wide_bus: 1; unsigned int high_power: 1; unsigned int high_speed: 1; unsigned int disable_cd: 1; unsigned int enable_async_irq: 1; }; struct sdio_cis { unsigned short vendor; unsigned short device; unsigned short blksize; unsigned int max_dtr; }; struct mmc_part { u64 size; unsigned int part_cfg; char name[20]; bool force_ro; unsigned int area_type; u64 android_kabi_reserved1; }; struct sdio_func; struct sdio_func_tuple; struct mmc_card { struct mmc_host *host; struct device dev; u32 ocr; unsigned int rca; unsigned int type; unsigned int state; unsigned int quirks; unsigned int quirk_max_rate; bool written_flag; bool reenable_cmdq; unsigned int erase_size; unsigned int erase_shift; unsigned int pref_erase; unsigned int eg_boundary; unsigned int erase_arg; u8 erased_byte; u32 raw_cid[4]; u32 raw_csd[4]; u32 raw_scr[2]; u32 raw_ssr[16]; struct mmc_cid cid; struct mmc_csd csd; struct mmc_ext_csd ext_csd; struct sd_scr scr; struct sd_ssr ssr; struct sd_switch_caps sw_caps; struct sd_ext_reg ext_power; struct sd_ext_reg ext_perf; unsigned int sdio_funcs; atomic_t sdio_funcs_probed; struct sdio_cccr cccr; struct sdio_cis cis; struct sdio_func *sdio_func[7]; struct sdio_func 
*sdio_single_irq; u8 major_rev; u8 minor_rev; unsigned int num_info; const char **info; struct sdio_func_tuple *tuples; unsigned int sd_bus_speed; unsigned int mmc_avail_type; unsigned int drive_strength; struct dentry *debugfs_root; struct mmc_part part[7]; unsigned int nr_parts; struct workqueue_struct *complete_wq; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct mmc_pwrseq_ops; struct mmc_pwrseq { const struct mmc_pwrseq_ops *ops; struct device *dev; struct list_head pwrseq_node; struct module *owner; }; struct mmc_pwrseq_ops { void (*pre_power_on)(struct mmc_host *); void (*post_power_on)(struct mmc_host *); void (*power_off)(struct mmc_host *); void (*reset)(struct mmc_host *); }; struct mmc_bus_ops { void (*remove)(struct mmc_host *); void (*detect)(struct mmc_host *); int (*pre_suspend)(struct mmc_host *); int (*suspend)(struct mmc_host *); int (*resume)(struct mmc_host *); int (*runtime_suspend)(struct mmc_host *); int (*runtime_resume)(struct mmc_host *); int (*alive)(struct mmc_host *); int (*shutdown)(struct mmc_host *); int (*hw_reset)(struct mmc_host *); int (*sw_reset)(struct mmc_host *); bool (*cache_enabled)(struct mmc_host *); int (*flush_cache)(struct mmc_host *); }; struct mmc_cqe_ops { int (*cqe_enable)(struct mmc_host *, struct mmc_card *); void (*cqe_disable)(struct mmc_host *); int (*cqe_request)(struct mmc_host *, struct mmc_request *); void (*cqe_post_req)(struct mmc_host *, struct mmc_request *); void (*cqe_off)(struct mmc_host *); int (*cqe_wait_for_idle)(struct mmc_host *); bool (*cqe_timeout)(struct mmc_host *, struct mmc_request *, bool *); void (*cqe_recovery_start)(struct mmc_host *); void (*cqe_recovery_finish)(struct mmc_host *); u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; typedef void (*btf_trace_mmc_request_done)(void *, struct mmc_host *, struct mmc_request *); enum mmc_busy_cmd { MMC_BUSY_CMD6 = 0, MMC_BUSY_ERASE = 1, MMC_BUSY_HPI = 2, MMC_BUSY_EXTR_SINGLE = 3, MMC_BUSY_IO = 4, }; enum mmc_err_stat { MMC_ERR_CMD_TIMEOUT = 0, MMC_ERR_CMD_CRC = 1, MMC_ERR_DAT_TIMEOUT = 2, MMC_ERR_DAT_CRC = 3, MMC_ERR_AUTO_CMD = 4, MMC_ERR_ADMA = 5, MMC_ERR_TUNING = 6, MMC_ERR_CMDQ_RED = 7, MMC_ERR_CMDQ_GCE = 8, MMC_ERR_CMDQ_ICCE = 9, MMC_ERR_REQ_TIMEOUT = 10, MMC_ERR_CMDQ_REQ_TIMEOUT = 11, MMC_ERR_ICE_CFG = 12, MMC_ERR_CTRL_TIMEOUT = 13, MMC_ERR_UNEXPECTED_IRQ = 14, MMC_ERR_MAX = 15, }; struct trace_event_raw_mmc_request_start { struct trace_entry ent; u32 cmd_opcode; u32 cmd_arg; unsigned int cmd_flags; unsigned int cmd_retries; u32 stop_opcode; u32 stop_arg; unsigned int stop_flags; unsigned int stop_retries; u32 sbc_opcode; u32 sbc_arg; unsigned int sbc_flags; unsigned int sbc_retries; unsigned int blocks; unsigned int blk_addr; unsigned int blksz; unsigned int data_flags; int tag; unsigned int can_retune; unsigned int doing_retune; unsigned int retune_now; int need_retune; int hold_retune; unsigned int retune_period; struct mmc_request *mrq; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_mmc_request_done { struct trace_entry ent; u32 cmd_opcode; int cmd_err; u32 cmd_resp[4]; unsigned int cmd_retries; u32 stop_opcode; int stop_err; u32 stop_resp[4]; unsigned int stop_retries; u32 sbc_opcode; int sbc_err; u32 sbc_resp[4]; unsigned int sbc_retries; unsigned int bytes_xfered; int data_err; int tag; unsigned int can_retune; unsigned int doing_retune; unsigned int retune_now; int need_retune; int hold_retune; unsigned int retune_period; struct mmc_request *mrq; u32 __data_loc_name; char __data[0]; }; struct 
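/*
 * The btf_trace_mmc_request_done typedef and the trace_event_raw_mmc_request_*
 * layouts above are what a BPF CO-RE program sees when hooking the mmc
 * tracepoints. A minimal sketch, assuming libbpf's <bpf/bpf_helpers.h>,
 * <bpf/bpf_tracing.h> and <bpf/bpf_core_read.h> are available next to this
 * header; the program name is illustrative only:
 *
 *   #include "vmlinux.h"
 *   #include <bpf/bpf_helpers.h>
 *   #include <bpf/bpf_tracing.h>
 *   #include <bpf/bpf_core_read.h>
 *
 *   char LICENSE[] SEC("license") = "GPL";
 *
 *   SEC("tp_btf/mmc_request_done")
 *   int BPF_PROG(log_mmc_done, struct mmc_host *host, struct mmc_request *mrq)
 *   {
 *           // Walk mrq->cmd with CO-RE reads so field offsets are relocated
 *           // against the running kernel's BTF at load time.
 *           u32 opcode = BPF_CORE_READ(mrq, cmd, opcode);
 *           int err = BPF_CORE_READ(mrq, cmd, error);
 *
 *           bpf_printk("mmc cmd %u done, err=%d", opcode, err);
 *           return 0;
 *   }
 */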
trace_event_data_offsets_mmc_request_start { u32 name; }; struct trace_event_data_offsets_mmc_request_done { u32 name; }; struct mmc_driver { struct device_driver drv; int (*probe)(struct mmc_card *); void (*remove)(struct mmc_card *); void (*shutdown)(struct mmc_card *); }; struct mmc_clk_phase { bool valid; u16 in_deg; u16 out_deg; }; struct mmc_clk_phase_map { struct mmc_clk_phase phase[11]; }; struct mmc_fixup { const char *name; u64 rev_start; u64 rev_end; unsigned int manfid; unsigned short oemid; unsigned short year; unsigned char month; u16 cis_vendor; u16 cis_device; unsigned int ext_csd_rev; const char *of_compatible; void (*vendor_fixup)(struct mmc_card *, int); int data; }; struct mmc_op_cond_busy_data { struct mmc_host *host; u32 ocr; struct mmc_command *cmd; }; struct mmc_busy_data { struct mmc_card *card; bool retry_crc_err; enum mmc_busy_cmd busy_cmd; }; struct sd_busy_data { struct mmc_card *card; u8 *reg_buf; }; typedef void sdio_irq_handler_t(struct sdio_func *); struct sdio_func { struct mmc_card *card; struct device dev; sdio_irq_handler_t *irq_handler; unsigned int num; unsigned char class; unsigned short vendor; unsigned short device; unsigned int max_blksize; unsigned int cur_blksize; unsigned int enable_timeout; unsigned int state; u8 *tmpbuf; u8 major_rev; u8 minor_rev; unsigned int num_info; const char **info; struct sdio_func_tuple *tuples; }; struct sdio_func_tuple { struct sdio_func_tuple *next; unsigned char code; unsigned char size; unsigned char data[0]; }; struct sdio_device_id; struct sdio_driver { char *name; const struct sdio_device_id *id_table; int (*probe)(struct sdio_func *, const struct sdio_device_id *); void (*remove)(struct sdio_func *); struct device_driver drv; }; struct sdio_device_id { __u8 class; __u16 vendor; __u16 device; kernel_ulong_t driver_data; }; typedef int tpl_parse_t(struct mmc_card *, struct sdio_func *, const unsigned char *, unsigned int); struct cis_tpl { unsigned char code; unsigned char min_size; tpl_parse_t *parse; }; struct mmc_gpio { struct gpio_desc *ro_gpio; struct gpio_desc *cd_gpio; irqreturn_t (*cd_gpio_isr)(int, void *); char *ro_label; char *cd_label; u32 cd_debounce_delay_ms; int cd_irq; }; enum mmc_drv_op { MMC_DRV_OP_IOCTL = 0, MMC_DRV_OP_IOCTL_RPMB = 1, MMC_DRV_OP_BOOT_WP = 2, MMC_DRV_OP_GET_CARD_STATUS = 3, MMC_DRV_OP_GET_EXT_CSD = 4, }; struct mmc_blk_request { struct mmc_request mrq; struct mmc_command sbc; struct mmc_command cmd; struct mmc_command stop; struct mmc_data data; }; struct mmc_queue_req { struct mmc_blk_request brq; struct scatterlist *sg; enum mmc_drv_op drv_op; int drv_op_result; void *drv_op_data; unsigned int ioc_count; int retries; }; enum mmc_issued { MMC_REQ_STARTED = 0, MMC_REQ_BUSY = 1, MMC_REQ_FAILED_TO_START = 2, MMC_REQ_FINISHED = 3, }; enum mmc_issue_type { MMC_ISSUE_SYNC = 0, MMC_ISSUE_DCMD = 1, MMC_ISSUE_ASYNC = 2, MMC_ISSUE_MAX = 3, }; struct mmc_blk_data; struct mmc_queue { struct mmc_card *card; struct mmc_ctx ctx; struct blk_mq_tag_set tag_set; struct mmc_blk_data *blkdata; struct request_queue *queue; spinlock_t lock; int in_flight[3]; unsigned int cqe_busy; bool busy; bool recovery_needed; bool in_recovery; bool rw_wait; bool waiting; struct work_struct recovery_work; wait_queue_head_t wait; struct request *recovery_req; struct request *complete_req; struct mutex complete_lock; struct work_struct complete_work; }; struct mmc_blk_data { struct device *parent; struct gendisk *disk; struct mmc_queue queue; struct list_head part; struct list_head rpmbs; unsigned int flags; 
struct kref kref; unsigned int read_only; unsigned int part_type; unsigned int reset_done; unsigned int part_curr; int area_type; struct dentry *status_dentry; struct dentry *ext_csd_dentry; }; struct mmc_ioc_cmd { int write_flag; int is_acmd; __u32 opcode; __u32 arg; __u32 response[4]; unsigned int flags; unsigned int blksz; unsigned int blocks; unsigned int postsleep_min_us; unsigned int postsleep_max_us; unsigned int data_timeout_ns; unsigned int cmd_timeout_ms; __u32 __pad; __u64 data_ptr; }; struct mmc_ioc_multi_cmd { __u64 num_of_cmds; struct mmc_ioc_cmd cmds[0]; }; struct mmc_rpmb_data { struct device dev; struct cdev chrdev; int id; unsigned int part_index; struct mmc_blk_data *md; struct list_head node; }; struct mmc_blk_busy_data { struct mmc_card *card; u32 status; }; struct mmc_blk_ioc_data { struct mmc_ioc_cmd ic; unsigned char *buf; u64 buf_bytes; unsigned int flags; struct mmc_rpmb_data *rpmb; }; enum sdhci_reset_reason { SDHCI_RESET_FOR_INIT = 0, SDHCI_RESET_FOR_REQUEST_ERROR = 1, SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY = 2, SDHCI_RESET_FOR_TUNING_ABORT = 3, SDHCI_RESET_FOR_CARD_REMOVED = 4, SDHCI_RESET_FOR_CQE_RECOVERY = 5, }; enum sdhci_cookie { COOKIE_UNMAPPED = 0, COOKIE_PRE_MAPPED = 1, COOKIE_MAPPED = 2, }; struct sdhci_ops; struct sdhci_host { const char *hw_name; unsigned int quirks; unsigned int quirks2; int irq; void *ioaddr; phys_addr_t mapbase; char *bounce_buffer; dma_addr_t bounce_addr; unsigned int bounce_buffer_size; const struct sdhci_ops *ops; struct mmc_host *mmc; struct mmc_host_ops mmc_host_ops; u64 dma_mask; struct led_classdev led; char led_name[32]; spinlock_t lock; int flags; unsigned int version; unsigned int max_clk; unsigned int timeout_clk; u8 max_timeout_count; unsigned int clk_mul; unsigned int clock; u8 pwr; u8 drv_type; bool reinit_uhs; bool runtime_suspended; bool bus_on; bool preset_enabled; bool pending_reset; bool irq_wake_enabled; bool v4_mode; bool use_external_dma; bool always_defer_done; struct mmc_request *mrqs_done[2]; struct mmc_command *cmd; struct mmc_command *data_cmd; struct mmc_command *deferred_cmd; struct mmc_data *data; unsigned int data_early: 1; struct sg_mapping_iter sg_miter; unsigned int blocks; int sg_count; int max_adma; void *adma_table; void *align_buffer; size_t adma_table_sz; size_t align_buffer_sz; dma_addr_t adma_addr; dma_addr_t align_addr; unsigned int desc_sz; unsigned int alloc_desc_sz; struct workqueue_struct *complete_wq; struct work_struct complete_work; struct timer_list timer; struct timer_list data_timer; u32 caps; u32 caps1; bool read_caps; bool sdhci_core_to_disable_vqmmc; unsigned int ocr_avail_sdio; unsigned int ocr_avail_sd; unsigned int ocr_avail_mmc; u32 ocr_mask; unsigned int timing; u32 thread_isr; u32 ier; bool cqe_on; u32 cqe_ier; u32 cqe_err_ier; wait_queue_head_t buf_ready_int; unsigned int tuning_done; unsigned int tuning_count; unsigned int tuning_mode; unsigned int tuning_err; int tuning_delay; int tuning_loop_count; u32 sdma_boundary; u32 adma_table_cnt; u64 data_timeout; u64 android_kabi_reserved1; long: 64; long: 64; long: 64; long: 64; long: 64; unsigned long private[0]; }; struct sdhci_ops { u32 (*read_l)(struct sdhci_host *, int); u16 (*read_w)(struct sdhci_host *, int); u8 (*read_b)(struct sdhci_host *, int); void (*write_l)(struct sdhci_host *, u32, int); void (*write_w)(struct sdhci_host *, u16, int); void (*write_b)(struct sdhci_host *, u8, int); void (*set_clock)(struct sdhci_host *, unsigned int); void (*set_power)(struct sdhci_host *, unsigned char, unsigned short); u32 
(*irq)(struct sdhci_host *, u32); int (*set_dma_mask)(struct sdhci_host *); int (*enable_dma)(struct sdhci_host *); unsigned int (*get_max_clock)(struct sdhci_host *); unsigned int (*get_min_clock)(struct sdhci_host *); unsigned int (*get_timeout_clock)(struct sdhci_host *); unsigned int (*get_max_timeout_count)(struct sdhci_host *); void (*set_timeout)(struct sdhci_host *, struct mmc_command *); void (*set_bus_width)(struct sdhci_host *, int); void (*platform_send_init_74_clocks)(struct sdhci_host *, u8); unsigned int (*get_ro)(struct sdhci_host *); void (*reset)(struct sdhci_host *, u8); int (*platform_execute_tuning)(struct sdhci_host *, u32); void (*set_uhs_signaling)(struct sdhci_host *, unsigned int); void (*hw_reset)(struct sdhci_host *); void (*adma_workaround)(struct sdhci_host *, u32); void (*card_event)(struct sdhci_host *); void (*voltage_switch)(struct sdhci_host *); void (*adma_write_desc)(struct sdhci_host *, void **, dma_addr_t, int, unsigned int); void (*copy_to_bounce_buffer)(struct sdhci_host *, struct mmc_data *, unsigned int); void (*request_done)(struct sdhci_host *, struct mmc_request *); void (*dump_vendor_regs)(struct sdhci_host *); u64 android_kabi_reserved1; }; struct sdhci_adma2_64_desc { __le16 cmd; __le16 len; __le32 addr_lo; __le32 addr_hi; }; struct sdhci_pltfm_host { struct clk *clk; unsigned int clock; u16 xfer_mode_shadow; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; unsigned long private[0]; }; struct sdhci_pltfm_data { const struct sdhci_ops *ops; unsigned int quirks; unsigned int quirks2; }; typedef void (*btf_trace_ufshcd_clk_gating)(void *, const char *, int); typedef void (*btf_trace_ufshcd_clk_scaling)(void *, const char *, const char *, const char *, u32, u32); typedef void (*btf_trace_ufshcd_auto_bkops_state)(void *, const char *, const char *); typedef void (*btf_trace_ufshcd_profile_hibern8)(void *, const char *, const char *, s64, int); typedef void (*btf_trace_ufshcd_profile_clk_gating)(void *, const char *, const char *, s64, int); typedef void (*btf_trace_ufshcd_profile_clk_scaling)(void *, const char *, const char *, s64, int); typedef void (*btf_trace_ufshcd_system_suspend)(void *, const char *, int, s64, int, int); typedef void (*btf_trace_ufshcd_system_resume)(void *, const char *, int, s64, int, int); typedef void (*btf_trace_ufshcd_runtime_suspend)(void *, const char *, int, s64, int, int); typedef void (*btf_trace_ufshcd_runtime_resume)(void *, const char *, int, s64, int, int); typedef void (*btf_trace_ufshcd_init)(void *, const char *, int, s64, int, int); typedef void (*btf_trace_ufshcd_wl_suspend)(void *, const char *, int, s64, int, int); typedef void (*btf_trace_ufshcd_wl_resume)(void *, const char *, int, s64, int, int); typedef void (*btf_trace_ufshcd_wl_runtime_suspend)(void *, const char *, int, s64, int, int); typedef void (*btf_trace_ufshcd_wl_runtime_resume)(void *, const char *, int, s64, int, int); enum ufs_trace_str_t { UFS_CMD_SEND = 0, UFS_CMD_COMP = 1, UFS_DEV_COMP = 2, UFS_QUERY_SEND = 3, UFS_QUERY_COMP = 4, UFS_QUERY_ERR = 5, UFS_TM_SEND = 6, UFS_TM_COMP = 7, UFS_TM_ERR = 8, }; typedef void (*btf_trace_ufshcd_command)(void *, const char *, enum ufs_trace_str_t, unsigned int, u32, u32, int, u32, u64, u8, u8); typedef void (*btf_trace_ufshcd_uic_command)(void *, const char *, enum ufs_trace_str_t, u32, u32, u32, u32); enum ufs_trace_tsf_t { UFS_TSF_CDB = 0, UFS_TSF_OSF = 1, UFS_TSF_TM_INPUT = 2, UFS_TSF_TM_OUTPUT = 3, }; typedef void (*btf_trace_ufshcd_upiu)(void *, const char *, enum 
ufs_trace_str_t, void *, void *, enum ufs_trace_tsf_t); typedef void (*btf_trace_ufshcd_exception_event)(void *, const char *, u16); enum ufs_dev_pwr_mode { UFS_ACTIVE_PWR_MODE = 1, UFS_SLEEP_PWR_MODE = 2, UFS_POWERDOWN_PWR_MODE = 3, UFS_DEEPSLEEP_PWR_MODE = 4, }; enum uic_link_state { UIC_LINK_OFF_STATE = 0, UIC_LINK_ACTIVE_STATE = 1, UIC_LINK_HIBERN8_STATE = 2, UIC_LINK_BROKEN_STATE = 3, }; struct ufs_pm_lvl_states { enum ufs_dev_pwr_mode dev_state; enum uic_link_state link_state; }; struct ufs_dev_quirk { u16 wmanufacturerid; const u8 *model; unsigned int quirk; }; enum ufs_ref_clk_freq { REF_CLK_FREQ_19_2_MHZ = 0, REF_CLK_FREQ_26_MHZ = 1, REF_CLK_FREQ_38_4_MHZ = 2, REF_CLK_FREQ_52_MHZ = 3, REF_CLK_FREQ_INVAL = -1, }; struct ufs_ref_clk { unsigned long freq_hz; enum ufs_ref_clk_freq val; }; struct devfreq_simple_ondemand_data { unsigned int upthreshold; unsigned int downdifferential; }; struct ufs_hba_variant_params { struct devfreq_dev_profile devfreq_profile; struct devfreq_simple_ondemand_data ondemand_data; u16 hba_enable_delay_us; u32 wb_flush_threshold; }; enum ufs_pm_level { UFS_PM_LVL_0 = 0, UFS_PM_LVL_1 = 1, UFS_PM_LVL_2 = 2, UFS_PM_LVL_3 = 3, UFS_PM_LVL_4 = 4, UFS_PM_LVL_5 = 5, UFS_PM_LVL_6 = 6, UFS_PM_LVL_MAX = 7, }; enum ufs_notify_change_status { PRE_CHANGE = 0, POST_CHANGE = 1, }; enum uic_cmd_dme { UIC_CMD_DME_GET = 1, UIC_CMD_DME_SET = 2, UIC_CMD_DME_PEER_GET = 3, UIC_CMD_DME_PEER_SET = 4, UIC_CMD_DME_POWERON = 16, UIC_CMD_DME_POWEROFF = 17, UIC_CMD_DME_ENABLE = 18, UIC_CMD_DME_RESET = 20, UIC_CMD_DME_END_PT_RST = 21, UIC_CMD_DME_LINK_STARTUP = 22, UIC_CMD_DME_HIBER_ENTER = 23, UIC_CMD_DME_HIBER_EXIT = 24, UIC_CMD_DME_TEST_MODE = 26, }; enum ufs_pm_op { UFS_RUNTIME_PM = 0, UFS_SYSTEM_PM = 1, UFS_SHUTDOWN_PM = 2, }; enum ufs_event_type { UFS_EVT_PA_ERR = 0, UFS_EVT_DL_ERR = 1, UFS_EVT_NL_ERR = 2, UFS_EVT_TL_ERR = 3, UFS_EVT_DME_ERR = 4, UFS_EVT_AUTO_HIBERN8_ERR = 5, UFS_EVT_FATAL_ERR = 6, UFS_EVT_LINK_STARTUP_FAIL = 7, UFS_EVT_RESUME_ERR = 8, UFS_EVT_SUSPEND_ERR = 9, UFS_EVT_WL_SUSP_ERR = 10, UFS_EVT_WL_RES_ERR = 11, UFS_EVT_DEV_RESET = 12, UFS_EVT_HOST_RESET = 13, UFS_EVT_ABORT = 14, UFS_EVT_CNT = 15, }; enum ufshcd_state { UFSHCD_STATE_RESET = 0, UFSHCD_STATE_OPERATIONAL = 1, UFSHCD_STATE_EH_SCHEDULED_NON_FATAL = 2, UFSHCD_STATE_EH_SCHEDULED_FATAL = 3, UFSHCD_STATE_ERROR = 4, }; enum dev_cmd_type { DEV_CMD_TYPE_NOP = 0, DEV_CMD_TYPE_QUERY = 1, DEV_CMD_TYPE_RPMB = 2, }; enum clk_gating_state { CLKS_OFF = 0, CLKS_ON = 1, REQ_CLKS_OFF = 2, REQ_CLKS_ON = 3, }; enum bkops_status { BKOPS_STATUS_NO_OP = 0, BKOPS_STATUS_NON_CRITICAL = 1, BKOPS_STATUS_PERF_IMPACT = 2, BKOPS_STATUS_CRITICAL = 3, BKOPS_STATUS_MAX = 3, }; enum { REG_CONTROLLER_CAPABILITIES = 0, REG_MCQCAP = 4, REG_UFS_VERSION = 8, REG_CONTROLLER_DEV_ID = 16, REG_CONTROLLER_PROD_ID = 20, REG_AUTO_HIBERNATE_IDLE_TIMER = 24, REG_INTERRUPT_STATUS = 32, REG_INTERRUPT_ENABLE = 36, REG_CONTROLLER_STATUS = 48, REG_CONTROLLER_ENABLE = 52, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER = 56, REG_UIC_ERROR_CODE_DATA_LINK_LAYER = 60, REG_UIC_ERROR_CODE_NETWORK_LAYER = 64, REG_UIC_ERROR_CODE_TRANSPORT_LAYER = 68, REG_UIC_ERROR_CODE_DME = 72, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL = 76, REG_UTP_TRANSFER_REQ_LIST_BASE_L = 80, REG_UTP_TRANSFER_REQ_LIST_BASE_H = 84, REG_UTP_TRANSFER_REQ_DOOR_BELL = 88, REG_UTP_TRANSFER_REQ_LIST_CLEAR = 92, REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 96, REG_UTP_TASK_REQ_LIST_BASE_L = 112, REG_UTP_TASK_REQ_LIST_BASE_H = 116, REG_UTP_TASK_REQ_DOOR_BELL = 120, REG_UTP_TASK_REQ_LIST_CLEAR = 124, 
REG_UTP_TASK_REQ_LIST_RUN_STOP = 128, REG_UIC_COMMAND = 144, REG_UIC_COMMAND_ARG_1 = 148, REG_UIC_COMMAND_ARG_2 = 152, REG_UIC_COMMAND_ARG_3 = 156, UFSHCI_REG_SPACE_SIZE = 160, REG_UFS_CCAP = 256, REG_UFS_CRYPTOCAP = 260, REG_UFS_MEM_CFG = 768, REG_UFS_MCQ_CFG = 896, REG_UFS_ESILBA = 900, REG_UFS_ESIUBA = 904, UFSHCI_CRYPTO_REG_SPACE_SIZE = 1024, }; enum ufs_unipro_ver { UFS_UNIPRO_VER_RESERVED = 0, UFS_UNIPRO_VER_1_40 = 1, UFS_UNIPRO_VER_1_41 = 2, UFS_UNIPRO_VER_1_6 = 3, UFS_UNIPRO_VER_1_61 = 4, UFS_UNIPRO_VER_1_8 = 5, UFS_UNIPRO_VER_MAX = 6, UFS_UNIPRO_VER_MASK = 15, }; enum ufshcd_quirks { UFSHCD_QUIRK_BROKEN_INTR_AGGR = 1, UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS = 2, UFSHCD_QUIRK_BROKEN_LCC = 4, UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP = 8, UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE = 16, UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION = 32, UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR = 64, UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR = 128, UFSHCI_QUIRK_BROKEN_HCE = 256, UFSHCD_QUIRK_PRDT_BYTE_GRAN = 512, UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR = 1024, UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 = 2048, UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL = 4096, UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 8192, UFSHCD_QUIRK_BROKEN_UIC_CMD = 32768, UFSHCD_QUIRK_SKIP_PH_CONFIGURATION = 65536, UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS = 131072, UFSHCD_QUIRK_HIBERN_FASTAUTO = 262144, UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH = 524288, UFSHCD_QUIRK_MCQ_BROKEN_INTR = 1048576, UFSHCD_QUIRK_MCQ_BROKEN_RTC = 2097152, }; enum query_opcode { UPIU_QUERY_OPCODE_NOP = 0, UPIU_QUERY_OPCODE_READ_DESC = 1, UPIU_QUERY_OPCODE_WRITE_DESC = 2, UPIU_QUERY_OPCODE_READ_ATTR = 3, UPIU_QUERY_OPCODE_WRITE_ATTR = 4, UPIU_QUERY_OPCODE_READ_FLAG = 5, UPIU_QUERY_OPCODE_SET_FLAG = 6, UPIU_QUERY_OPCODE_CLEAR_FLAG = 7, UPIU_QUERY_OPCODE_TOGGLE_FLAG = 8, }; enum flag_idn { QUERY_FLAG_IDN_FDEVICEINIT = 1, QUERY_FLAG_IDN_PERMANENT_WPE = 2, QUERY_FLAG_IDN_PWR_ON_WPE = 3, QUERY_FLAG_IDN_BKOPS_EN = 4, QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE = 5, QUERY_FLAG_IDN_PURGE_ENABLE = 6, QUERY_FLAG_IDN_RESERVED2 = 7, QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL = 8, QUERY_FLAG_IDN_BUSY_RTC = 9, QUERY_FLAG_IDN_RESERVED3 = 10, QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE = 11, QUERY_FLAG_IDN_WB_EN = 14, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN = 15, QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8 = 16, QUERY_FLAG_IDN_HPB_RESET = 17, QUERY_FLAG_IDN_HPB_EN = 18, }; enum { UPIU_QUERY_FUNC_STANDARD_READ_REQUEST = 1, UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST = 129, }; enum attr_idn { QUERY_ATTR_IDN_BOOT_LU_EN = 0, QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD = 1, QUERY_ATTR_IDN_POWER_MODE = 2, QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 3, QUERY_ATTR_IDN_OOO_DATA_EN = 4, QUERY_ATTR_IDN_BKOPS_STATUS = 5, QUERY_ATTR_IDN_PURGE_STATUS = 6, QUERY_ATTR_IDN_MAX_DATA_IN = 7, QUERY_ATTR_IDN_MAX_DATA_OUT = 8, QUERY_ATTR_IDN_DYN_CAP_NEEDED = 9, QUERY_ATTR_IDN_REF_CLK_FREQ = 10, QUERY_ATTR_IDN_CONF_DESC_LOCK = 11, QUERY_ATTR_IDN_MAX_NUM_OF_RTT = 12, QUERY_ATTR_IDN_EE_CONTROL = 13, QUERY_ATTR_IDN_EE_STATUS = 14, QUERY_ATTR_IDN_SECONDS_PASSED = 15, QUERY_ATTR_IDN_CNTX_CONF = 16, QUERY_ATTR_IDN_CORR_PRG_BLK_NUM = 17, QUERY_ATTR_IDN_RESERVED2 = 18, QUERY_ATTR_IDN_RESERVED3 = 19, QUERY_ATTR_IDN_FFU_STATUS = 20, QUERY_ATTR_IDN_PSA_STATE = 21, QUERY_ATTR_IDN_PSA_DATA_SIZE = 22, QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME = 23, QUERY_ATTR_IDN_CASE_ROUGH_TEMP = 24, QUERY_ATTR_IDN_HIGH_TEMP_BOUND = 25, QUERY_ATTR_IDN_LOW_TEMP_BOUND = 26, QUERY_ATTR_IDN_WB_FLUSH_STATUS = 28, QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 29, QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 30, QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 31, 
QUERY_ATTR_IDN_EXT_IID_EN = 42, QUERY_ATTR_IDN_TIMESTAMP = 48, }; enum desc_idn { QUERY_DESC_IDN_DEVICE = 0, QUERY_DESC_IDN_CONFIGURATION = 1, QUERY_DESC_IDN_UNIT = 2, QUERY_DESC_IDN_RFU_0 = 3, QUERY_DESC_IDN_INTERCONNECT = 4, QUERY_DESC_IDN_STRING = 5, QUERY_DESC_IDN_RFU_1 = 6, QUERY_DESC_IDN_GEOMETRY = 7, QUERY_DESC_IDN_POWER = 8, QUERY_DESC_IDN_HEALTH = 9, QUERY_DESC_IDN_MAX = 10, }; enum desc_header_offset { QUERY_DESC_LENGTH_OFFSET = 0, QUERY_DESC_DESC_TYPE_OFFSET = 1, }; enum ufs_hs_gear_tag { UFS_HS_DONT_CHANGE = 0, UFS_HS_G1 = 1, UFS_HS_G2 = 2, UFS_HS_G3 = 3, UFS_HS_G4 = 4, UFS_HS_G5 = 5, }; enum { FAST_MODE = 1, SLOW_MODE = 2, FASTAUTO_MODE = 4, SLOWAUTO_MODE = 5, UNCHANGED = 7, }; enum { UFSHCD_EH_IN_PROGRESS = 1, }; enum utp_ocs { OCS_SUCCESS = 0, OCS_INVALID_CMD_TABLE_ATTR = 1, OCS_INVALID_PRDT_ATTR = 2, OCS_MISMATCH_DATA_BUF_SIZE = 3, OCS_MISMATCH_RESP_UPIU_SIZE = 4, OCS_PEER_COMM_FAILURE = 5, OCS_ABORTED = 6, OCS_FATAL_ERROR = 7, OCS_DEVICE_FATAL_ERROR = 8, OCS_INVALID_CRYPTO_CONFIG = 9, OCS_GENERAL_CRYPTO_ERROR = 10, OCS_INVALID_COMMAND_STATUS = 15, }; enum { UTP_CMD_TYPE_SCSI = 0, UTP_CMD_TYPE_UFS = 1, UTP_CMD_TYPE_DEV_MANAGE = 2, }; enum { UTP_CMD_TYPE_UFS_STORAGE = 1, }; enum { MASK_OCS = 15, }; enum upiu_request_transaction { UPIU_TRANSACTION_NOP_OUT = 0, UPIU_TRANSACTION_COMMAND = 1, UPIU_TRANSACTION_DATA_OUT = 2, UPIU_TRANSACTION_TASK_REQ = 4, UPIU_TRANSACTION_QUERY_REQ = 22, }; enum { UFS_UPIU_REPORT_LUNS_WLUN = 129, UFS_UPIU_UFS_DEVICE_WLUN = 208, UFS_UPIU_BOOT_WLUN = 176, UFS_UPIU_RPMB_WLUN = 196, }; enum { MASK_TRANSFER_REQUESTS_SLOTS = 31, MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 458752, MASK_EHSLUTRD_SUPPORTED = 4194304, MASK_AUTO_HIBERN8_SUPPORT = 8388608, MASK_64_ADDRESSING_SUPPORT = 16777216, MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 33554432, MASK_UIC_DME_TEST_MODE_SUPPORT = 67108864, MASK_CRYPTO_SUPPORT = 268435456, MASK_MCQ_SUPPORT = 1073741824, }; enum { UFS_ABORT_TASK = 1, UFS_ABORT_TASK_SET = 2, UFS_CLEAR_TASK_SET = 4, UFS_LOGICAL_RESET = 8, UFS_QUERY_TASK = 128, UFS_QUERY_TASK_SET = 129, }; enum { UPIU_TASK_MANAGEMENT_FUNC_COMPL = 0, UPIU_TASK_MANAGEMENT_FUNC_NOT_SUPPORTED = 4, UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED = 8, UPIU_TASK_MANAGEMENT_FUNC_FAILED = 5, UPIU_INCORRECT_LOGICAL_UNIT_NO = 9, }; enum { UFSHCD_MAX_CHANNEL = 0, UFSHCD_MAX_ID = 1, UFSHCD_CMD_PER_LUN = 31, UFSHCD_CAN_QUEUE = 31, }; enum ufshcd_caps { UFSHCD_CAP_CLK_GATING = 1, UFSHCD_CAP_HIBERN8_WITH_CLK_GATING = 2, UFSHCD_CAP_CLK_SCALING = 4, UFSHCD_CAP_AUTO_BKOPS_SUSPEND = 8, UFSHCD_CAP_INTR_AGGR = 16, UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND = 32, UFSHCD_CAP_RPM_AUTOSUSPEND = 64, UFSHCD_CAP_WB_EN = 128, UFSHCD_CAP_CRYPTO = 256, UFSHCD_CAP_AGGR_POWER_COLLAPSE = 512, UFSHCD_CAP_DEEPSLEEP = 1024, UFSHCD_CAP_TEMP_NOTIF = 2048, UFSHCD_CAP_WB_WITH_CLK_SCALING = 4096, }; enum { UIC_CMD_TIMEOUT_DEFAULT = 500, UIC_CMD_TIMEOUT_MAX = 2000, }; enum { PWR_OK = 0, PWR_LOCAL = 1, PWR_REMOTE = 2, PWR_BUSY = 3, PWR_ERROR_CAP = 4, PWR_FATAL_ERROR = 5, }; enum { INTERRUPT_MASK_ALL_VER_10 = 200703, INTERRUPT_MASK_RW_VER_10 = 196608, INTERRUPT_MASK_ALL_VER_11 = 204799, INTERRUPT_MASK_ALL_VER_21 = 466943, }; enum ufshcd_android_quirks { UFSHCD_ANDROID_QUIRK_CUSTOM_CRYPTO_PROFILE = 1, UFSHCD_ANDROID_QUIRK_BROKEN_CRYPTO_ENABLE = 2, UFSHCD_ANDROID_QUIRK_KEYS_IN_PRDT = 4, }; enum { UPIU_RSP_FLAG_UNDERFLOW = 32, UPIU_RSP_FLAG_OVERFLOW = 64, }; enum upiu_response_transaction { UPIU_TRANSACTION_NOP_IN = 32, UPIU_TRANSACTION_RESPONSE = 33, UPIU_TRANSACTION_DATA_IN = 34, UPIU_TRANSACTION_TASK_RSP = 36, 
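/*
 * The MASK_* constants above decode REG_CONTROLLER_CAPABILITIES: bits 4:0
 * hold the number of UTP transfer request slots minus one, and bits 18:16 the
 * number of task management request slots minus one, which is how the host
 * driver derives its queue depths. A hedged sketch of that decoding (the
 * helper name is illustrative, not a kernel API):
 *
 *   static inline void decode_ufshci_caps(u32 caps, int *nutrs, int *nutmrs)
 *   {
 *           *nutrs  = (caps & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
 *           *nutmrs = ((caps & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
 *   }
 */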
UPIU_TRANSACTION_READY_XFER = 49, UPIU_TRANSACTION_QUERY_RSP = 54, UPIU_TRANSACTION_REJECT_UPIU = 63, }; enum { MASK_EE_STATUS = 65535, MASK_EE_DYNCAP_EVENT = 1, MASK_EE_SYSPOOL_EVENT = 2, MASK_EE_URGENT_BKOPS = 4, MASK_EE_TOO_HIGH_TEMP = 8, MASK_EE_TOO_LOW_TEMP = 16, MASK_EE_WRITEBOOSTER_EVENT = 32, MASK_EE_PERFORMANCE_THROTTLING = 64, }; enum { WB_BUF_MODE_LU_DEDICATED = 0, WB_BUF_MODE_SHARED = 1, }; enum { UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1, }; enum { UFSHCD_UIC_DL_PA_INIT_ERROR = 1, UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = 2, UFSHCD_UIC_DL_TCx_REPLAY_ERROR = 4, UFSHCD_UIC_NL_ERROR = 8, UFSHCD_UIC_TL_ERROR = 16, UFSHCD_UIC_DME_ERROR = 32, UFSHCD_UIC_PA_GENERIC_ERROR = 64, }; enum utp_data_direction { UTP_NO_DATA_TRANSFER = 0, UTP_HOST_TO_DEVICE = 1, UTP_DEVICE_TO_HOST = 2, }; enum { UPIU_CMD_FLAGS_NONE = 0, UPIU_CMD_FLAGS_WRITE = 32, UPIU_CMD_FLAGS_READ = 64, }; enum { MASK_TM_SERVICE_RESP = 255, }; enum ufs_pwm_gear_tag { UFS_PWM_DONT_CHANGE = 0, UFS_PWM_G1 = 1, UFS_PWM_G2 = 2, UFS_PWM_G3 = 3, UFS_PWM_G4 = 4, UFS_PWM_G5 = 5, UFS_PWM_G6 = 6, UFS_PWM_G7 = 7, }; enum ufs_lanes { UFS_LANE_DONT_CHANGE = 0, UFS_LANE_1 = 1, UFS_LANE_2 = 2, }; enum geometry_desc_param { GEOMETRY_DESC_PARAM_LEN = 0, GEOMETRY_DESC_PARAM_TYPE = 1, GEOMETRY_DESC_PARAM_DEV_CAP = 4, GEOMETRY_DESC_PARAM_MAX_NUM_LUN = 12, GEOMETRY_DESC_PARAM_SEG_SIZE = 13, GEOMETRY_DESC_PARAM_ALLOC_UNIT_SIZE = 17, GEOMETRY_DESC_PARAM_MIN_BLK_SIZE = 18, GEOMETRY_DESC_PARAM_OPT_RD_BLK_SIZE = 19, GEOMETRY_DESC_PARAM_OPT_WR_BLK_SIZE = 20, GEOMETRY_DESC_PARAM_MAX_IN_BUF_SIZE = 21, GEOMETRY_DESC_PARAM_MAX_OUT_BUF_SIZE = 22, GEOMETRY_DESC_PARAM_RPMB_RW_SIZE = 23, GEOMETRY_DESC_PARAM_DYN_CAP_RSRC_PLC = 24, GEOMETRY_DESC_PARAM_DATA_ORDER = 25, GEOMETRY_DESC_PARAM_MAX_NUM_CTX = 26, GEOMETRY_DESC_PARAM_TAG_UNIT_SIZE = 27, GEOMETRY_DESC_PARAM_TAG_RSRC_SIZE = 28, GEOMETRY_DESC_PARAM_SEC_RM_TYPES = 29, GEOMETRY_DESC_PARAM_MEM_TYPES = 30, GEOMETRY_DESC_PARAM_SCM_MAX_NUM_UNITS = 32, GEOMETRY_DESC_PARAM_SCM_CAP_ADJ_FCTR = 36, GEOMETRY_DESC_PARAM_NPM_MAX_NUM_UNITS = 38, GEOMETRY_DESC_PARAM_NPM_CAP_ADJ_FCTR = 42, GEOMETRY_DESC_PARAM_ENM1_MAX_NUM_UNITS = 44, GEOMETRY_DESC_PARAM_ENM1_CAP_ADJ_FCTR = 48, GEOMETRY_DESC_PARAM_ENM2_MAX_NUM_UNITS = 50, GEOMETRY_DESC_PARAM_ENM2_CAP_ADJ_FCTR = 54, GEOMETRY_DESC_PARAM_ENM3_MAX_NUM_UNITS = 56, GEOMETRY_DESC_PARAM_ENM3_CAP_ADJ_FCTR = 60, GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS = 62, GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR = 66, GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE = 68, GEOMETRY_DESC_PARAM_HPB_REGION_SIZE = 72, GEOMETRY_DESC_PARAM_HPB_NUMBER_LU = 73, GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE = 74, GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS = 75, GEOMETRY_DESC_PARAM_WB_MAX_ALLOC_UNITS = 79, GEOMETRY_DESC_PARAM_WB_MAX_WB_LUNS = 83, GEOMETRY_DESC_PARAM_WB_BUFF_CAP_ADJ = 84, GEOMETRY_DESC_PARAM_WB_SUP_RED_TYPE = 85, GEOMETRY_DESC_PARAM_WB_SUP_WB_TYPE = 86, }; enum device_desc_param { DEVICE_DESC_PARAM_LEN = 0, DEVICE_DESC_PARAM_TYPE = 1, DEVICE_DESC_PARAM_DEVICE_TYPE = 2, DEVICE_DESC_PARAM_DEVICE_CLASS = 3, DEVICE_DESC_PARAM_DEVICE_SUB_CLASS = 4, DEVICE_DESC_PARAM_PRTCL = 5, DEVICE_DESC_PARAM_NUM_LU = 6, DEVICE_DESC_PARAM_NUM_WLU = 7, DEVICE_DESC_PARAM_BOOT_ENBL = 8, DEVICE_DESC_PARAM_DESC_ACCSS_ENBL = 9, DEVICE_DESC_PARAM_INIT_PWR_MODE = 10, DEVICE_DESC_PARAM_HIGH_PR_LUN = 11, DEVICE_DESC_PARAM_SEC_RMV_TYPE = 12, DEVICE_DESC_PARAM_SEC_LU = 13, DEVICE_DESC_PARAM_BKOP_TERM_LT = 14, DEVICE_DESC_PARAM_ACTVE_ICC_LVL = 15, DEVICE_DESC_PARAM_SPEC_VER = 16, DEVICE_DESC_PARAM_MANF_DATE = 18, DEVICE_DESC_PARAM_MANF_NAME = 20, 
DEVICE_DESC_PARAM_PRDCT_NAME = 21, DEVICE_DESC_PARAM_SN = 22, DEVICE_DESC_PARAM_OEM_ID = 23, DEVICE_DESC_PARAM_MANF_ID = 24, DEVICE_DESC_PARAM_UD_OFFSET = 26, DEVICE_DESC_PARAM_UD_LEN = 27, DEVICE_DESC_PARAM_RTT_CAP = 28, DEVICE_DESC_PARAM_FRQ_RTC = 29, DEVICE_DESC_PARAM_UFS_FEAT = 31, DEVICE_DESC_PARAM_FFU_TMT = 32, DEVICE_DESC_PARAM_Q_DPTH = 33, DEVICE_DESC_PARAM_DEV_VER = 34, DEVICE_DESC_PARAM_NUM_SEC_WPA = 36, DEVICE_DESC_PARAM_PSA_MAX_DATA = 37, DEVICE_DESC_PARAM_PSA_TMT = 41, DEVICE_DESC_PARAM_PRDCT_REV = 42, DEVICE_DESC_PARAM_HPB_VER = 64, DEVICE_DESC_PARAM_HPB_CONTROL = 66, DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP = 79, DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN = 83, DEVICE_DESC_PARAM_WB_TYPE = 84, DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS = 85, }; enum { UFS_DEV_LOW_TEMP_NOTIF = 16, UFS_DEV_HIGH_TEMP_NOTIF = 32, UFS_DEV_EXT_TEMP_NOTIF = 64, UFS_DEV_HPB_SUPPORT = 128, UFS_DEV_WRITE_BOOSTER_SUP = 256, UFS_DEV_EXT_IID_SUP = 65536, }; enum unit_desc_param { UNIT_DESC_PARAM_LEN = 0, UNIT_DESC_PARAM_TYPE = 1, UNIT_DESC_PARAM_UNIT_INDEX = 2, UNIT_DESC_PARAM_LU_ENABLE = 3, UNIT_DESC_PARAM_BOOT_LUN_ID = 4, UNIT_DESC_PARAM_LU_WR_PROTECT = 5, UNIT_DESC_PARAM_LU_Q_DEPTH = 6, UNIT_DESC_PARAM_PSA_SENSITIVE = 7, UNIT_DESC_PARAM_MEM_TYPE = 8, UNIT_DESC_PARAM_DATA_RELIABILITY = 9, UNIT_DESC_PARAM_LOGICAL_BLK_SIZE = 10, UNIT_DESC_PARAM_LOGICAL_BLK_COUNT = 11, UNIT_DESC_PARAM_ERASE_BLK_SIZE = 19, UNIT_DESC_PARAM_PROVISIONING_TYPE = 23, UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 24, UNIT_DESC_PARAM_CTX_CAPABILITIES = 32, UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 34, UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS = 35, UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF = 37, UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS = 39, UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS = 41, }; enum { PA_HS_MODE_A = 1, PA_HS_MODE_B = 2, }; enum power_desc_param_offset { PWR_DESC_LEN = 0, PWR_DESC_TYPE = 1, PWR_DESC_ACTIVE_LVLS_VCC_0 = 2, PWR_DESC_ACTIVE_LVLS_VCCQ_0 = 34, PWR_DESC_ACTIVE_LVLS_VCCQ2_0 = 66, }; enum { UFSHCD_NANO_AMP = 0, UFSHCD_MICRO_AMP = 1, UFSHCD_MILI_AMP = 2, UFSHCD_AMP = 3, }; enum { UPIU_COMMAND_SET_TYPE_SCSI = 0, UPIU_COMMAND_SET_TYPE_UFS = 1, UPIU_COMMAND_SET_TYPE_QUERY = 2, }; enum ufs_lu_wp_type { UFS_LU_NO_WP = 0, UFS_LU_POWER_ON_WP = 1, UFS_LU_PERM_WP = 2, }; enum rpmb_unit_desc_param { RPMB_UNIT_DESC_PARAM_LEN = 0, RPMB_UNIT_DESC_PARAM_TYPE = 1, RPMB_UNIT_DESC_PARAM_UNIT_INDEX = 2, RPMB_UNIT_DESC_PARAM_LU_ENABLE = 3, RPMB_UNIT_DESC_PARAM_BOOT_LUN_ID = 4, RPMB_UNIT_DESC_PARAM_LU_WR_PROTECT = 5, RPMB_UNIT_DESC_PARAM_LU_Q_DEPTH = 6, RPMB_UNIT_DESC_PARAM_PSA_SENSITIVE = 7, RPMB_UNIT_DESC_PARAM_MEM_TYPE = 8, RPMB_UNIT_DESC_PARAM_REGION_EN = 9, RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_SIZE = 10, RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_COUNT = 11, RPMB_UNIT_DESC_PARAM_REGION0_SIZE = 19, RPMB_UNIT_DESC_PARAM_REGION1_SIZE = 20, RPMB_UNIT_DESC_PARAM_REGION2_SIZE = 21, RPMB_UNIT_DESC_PARAM_REGION3_SIZE = 22, RPMB_UNIT_DESC_PARAM_PROVISIONING_TYPE = 23, RPMB_UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 24, }; enum { MASK_EXT_IID_SUPPORT = 1024, }; enum { TASK_REQ_UPIU_SIZE_DWORDS = 8, TASK_RSP_UPIU_SIZE_DWORDS = 8, ALIGNED_UPIU_SIZE = 512, }; struct trace_event_raw_ufshcd_clk_gating { struct trace_entry ent; u32 __data_loc_dev_name; int state; char __data[0]; }; struct trace_event_raw_ufshcd_clk_scaling { struct trace_entry ent; u32 __data_loc_dev_name; u32 __data_loc_state; u32 __data_loc_clk; u32 prev_state; u32 curr_state; char __data[0]; }; struct trace_event_raw_ufshcd_auto_bkops_state { struct trace_entry ent; u32 __data_loc_dev_name; u32 __data_loc_state; char __data[0]; }; struct 
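/*
 * The trace_event_raw_ufshcd_* records above describe the in-kernel layout of
 * the ufs tracepoints, so they can double as the context type of a classic
 * SEC("tracepoint/...") program. The u32 __data_loc_dev_name fields use the
 * usual ftrace __data_loc encoding: the low 16 bits are the offset of the
 * dynamic string from the start of the record, the high 16 bits its length.
 * A minimal sketch under those assumptions (same libbpf headers as the
 * earlier example; the program name is illustrative):
 *
 *   SEC("tracepoint/ufs/ufshcd_clk_gating")
 *   int on_clk_gating(struct trace_event_raw_ufshcd_clk_gating *ctx)
 *   {
 *           // Decode the dynamic dev_name string from its __data_loc slot.
 *           u32 off = ctx->__data_loc_dev_name & 0xffff;
 *           char dev[32] = {};
 *
 *           bpf_probe_read_kernel_str(dev, sizeof(dev), (void *)ctx + off);
 *           bpf_printk("%s: clk_gating state=%d", dev, ctx->state);
 *           return 0;
 *   }
 */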
trace_event_raw_ufshcd_profiling_template { struct trace_entry ent; u32 __data_loc_dev_name; u32 __data_loc_profile_info; s64 time_us; int err; char __data[0]; }; struct trace_event_raw_ufshcd_template { struct trace_entry ent; s64 usecs; int err; u32 __data_loc_dev_name; int dev_state; int link_state; char __data[0]; }; struct trace_event_raw_ufshcd_command { struct trace_entry ent; u32 __data_loc_dev_name; enum ufs_trace_str_t str_t; unsigned int tag; u32 doorbell; u32 hwq_id; u32 intr; u64 lba; int transfer_len; u8 opcode; u8 group_id; char __data[0]; }; struct trace_event_raw_ufshcd_uic_command { struct trace_entry ent; u32 __data_loc_dev_name; enum ufs_trace_str_t str_t; u32 cmd; u32 arg1; u32 arg2; u32 arg3; char __data[0]; }; struct trace_event_raw_ufshcd_upiu { struct trace_entry ent; u32 __data_loc_dev_name; enum ufs_trace_str_t str_t; unsigned char hdr[12]; unsigned char tsf[16]; enum ufs_trace_tsf_t tsf_t; char __data[0]; }; struct trace_event_raw_ufshcd_exception_event { struct trace_entry ent; u32 __data_loc_dev_name; u16 status; char __data[0]; }; struct utp_upiu_cmd { __be32 exp_data_transfer_len; __u8 cdb[16]; }; struct utp_upiu_query { __u8 opcode; __u8 idn; __u8 index; __u8 selector; __be16 reserved_osf; __be16 length; __be32 value; __be32 reserved[2]; }; struct utp_upiu_header { union { struct { __be32 dword_0; __be32 dword_1; __be32 dword_2; }; struct { __u8 transaction_code; __u8 flags; __u8 lun; __u8 task_tag; __u8 command_set_type: 4; __u8 iid: 4; union { __u8 tm_function; __u8 query_function; }; __u8 response; __u8 status; __u8 ehs_length; __u8 device_information; __be16 data_segment_length; }; }; }; struct utp_upiu_req { struct utp_upiu_header header; union { struct utp_upiu_cmd sc; struct utp_upiu_query qr; struct utp_upiu_query uc; }; }; struct ufs_clk_info { struct list_head list; struct clk *clk; const char *name; u32 max_freq; u32 min_freq; u32 curr_freq; bool keep_link_active; bool enabled; }; struct utp_upiu_query_v4_0 { __u8 opcode; __u8 idn; __u8 index; __u8 selector; __u8 osf3; __u8 osf4; __be16 osf5; __be32 osf6; __be32 osf7; __be32 reserved; }; struct utp_cmd_rsp { __be32 residual_transfer_count; __be32 reserved[4]; __be16 sense_data_len; u8 sense_data[18]; }; struct utp_upiu_rsp { struct utp_upiu_header header; union { struct utp_cmd_rsp sr; struct utp_upiu_query qr; }; }; struct ufshcd_sg_entry { __le64 addr; __le32 reserved; __le32 size; }; struct ufs_event_hist { int pos; u32 val[8]; u64 tstamp[8]; unsigned long long cnt; }; struct ufs_stats { u32 last_intr_status; u64 last_intr_ts; u32 hibern8_exit_cnt; u64 last_hibern8_exit_tstamp; struct ufs_event_hist event[15]; }; struct ufs_query_req { u8 query_func; struct utp_upiu_query upiu_req; }; struct ufs_query_res { struct utp_upiu_query upiu_res; }; struct ufs_query { struct ufs_query_req request; u8 *descriptor; struct ufs_query_res response; }; struct ufs_dev_cmd { enum dev_cmd_type type; struct mutex lock; struct completion *complete; struct ufs_query query; }; struct ufs_dev_info { bool f_power_on_wp_en; bool is_lu_power_on_wp; u8 max_lu_supported; u16 wmanufacturerid; u8 *model; u16 wspecversion; u32 clk_gating_wait_us; u8 bqueuedepth; bool wb_enabled; bool wb_buf_flush_enabled; u8 wb_dedicated_lu; u8 wb_buffer_type; bool b_rpm_dev_flush_capable; u8 b_presrv_uspc_en; bool b_advanced_rpmb_en; bool b_ext_iid_en; u64 android_oem_data1; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; struct ufs_vreg; struct ufs_vreg_info { struct ufs_vreg *vcc; struct ufs_vreg *vccq; struct ufs_vreg 
*vccq2; struct ufs_vreg *vdd_hba; }; struct ufs_pa_layer_attr { u32 gear_rx; u32 gear_tx; u32 lane_rx; u32 lane_tx; u32 pwr_rx; u32 pwr_tx; u32 hs_rate; }; struct ufs_pwr_mode_info { bool is_valid; struct ufs_pa_layer_attr info; }; struct ufs_clk_gating { struct delayed_work gate_work; struct work_struct ungate_work; enum clk_gating_state state; unsigned long delay_ms; bool is_suspended; struct device_attribute delay_attr; struct device_attribute enable_attr; bool is_enabled; bool is_initialized; int active_reqs; struct workqueue_struct *clk_gating_workq; u64 android_kabi_reserved1; }; struct ufs_clk_scaling { int active_reqs; unsigned long tot_busy_t; ktime_t window_start_t; ktime_t busy_start_t; struct device_attribute enable_attr; struct ufs_pa_layer_attr saved_pwr_info; struct workqueue_struct *workq; struct work_struct suspend_work; struct work_struct resume_work; u32 min_gear; bool is_enabled; bool is_allowed; bool is_initialized; bool is_busy_started; bool is_suspended; bool suspend_on_no_request; u64 android_kabi_reserved1; }; struct ufs_hba_monitor { unsigned long chunk_size; unsigned long nr_sec_rw[2]; ktime_t total_busy[2]; unsigned long nr_req[2]; ktime_t lat_sum[2]; ktime_t lat_max[2]; ktime_t lat_min[2]; u32 nr_queued[2]; ktime_t busy_start_ts[2]; ktime_t enabled_ts; bool enabled; }; union ufs_crypto_capabilities { __le32 reg_val; struct { u8 num_crypto_cap; u8 config_count; u8 reserved; u8 config_array_ptr; }; }; struct ufshcd_res_info { const char *name; struct resource *resource; void *base; }; struct ufshcd_mcq_opr_info_t { unsigned long offset; unsigned long stride; void *base; }; struct utp_transfer_cmd_desc; struct utp_transfer_req_desc; struct utp_task_req_desc; struct ufshcd_lrb; struct ufs_hba_variant_ops; struct uic_command; union ufs_crypto_cap_entry; struct ufs_hw_queue; struct ufs_hba { void *mmio_base; struct utp_transfer_cmd_desc *ucdl_base_addr; struct utp_transfer_req_desc *utrdl_base_addr; struct utp_task_req_desc *utmrdl_base_addr; dma_addr_t ucdl_dma_addr; dma_addr_t utrdl_dma_addr; dma_addr_t utmrdl_dma_addr; struct Scsi_Host *host; struct device *dev; struct scsi_device *ufs_device_wlun; enum ufs_dev_pwr_mode curr_dev_pwr_mode; enum uic_link_state uic_link_state; enum ufs_pm_level rpm_lvl; enum ufs_pm_level spm_lvl; int pm_op_in_progress; u32 ahit; struct ufshcd_lrb *lrb; unsigned long outstanding_tasks; spinlock_t outstanding_lock; unsigned long outstanding_reqs; u32 capabilities; int nutrs; u32 mcq_capabilities; int nutmrs; u32 reserved_slot; u32 ufs_version; const struct ufs_hba_variant_ops *vops; struct ufs_hba_variant_params *vps; void *priv; size_t sg_entry_size; unsigned int irq; bool is_irq_enabled; enum ufs_ref_clk_freq dev_ref_clk_freq; unsigned int quirks; unsigned int android_quirks; unsigned int dev_quirks; struct blk_mq_tag_set tmf_tag_set; struct request_queue *tmf_queue; struct request **tmf_rqs; struct uic_command *active_uic_cmd; struct mutex uic_cmd_mutex; struct completion *uic_async_done; enum ufshcd_state ufshcd_state; u32 eh_flags; u32 intr_mask; u16 ee_ctrl_mask; u16 ee_drv_mask; u16 ee_usr_mask; struct mutex ee_ctrl_mutex; bool is_powered; bool shutting_down; struct semaphore host_sem; struct workqueue_struct *eh_wq; struct work_struct eh_work; struct work_struct eeh_work; u32 errors; u32 uic_error; u32 saved_err; u32 saved_uic_err; struct ufs_stats ufs_stats; bool force_reset; bool force_pmc; bool silence_err_logs; struct ufs_dev_cmd dev_cmd; ktime_t last_dme_cmd_tstamp; int nop_out_timeout; struct ufs_dev_info dev_info; bool 
auto_bkops_enabled; struct ufs_vreg_info vreg_info; struct list_head clk_list_head; int req_abort_count; u32 lanes_per_direction; struct ufs_pa_layer_attr pwr_info; struct ufs_pwr_mode_info max_pwr_info; struct ufs_clk_gating clk_gating; u32 caps; struct devfreq *devfreq; struct ufs_clk_scaling clk_scaling; bool system_suspending; bool is_sys_suspended; enum bkops_status urgent_bkops_lvl; bool is_urgent_bkops_lvl_checked; struct mutex wb_mutex; struct rw_semaphore clk_scaling_lock; atomic_t scsi_block_reqs_cnt; struct device bsg_dev; struct request_queue *bsg_queue; struct delayed_work rpm_dev_flush_recheck_work; struct ufs_hba_monitor monitor; union ufs_crypto_capabilities crypto_capabilities; union ufs_crypto_cap_entry *crypto_cap_array; u32 crypto_cfg_register; struct blk_crypto_profile crypto_profile; struct dentry *debugfs_root; struct delayed_work debugfs_ee_work; u32 debugfs_ee_rate_limit_ms; u32 luns_avail; unsigned int nr_hw_queues; unsigned int nr_queues[3]; bool complete_put; bool ext_iid_sup; bool scsi_host_added; bool mcq_sup; bool mcq_enabled; struct ufshcd_res_info res[7]; void *mcq_base; struct ufs_hw_queue *uhq; struct ufs_hw_queue *dev_cmd_queue; struct ufshcd_mcq_opr_info_t mcq_opr[4]; u64 android_oem_data1; }; struct utp_transfer_cmd_desc { u8 command_upiu[512]; u8 response_upiu[512]; u8 prd_table[0]; }; struct request_desc_header { u8 cci; u8 ehs_length; u8 reserved2: 7; u8 enable_crypto: 1; u8 interrupt: 1; u8 data_direction: 2; u8 reserved1: 1; u8 command_type: 4; __le32 dunl; u8 ocs; u8 cds; __le16 ldbc; __le32 dunu; }; struct utp_transfer_req_desc { struct request_desc_header header; __le64 command_desc_base_addr; __le16 response_upiu_length; __le16 response_upiu_offset; __le16 prd_table_length; __le16 prd_table_offset; }; struct utp_task_req_desc { struct request_desc_header header; struct { struct utp_upiu_header req_header; __be32 input_param1; __be32 input_param2; __be32 input_param3; __be32 __reserved1[2]; } upiu_req; struct { struct utp_upiu_header rsp_header; __be32 output_param1; __be32 output_param2; __be32 __reserved2[3]; } upiu_rsp; }; struct ufshcd_lrb { struct utp_transfer_req_desc *utr_descriptor_ptr; struct utp_upiu_req *ucd_req_ptr; struct utp_upiu_rsp *ucd_rsp_ptr; struct ufshcd_sg_entry *ucd_prdt_ptr; dma_addr_t utrd_dma_addr; dma_addr_t ucd_req_dma_addr; dma_addr_t ucd_rsp_dma_addr; dma_addr_t ucd_prdt_dma_addr; struct scsi_cmnd *cmd; int scsi_status; int command_type; int task_tag; u8 lun; bool intr_cmd; ktime_t issue_time_stamp; u64 issue_time_stamp_local_clock; ktime_t compl_time_stamp; u64 compl_time_stamp_local_clock; int crypto_key_slot; u64 data_unit_num; bool req_abort_skip; u64 android_kabi_reserved1; }; union ufs_crypto_cfg_entry; struct ufs_hba_variant_ops { const char *name; int (*init)(struct ufs_hba *); void (*exit)(struct ufs_hba *); u32 (*get_ufs_hci_version)(struct ufs_hba *); int (*clk_scale_notify)(struct ufs_hba *, bool, enum ufs_notify_change_status); int (*setup_clocks)(struct ufs_hba *, bool, enum ufs_notify_change_status); int (*hce_enable_notify)(struct ufs_hba *, enum ufs_notify_change_status); int (*link_startup_notify)(struct ufs_hba *, enum ufs_notify_change_status); int (*pwr_change_notify)(struct ufs_hba *, enum ufs_notify_change_status, struct ufs_pa_layer_attr *, struct ufs_pa_layer_attr *); void (*setup_xfer_req)(struct ufs_hba *, int, bool); void (*setup_task_mgmt)(struct ufs_hba *, int, u8); void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme, enum ufs_notify_change_status); int (*apply_dev_quirks)(struct 
ufs_hba *); void (*fixup_dev_quirks)(struct ufs_hba *); int (*suspend)(struct ufs_hba *, enum ufs_pm_op, enum ufs_notify_change_status); int (*resume)(struct ufs_hba *, enum ufs_pm_op); void (*dbg_register_dump)(struct ufs_hba *); int (*phy_initialization)(struct ufs_hba *); int (*device_reset)(struct ufs_hba *); void (*config_scaling_param)(struct ufs_hba *, struct devfreq_dev_profile *, struct devfreq_simple_ondemand_data *); int (*program_key)(struct ufs_hba *, const union ufs_crypto_cfg_entry *, int); void (*event_notify)(struct ufs_hba *, enum ufs_event_type, void *); void (*reinit_notify)(struct ufs_hba *); int (*mcq_config_resource)(struct ufs_hba *); int (*get_hba_mac)(struct ufs_hba *); int (*op_runtime_config)(struct ufs_hba *); int (*get_outstanding_cqs)(struct ufs_hba *, unsigned long *); int (*config_esi)(struct ufs_hba *); void (*config_scsi_dev)(struct scsi_device *); }; union ufs_crypto_cfg_entry { __le32 reg_val[32]; struct { u8 crypto_key[64]; u8 data_unit_size; u8 crypto_cap_idx; u8 reserved_1; u8 config_enable; u8 reserved_multi_host; u8 reserved_2; u8 vsb[2]; u8 reserved_3[56]; }; }; struct uic_command { u32 command; u32 argument1; u32 argument2; u32 argument3; int cmd_active; struct completion done; }; struct ufs_vreg { struct regulator *reg; const char *name; bool always_on; bool enabled; int max_uA; }; union ufs_crypto_cap_entry { __le32 reg_val; struct { u8 algorithm_id; u8 sdus_mask; u8 key_size; u8 reserved; }; }; struct cq_entry; struct ufs_hw_queue { void *mcq_sq_head; void *mcq_sq_tail; void *mcq_cq_head; void *mcq_cq_tail; struct utp_transfer_req_desc *sqe_base_addr; dma_addr_t sqe_dma_addr; struct cq_entry *cqe_base_addr; dma_addr_t cqe_dma_addr; u32 max_entries; u32 id; u32 sq_tail_slot; spinlock_t sq_lock; u32 cq_tail_slot; u32 cq_head_slot; spinlock_t cq_lock; struct mutex sq_mutex; }; struct cq_entry { __le64 command_desc_base_addr; __le16 response_upiu_length; __le16 response_upiu_offset; __le16 prd_table_length; __le16 prd_table_offset; __le32 status; __le32 reserved[3]; }; struct trace_event_data_offsets_ufshcd_clk_gating { u32 dev_name; }; struct trace_event_data_offsets_ufshcd_template { u32 dev_name; }; struct trace_event_data_offsets_ufshcd_command { u32 dev_name; }; struct trace_event_data_offsets_ufshcd_uic_command { u32 dev_name; }; struct trace_event_data_offsets_ufshcd_upiu { u32 dev_name; }; struct trace_event_data_offsets_ufshcd_exception_event { u32 dev_name; }; struct trace_event_data_offsets_ufshcd_clk_scaling { u32 dev_name; u32 state; u32 clk; }; struct trace_event_data_offsets_ufshcd_auto_bkops_state { u32 dev_name; u32 state; }; struct trace_event_data_offsets_ufshcd_profiling_template { u32 dev_name; u32 profile_info; }; struct uc_string_id { u8 len; u8 type; wchar_t uc[0]; }; struct ufs_arpmb_meta { __be16 req_resp_type; __u8 nonce[16]; __be32 write_counter; __be16 addr_lun; __be16 block_count; __be16 result; } __attribute__((packed)); struct ufs_ehs { __u8 length; __u8 ehs_type; __be16 ehssub_type; struct ufs_arpmb_meta meta; __u8 mac_key[32]; }; enum interconnect_desc_param { INTERCONNECT_DESC_PARAM_LEN = 0, INTERCONNECT_DESC_PARAM_TYPE = 1, INTERCONNECT_DESC_PARAM_UNIPRO_VER = 2, INTERCONNECT_DESC_PARAM_MPHY_VER = 4, }; enum health_desc_param { HEALTH_DESC_PARAM_LEN = 0, HEALTH_DESC_PARAM_TYPE = 1, HEALTH_DESC_PARAM_EOL_INFO = 2, HEALTH_DESC_PARAM_LIFE_TIME_EST_A = 3, HEALTH_DESC_PARAM_LIFE_TIME_EST_B = 4, }; enum ufshcd_mcq_opr { OPR_SQD = 0, OPR_SQIS = 1, OPR_CQD = 2, OPR_CQIS = 3, OPR_MAX = 4, }; enum { REG_CQIS = 0, REG_CQIE 
= 4, }; enum { REG_SQATTR = 0, REG_SQLBA = 4, REG_SQUBA = 8, REG_SQDAO = 12, REG_SQISAO = 16, REG_CQATTR = 32, REG_CQLBA = 36, REG_CQUBA = 40, REG_CQDAO = 44, REG_CQISAO = 48, }; enum { REG_SQHP = 0, REG_SQTP = 4, REG_SQRTC = 8, REG_SQCTI = 12, REG_SQRTS = 16, }; enum { REG_CQHP = 0, REG_CQTP = 4, }; enum { SQ_START = 0, SQ_STOP = 1, SQ_ICU = 2, }; enum { SQ_STS = 1, SQ_CUS = 2, }; struct ufs_debugfs_attr { const char *name; mode_t mode; const struct file_operations *fops; }; enum ufs_bsg_msg_code { UPIU_TRANSACTION_UIC_CMD = 31, UPIU_TRANSACTION_ARPMB_CMD = 32, }; enum ufs_rpmb_op_type { UFS_RPMB_WRITE_KEY = 1, UFS_RPMB_READ_CNT = 2, UFS_RPMB_WRITE = 3, UFS_RPMB_READ = 4, UFS_RPMB_READ_RESP = 5, UFS_RPMB_SEC_CONF_WRITE = 6, UFS_RPMB_SEC_CONF_READ = 7, UFS_RPMB_PURGE_ENABLE = 8, UFS_RPMB_PURGE_STATUS_READ = 9, }; struct ufs_bsg_request { __u32 msgcode; struct utp_upiu_req upiu_req; }; struct ufs_rpmb_request { struct ufs_bsg_request bsg_request; struct ufs_ehs ehs_req; }; struct ufs_bsg_reply { int result; __u32 reply_payload_rcv_len; struct utp_upiu_req upiu_rsp; }; struct ufs_rpmb_reply { struct ufs_bsg_reply bsg_reply; struct ufs_ehs ehs_rsp; }; enum ufs_crypto_alg { UFS_CRYPTO_ALG_AES_XTS = 0, UFS_CRYPTO_ALG_BITLOCKER_AES_CBC = 1, UFS_CRYPTO_ALG_AES_ECB = 2, UFS_CRYPTO_ALG_ESSIV_AES_CBC = 3, }; enum ufs_crypto_key_size { UFS_CRYPTO_KEY_SIZE_INVALID = 0, UFS_CRYPTO_KEY_SIZE_128 = 1, UFS_CRYPTO_KEY_SIZE_192 = 2, UFS_CRYPTO_KEY_SIZE_256 = 3, UFS_CRYPTO_KEY_SIZE_512 = 4, }; struct ufs_crypto_alg_entry { enum ufs_crypto_alg ufs_alg; enum ufs_crypto_key_size ufs_key_size; }; struct ufshcd_dme_attr_val { u32 attr_sel; u32 mib_val; u8 peer; }; enum clk_div_values { DWC_UFS_REG_HCLKDIV_DIV_62_5 = 62, DWC_UFS_REG_HCLKDIV_DIV_125 = 125, DWC_UFS_REG_HCLKDIV_DIV_200 = 200, }; enum dwc_specific_registers { DWC_UFS_REG_HCLKDIV = 252, }; enum link_status { UFSHCD_LINK_IS_DOWN = 1, UFSHCD_LINK_IS_UP = 2, }; enum intel_ufs_dsm_func_id { INTEL_DSM_FNS = 0, INTEL_DSM_RESET = 1, }; struct ufs_host { void (*late_init)(struct ufs_hba *); }; struct intel_host { struct ufs_host ufs_host; u32 dsm_fns; u32 active_ltr; u32 idle_ltr; struct dentry *debugfs_root; struct gpio_desc *reset_gpio; }; struct ufs_dev_params { u32 pwm_rx_gear; u32 pwm_tx_gear; u32 hs_rx_gear; u32 hs_tx_gear; u32 rx_lanes; u32 tx_lanes; u32 rx_pwr_pwm; u32 tx_pwr_pwm; u32 rx_pwr_hs; u32 tx_pwr_hs; u32 hs_rate; u32 desired_working_mode; }; enum { UFS_REG_OCPTHRTL = 192, UFS_REG_OOCPR = 196, UFS_REG_CDACFG = 208, UFS_REG_CDATX1 = 212, UFS_REG_CDATX2 = 216, UFS_REG_CDARX1 = 220, UFS_REG_CDARX2 = 224, UFS_REG_CDASTA = 228, UFS_REG_LBMCFG = 240, UFS_REG_LBMSTA = 244, UFS_REG_UFSMODE = 248, UFS_REG_HCLKDIV = 252, }; struct ufs_hisi_host { struct ufs_hba *hba; void *ufs_sys_ctrl; struct reset_control *rst; uint64_t caps; bool in_suspend; }; enum led_default_state { LEDS_DEFSTATE_OFF = 0, LEDS_DEFSTATE_ON = 1, LEDS_DEFSTATE_KEEP = 2, }; struct led_properties { u32 color; bool color_present; const char *function; u32 func_enum; bool func_enum_present; const char *label; }; struct led_lookup_data { struct list_head list; const char *provider; const char *dev_id; const char *con_id; }; struct led_flash_setting { u32 min; u32 max; u32 step; u32 val; }; struct led_flash_ops; struct led_classdev_flash { struct led_classdev led_cdev; const struct led_flash_ops *ops; struct led_flash_setting brightness; struct led_flash_setting timeout; const struct attribute_group *sysfs_groups[5]; }; struct led_flash_ops { int (*flash_brightness_set)(struct 
led_classdev_flash *, u32); int (*flash_brightness_get)(struct led_classdev_flash *, u32 *); int (*strobe_set)(struct led_classdev_flash *, bool); int (*strobe_get)(struct led_classdev_flash *, bool *); int (*timeout_set)(struct led_classdev_flash *, u32); int (*fault_get)(struct led_classdev_flash *, u32 *); }; struct mc_subled; struct led_classdev_mc { struct led_classdev led_cdev; unsigned int num_colors; struct mc_subled *subled_info; }; struct mc_subled { unsigned int color_index; unsigned int brightness; unsigned int intensity; unsigned int channel; }; struct transient_trig_data { int activate; int state; int restore_state; unsigned long duration; struct timer_list timer; struct led_classdev *led_cdev; }; struct scpi_chan; struct scpi_drvinfo { u32 protocol_version; u32 firmware_version; bool is_legacy; int num_chans; int *commands; unsigned long cmd_priority[1]; atomic_t next_chan; struct scpi_ops *scpi_ops; struct scpi_chan *channels; struct scpi_dvfs_info *dvfs[8]; }; struct scpi_xfer; struct scpi_chan { struct mbox_client cl; struct mbox_chan *chan; void *tx_payload; void *rx_payload; struct list_head rx_pending; struct list_head xfers_list; struct scpi_xfer *xfers; spinlock_t rx_lock; struct mutex xfers_lock; u8 token; }; struct scpi_xfer { u32 slot; u32 cmd; u32 status; const void *tx_buf; void *rx_buf; unsigned int tx_len; unsigned int rx_len; struct list_head node; struct completion done; }; enum scpi_drv_cmds { CMD_SCPI_CAPABILITIES = 0, CMD_GET_CLOCK_INFO = 1, CMD_GET_CLOCK_VALUE = 2, CMD_SET_CLOCK_VALUE = 3, CMD_GET_DVFS = 4, CMD_SET_DVFS = 5, CMD_GET_DVFS_INFO = 6, CMD_SENSOR_CAPABILITIES = 7, CMD_SENSOR_INFO = 8, CMD_SENSOR_VALUE = 9, CMD_SET_DEVICE_PWR_STATE = 10, CMD_GET_DEVICE_PWR_STATE = 11, CMD_MAX_COUNT = 12, }; enum scpi_error_codes { SCPI_SUCCESS = 0, SCPI_ERR_PARAM = 1, SCPI_ERR_ALIGN = 2, SCPI_ERR_SIZE = 3, SCPI_ERR_HANDLER = 4, SCPI_ERR_ACCESS = 5, SCPI_ERR_RANGE = 6, SCPI_ERR_TIMEOUT = 7, SCPI_ERR_NOMEM = 8, SCPI_ERR_PWRSTATE = 9, SCPI_ERR_SUPPORT = 10, SCPI_ERR_DEVICE = 11, SCPI_ERR_BUSY = 12, SCPI_ERR_MAX = 13, }; struct scpi_shared_mem { __le32 command; __le32 status; u8 payload[0]; }; struct legacy_clk_set_value { __le32 rate; __le16 id; __le16 reserved; }; struct scp_capabilities { __le32 protocol_version; __le32 event_version; __le32 platform_version; __le32 commands[4]; }; struct legacy_scpi_shared_mem { __le32 status; u8 payload[0]; }; struct clk_get_info { __le16 id; __le16 flags; __le32 min_rate; __le32 max_rate; u8 name[20]; }; struct clk_set_value { __le16 id; __le16 reserved; __le32 rate; }; struct dvfs_set { u8 domain; u8 index; }; struct dvfs_info { u8 domain; u8 opp_count; __le16 latency; struct { __le32 freq; __le32 m_volt; } opps[16]; }; struct _scpi_sensor_info { __le16 sensor_id; u8 class; u8 trigger_type; char name[20]; }; struct dev_pstate_set { __le16 dev_id; u8 pstate; } __attribute__((packed)); struct scmi_requested_dev { const struct scmi_device_id *id_table; struct list_head node; }; struct scmi_xfer; struct scmi_xfer_ops { int (*version_get)(const struct scmi_protocol_handle *, u32 *); int (*xfer_get_init)(const struct scmi_protocol_handle *, u8, size_t, size_t, struct scmi_xfer **); void (*reset_rx_to_maxsz)(const struct scmi_protocol_handle *, struct scmi_xfer *); int (*do_xfer)(const struct scmi_protocol_handle *, struct scmi_xfer *); int (*do_xfer_with_response)(const struct scmi_protocol_handle *, struct scmi_xfer *); void (*xfer_put)(const struct scmi_protocol_handle *, struct scmi_xfer *); }; struct scmi_msg_hdr { u8 id; u8 
protocol_id; u8 type; u16 seq; u32 status; bool poll_completion; }; struct scmi_msg { void *buf; size_t len; }; struct scmi_xfer { int transfer_id; struct scmi_msg_hdr hdr; struct scmi_msg tx; struct scmi_msg rx; struct completion done; struct completion *async_done; bool pending; struct hlist_node node; refcount_t users; atomic_t busy; int state; int flags; spinlock_t lock; void *priv; }; struct scmi_iterator_ops; struct scmi_fc_db_info; struct scmi_proto_helpers_ops { int (*extended_name_get)(const struct scmi_protocol_handle *, u8, u32, u32 *, char *, size_t); void * (*iter_response_init)(const struct scmi_protocol_handle *, struct scmi_iterator_ops *, unsigned int, u8, size_t, void *); int (*iter_response_run)(void *); void (*fastchannel_init)(const struct scmi_protocol_handle *, u8, u32, u32, u32, void **, struct scmi_fc_db_info **); void (*fastchannel_db_ring)(struct scmi_fc_db_info *); int (*get_max_msg_size)(const struct scmi_protocol_handle *); }; struct scmi_iterator_state; struct scmi_iterator_ops { void (*prepare_message)(void *, unsigned int, const void *); int (*update_state)(struct scmi_iterator_state *, const void *, void *); int (*process_response)(const struct scmi_protocol_handle *, const void *, struct scmi_iterator_state *, void *); }; struct scmi_iterator_state { unsigned int desc_index; unsigned int num_returned; unsigned int num_remaining; unsigned int max_resources; unsigned int loop_idx; size_t rx_len; void *priv; }; struct scmi_fc_db_info { int width; u64 set; u64 mask; void *addr; }; typedef void (*btf_trace_scmi_fc_call)(void *, u8, u8, u32, u32, u32); typedef void (*btf_trace_scmi_xfer_begin)(void *, int, u8, u8, u16, bool); typedef void (*btf_trace_scmi_xfer_response_wait)(void *, int, u8, u8, u16, u32, bool); typedef void (*btf_trace_scmi_xfer_end)(void *, int, u8, u8, u16, int); typedef void (*btf_trace_scmi_rx_done)(void *, int, u8, u8, u16, u8); typedef void (*btf_trace_scmi_msg_dump)(void *, int, u8, u8, u8, unsigned char *, u16, int, void *, size_t); enum scmi_error_codes { SCMI_SUCCESS = 0, SCMI_ERR_SUPPORT = -1, SCMI_ERR_PARAMS = -2, SCMI_ERR_ACCESS = -3, SCMI_ERR_ENTRY = -4, SCMI_ERR_RANGE = -5, SCMI_ERR_BUSY = -6, SCMI_ERR_COMMS = -7, SCMI_ERR_GENERIC = -8, SCMI_ERR_HARDWARE = -9, SCMI_ERR_PROTOCOL = -10, }; enum scmi_common_cmd { PROTOCOL_VERSION = 0, PROTOCOL_ATTRIBUTES = 1, PROTOCOL_MESSAGE_ATTRIBUTES = 2, }; struct scmi_xfers_info { unsigned long *xfer_alloc_table; spinlock_t xfer_lock; int max_msg; struct hlist_head free_xfers; struct hlist_head pending_xfers[512]; }; struct scmi_desc; struct scmi_debug_info; struct scmi_info { int id; struct device *dev; const struct scmi_desc *desc; struct scmi_revision_info version; struct scmi_handle handle; struct scmi_xfers_info tx_minfo; struct scmi_xfers_info rx_minfo; struct idr tx_idr; struct idr rx_idr; struct idr protocols; struct mutex protocols_mtx; u8 *protocols_imp; struct idr active_protocols; unsigned int atomic_threshold; void *notify_priv; struct list_head node; int users; struct notifier_block bus_nb; struct notifier_block dev_req_nb; struct mutex devreq_mtx; struct scmi_debug_info *dbg; void *raw; }; struct scmi_transport_ops; struct scmi_desc { int (*transport_init)(); void (*transport_exit)(); const struct scmi_transport_ops *ops; int max_rx_timeout_ms; int max_msg; int max_msg_size; const bool force_polling; const bool sync_cmds_completed_on_ret; const bool atomic_enabled; }; struct scmi_chan_info; struct scmi_transport_ops { int (*link_supplier)(struct device *); bool 
(*chan_available)(struct device_node *, int); int (*chan_setup)(struct scmi_chan_info *, struct device *, bool); int (*chan_free)(int, void *, void *); unsigned int (*get_max_msg)(struct scmi_chan_info *); int (*send_message)(struct scmi_chan_info *, struct scmi_xfer *); void (*mark_txdone)(struct scmi_chan_info *, int, struct scmi_xfer *); void (*fetch_response)(struct scmi_chan_info *, struct scmi_xfer *); void (*fetch_notification)(struct scmi_chan_info *, size_t, struct scmi_xfer *); void (*clear_channel)(struct scmi_chan_info *); bool (*poll_done)(struct scmi_chan_info *, struct scmi_xfer *); }; struct scmi_chan_info { int id; struct device *dev; unsigned int rx_timeout_ms; struct scmi_handle *handle; bool no_completion_irq; void *transport_info; }; struct scmi_debug_info { struct dentry *top_dentry; const char *name; const char *type; bool is_atomic; }; struct scmi_protocol; struct scmi_protocol_instance { const struct scmi_handle *handle; const struct scmi_protocol *proto; void *gid; refcount_t users; void *priv; unsigned int version; struct scmi_protocol_handle ph; }; typedef int (*scmi_prot_init_ph_fn_t)(const struct scmi_protocol_handle *); struct scmi_protocol_events; struct scmi_protocol { const u8 id; struct module *owner; const scmi_prot_init_ph_fn_t instance_init; const scmi_prot_init_ph_fn_t instance_deinit; const void *ops; const struct scmi_protocol_events *events; unsigned int supported_version; }; struct scmi_event_ops; struct scmi_event; struct scmi_protocol_events { size_t queue_sz; const struct scmi_event_ops *ops; const struct scmi_event *evts; unsigned int num_events; unsigned int num_sources; }; struct scmi_event_ops { int (*get_num_sources)(const struct scmi_protocol_handle *); int (*set_notify_enabled)(const struct scmi_protocol_handle *, u8, u32, bool); void * (*fill_custom_report)(const struct scmi_protocol_handle *, u8, ktime_t, const void *, size_t, void *, u32 *); }; struct scmi_event { u8 id; size_t max_payld_sz; size_t max_report_sz; }; struct trace_event_raw_scmi_fc_call { struct trace_entry ent; u8 protocol_id; u8 msg_id; u32 res_id; u32 val1; u32 val2; char __data[0]; }; struct trace_event_raw_scmi_xfer_begin { struct trace_entry ent; int transfer_id; u8 msg_id; u8 protocol_id; u16 seq; bool poll; char __data[0]; }; struct trace_event_raw_scmi_xfer_response_wait { struct trace_entry ent; int transfer_id; u8 msg_id; u8 protocol_id; u16 seq; u32 timeout; bool poll; char __data[0]; }; struct trace_event_raw_scmi_xfer_end { struct trace_entry ent; int transfer_id; u8 msg_id; u8 protocol_id; u16 seq; int status; char __data[0]; }; struct trace_event_raw_scmi_rx_done { struct trace_entry ent; int transfer_id; u8 msg_id; u8 protocol_id; u16 seq; u8 msg_type; char __data[0]; }; struct trace_event_raw_scmi_msg_dump { struct trace_entry ent; int id; u8 channel_id; u8 protocol_id; u8 msg_id; char tag[5]; u16 seq; int status; size_t len; u32 __data_loc_cmd; char __data[0]; }; struct trace_event_data_offsets_scmi_msg_dump { u32 cmd; }; struct scmi_protocol_devres { const struct scmi_handle *handle; u8 protocol_id; }; struct trace_event_data_offsets_scmi_fc_call {}; struct trace_event_data_offsets_scmi_xfer_begin {}; struct trace_event_data_offsets_scmi_xfer_response_wait {}; struct trace_event_data_offsets_scmi_xfer_end {}; struct trace_event_data_offsets_scmi_rx_done {}; struct scmi_msg_resp_domain_name_get { __le32 flags; u8 name[64]; }; struct scmi_iterator { void *msg; void *resp; struct scmi_xfer *t; const struct scmi_protocol_handle *ph; struct 
scmi_iterator_ops *ops; struct scmi_iterator_state state; void *priv; }; struct scmi_msg_resp_desc_fc { __le32 attr; __le32 rate_limit; __le32 chan_addr_low; __le32 chan_addr_high; __le32 chan_size; __le32 db_addr_low; __le32 db_addr_high; __le32 db_set_lmask; __le32 db_set_hmask; __le32 db_preserve_lmask; __le32 db_preserve_hmask; }; struct scmi_msg_get_fc_info { __le32 domain; __le32 message_id; }; struct events_queue { size_t sz; struct kfifo kfifo; struct work_struct notify_work; struct workqueue_struct *wq; }; struct scmi_notify_instance; struct scmi_event_header; struct scmi_registered_event; struct scmi_registered_events_desc { u8 id; const struct scmi_event_ops *ops; struct events_queue equeue; struct scmi_notify_instance *ni; struct scmi_event_header *eh; size_t eh_sz; void *in_flight; int num_events; struct scmi_registered_event **registered_events; struct mutex registered_mtx; const struct scmi_protocol_handle *ph; struct hlist_head registered_events_handlers[64]; }; struct scmi_notify_instance { void *gid; struct scmi_handle *handle; struct work_struct init_work; struct workqueue_struct *notify_wq; struct mutex pending_mtx; struct scmi_registered_events_desc **registered_protocols; struct hlist_head pending_events_handlers[16]; }; struct scmi_event_header { ktime_t timestamp; size_t payld_sz; unsigned char evt_id; unsigned char payld[0]; }; struct scmi_registered_event { struct scmi_registered_events_desc *proto; const struct scmi_event *evt; void *report; u32 num_sources; refcount_t *sources; struct mutex sources_mtx; }; struct scmi_event_handler { u32 key; refcount_t users; struct scmi_registered_event *r_evt; struct blocking_notifier_head chain; struct hlist_node hash; bool enabled; }; struct scmi_notifier_devres { const struct scmi_handle *handle; u8 proto_id; u8 evt_id; u32 __src_id; u32 *src_id; struct notifier_block *nb; }; enum scmi_base_protocol_cmd { BASE_DISCOVER_VENDOR = 3, BASE_DISCOVER_SUB_VENDOR = 4, BASE_DISCOVER_IMPLEMENT_VERSION = 5, BASE_DISCOVER_LIST_PROTOCOLS = 6, BASE_DISCOVER_AGENT = 7, BASE_NOTIFY_ERRORS = 8, BASE_SET_DEVICE_PERMISSIONS = 9, BASE_SET_PROTOCOL_PERMISSIONS = 10, BASE_RESET_AGENT_CONFIGURATION = 11, }; enum scmi_notification_events { SCMI_EVENT_POWER_STATE_CHANGED = 0, SCMI_EVENT_CLOCK_RATE_CHANGED = 0, SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED = 1, SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED = 0, SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED = 1, SCMI_EVENT_SENSOR_TRIP_POINT_EVENT = 0, SCMI_EVENT_SENSOR_UPDATE = 1, SCMI_EVENT_RESET_ISSUED = 0, SCMI_EVENT_BASE_ERROR_EVENT = 0, SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER = 0, SCMI_EVENT_POWERCAP_CAP_CHANGED = 0, SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED = 1, }; struct scmi_msg_resp_base_attributes { u8 num_protocols; u8 num_agents; __le16 reserved; }; struct scmi_msg_resp_base_discover_agent { __le32 agent_id; u8 name[16]; }; struct scmi_msg_base_error_notify { __le32 event_control; }; struct scmi_base_error_notify_payld { __le32 agent_id; __le32 error_status; __le64 msg_reports[1024]; }; struct scmi_base_error_report { ktime_t timestamp; unsigned int agent_id; bool fatal; unsigned int cmd_count; unsigned long long reports[0]; }; struct scmi_clock_info; struct scmi_clk_proto_ops { int (*count_get)(const struct scmi_protocol_handle *); const struct scmi_clock_info * (*info_get)(const struct scmi_protocol_handle *, u32); int (*rate_get)(const struct scmi_protocol_handle *, u32, u64 *); int (*rate_set)(const struct scmi_protocol_handle *, u32, u64); int (*enable)(const struct scmi_protocol_handle *, u32, bool); int 
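/*
 * SCMI notification plumbing: incoming platform events are queued per
 * protocol into events_queue (a kfifo drained from a workqueue), matched
 * against scmi_registered_event descriptors, and delivered through
 * scmi_event_handler entries, each wrapping a blocking notifier chain and
 * hashed by a key built from the protocol, event and source identifiers.
 */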
(*disable)(const struct scmi_protocol_handle *, u32, bool); int (*state_get)(const struct scmi_protocol_handle *, u32, bool *, bool); int (*config_oem_get)(const struct scmi_protocol_handle *, u32, u8, u32 *, u32 *, bool); int (*config_oem_set)(const struct scmi_protocol_handle *, u32, u8, u32, bool); int (*parent_get)(const struct scmi_protocol_handle *, u32, u32 *); int (*parent_set)(const struct scmi_protocol_handle *, u32, u32); u64 android_kabi_reserved1; }; struct scmi_clock_info { char name[64]; unsigned int enable_latency; bool rate_discrete; bool rate_changed_notifications; bool rate_change_requested_notifications; bool state_ctrl_forbidden; bool rate_ctrl_forbidden; bool parent_ctrl_forbidden; union { struct { int num_rates; u64 rates[16]; } list; struct { u64 min_rate; u64 max_rate; u64 step_size; } range; }; int num_parents; u32 *parents; }; enum scmi_clock_protocol_cmd { CLOCK_ATTRIBUTES = 3, CLOCK_DESCRIBE_RATES = 4, CLOCK_RATE_SET = 5, CLOCK_RATE_GET = 6, CLOCK_CONFIG_SET = 7, CLOCK_NAME_GET = 8, CLOCK_RATE_NOTIFY = 9, CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 10, CLOCK_CONFIG_GET = 11, CLOCK_POSSIBLE_PARENTS_GET = 12, CLOCK_PARENT_SET = 13, CLOCK_PARENT_GET = 14, CLOCK_GET_PERMISSIONS = 15, }; enum clk_state { CLK_STATE_DISABLE = 0, CLK_STATE_ENABLE = 1, CLK_STATE_RESERVED = 2, CLK_STATE_UNCHANGED = 3, }; struct scmi_clk_ipriv { struct device *dev; u32 clk_id; struct scmi_clock_info *clk; }; struct clock_info { u32 version; int num_clocks; int max_async_req; atomic_t cur_async_req; struct scmi_clock_info *clk; int (*clock_config_set)(const struct scmi_protocol_handle *, u32, enum clk_state, u8, u32, bool); int (*clock_config_get)(const struct scmi_protocol_handle *, u32, u8, u32 *, bool *, u32 *, bool); }; struct scmi_msg_resp_clock_protocol_attributes { __le16 num_clocks; u8 max_async_req; u8 reserved; }; struct scmi_msg_resp_clock_attributes { __le32 attributes; u8 name[16]; __le32 clock_enable_latency; }; struct scmi_msg_clock_rate_notify { __le32 clk_id; __le32 notify_enable; }; struct scmi_msg_clock_config_set_v2 { __le32 id; __le32 attributes; __le32 oem_config_val; }; struct scmi_msg_clock_config_get { __le32 id; __le32 flags; }; struct scmi_msg_resp_clock_config_get { __le32 attributes; __le32 config; __le32 oem_config_val; }; struct scmi_msg_clock_config_set { __le32 id; __le32 attributes; }; struct scmi_msg_clock_possible_parents { __le32 id; __le32 skip_parents; }; struct scmi_msg_resp_clock_possible_parents { __le32 num_parent_flags; __le32 possible_parents[0]; }; struct scmi_msg_clock_describe_rates { __le32 id; __le32 rate_index; }; struct scmi_msg_resp_clock_describe_rates { __le32 num_rates_flags; struct { __le32 value_low; __le32 value_high; } rate[0]; }; struct scmi_clock_set_rate { __le32 flags; __le32 id; __le32 value_low; __le32 value_high; }; struct scmi_msg_resp_set_rate_complete { __le32 id; __le32 rate_low; __le32 rate_high; }; struct scmi_msg_clock_set_parent { __le32 id; __le32 parent_id; }; struct scmi_clock_rate_notify_payld { __le32 agent_id; __le32 clock_id; __le32 rate_low; __le32 rate_high; }; struct scmi_clock_rate_notif_report { ktime_t timestamp; unsigned int agent_id; unsigned int clock_id; unsigned long long rate; }; enum scmi_performance_protocol_cmd { PERF_DOMAIN_ATTRIBUTES = 3, PERF_DESCRIBE_LEVELS = 4, PERF_LIMITS_SET = 5, PERF_LIMITS_GET = 6, PERF_LEVEL_SET = 7, PERF_LEVEL_GET = 8, PERF_NOTIFY_LIMITS = 9, PERF_NOTIFY_LEVEL = 10, PERF_DESCRIBE_FASTCHANNEL = 11, PERF_DOMAIN_NAME_GET = 12, }; enum { PERF_FC_LEVEL = 0, PERF_FC_LIMIT = 1, 
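/*
 * SCMI clock protocol messages. Wire fields are little-endian, and 64-bit
 * clock rates travel as two 32-bit words, low word first, so an entry of
 * scmi_msg_resp_clock_describe_rates would be reassembled roughly as shown
 * below (a sketch only; 'resp' and 'i' are hypothetical locals):
 *
 *   u64 rate_hz = le32_to_cpu(resp->rate[i].value_low) |
 *                 ((u64)le32_to_cpu(resp->rate[i].value_high) << 32);
 */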
PERF_FC_MAX = 2, }; struct scmi_opp { u32 perf; u32 power; u32 trans_latency_us; u32 indicative_freq; u32 level_index; struct hlist_node hash; }; struct perf_dom_info; struct scmi_perf_info { u32 version; u16 num_domains; enum scmi_power_scale power_scale; u64 stats_addr; u32 stats_size; struct perf_dom_info *dom_info; }; struct scmi_fc_info; struct perf_dom_info { u32 id; bool set_limits; bool perf_limit_notify; bool perf_level_notify; bool perf_fastchannels; bool level_indexing_mode; u32 opp_count; u32 sustained_freq_khz; u32 sustained_perf_level; unsigned long mult_factor; struct scmi_perf_domain_info info; struct scmi_opp opp[32]; struct scmi_fc_info *fc_info; struct xarray opps_by_idx; struct xarray opps_by_lvl; struct hlist_head opps_by_freq[32]; }; struct scmi_fc_info { void *set_addr; void *get_addr; struct scmi_fc_db_info *set_db; }; struct scmi_msg_resp_perf_attributes { __le16 num_domains; __le16 flags; __le32 stats_addr_low; __le32 stats_addr_high; __le32 stats_size; }; struct scmi_msg_resp_perf_domain_attributes { __le32 flags; __le32 rate_limit_us; __le32 sustained_freq_khz; __le32 sustained_perf_level; u8 name[16]; }; struct scmi_perf_ipriv { u32 version; struct perf_dom_info *perf_dom; }; struct scmi_msg_resp_perf_describe_levels { __le16 num_returned; __le16 num_remaining; struct { __le32 perf_val; __le32 power; __le16 transition_latency_us; __le16 reserved; } opp[0]; }; struct scmi_msg_resp_perf_describe_levels_v4 { __le16 num_returned; __le16 num_remaining; struct { __le32 perf_val; __le32 power; __le16 transition_latency_us; __le16 reserved; __le32 indicative_freq; __le32 level_index; } opp[0]; }; struct scmi_perf_set_limits { __le32 domain; __le32 max_level; __le32 min_level; }; struct scmi_perf_get_limits { __le32 max_level; __le32 min_level; }; struct scmi_perf_set_level { __le32 domain; __le32 level; }; struct scmi_perf_notify_level_or_limits { __le32 domain; __le32 notify_enable; }; struct scmi_msg_perf_describe_levels { __le32 domain; __le32 level_index; }; struct scmi_perf_limits_notify_payld { __le32 agent_id; __le32 domain_id; __le32 range_max; __le32 range_min; }; struct scmi_perf_limits_report { ktime_t timestamp; unsigned int agent_id; unsigned int domain_id; unsigned int range_max; unsigned int range_min; }; struct scmi_perf_level_notify_payld { __le32 agent_id; __le32 domain_id; __le32 performance_level; }; struct scmi_perf_level_report { ktime_t timestamp; unsigned int agent_id; unsigned int domain_id; unsigned int performance_level; }; struct scmi_power_proto_ops { int (*num_domains_get)(const struct scmi_protocol_handle *); const char * (*name_get)(const struct scmi_protocol_handle *, u32); int (*state_set)(const struct scmi_protocol_handle *, u32, u32); int (*state_get)(const struct scmi_protocol_handle *, u32, u32 *); u64 android_kabi_reserved1; }; enum scmi_power_protocol_cmd { POWER_DOMAIN_ATTRIBUTES = 3, POWER_STATE_SET = 4, POWER_STATE_GET = 5, POWER_STATE_NOTIFY = 6, POWER_DOMAIN_NAME_GET = 8, }; struct power_dom_info; struct scmi_power_info { u32 version; int num_domains; u64 stats_addr; u32 stats_size; struct power_dom_info *dom_info; }; struct power_dom_info { bool state_set_sync; bool state_set_async; bool state_set_notify; char name[64]; }; struct scmi_msg_resp_power_attributes { __le16 num_domains; __le16 reserved; __le32 stats_addr_low; __le32 stats_addr_high; __le32 stats_size; }; struct scmi_msg_resp_power_domain_attributes { __le32 flags; u8 name[16]; }; struct scmi_power_state_notify { __le32 domain; __le32 notify_enable; }; struct 
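/*
 * SCMI performance protocol: perf_dom_info caches one domain's operating
 * performance points (scmi_opp), optional fastchannel addresses
 * (scmi_fc_info), and a mult_factor relating performance levels to frequency
 * (derived from sustained_freq_khz and sustained_perf_level), which is what
 * cpufreq-style consumers use to translate levels into kHz.
 */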
scmi_power_set_state { __le32 flags; __le32 domain; __le32 state; }; struct scmi_power_state_notify_payld { __le32 agent_id; __le32 domain_id; __le32 power_state; }; struct scmi_power_state_changed_report { ktime_t timestamp; unsigned int agent_id; unsigned int domain_id; unsigned int power_state; }; enum scmi_reset_protocol_cmd { RESET_DOMAIN_ATTRIBUTES = 3, RESET = 4, RESET_NOTIFY = 5, RESET_DOMAIN_NAME_GET = 6, }; struct reset_dom_info; struct scmi_reset_info { u32 version; int num_domains; struct reset_dom_info *dom_info; }; struct reset_dom_info { bool async_reset; bool reset_notify; u32 latency_us; char name[64]; }; struct scmi_msg_resp_reset_domain_attributes { __le32 attributes; __le32 latency; u8 name[16]; }; struct scmi_msg_reset_domain_reset { __le32 domain_id; __le32 flags; __le32 reset_state; }; struct scmi_msg_reset_notify { __le32 id; __le32 event_control; }; struct scmi_reset_issued_notify_payld { __le32 agent_id; __le32 domain_id; __le32 reset_state; }; struct scmi_reset_issued_report { ktime_t timestamp; unsigned int agent_id; unsigned int domain_id; unsigned int reset_state; }; struct scmi_sensor_info; struct scmi_sensor_reading; struct scmi_sensor_proto_ops { int (*count_get)(const struct scmi_protocol_handle *); const struct scmi_sensor_info * (*info_get)(const struct scmi_protocol_handle *, u32); int (*trip_point_config)(const struct scmi_protocol_handle *, u32, u8, u64); int (*reading_get)(const struct scmi_protocol_handle *, u32, u64 *); int (*reading_get_timestamped)(const struct scmi_protocol_handle *, u32, u8, struct scmi_sensor_reading *); int (*config_get)(const struct scmi_protocol_handle *, u32, u32 *); int (*config_set)(const struct scmi_protocol_handle *, u32, u32); u64 android_kabi_reserved1; }; struct scmi_sensor_intervals_info { bool segmented; unsigned int count; unsigned int *desc; unsigned int prealloc_pool[16]; }; struct scmi_range_attrs { long long min_range; long long max_range; }; struct scmi_sensor_axis_info; struct scmi_sensor_info { unsigned int id; unsigned int type; int scale; unsigned int num_trip_points; bool async; bool update; bool timestamped; int tstamp_scale; unsigned int num_axis; struct scmi_sensor_axis_info *axis; struct scmi_sensor_intervals_info intervals; unsigned int sensor_config; char name[64]; bool extended_scalar_attrs; unsigned int sensor_power; unsigned int resolution; int exponent; struct scmi_range_attrs scalar_attrs; u64 android_kabi_reserved1; }; struct scmi_sensor_axis_info { unsigned int id; unsigned int type; int scale; char name[64]; bool extended_attrs; unsigned int resolution; int exponent; struct scmi_range_attrs attrs; }; struct scmi_sensor_reading { long long value; unsigned long long timestamp; }; enum scmi_sensor_protocol_cmd { SENSOR_DESCRIPTION_GET = 3, SENSOR_TRIP_POINT_NOTIFY = 4, SENSOR_TRIP_POINT_CONFIG = 5, SENSOR_READING_GET = 6, SENSOR_AXIS_DESCRIPTION_GET = 7, SENSOR_LIST_UPDATE_INTERVALS = 8, SENSOR_CONFIG_GET = 9, SENSOR_CONFIG_SET = 10, SENSOR_CONTINUOUS_UPDATE_NOTIFY = 11, SENSOR_NAME_GET = 12, SENSOR_AXIS_NAME_GET = 13, }; struct scmi_sens_ipriv { void *priv; struct device *dev; }; struct sensors_info { u32 version; int num_sensors; int max_requests; u64 reg_addr; u32 reg_size; struct scmi_sensor_info *sensors; }; struct scmi_msg_resp_sensor_attributes { __le16 num_sensors; u8 max_requests; u8 reserved; __le32 reg_addr_low; __le32 reg_addr_high; __le32 reg_size; }; struct scmi_apriv { bool any_axes_support_extended_names; struct scmi_sensor_info *s; }; struct scmi_msg_resp_attrs { __le32 
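/*
 * SCMI power-domain, reset-domain and sensor protocol types. Sensor
 * descriptions can carry per-axis metadata (scmi_sensor_axis_info), update
 * intervals and extended scalar attributes; readings are returned as a
 * 64-bit value plus timestamp (scmi_sensor_reading).
 */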
min_range_low; __le32 min_range_high; __le32 max_range_low; __le32 max_range_high; }; struct scmi_sensor_reading_resp { __le32 sensor_value_low; __le32 sensor_value_high; __le32 timestamp_low; __le32 timestamp_high; }; struct scmi_msg_sensor_request_notify { __le32 id; __le32 event_control; }; struct scmi_msg_sensor_description { __le32 desc_index; }; struct scmi_sensor_descriptor { __le32 id; __le32 attributes_low; __le32 attributes_high; u8 name[16]; __le32 power; __le32 resolution; struct scmi_msg_resp_attrs scalar_attrs; }; struct scmi_msg_resp_sensor_description { __le16 num_returned; __le16 num_remaining; struct scmi_sensor_descriptor desc[0]; }; struct scmi_msg_sensor_list_update_intervals { __le32 id; __le32 index; }; struct scmi_msg_resp_sensor_list_update_intervals { __le32 num_intervals_flags; __le32 intervals[0]; }; struct scmi_msg_sensor_axis_description_get { __le32 id; __le32 axis_desc_index; }; struct scmi_axis_descriptor { __le32 id; __le32 attributes_low; __le32 attributes_high; u8 name[16]; __le32 resolution; struct scmi_msg_resp_attrs attrs; }; struct scmi_msg_resp_sensor_axis_description { __le32 num_axis_flags; struct scmi_axis_descriptor desc[0]; }; struct scmi_sensor_axis_name_descriptor { __le32 axis_id; u8 name[64]; }; struct scmi_msg_resp_sensor_axis_names_description { __le32 num_axis_flags; struct scmi_sensor_axis_name_descriptor desc[0]; }; struct scmi_msg_set_sensor_trip_point { __le32 id; __le32 event_control; __le32 value_low; __le32 value_high; }; struct scmi_msg_sensor_reading_get { __le32 id; __le32 flags; }; struct scmi_resp_sensor_reading_complete { __le32 id; __le32 readings_low; __le32 readings_high; }; struct scmi_resp_sensor_reading_complete_v3 { __le32 id; struct scmi_sensor_reading_resp readings[0]; }; struct scmi_msg_sensor_config_set { __le32 id; __le32 sensor_config; }; struct scmi_sensor_trip_notify_payld { __le32 agent_id; __le32 sensor_id; __le32 trip_point_desc; }; struct scmi_sensor_trip_point_report { ktime_t timestamp; unsigned int agent_id; unsigned int sensor_id; unsigned int trip_point_desc; }; struct scmi_sensor_update_notify_payld { __le32 agent_id; __le32 sensor_id; struct scmi_sensor_reading_resp readings[0]; }; struct scmi_sensor_update_report { ktime_t timestamp; unsigned int agent_id; unsigned int sensor_id; unsigned int readings_count; struct scmi_sensor_reading readings[0]; }; enum scmi_system_protocol_cmd { SYSTEM_POWER_STATE_NOTIFY = 5, }; enum scmi_system_events { SCMI_SYSTEM_SHUTDOWN = 0, SCMI_SYSTEM_COLDRESET = 1, SCMI_SYSTEM_WARMRESET = 2, SCMI_SYSTEM_POWERUP = 3, SCMI_SYSTEM_SUSPEND = 4, SCMI_SYSTEM_MAX = 5, }; struct scmi_system_power_state_notify { __le32 notify_enable; }; struct scmi_system_info { u32 version; bool graceful_timeout_supported; }; struct scmi_system_power_state_notifier_payld { __le32 agent_id; __le32 flags; __le32 system_state; __le32 timeout; }; struct scmi_system_power_state_notifier_report { ktime_t timestamp; unsigned int agent_id; unsigned int flags; unsigned int system_state; unsigned int timeout; }; enum scmi_voltage_level_mode { SCMI_VOLTAGE_LEVEL_SET_AUTO = 0, SCMI_VOLTAGE_LEVEL_SET_SYNC = 1, }; struct scmi_voltage_info; struct scmi_voltage_proto_ops { int (*num_domains_get)(const struct scmi_protocol_handle *); const struct scmi_voltage_info * (*info_get)(const struct scmi_protocol_handle *, u32); int (*config_set)(const struct scmi_protocol_handle *, u32, u32); int (*config_get)(const struct scmi_protocol_handle *, u32, u32 *); int (*level_set)(const struct scmi_protocol_handle *, u32, 
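/*
 * Wire-format messages for the sensor protocol (descriptors, trip points,
 * readings), the system-power protocol (shutdown/reset state notifications),
 * and the voltage protocol ops, whose levels are expressed in signed
 * microvolts (levels_uv).
 */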
enum scmi_voltage_level_mode, s32); int (*level_get)(const struct scmi_protocol_handle *, u32, s32 *); }; struct scmi_voltage_info { unsigned int id; bool segmented; bool negative_volts_allowed; bool async_level_set; char name[64]; unsigned int num_levels; int *levels_uv; }; enum scmi_voltage_protocol_cmd { VOLTAGE_DOMAIN_ATTRIBUTES = 3, VOLTAGE_DESCRIBE_LEVELS = 4, VOLTAGE_CONFIG_SET = 5, VOLTAGE_CONFIG_GET = 6, VOLTAGE_LEVEL_SET = 7, VOLTAGE_LEVEL_GET = 8, VOLTAGE_DOMAIN_NAME_GET = 9, }; struct voltage_info { unsigned int version; unsigned int num_domains; struct scmi_voltage_info *domains; }; struct scmi_msg_resp_domain_attributes { __le32 attr; u8 name[16]; }; struct scmi_volt_ipriv { struct device *dev; struct scmi_voltage_info *v; }; struct scmi_msg_cmd_describe_levels { __le32 domain_id; __le32 level_index; }; struct scmi_msg_resp_describe_levels { __le32 flags; __le32 voltage[0]; }; struct scmi_msg_cmd_config_set { __le32 domain_id; __le32 config; }; struct scmi_msg_cmd_level_set { __le32 domain_id; __le32 flags; __le32 voltage_level; }; struct scmi_resp_voltage_level_set_complete { __le32 domain_id; __le32 voltage_level; }; struct scmi_powercap_info; struct scmi_powercap_proto_ops { int (*num_domains_get)(const struct scmi_protocol_handle *); const struct scmi_powercap_info * (*info_get)(const struct scmi_protocol_handle *, u32); int (*cap_get)(const struct scmi_protocol_handle *, u32, u32 *); int (*cap_set)(const struct scmi_protocol_handle *, u32, u32, bool); int (*cap_enable_set)(const struct scmi_protocol_handle *, u32, bool); int (*cap_enable_get)(const struct scmi_protocol_handle *, u32, bool *); int (*pai_get)(const struct scmi_protocol_handle *, u32, u32 *); int (*pai_set)(const struct scmi_protocol_handle *, u32, u32); int (*measurements_get)(const struct scmi_protocol_handle *, u32, u32 *, u32 *); int (*measurements_threshold_set)(const struct scmi_protocol_handle *, u32, u32, u32); int (*measurements_threshold_get)(const struct scmi_protocol_handle *, u32, u32 *, u32 *); }; struct scmi_powercap_info { unsigned int id; bool notify_powercap_cap_change; bool notify_powercap_measurement_change; bool async_powercap_cap_set; bool powercap_cap_config; bool powercap_monitoring; bool powercap_pai_config; bool powercap_scale_mw; bool powercap_scale_uw; bool fastchannels; char name[64]; unsigned int min_pai; unsigned int max_pai; unsigned int pai_step; unsigned int min_power_cap; unsigned int max_power_cap; unsigned int power_cap_step; unsigned int sustainable_power; unsigned int accuracy; unsigned int parent_id; struct scmi_fc_info *fc_info; }; enum scmi_powercap_protocol_cmd { POWERCAP_DOMAIN_ATTRIBUTES = 3, POWERCAP_CAP_GET = 4, POWERCAP_CAP_SET = 5, POWERCAP_PAI_GET = 6, POWERCAP_PAI_SET = 7, POWERCAP_DOMAIN_NAME_GET = 8, POWERCAP_MEASUREMENTS_GET = 9, POWERCAP_CAP_NOTIFY = 10, POWERCAP_MEASUREMENTS_NOTIFY = 11, POWERCAP_DESCRIBE_FASTCHANNEL = 12, }; enum { POWERCAP_FC_CAP = 0, POWERCAP_FC_PAI = 1, POWERCAP_FC_MAX = 2, }; struct scmi_powercap_state; struct powercap_info { u32 version; int num_domains; struct scmi_powercap_state *states; struct scmi_powercap_info *powercaps; }; struct scmi_powercap_state { bool enabled; u32 last_pcap; bool meas_notif_enabled; u64 thresholds; }; struct scmi_msg_resp_powercap_domain_attributes { __le32 attributes; u8 name[16]; __le32 min_pai; __le32 max_pai; __le32 pai_step; __le32 min_power_cap; __le32 max_power_cap; __le32 power_cap_step; __le32 sustainable_power; __le32 accuracy; __le32 parent_id; }; struct scmi_msg_powercap_set_cap_or_pai { 
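/*
 * SCMI powercap protocol: scmi_powercap_info describes one domain's cap and
 * PAI (power-averaging interval) ranges plus optional fastchannels, while
 * scmi_powercap_state tracks the driver-side enable state and measurement
 * notification thresholds.
 */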
__le32 domain; __le32 flags; __le32 value; }; struct scmi_msg_resp_powercap_cap_set_complete { __le32 domain; __le32 power_cap; }; struct scmi_msg_resp_powercap_meas_get { __le32 power; __le32 pai; }; struct scmi_msg_powercap_notify_cap { __le32 domain; __le32 notify_enable; }; struct scmi_msg_powercap_notify_thresh { __le32 domain; __le32 notify_enable; __le32 power_thresh_low; __le32 power_thresh_high; }; struct scmi_powercap_cap_changed_notify_payld { __le32 agent_id; __le32 domain_id; __le32 power_cap; __le32 pai; }; struct scmi_powercap_cap_changed_report { ktime_t timestamp; unsigned int agent_id; unsigned int domain_id; unsigned int power_cap; unsigned int pai; }; struct scmi_powercap_meas_changed_notify_payld { __le32 agent_id; __le32 domain_id; __le32 power; }; struct scmi_powercap_meas_changed_report { ktime_t timestamp; unsigned int agent_id; unsigned int domain_id; unsigned int power; }; enum scmi_pinctrl_selector_type { PIN_TYPE = 0, GROUP_TYPE = 1, FUNCTION_TYPE = 2, }; enum scmi_pinctrl_conf_type { SCMI_PIN_DEFAULT = 0, SCMI_PIN_BIAS_BUS_HOLD = 1, SCMI_PIN_BIAS_DISABLE = 2, SCMI_PIN_BIAS_HIGH_IMPEDANCE = 3, SCMI_PIN_BIAS_PULL_UP = 4, SCMI_PIN_BIAS_PULL_DEFAULT = 5, SCMI_PIN_BIAS_PULL_DOWN = 6, SCMI_PIN_DRIVE_OPEN_DRAIN = 7, SCMI_PIN_DRIVE_OPEN_SOURCE = 8, SCMI_PIN_DRIVE_PUSH_PULL = 9, SCMI_PIN_DRIVE_STRENGTH = 10, SCMI_PIN_INPUT_DEBOUNCE = 11, SCMI_PIN_INPUT_MODE = 12, SCMI_PIN_PULL_MODE = 13, SCMI_PIN_INPUT_VALUE = 14, SCMI_PIN_INPUT_SCHMITT = 15, SCMI_PIN_LOW_POWER_MODE = 16, SCMI_PIN_OUTPUT_MODE = 17, SCMI_PIN_OUTPUT_VALUE = 18, SCMI_PIN_POWER_SOURCE = 19, SCMI_PIN_SLEW_RATE = 20, SCMI_PIN_OEM_START = 192, SCMI_PIN_OEM_END = 255, }; struct scmi_pinctrl_proto_ops { int (*count_get)(const struct scmi_protocol_handle *, enum scmi_pinctrl_selector_type); int (*name_get)(const struct scmi_protocol_handle *, u32, enum scmi_pinctrl_selector_type, const char **); int (*group_pins_get)(const struct scmi_protocol_handle *, u32, const unsigned int **, unsigned int *); int (*function_groups_get)(const struct scmi_protocol_handle *, u32, unsigned int *, const unsigned int **); int (*mux_set)(const struct scmi_protocol_handle *, u32, u32); int (*settings_get_one)(const struct scmi_protocol_handle *, u32, enum scmi_pinctrl_selector_type, enum scmi_pinctrl_conf_type, u32 *); int (*settings_get_all)(const struct scmi_protocol_handle *, u32, enum scmi_pinctrl_selector_type, unsigned int *, enum scmi_pinctrl_conf_type *, u32 *); int (*settings_conf)(const struct scmi_protocol_handle *, u32, enum scmi_pinctrl_selector_type, unsigned int, enum scmi_pinctrl_conf_type *, u32 *); int (*pin_request)(const struct scmi_protocol_handle *, u32); int (*pin_free)(const struct scmi_protocol_handle *, u32); }; enum scmi_pinctrl_protocol_cmd { PINCTRL_ATTRIBUTES = 3, PINCTRL_LIST_ASSOCIATIONS = 4, PINCTRL_SETTINGS_GET = 5, PINCTRL_SETTINGS_CONFIGURE = 6, PINCTRL_REQUEST = 7, PINCTRL_RELEASE = 8, PINCTRL_NAME_GET = 9, PINCTRL_SET_PERMISSIONS = 10, }; struct scmi_group_info; struct scmi_function_info; struct scmi_pin_info; struct scmi_pinctrl_info { u32 version; int nr_groups; int nr_functions; int nr_pins; struct scmi_group_info *groups; struct scmi_function_info *functions; struct scmi_pin_info *pins; }; struct scmi_group_info { char name[64]; bool present; u32 *group_pins; u32 nr_pins; }; struct scmi_function_info { char name[64]; bool present; u32 *groups; u32 nr_groups; }; struct scmi_pin_info { char name[64]; bool present; }; struct scmi_msg_pinctrl_protocol_attributes { __le32 attributes_low; __le32 
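/*
 * SCMI pinctrl protocol: selectors address pins, groups or functions
 * (scmi_pinctrl_selector_type), and the configuration keys follow the
 * generic pinconf properties (bias, drive, debounce, ...) with an OEM range
 * reserved at 192-255.
 */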
attributes_high; }; struct scmi_msg_settings_conf { __le32 identifier; __le32 function_id; __le32 attributes; __le32 configs[0]; }; struct scmi_settings_get_ipriv { u32 selector; enum scmi_pinctrl_selector_type type; bool get_all; unsigned int *nr_configs; enum scmi_pinctrl_conf_type *config_types; u32 *config_values; }; struct scmi_msg_request { __le32 identifier; __le32 flags; }; struct scmi_pinctrl_ipriv { u32 selector; enum scmi_pinctrl_selector_type type; u32 *array; }; struct scmi_resp_pinctrl_attributes { __le32 attributes; u8 name[16]; }; struct scmi_msg_pinctrl_attributes { __le32 identifier; __le32 flags; }; struct scmi_msg_pinctrl_list_assoc { __le32 identifier; __le32 flags; __le32 index; }; struct scmi_resp_pinctrl_list_assoc { __le32 flags; __le16 array[0]; }; struct scmi_msg_settings_get { __le32 identifier; __le32 attributes; }; struct scmi_resp_settings_get { __le32 function_selected; __le32 num_configs; __le32 configs[0]; }; struct scmi_shared_mem { __le32 reserved; __le32 channel_status; __le32 reserved1[2]; __le32 flags; __le32 length; __le32 msg_header; u8 msg_payload[0]; }; struct scmi_mailbox { struct mbox_client cl; struct mbox_chan *chan; struct mbox_chan *chan_receiver; struct mbox_chan *chan_platform_receiver; struct scmi_chan_info *cinfo; struct scmi_shared_mem *shmem; }; struct scmi_smc { int irq; struct scmi_chan_info *cinfo; struct scmi_shared_mem *shmem; struct mutex shmem_lock; atomic_t inflight; u32 func_id; u32 param_page; u32 param_offset; }; struct scmi_msg_payld { __le32 msg_header; __le32 msg_payload[0]; }; enum poll_states { VIO_MSG_NOT_POLLED = 0, VIO_MSG_POLL_TIMEOUT = 1, VIO_MSG_POLLING = 2, VIO_MSG_POLL_DONE = 3, }; struct scmi_vio_channel { struct virtqueue *vqueue; struct scmi_chan_info *cinfo; spinlock_t free_lock; struct list_head free_list; spinlock_t pending_lock; struct list_head pending_cmds_list; struct work_struct deferred_tx_work; struct workqueue_struct *deferred_tx_wq; bool is_rx; unsigned int max_msg; spinlock_t lock; struct completion *shutdown_done; refcount_t users; }; struct scmi_vio_msg { struct scmi_msg_payld *request; struct scmi_msg_payld *input; struct list_head list; unsigned int rx_len; unsigned int poll_idx; enum poll_states poll_status; spinlock_t poll_lock; refcount_t users; }; typedef struct { u64 signature; u32 revision; u32 headersize; u32 crc32; u32 reserved; } efi_table_hdr_t; typedef struct { u16 year; u8 month; u8 day; u8 hour; u8 minute; u8 second; u8 pad1; u32 nanosecond; s16 timezone; u8 daylight; u8 pad2; } efi_time_t; typedef struct { u32 resolution; u32 accuracy; u8 sets_to_zero; } efi_time_cap_t; typedef efi_status_t efi_get_time_t(efi_time_t *, efi_time_cap_t *); typedef efi_status_t efi_set_time_t(efi_time_t *); typedef u8 efi_bool_t; typedef efi_status_t efi_get_wakeup_time_t(efi_bool_t *, efi_bool_t *, efi_time_t *); typedef efi_status_t efi_set_wakeup_time_t(efi_bool_t, efi_time_t *); typedef efi_status_t efi_set_virtual_address_map_t(unsigned long, unsigned long, u32, efi_memory_desc_t *); typedef u16 efi_char16_t; typedef efi_status_t efi_get_variable_t(efi_char16_t *, efi_guid_t *, u32 *, unsigned long *, void *); typedef efi_status_t efi_get_next_variable_t(unsigned long *, efi_char16_t *, efi_guid_t *); typedef efi_status_t efi_set_variable_t(efi_char16_t *, efi_guid_t *, u32, unsigned long, void *); typedef efi_status_t efi_get_next_high_mono_count_t(u32 *); typedef void efi_reset_system_t(int, efi_status_t, unsigned long, efi_char16_t *); typedef struct { efi_guid_t guid; u32 headersize; u32 
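/*
 * SCMI transport backends: scmi_shared_mem is the shared-memory message area
 * layout used by the mailbox (scmi_mailbox) and SMC (scmi_smc) transports,
 * while scmi_vio_channel/scmi_vio_msg back the virtio transport. The efi_*
 * types that follow describe the UEFI runtime-services interface.
 */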
flags; u32 imagesize; } efi_capsule_header_t; typedef efi_status_t efi_update_capsule_t(efi_capsule_header_t **, unsigned long, unsigned long); typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **, unsigned long, u64 *, int *); typedef efi_status_t efi_query_variable_info_t(u32, u64 *, u64 *, u64 *); typedef struct { efi_table_hdr_t hdr; u32 get_time; u32 set_time; u32 get_wakeup_time; u32 set_wakeup_time; u32 set_virtual_address_map; u32 convert_pointer; u32 get_variable; u32 get_next_variable; u32 set_variable; u32 get_next_high_mono_count; u32 reset_system; u32 update_capsule; u32 query_capsule_caps; u32 query_variable_info; } efi_runtime_services_32_t; typedef union { struct { efi_table_hdr_t hdr; efi_get_time_t *get_time; efi_set_time_t *set_time; efi_get_wakeup_time_t *get_wakeup_time; efi_set_wakeup_time_t *set_wakeup_time; efi_set_virtual_address_map_t *set_virtual_address_map; void *convert_pointer; efi_get_variable_t *get_variable; efi_get_next_variable_t *get_next_variable; efi_set_variable_t *set_variable; efi_get_next_high_mono_count_t *get_next_high_mono_count; efi_reset_system_t *reset_system; efi_update_capsule_t *update_capsule; efi_query_capsule_caps_t *query_capsule_caps; efi_query_variable_info_t *query_variable_info; }; efi_runtime_services_32_t mixed_mode; } efi_runtime_services_t; struct efi_memory_map { phys_addr_t phys_map; void *map; void *map_end; int nr_map; unsigned long desc_version; unsigned long desc_size; unsigned long flags; }; struct efi { const efi_runtime_services_t *runtime; unsigned int runtime_version; unsigned int runtime_supported_mask; unsigned long acpi; unsigned long acpi20; unsigned long smbios; unsigned long smbios3; unsigned long esrt; unsigned long tpm_log; unsigned long tpm_final_log; unsigned long mokvar_table; unsigned long coco_secret; unsigned long unaccepted; efi_get_time_t *get_time; efi_set_time_t *set_time; efi_get_wakeup_time_t *get_wakeup_time; efi_set_wakeup_time_t *set_wakeup_time; efi_get_variable_t *get_variable; efi_get_next_variable_t *get_next_variable; efi_set_variable_t *set_variable; efi_set_variable_t *set_variable_nonblocking; efi_query_variable_info_t *query_variable_info; efi_query_variable_info_t *query_variable_info_nonblocking; efi_update_capsule_t *update_capsule; efi_query_capsule_caps_t *query_capsule_caps; efi_get_next_high_mono_count_t *get_next_high_mono_count; efi_reset_system_t *reset_system; struct efi_memory_map memmap; unsigned long flags; }; struct linux_efi_memreserve { int size; atomic_t count; phys_addr_t next; struct { phys_addr_t base; phys_addr_t size; } entry[0]; }; typedef efi_status_t efi_query_variable_store_t(u32, unsigned long, bool); struct efivar_operations { efi_get_variable_t *get_variable; efi_get_next_variable_t *get_next_variable; efi_set_variable_t *set_variable; efi_set_variable_t *set_variable_nonblocking; efi_query_variable_store_t *query_variable_store; efi_query_variable_info_t *query_variable_info; }; struct efivars { struct kset *kset; const struct efivar_operations *ops; }; typedef struct { efi_guid_t guid; unsigned long *ptr; const char name[16]; } efi_config_table_type_t; typedef struct { efi_guid_t guid; u32 table; } efi_config_table_32_t; typedef union { struct { efi_guid_t guid; void *table; }; efi_config_table_32_t mixed_mode; } efi_config_table_t; typedef struct { efi_guid_t guid; u64 table; } efi_config_table_64_t; struct linux_efi_random_seed { u32 size; u8 bits[0]; }; typedef struct { u16 version; u16 length; u32 runtime_services_supported; } 
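/*
 * EFI runtime plumbing: efi_runtime_services_t overlays the native
 * function-pointer table with a 32-bit mixed_mode view, and struct efi is
 * the kernel's global snapshot of runtime service pointers plus the
 * addresses of firmware configuration tables (ACPI, SMBIOS, ESRT, TPM log,
 * and so on).
 */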
efi_rt_properties_table_t; struct linux_efi_initrd { unsigned long base; unsigned long size; }; typedef struct { u32 version; u32 num_entries; u32 desc_size; u32 flags; efi_memory_desc_t entry[0]; } efi_memory_attributes_table_t; typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *, bool); enum tcpa_event_types { PREBOOT = 0, POST_CODE = 1, UNUSED = 2, NO_ACTION = 3, SEPARATOR = 4, ACTION = 5, EVENT_TAG = 6, SCRTM_CONTENTS = 7, SCRTM_VERSION = 8, CPU_MICROCODE = 9, PLATFORM_CONFIG_FLAGS = 10, TABLE_OF_DEVICES = 11, COMPACT_HASH = 12, IPL = 13, IPL_PARTITION_DATA = 14, NONHOST_CODE = 15, NONHOST_CONFIG = 16, NONHOST_INFO = 17, }; struct tpm_digest { u16 alg_id; u8 digest[64]; }; struct tcg_pcr_event2_head { u32 pcr_idx; u32 event_type; u32 count; struct tpm_digest digests[0]; }; struct tcg_efi_specid_event_algs { u16 alg_id; u16 digest_size; }; struct tcg_efi_specid_event_head { u8 signature[16]; u32 platform_class; u8 spec_version_minor; u8 spec_version_major; u8 spec_errata; u8 uintnsize; u32 num_algs; struct tcg_efi_specid_event_algs digest_sizes[0]; }; struct tcg_event_field { u32 event_size; u8 event[0]; }; struct tcg_pcr_event { u32 pcr_idx; u32 event_type; u8 digest[20]; u32 event_size; u8 event[0]; }; struct linux_efi_tpm_eventlog { u32 size; u32 final_events_preboot_size; u8 version; u8 log[0]; }; struct efi_tcg2_final_events_table { u64 version; u64 nr_events; u8 events[0]; }; struct efi_memory_map_data { phys_addr_t phys_map; unsigned long size; unsigned long desc_version; unsigned long desc_size; unsigned long flags; }; struct efi_system_resource_table { u32 fw_resource_count; u32 fw_resource_count_max; u64 fw_resource_version; u8 entries[0]; }; struct esre_entry; struct esre_attribute { struct attribute attr; ssize_t (*show)(struct esre_entry *, char *); ssize_t (*store)(struct esre_entry *, const char *, size_t); }; struct efi_system_resource_entry_v1; struct esre_entry { union { struct efi_system_resource_entry_v1 *esre1; } esre; struct kobject kobj; struct list_head list; }; struct efi_system_resource_entry_v1 { efi_guid_t fw_class; u32 fw_type; u32 fw_version; u32 lowest_supported_fw_version; u32 capsule_flags; u32 last_attempt_version; u32 last_attempt_status; }; enum efi_rts_ids { EFI_NONE = 0, EFI_GET_TIME = 1, EFI_SET_TIME = 2, EFI_GET_WAKEUP_TIME = 3, EFI_SET_WAKEUP_TIME = 4, EFI_GET_VARIABLE = 5, EFI_GET_NEXT_VARIABLE = 6, EFI_SET_VARIABLE = 7, EFI_QUERY_VARIABLE_INFO = 8, EFI_GET_NEXT_HIGH_MONO_COUNT = 9, EFI_RESET_SYSTEM = 10, EFI_UPDATE_CAPSULE = 11, EFI_QUERY_CAPSULE_CAPS = 12, EFI_ACPI_PRM_HANDLER = 13, }; union efi_rts_args; struct efi_runtime_work { union efi_rts_args *args; efi_status_t status; struct work_struct work; enum efi_rts_ids efi_rts_id; struct completion efi_rts_comp; const void *caller; }; union efi_rts_args { struct { efi_time_t *time; efi_time_cap_t *capabilities; } GET_TIME; struct { efi_time_t *time; } SET_TIME; struct { efi_bool_t *enabled; efi_bool_t *pending; efi_time_t *time; } GET_WAKEUP_TIME; struct { efi_bool_t enable; efi_time_t *time; } SET_WAKEUP_TIME; struct { efi_char16_t *name; efi_guid_t *vendor; u32 *attr; unsigned long *data_size; void *data; } GET_VARIABLE; struct { unsigned long *name_size; efi_char16_t *name; efi_guid_t *vendor; } GET_NEXT_VARIABLE; struct { efi_char16_t *name; efi_guid_t *vendor; u32 attr; unsigned long data_size; void *data; } SET_VARIABLE; struct { u32 attr; u64 *storage_space; u64 *remaining_space; u64 *max_variable_size; } QUERY_VARIABLE_INFO; struct { u32 *high_count; } 
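/*
 * TCG/TPM event-log record formats, the EFI System Resource Table (ESRT)
 * used for firmware-update reporting, and efi_runtime_work/efi_rts_args,
 * through which runtime-service calls are marshalled onto a dedicated
 * workqueue instead of being invoked directly from arbitrary contexts.
 */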
GET_NEXT_HIGH_MONO_COUNT; struct { efi_capsule_header_t **capsules; unsigned long count; unsigned long sg_list; } UPDATE_CAPSULE; struct { efi_capsule_header_t **capsules; unsigned long count; u64 *max_size; int *reset_type; } QUERY_CAPSULE_CAPS; struct { efi_status_t (*acpi_prm_handler)(u64, void *); u64 param_buffer_addr; void *context; } ACPI_PRM_HANDLER; }; typedef struct { efi_table_hdr_t hdr; u32 fw_vendor; u32 fw_revision; u32 con_in_handle; u32 con_in; u32 con_out_handle; u32 con_out; u32 stderr_handle; u32 stderr; u32 runtime; u32 boottime; u32 nr_tables; u32 tables; } efi_system_table_32_t; union efi_simple_text_input_protocol; typedef union efi_simple_text_input_protocol efi_simple_text_input_protocol_t; union efi_simple_text_output_protocol; typedef union efi_simple_text_output_protocol efi_simple_text_output_protocol_t; union efi_boot_services; typedef union efi_boot_services efi_boot_services_t; typedef union { struct { efi_table_hdr_t hdr; unsigned long fw_vendor; u32 fw_revision; unsigned long con_in_handle; efi_simple_text_input_protocol_t *con_in; unsigned long con_out_handle; efi_simple_text_output_protocol_t *con_out; unsigned long stderr_handle; unsigned long stderr; efi_runtime_services_t *runtime; efi_boot_services_t *boottime; unsigned long nr_tables; unsigned long tables; }; efi_system_table_32_t mixed_mode; } efi_system_table_t; typedef struct { u16 scan_code; efi_char16_t unicode_char; } efi_input_key_t; typedef void *efi_event_t; union efi_simple_text_input_protocol { struct { void *reset; efi_status_t (*read_keystroke)(efi_simple_text_input_protocol_t *, efi_input_key_t *); efi_event_t wait_for_key; }; struct { u32 reset; u32 read_keystroke; u32 wait_for_key; } mixed_mode; }; union efi_simple_text_output_protocol { struct { void *reset; efi_status_t (*output_string)(efi_simple_text_output_protocol_t *, efi_char16_t *); void *test_string; }; struct { u32 reset; u32 output_string; u32 test_string; } mixed_mode; }; typedef u64 efi_physical_addr_t; typedef void (*efi_event_notify_t)(efi_event_t, void *); typedef enum { EfiTimerCancel = 0, EfiTimerPeriodic = 1, EfiTimerRelative = 2, } EFI_TIMER_DELAY; typedef void *efi_handle_t; struct efi_generic_dev_path; typedef struct efi_generic_dev_path efi_device_path_protocol_t; union efi_boot_services { struct { efi_table_hdr_t hdr; void *raise_tpl; void *restore_tpl; efi_status_t (*allocate_pages)(int, int, unsigned long, efi_physical_addr_t *); efi_status_t (*free_pages)(efi_physical_addr_t, unsigned long); efi_status_t (*get_memory_map)(unsigned long *, void *, unsigned long *, unsigned long *, u32 *); efi_status_t (*allocate_pool)(int, unsigned long, void **); efi_status_t (*free_pool)(void *); efi_status_t (*create_event)(u32, unsigned long, efi_event_notify_t, void *, efi_event_t *); efi_status_t (*set_timer)(efi_event_t, EFI_TIMER_DELAY, u64); efi_status_t (*wait_for_event)(unsigned long, efi_event_t *, unsigned long *); void *signal_event; efi_status_t (*close_event)(efi_event_t); void *check_event; void *install_protocol_interface; void *reinstall_protocol_interface; void *uninstall_protocol_interface; efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **); void *__reserved; void *register_protocol_notify; efi_status_t (*locate_handle)(int, efi_guid_t *, void *, unsigned long *, efi_handle_t *); efi_status_t (*locate_device_path)(efi_guid_t *, efi_device_path_protocol_t **, efi_handle_t *); efi_status_t (*install_configuration_table)(efi_guid_t *, void *); efi_status_t (*load_image)(bool, 
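/*
 * EFI system table and boot-services call tables. As with the runtime
 * services, each union pairs the native pointer layout with a mixed_mode
 * struct of 32-bit slots for firmware running in x86 mixed mode.
 */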
efi_handle_t, efi_device_path_protocol_t *, void *, unsigned long, efi_handle_t *); efi_status_t (*start_image)(efi_handle_t, unsigned long *, efi_char16_t **); efi_status_t (*exit)(efi_handle_t, efi_status_t, unsigned long, efi_char16_t *); efi_status_t (*unload_image)(efi_handle_t); efi_status_t (*exit_boot_services)(efi_handle_t, unsigned long); void *get_next_monotonic_count; efi_status_t (*stall)(unsigned long); void *set_watchdog_timer; void *connect_controller; efi_status_t (*disconnect_controller)(efi_handle_t, efi_handle_t, efi_handle_t); void *open_protocol; void *close_protocol; void *open_protocol_information; void *protocols_per_handle; void *locate_handle_buffer; efi_status_t (*locate_protocol)(efi_guid_t *, void *, void **); efi_status_t (*install_multiple_protocol_interfaces)(efi_handle_t *, ...); efi_status_t (*uninstall_multiple_protocol_interfaces)(efi_handle_t, ...); void *calculate_crc32; void (*copy_mem)(void *, const void *, unsigned long); void (*set_mem)(void *, unsigned long, unsigned char); void *create_event_ex; }; struct { efi_table_hdr_t hdr; u32 raise_tpl; u32 restore_tpl; u32 allocate_pages; u32 free_pages; u32 get_memory_map; u32 allocate_pool; u32 free_pool; u32 create_event; u32 set_timer; u32 wait_for_event; u32 signal_event; u32 close_event; u32 check_event; u32 install_protocol_interface; u32 reinstall_protocol_interface; u32 uninstall_protocol_interface; u32 handle_protocol; u32 __reserved; u32 register_protocol_notify; u32 locate_handle; u32 locate_device_path; u32 install_configuration_table; u32 load_image; u32 start_image; u32 exit; u32 unload_image; u32 exit_boot_services; u32 get_next_monotonic_count; u32 stall; u32 set_watchdog_timer; u32 connect_controller; u32 disconnect_controller; u32 open_protocol; u32 close_protocol; u32 open_protocol_information; u32 protocols_per_handle; u32 locate_handle_buffer; u32 locate_protocol; u32 install_multiple_protocol_interfaces; u32 uninstall_multiple_protocol_interfaces; u32 calculate_crc32; u32 copy_mem; u32 set_mem; u32 create_event_ex; } mixed_mode; }; struct psci_operations { u32 (*get_version)(); int (*cpu_suspend)(u32, unsigned long); int (*cpu_off)(u32); int (*cpu_on)(unsigned long, unsigned long); int (*migrate)(unsigned long); int (*affinity_info)(unsigned long, unsigned long); int (*migrate_info_type)(); }; typedef unsigned long psci_fn(unsigned long, unsigned long, unsigned long, unsigned long); typedef int (*psci_initcall_t)(const struct device_node *); struct tegra_ivc { struct device *peer; struct { struct iosys_map map; unsigned int position; dma_addr_t phys; } rx; struct { struct iosys_map map; unsigned int position; dma_addr_t phys; } tx; void (*notify)(struct tegra_ivc *, void *); void *notify_data; unsigned int num_frames; size_t frame_size; }; typedef void (*tegra_bpmp_mrq_handler_t)(unsigned int, struct tegra_bpmp_channel *, void *); struct tegra_bpmp_mrq { struct list_head list; unsigned int mrq; tegra_bpmp_mrq_handler_t handler; void *data; }; struct tegra_bpmp_mb_data { u32 code; u32 flags; u8 data[120]; }; struct mrq_query_abi_request { uint32_t mrq; }; struct mrq_query_abi_response { int32_t status; }; struct mrq_query_tag_request { uint32_t addr; }; struct mrq_ping_request { uint32_t challenge; }; struct mrq_ping_response { uint32_t reply; }; struct mrq_query_fw_tag_response { uint8_t tag[32]; }; struct tegra186_bpmp { struct tegra_bpmp *parent; struct { struct gen_pool *pool; union { void *sram; void *dram; }; dma_addr_t phys; } tx; struct { struct gen_pool *pool; union { void 
*sram; void *dram; }; dma_addr_t phys; } rx; struct { struct mbox_client client; struct mbox_chan *channel; } mbox; }; enum mrq_debug_commands { CMD_DEBUG_OPEN_RO = 0, CMD_DEBUG_OPEN_WO = 1, CMD_DEBUG_READ = 2, CMD_DEBUG_WRITE = 3, CMD_DEBUG_CLOSE = 4, CMD_DEBUG_MAX = 5, }; enum mrq_debugfs_commands { CMD_DEBUGFS_READ = 1, CMD_DEBUGFS_WRITE = 2, CMD_DEBUGFS_DUMPDIR = 3, CMD_DEBUGFS_MAX = 4, }; struct seqbuf { char *buf; size_t pos; size_t size; }; struct cmd_debugfs_fileop_response { uint32_t reserved; uint32_t nbytes; }; struct cmd_debugfs_dumpdir_response { uint32_t reserved; uint32_t nbytes; }; struct mrq_debugfs_response { int32_t reserved; union { struct cmd_debugfs_fileop_response fop; struct cmd_debugfs_dumpdir_response dumpdir; }; }; struct cmd_debugfs_fileop_request { uint32_t fnameaddr; uint32_t fnamelen; uint32_t dataaddr; uint32_t datalen; }; struct cmd_debugfs_dumpdir_request { uint32_t dataaddr; uint32_t datalen; }; struct mrq_debugfs_request { uint32_t cmd; union { struct cmd_debugfs_fileop_request fop; struct cmd_debugfs_dumpdir_request dumpdir; }; }; struct cmd_debug_fopen_request { char name[116]; }; struct cmd_debug_fread_request { uint32_t fd; }; struct cmd_debug_fwrite_request { uint32_t fd; uint32_t datalen; char data[108]; }; struct cmd_debug_fclose_request { uint32_t fd; }; struct mrq_debug_request { uint32_t cmd; union { struct cmd_debug_fopen_request fop; struct cmd_debug_fread_request frd; struct cmd_debug_fwrite_request fwr; struct cmd_debug_fclose_request fcl; }; }; struct cmd_debug_fopen_response { uint32_t fd; uint32_t datalen; }; struct cmd_debug_fread_response { uint32_t readlen; char data[116]; }; struct mrq_debug_response { union { struct cmd_debug_fopen_response fop; struct cmd_debug_fread_response frd; }; }; enum tegra_ivc_state { TEGRA_IVC_STATE_ESTABLISHED = 0, TEGRA_IVC_STATE_SYNC = 1, TEGRA_IVC_STATE_ACK = 2, }; struct tegra_ivc_header { union { struct { u32 count; u32 state; }; u8 pad[64]; } tx; union { u32 count; u8 pad[64]; } rx; }; struct of_timer_base { void *base; const char *name; int index; }; struct of_timer_irq { int irq; int index; int percpu; const char *name; unsigned long flags; irq_handler_t handler; }; struct of_timer_clk { struct clk *clk; const char *name; int index; unsigned long rate; unsigned long period; }; struct timer_of { unsigned int flags; struct device_node *np; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct clock_event_device clkevt; struct of_timer_base of_base; struct of_timer_irq of_irq; struct of_timer_clk of_clk; void *private_data; long: 64; long: 64; }; typedef int (*of_init_fn_1_ret)(struct device_node *); struct clocksource_mmio { void *reg; struct clocksource clksrc; }; enum arch_timer_erratum_match_type { ate_match_dt = 0, ate_match_local_cap_id = 1, ate_match_acpi_oem_info = 2, }; struct arch_timer_erratum_workaround { enum arch_timer_erratum_match_type match_type; const void *id; const char *desc; u64 (*read_cntpct_el0)(); u64 (*read_cntvct_el0)(); int (*set_next_event_phys)(unsigned long, struct clock_event_device *); int (*set_next_event_virt)(unsigned long, struct clock_event_device *); bool disable_compat_vdso; }; struct ate_acpi_oem_info { char oem_id[7]; char oem_table_id[9]; u32 oem_revision; }; struct arch_timer { void *base; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct clock_event_device evt; }; enum arch_timer_ppi_nr { ARCH_TIMER_PHYS_SECURE_PPI = 0, ARCH_TIMER_PHYS_NONSECURE_PPI = 1, ARCH_TIMER_VIRT_PPI = 2, ARCH_TIMER_HYP_PPI = 3, 
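/*
 * Tegra BPMP (Boot and Power Management Processor) IVC/mailbox message
 * formats (MRQ ping, debug and debugfs requests), followed by timer
 * helpers: timer_of bundles the DT base, IRQ and clock of a timer node, and
 * the arch_timer_* types cover the ARM architected timer, its per-PPI
 * variants and known errata workarounds.
 */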
ARCH_TIMER_HYP_VIRT_PPI = 4, ARCH_TIMER_MAX_TIMER_PPI = 5, }; enum arch_timer_reg { ARCH_TIMER_REG_CTRL = 0, ARCH_TIMER_REG_CVAL = 1, }; enum arch_timer_spi_nr { ARCH_TIMER_PHYS_SPI = 0, ARCH_TIMER_VIRT_SPI = 1, ARCH_TIMER_MAX_TIMER_SPI = 2, }; typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *, const void *); struct arch_timer_mem_frame { bool valid; phys_addr_t cntbase; size_t size; int phys_irq; int virt_irq; }; struct arch_timer_mem { phys_addr_t cntctlbase; size_t size; struct arch_timer_mem_frame frame[8]; }; struct sp804_clkevt { void *base; void *load; void *load_h; void *value; void *value_h; void *ctrl; void *intclr; void *ris; void *mis; void *bgload; void *bgload_h; unsigned long reload; int width; }; struct sp804_timer { int load; int load_h; int value; int value_h; int ctrl; int intclr; int ris; int mis; int bgload; int bgload_h; int timer_base[2]; int width; }; struct hid_global { unsigned int usage_page; __s32 logical_minimum; __s32 logical_maximum; __s32 physical_minimum; __s32 physical_maximum; __s32 unit_exponent; unsigned int unit; unsigned int report_id; unsigned int report_size; unsigned int report_count; }; struct hid_local { unsigned int usage[12288]; u8 usage_size[12288]; unsigned int collection_index[12288]; unsigned int usage_index; unsigned int usage_minimum; unsigned int delimiter_depth; unsigned int delimiter_branch; }; struct hid_parser { struct hid_global global; struct hid_global global_stack[4]; unsigned int global_stack_ptr; struct hid_local local; unsigned int *collection_stack; unsigned int collection_stack_ptr; unsigned int collection_stack_size; struct hid_device *device; unsigned int scan_flags; u64 android_kabi_reserved1; }; struct hid_item { unsigned int format; __u8 size; __u8 type; __u8 tag; union { __u8 u8; __s8 s8; __u16 u16; __s16 s16; __u32 u32; __s32 s32; __u8 *longdata; } data; }; enum hid_class_request { HID_REQ_GET_REPORT = 1, HID_REQ_GET_IDLE = 2, HID_REQ_GET_PROTOCOL = 3, HID_REQ_SET_REPORT = 9, HID_REQ_SET_IDLE = 10, HID_REQ_SET_PROTOCOL = 11, }; struct hiddev { int minor; int exist; int open; struct mutex existancelock; wait_queue_head_t wait; struct hid_device *hid; struct list_head list; spinlock_t list_lock; bool initialized; }; struct hidraw { unsigned int minor; int exist; int open; wait_queue_head_t wait; struct hid_device *hid; struct device *dev; spinlock_t list_lock; struct list_head list; }; struct hid_dynid { struct list_head list; struct hid_device_id id; }; struct usage_priority { __u32 usage; bool global; unsigned int slot_overwrite; }; typedef bool (*hid_usage_cmp_t)(struct hid_usage *, unsigned int, unsigned int); struct quirks_list_struct { struct hid_device_id hid_bl_item; struct list_head node; }; struct hid_usage_entry { unsigned int page; unsigned int usage; const char *description; }; struct hid_debug_list { struct { union { struct __kfifo kfifo; char *type; const char *const_type; char (*rectype)[0]; char *ptr; const char *ptr_const; }; char buf[0]; } hid_debug_fifo; struct fasync_struct *fasync; struct hid_device *hdev; struct list_head node; struct mutex read_mutex; }; struct hidraw_report { __u8 *value; int len; }; struct hidraw_list { struct hidraw_report buffer[64]; int head; int tail; struct fasync_struct *fasync; struct hidraw *hidraw; struct list_head node; struct mutex read_mutex; }; struct hidraw_devinfo { __u32 bustype; __s16 vendor; __s16 product; }; enum uhid_legacy_event_type { UHID_CREATE = 0, UHID_OUTPUT_EV = 7, UHID_INPUT = 8, UHID_FEATURE = 9, UHID_FEATURE_ANSWER = 10, 
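/*
 * HID core types: hid_parser and its hid_global/hid_local state implement
 * the report-descriptor parser (global items persist across main items,
 * local items reset after each one), while hiddev, hidraw and the debug
 * fifo expose parsed devices to user space.
 */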
}; enum uhid_event_type { __UHID_LEGACY_CREATE = 0, UHID_DESTROY = 1, UHID_START = 2, UHID_STOP = 3, UHID_OPEN = 4, UHID_CLOSE = 5, UHID_OUTPUT = 6, __UHID_LEGACY_OUTPUT_EV = 7, __UHID_LEGACY_INPUT = 8, UHID_GET_REPORT = 9, UHID_GET_REPORT_REPLY = 10, UHID_CREATE2 = 11, UHID_INPUT2 = 12, UHID_SET_REPORT = 13, UHID_SET_REPORT_REPLY = 14, }; enum uhid_dev_flag { UHID_DEV_NUMBERED_FEATURE_REPORTS = 1, UHID_DEV_NUMBERED_OUTPUT_REPORTS = 2, UHID_DEV_NUMBERED_INPUT_REPORTS = 4, }; enum uhid_report_type { UHID_FEATURE_REPORT = 0, UHID_OUTPUT_REPORT = 1, UHID_INPUT_REPORT = 2, }; struct uhid_create_req { __u8 name[128]; __u8 phys[64]; __u8 uniq[64]; __u8 __attribute__((btf_type_tag("user"))) *rd_data; __u16 rd_size; __u16 bus; __u32 vendor; __u32 product; __u32 version; __u32 country; } __attribute__((packed)); struct uhid_input_req { __u8 data[4096]; __u16 size; }; struct uhid_output_req { __u8 data[4096]; __u16 size; __u8 rtype; } __attribute__((packed)); struct uhid_output_ev_req { __u16 type; __u16 code; __s32 value; }; struct uhid_feature_req { __u32 id; __u8 rnum; __u8 rtype; } __attribute__((packed)); struct uhid_get_report_req { __u32 id; __u8 rnum; __u8 rtype; } __attribute__((packed)); struct uhid_feature_answer_req { __u32 id; __u16 err; __u16 size; __u8 data[4096]; }; struct uhid_get_report_reply_req { __u32 id; __u16 err; __u16 size; __u8 data[4096]; }; struct uhid_create2_req { __u8 name[128]; __u8 phys[64]; __u8 uniq[64]; __u16 rd_size; __u16 bus; __u32 vendor; __u32 product; __u32 version; __u32 country; __u8 rd_data[4096]; }; struct uhid_input2_req { __u16 size; __u8 data[4096]; }; struct uhid_set_report_req { __u32 id; __u8 rnum; __u8 rtype; __u16 size; __u8 data[4096]; }; struct uhid_set_report_reply_req { __u32 id; __u16 err; } __attribute__((packed)); struct uhid_start_req { __u64 dev_flags; }; struct uhid_event { __u32 type; union { struct uhid_create_req create; struct uhid_input_req input; struct uhid_output_req output; struct uhid_output_ev_req output_ev; struct uhid_feature_req feature; struct uhid_get_report_req get_report; struct uhid_feature_answer_req feature_answer; struct uhid_get_report_reply_req get_report_reply; struct uhid_create2_req create2; struct uhid_input2_req input2; struct uhid_set_report_req set_report; struct uhid_set_report_reply_req set_report_reply; struct uhid_start_req start; } u; } __attribute__((packed)); struct uhid_device { struct mutex devlock; bool running; __u8 *rd_data; uint rd_size; struct hid_device *hid; struct uhid_event input_buf; wait_queue_head_t waitq; spinlock_t qlock; __u8 head; __u8 tail; struct uhid_event *outq[32]; struct mutex report_lock; wait_queue_head_t report_wait; bool report_running; u32 report_id; u32 report_type; struct uhid_event report_buf; struct work_struct worker; }; struct uhid_create_req_compat { __u8 name[128]; __u8 phys[64]; __u8 uniq[64]; compat_uptr_t rd_data; __u16 rd_size; __u16 bus; __u32 vendor; __u32 product; __u32 version; __u32 country; }; struct apple_key_translation { u16 from; u16 to; u8 flags; }; struct apple_non_apple_keyboard { char *name; }; struct apple_sc_backlight; struct apple_sc { struct hid_device *hdev; unsigned long quirks; unsigned int fn_on; unsigned int fn_found; unsigned long pressed_numlock[12]; struct timer_list battery_timer; struct apple_sc_backlight *backlight; }; struct apple_sc_backlight { struct led_classdev cdev; struct hid_device *hdev; unsigned short backlight_off; unsigned short backlight_on_min; unsigned short backlight_on_max; }; struct apple_backlight_config_report 
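/*
 * uhid: the /dev/uhid interface for creating HID devices from user space.
 * Each read/write transfers one struct uhid_event; UHID_CREATE2 and
 * UHID_INPUT2 supersede the legacy fixed-layout requests kept above for
 * compatibility.
 */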
{ u8 report_id; u8 version; u16 backlight_off; u16 backlight_on_min; u16 backlight_on_max; }; struct apple_backlight_set_report { u8 report_id; u8 version; u16 backlight; u16 rate; }; struct lg_drv_data { unsigned long quirks; void *device_props; }; enum lg_g15_model { LG_G15 = 0, LG_G15_V2 = 1, LG_G510 = 2, LG_G510_USB_AUDIO = 3, LG_Z10 = 4, }; enum lg_g15_led_type { LG_G15_KBD_BRIGHTNESS = 0, LG_G15_LCD_BRIGHTNESS = 1, LG_G15_BRIGHTNESS_MAX = 2, LG_G15_MACRO_PRESET1 = 2, LG_G15_MACRO_PRESET2 = 3, LG_G15_MACRO_PRESET3 = 4, LG_G15_MACRO_RECORD = 5, LG_G15_LED_MAX = 6, }; struct lg_g15_led { struct led_classdev cdev; enum led_brightness brightness; enum lg_g15_led_type led; u8 red; u8 green; u8 blue; }; struct lg_g15_data { u8 transfer_buf[20]; struct mutex mutex; struct work_struct work; struct input_dev *input; struct hid_device *hdev; enum lg_g15_model model; struct lg_g15_led leds[6]; bool game_mode_enabled; }; enum recvr_type { recvr_type_dj = 0, recvr_type_hidpp = 1, recvr_type_gaming_hidpp = 2, recvr_type_mouse_only = 3, recvr_type_27mhz = 4, recvr_type_bluetooth = 5, recvr_type_dinovo = 6, }; struct dj_device; struct dj_receiver_dev { struct hid_device *mouse; struct hid_device *keyboard; struct hid_device *hidpp; struct dj_device *paired_dj_devices[8]; struct list_head list; struct kref kref; struct work_struct work; struct kfifo notif_fifo; unsigned long last_query; bool ready; enum recvr_type type; unsigned int unnumbered_application; spinlock_t lock; }; struct dj_device { struct hid_device *hdev; struct dj_receiver_dev *dj_receiver_dev; u64 reports_supported; u8 device_index; }; struct dj_report { u8 report_id; u8 device_index; u8 report_type; u8 report_params[12]; }; struct hidpp_event { u8 report_id; u8 device_index; u8 sub_id; u8 params[17]; }; struct dj_workitem { u8 type; u8 device_index; u8 device_type; u8 quad_id_msb; u8 quad_id_lsb; u64 reports_supported; }; enum { POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN = 0, POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL = 1, POWER_SUPPLY_CAPACITY_LEVEL_LOW = 2, POWER_SUPPLY_CAPACITY_LEVEL_NORMAL = 3, POWER_SUPPLY_CAPACITY_LEVEL_HIGH = 4, POWER_SUPPLY_CAPACITY_LEVEL_FULL = 5, }; enum { POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0, POWER_SUPPLY_CHARGE_TYPE_NONE = 1, POWER_SUPPLY_CHARGE_TYPE_TRICKLE = 2, POWER_SUPPLY_CHARGE_TYPE_FAST = 3, POWER_SUPPLY_CHARGE_TYPE_STANDARD = 4, POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE = 5, POWER_SUPPLY_CHARGE_TYPE_CUSTOM = 6, POWER_SUPPLY_CHARGE_TYPE_LONGLIFE = 7, POWER_SUPPLY_CHARGE_TYPE_BYPASS = 8, POWER_SUPPLY_CHARGE_TYPE_TAPER_EXT = 50, }; struct hidpp_battery { u8 feature_index; u8 solar_feature_index; u8 voltage_feature_index; u8 adc_measurement_feature_index; struct power_supply_desc desc; struct power_supply *ps; char name[64]; int status; int capacity; int level; int voltage; int charge_type; bool online; u8 supported_levels_1004; }; struct hidpp_scroll_counter { int wheel_multiplier; int remainder; int direction; unsigned long long last_time; }; struct hidpp_device { struct hid_device *hid_dev; struct input_dev *input; struct mutex send_mutex; void *send_receive_buf; char *name; wait_queue_head_t wait; int very_long_report_length; bool answer_available; u8 protocol_major; u8 protocol_minor; void *private_data; struct work_struct work; struct kfifo delayed_work_fifo; atomic_t connected; struct input_dev *delayed_input; unsigned long quirks; unsigned long capabilities; u8 supported_reports; struct hidpp_battery battery; struct hidpp_scroll_counter vertical_wheel_counter; u8 wireless_feature_index; }; struct 
hidpp_ff_private_data; struct hidpp_ff_work_data { struct work_struct work; struct hidpp_ff_private_data *data; int effect_id; u8 command; u8 params[20]; u8 size; }; struct hidpp_ff_private_data { struct hidpp_device *hidpp; u8 feature_index; u8 version; u16 gain; s16 range; u8 slot_autocenter; u8 num_effects; int *effect_ids; struct workqueue_struct *wq; atomic_t workqueue_size; }; struct fap { u8 feature_index; u8 funcindex_clientid; u8 params[60]; }; struct rap { u8 sub_id; u8 reg_address; u8 params[60]; }; struct hidpp_report { u8 report_id; u8 device_index; union { struct fap fap; struct rap rap; u8 rawbytes[62]; }; }; struct k400_private_data { u8 feature_index; }; struct wtp_data { u16 x_size; u16 y_size; u8 finger_count; u8 mt_feature_index; u8 button_feature_index; u8 maxcontacts; bool flip_y; unsigned int resolution; }; struct hidpp_touchpad_raw_xy_finger { u8 contact_type; u8 contact_status; u16 x; u16 y; u8 z; u8 area; u8 finger_id; }; struct hidpp_touchpad_raw_xy { u16 timestamp; struct hidpp_touchpad_raw_xy_finger fingers[2]; u8 spurious_flag; u8 end_of_frame; u8 finger_count; u8 button; }; struct hidpp_touchpad_fw_items { uint8_t presence; uint8_t desired_state; uint8_t state; uint8_t persistent; }; struct hidpp_touchpad_raw_info { u16 x_size; u16 y_size; u8 z_range; u8 area_range; u8 timestamp_unit; u8 maxcontacts; u8 origin; u16 res; }; struct magicmouse_sc { struct input_dev *input; unsigned long quirks; int ntouches; int scroll_accel; unsigned long scroll_jiffies; struct { short x; short y; short scroll_x; short scroll_y; short scroll_x_hr; short scroll_y_hr; u8 size; bool scroll_x_active; bool scroll_y_active; } touches[16]; int tracking_ids[16]; struct hid_device *hdev; struct delayed_work work; struct timer_list battery_timer; }; enum { MAGNITUDE_STRONG = 2, MAGNITUDE_WEAK = 3, MAGNITUDE_NUM = 4, }; struct ms_data { unsigned long quirks; struct hid_device *hdev; struct work_struct ff_worker; __u8 strong; __u8 weak; void *output_report_dmabuf; }; struct xb1s_ff_report { __u8 report_id; __u8 enable; __u8 magnitude[4]; __u8 duration_10ms; __u8 start_delay_10ms; __u8 loop_count; }; struct mt_class { __s32 name; __s32 quirks; __s32 sn_move; __s32 sn_width; __s32 sn_height; __s32 sn_pressure; __u8 maxcontacts; bool is_indirect; bool export_all_inputs; }; enum latency_mode { HID_LATENCY_NORMAL = 0, HID_LATENCY_HIGH = 1, }; struct mt_device { struct mt_class mtclass; struct timer_list release_timer; struct hid_device *hdev; unsigned long mt_io_flags; __u8 inputmode_value; __u8 maxcontacts; bool is_buttonpad; bool serial_maybe; struct list_head applications; struct list_head reports; }; struct mt_application { struct list_head list; unsigned int application; unsigned int report_id; struct list_head mt_usages; __s32 quirks; __s32 *scantime; __s32 scantime_logical_max; __s32 *raw_cc; int left_button_state; unsigned int mt_flags; unsigned long *pending_palm_slots; __u8 num_received; __u8 num_expected; __u8 buttons_count; __u8 touches_by_report; __s32 dev_time; unsigned long jiffies; int timestamp; int prev_scantime; bool have_contact_count; }; struct mt_report_data { struct list_head list; struct hid_report *report; struct mt_application *application; bool is_mt_collection; }; struct mt_usages { struct list_head list; __s32 *x; __s32 *y; __s32 *cx; __s32 *cy; __s32 *p; __s32 *w; __s32 *h; __s32 *a; __s32 *contactid; bool *tip_state; bool *inrange_state; bool *confidence_state; }; enum joycon_ctlr_state { JOYCON_CTLR_STATE_INIT = 0, JOYCON_CTLR_STATE_READ = 1, 
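/*
 * Per-driver HID state: Apple keyboards, Logitech G15/DJ/HID++ receivers,
 * Magic Mouse, Microsoft controllers, the hid-multitouch contact-tracking
 * structures (mt_application/mt_usages point into parsed report fields),
 * and the Nintendo Joy-Con/Pro controller driver that follows.
 */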
JOYCON_CTLR_STATE_REMOVED = 2, }; enum joycon_ctlr_type { JOYCON_CTLR_TYPE_JCL = 1, JOYCON_CTLR_TYPE_JCR = 2, JOYCON_CTLR_TYPE_PRO = 3, }; enum joycon_msg_type { JOYCON_MSG_TYPE_NONE = 0, JOYCON_MSG_TYPE_USB = 1, JOYCON_MSG_TYPE_SUBCMD = 2, }; struct joycon_stick_cal { s32 max; s32 min; s32 center; }; struct joycon_imu_cal { s16 offset[3]; s16 scale[3]; }; struct joycon_ctlr { struct hid_device *hdev; struct input_dev *input; struct led_classdev leds[4]; struct led_classdev home_led; enum joycon_ctlr_state ctlr_state; spinlock_t lock; u8 mac_addr[6]; char *mac_addr_str; enum joycon_ctlr_type ctlr_type; enum joycon_msg_type msg_type; u8 subcmd_num; struct mutex output_mutex; u8 input_buf[84]; wait_queue_head_t wait; bool received_resp; u8 usb_ack_match; u8 subcmd_ack_match; bool received_input_report; unsigned int last_input_report_msecs; unsigned int last_subcmd_sent_msecs; unsigned int consecutive_valid_report_deltas; struct joycon_stick_cal left_stick_cal_x; struct joycon_stick_cal left_stick_cal_y; struct joycon_stick_cal right_stick_cal_x; struct joycon_stick_cal right_stick_cal_y; struct joycon_imu_cal accel_cal; struct joycon_imu_cal gyro_cal; s32 imu_cal_accel_divisor[3]; s32 imu_cal_gyro_divisor[3]; struct power_supply *battery; struct power_supply_desc battery_desc; u8 battery_capacity; bool battery_charging; bool host_powered; u8 rumble_data[64]; int rumble_queue_head; int rumble_queue_tail; struct workqueue_struct *rumble_queue; struct work_struct rumble_worker; unsigned int rumble_msecs; u16 rumble_ll_freq; u16 rumble_lh_freq; u16 rumble_rl_freq; u16 rumble_rh_freq; unsigned short rumble_zero_countdown; struct input_dev *imu_input; bool imu_first_packet_received; unsigned int imu_timestamp_us; unsigned int imu_last_pkt_ms; unsigned int imu_delta_samples_count; unsigned int imu_delta_samples_sum; unsigned int imu_avg_delta_ms; }; struct joycon_subcmd_request { u8 output_id; u8 packet_num; u8 rumble_data[8]; u8 subcmd_id; u8 data[0]; }; struct joycon_subcmd_reply { u8 ack; u8 id; u8 data[0]; }; struct joycon_input_report { u8 id; u8 timer; u8 bat_con; u8 button_status[3]; u8 left_stick[3]; u8 right_stick[3]; u8 vibrator_report; union { struct joycon_subcmd_reply subcmd_reply; u8 imu_raw_bytes[36]; }; }; struct joycon_imu_data { s16 accel_x; s16 accel_y; s16 accel_z; s16 gyro_x; s16 gyro_y; s16 gyro_z; }; struct joycon_rumble_output { u8 output_id; u8 packet_num; u8 rumble_data[8]; }; struct pcmidi_snd; struct pcmidi_sustain { unsigned long in_use; struct pcmidi_snd *pm; struct timer_list timer; unsigned char status; unsigned char note; unsigned char velocity; }; struct pk_device; struct pcmidi_snd { struct pk_device *pk; unsigned short ifnum; struct hid_report *pcmidi_report6; struct input_dev *input_ep82; unsigned short midi_mode; unsigned short midi_sustain_mode; unsigned short midi_sustain; unsigned short midi_channel; short midi_octave; struct pcmidi_sustain sustained_notes[32]; unsigned short fn_state; unsigned short last_key[24]; spinlock_t rawmidi_in_lock; struct snd_card *card; struct snd_rawmidi *rwmidi; struct snd_rawmidi_substream *in_substream; struct snd_rawmidi_substream *out_substream; unsigned long in_triggered; unsigned long out_active; }; struct pk_device { unsigned long quirks; struct hid_device *hdev; struct pcmidi_snd *pm; }; struct picolcd_pending; struct picolcd_data { struct hid_device *hdev; struct dentry *debug_reset; struct dentry *debug_eeprom; struct dentry *debug_flash; struct mutex mutex_flash; int addr_sz; u8 version[2]; unsigned short 
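/*
 * Illustrative sketch: mapping a raw Joy-Con stick sample to a signed axis
 * value with the struct joycon_stick_cal defined above.  The +/-32767 output
 * range and the piecewise-linear scaling around the calibrated center are
 * assumptions about how such calibration data is typically applied; the
 * function name is hypothetical.
 *
 *   static s32 joycon_map_stick_val(const struct joycon_stick_cal *cal, s32 val)
 *   {
 *           s32 out;
 *
 *           if (val > cal->center)
 *                   out = (val - cal->center) * 32767 / (cal->max - cal->center);
 *           else
 *                   out = (val - cal->center) * 32767 / (cal->center - cal->min);
 *
 *           if (out > 32767)
 *                   out = 32767;
 *           if (out < -32767)
 *                   out = -32767;
 *           return out;
 *   }
 */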
opmode_delay; u8 pressed_keys[2]; struct input_dev *input_keys; unsigned short keycode[17]; spinlock_t lock; struct mutex mutex; struct picolcd_pending *pending; int status; }; struct picolcd_pending { struct hid_report *out_report; struct hid_report *in_report; struct completion ready; int raw_size; u8 raw_data[64]; }; struct plt_drv_data { unsigned long device_type; unsigned long last_volume_key_ts; u32 quirks; }; struct ps_led_info { const char *name; const char *color; int max_brightness; enum led_brightness (*brightness_get)(struct led_classdev *); int (*brightness_set)(struct led_classdev *, enum led_brightness); int (*blink_set)(struct led_classdev *, unsigned long *, unsigned long *); }; enum dualshock4_dongle_state { DONGLE_DISCONNECTED = 0, DONGLE_CALIBRATING = 1, DONGLE_CONNECTED = 2, DONGLE_DISABLED = 3, }; struct ps_device { struct list_head list; struct hid_device *hdev; spinlock_t lock; uint32_t player_id; struct power_supply_desc battery_desc; struct power_supply *battery; uint8_t battery_capacity; int battery_status; const char *input_dev_name; uint8_t mac_address[6]; uint32_t hw_version; uint32_t fw_version; int (*parse_report)(struct ps_device *, struct hid_report *, u8 *, int); void (*remove)(struct ps_device *); }; struct ps_calibration_data { int abs_code; short bias; int sens_numer; int sens_denom; }; struct dualshock4 { struct ps_device base; struct input_dev *gamepad; struct input_dev *sensors; struct input_dev *touchpad; struct ps_calibration_data accel_calib_data[3]; struct ps_calibration_data gyro_calib_data[3]; enum dualshock4_dongle_state dongle_state; struct work_struct dongle_hotplug_worker; bool sensor_timestamp_initialized; uint32_t prev_sensor_timestamp; uint32_t sensor_timestamp_us; bool update_bt_poll_interval; uint8_t bt_poll_interval; bool update_rumble; uint8_t motor_left; uint8_t motor_right; bool update_lightbar; bool update_lightbar_blink; bool lightbar_enabled; uint8_t lightbar_red; uint8_t lightbar_green; uint8_t lightbar_blue; uint8_t lightbar_blink_on; uint8_t lightbar_blink_off; struct led_classdev lightbar_leds[4]; struct work_struct output_worker; bool output_worker_initialized; void *output_report_dmabuf; }; struct dualshock4_input_report_common { uint8_t x; uint8_t y; uint8_t rx; uint8_t ry; uint8_t buttons[3]; uint8_t z; uint8_t rz; __le16 sensor_timestamp; uint8_t sensor_temperature; __le16 gyro[3]; __le16 accel[3]; uint8_t reserved2[5]; uint8_t status[2]; uint8_t reserved3; } __attribute__((packed)); struct dualshock4_touch_point { uint8_t contact; uint8_t x_lo; uint8_t x_hi: 4; uint8_t y_lo: 4; uint8_t y_hi; }; struct dualshock4_touch_report { uint8_t timestamp; struct dualshock4_touch_point points[2]; }; struct dualshock4_input_report_usb { uint8_t report_id; struct dualshock4_input_report_common common; uint8_t num_touch_reports; struct dualshock4_touch_report touch_reports[3]; uint8_t reserved[3]; }; struct dualshock4_input_report_bt { uint8_t report_id; uint8_t reserved[2]; struct dualshock4_input_report_common common; uint8_t num_touch_reports; struct dualshock4_touch_report touch_reports[4]; uint8_t reserved2[2]; __le32 crc32; } __attribute__((packed)); struct dualsense { struct ps_device base; struct input_dev *gamepad; struct input_dev *sensors; struct input_dev *touchpad; uint16_t update_version; struct ps_calibration_data accel_calib_data[3]; struct ps_calibration_data gyro_calib_data[3]; bool sensor_timestamp_initialized; uint32_t prev_sensor_timestamp; uint32_t sensor_timestamp_us; bool use_vibration_v2; bool 
update_rumble; uint8_t motor_left; uint8_t motor_right; struct led_classdev_mc lightbar; bool update_lightbar; uint8_t lightbar_red; uint8_t lightbar_green; uint8_t lightbar_blue; bool update_mic_mute; bool mic_muted; bool last_btn_mic_state; bool update_player_leds; uint8_t player_leds_state; struct led_classdev player_leds[5]; struct work_struct output_worker; bool output_worker_initialized; void *output_report_dmabuf; uint8_t output_seq; }; struct dualsense_touch_point { uint8_t contact; uint8_t x_lo; uint8_t x_hi: 4; uint8_t y_lo: 4; uint8_t y_hi; }; struct dualsense_input_report { uint8_t x; uint8_t y; uint8_t rx; uint8_t ry; uint8_t z; uint8_t rz; uint8_t seq_number; uint8_t buttons[4]; uint8_t reserved[4]; __le16 gyro[3]; __le16 accel[3]; __le32 sensor_timestamp; uint8_t reserved2; struct dualsense_touch_point points[2]; uint8_t reserved3[12]; uint8_t status; uint8_t reserved4[10]; } __attribute__((packed)); struct dualshock4_output_report_bt; struct dualshock4_output_report_usb; struct dualshock4_output_report_common; struct dualshock4_output_report { uint8_t *data; uint8_t len; struct dualshock4_output_report_bt *bt; struct dualshock4_output_report_usb *usb; struct dualshock4_output_report_common *common; }; struct dualshock4_output_report_common { uint8_t valid_flag0; uint8_t valid_flag1; uint8_t reserved; uint8_t motor_right; uint8_t motor_left; uint8_t lightbar_red; uint8_t lightbar_green; uint8_t lightbar_blue; uint8_t lightbar_blink_on; uint8_t lightbar_blink_off; }; struct dualshock4_output_report_bt { uint8_t report_id; uint8_t hw_control; uint8_t audio_control; struct dualshock4_output_report_common common; uint8_t reserved[61]; __le32 crc32; } __attribute__((packed)); struct dualshock4_output_report_usb { uint8_t report_id; struct dualshock4_output_report_common common; uint8_t reserved[21]; }; struct dualsense_output_report_bt; struct dualsense_output_report_usb; struct dualsense_output_report_common; struct dualsense_output_report { uint8_t *data; uint8_t len; struct dualsense_output_report_bt *bt; struct dualsense_output_report_usb *usb; struct dualsense_output_report_common *common; }; struct dualsense_output_report_common { uint8_t valid_flag0; uint8_t valid_flag1; uint8_t motor_right; uint8_t motor_left; uint8_t reserved[4]; uint8_t mute_button_led; uint8_t power_save_control; uint8_t reserved2[28]; uint8_t valid_flag2; uint8_t reserved3[2]; uint8_t lightbar_setup; uint8_t led_brightness; uint8_t player_leds; uint8_t lightbar_red; uint8_t lightbar_green; uint8_t lightbar_blue; }; struct dualsense_output_report_bt { uint8_t report_id; uint8_t seq_tag; uint8_t tag; struct dualsense_output_report_common common; uint8_t reserved[24]; __le32 crc32; } __attribute__((packed)); struct dualsense_output_report_usb { uint8_t report_id; struct dualsense_output_report_common common; uint8_t reserved[15]; }; struct roccat_report { uint8_t *value; }; struct roccat_device { unsigned int minor; int report_size; int open; int exist; wait_queue_head_t wait; struct device *dev; struct hid_device *hid; struct list_head readers; struct mutex readers_lock; struct roccat_report cbuf[16]; int cbuf_end; struct mutex cbuf_lock; }; struct roccat_reader { struct list_head node; struct roccat_device *device; int cbuf_start; }; enum roccat_common2_commands { ROCCAT_COMMON_COMMAND_CONTROL = 4, }; enum roccat_common2_control_states { ROCCAT_COMMON_CONTROL_STATUS_CRITICAL = 0, ROCCAT_COMMON_CONTROL_STATUS_OK = 1, ROCCAT_COMMON_CONTROL_STATUS_INVALID = 2, ROCCAT_COMMON_CONTROL_STATUS_BUSY = 3, 
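/*
 * Illustrative sketch: reassembling the 12-bit touchpad coordinates packed
 * into struct dualshock4_touch_point / struct dualsense_touch_point above.
 * Per the bitfield widths, x_lo carries the low 8 bits of X and x_hi its
 * high nibble, while y_lo carries the low nibble of Y and y_hi its high
 * 8 bits.  The helper name is hypothetical.
 *
 *   static void ds_touch_point_xy(const struct dualsense_touch_point *p,
 *                                 unsigned int *x, unsigned int *y)
 *   {
 *           *x = ((unsigned int)p->x_hi << 8) | p->x_lo;
 *           *y = ((unsigned int)p->y_hi << 4) | p->y_lo;
 *           // bit 7 of p->contact typically flags an inactive slot and the
 *           // low bits hold a contact id (assumption).
 *   }
 */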
ROCCAT_COMMON_CONTROL_STATUS_CRITICAL_NEW = 4, }; struct roccat_common2_control { uint8_t command; uint8_t value; uint8_t request; }; struct roccat_common2_device { int roccat_claimed; int chrdev_minor; struct mutex lock; }; enum arvo_commands { ARVO_COMMAND_MODE_KEY = 3, ARVO_COMMAND_BUTTON = 4, ARVO_COMMAND_INFO = 5, ARVO_COMMAND_KEY_MASK = 6, ARVO_COMMAND_ACTUAL_PROFILE = 7, }; enum arvo_special_report_event_masks { ARVO_SPECIAL_REPORT_EVENT_MASK_ACTION = 240, ARVO_SPECIAL_REPORT_EVENT_MASK_BUTTON = 15, }; enum arvo_special_report_events { ARVO_SPECIAL_REPORT_EVENT_ACTION_PRESS = 16, ARVO_SPECIAL_REPORT_EVENT_ACTION_RELEASE = 0, }; enum arvo_roccat_report_action { ARVO_ROCCAT_REPORT_ACTION_RELEASE = 0, ARVO_ROCCAT_REPORT_ACTION_PRESS = 1, }; struct arvo_special_report { uint8_t unknown1; uint8_t event; uint8_t unknown2; }; struct arvo_device { int roccat_claimed; int chrdev_minor; struct mutex arvo_lock; int actual_profile; }; struct arvo_actual_profile { uint8_t command; uint8_t actual_profile; }; struct arvo_roccat_report { uint8_t profile; uint8_t button; uint8_t action; }; struct arvo_mode_key { uint8_t command; uint8_t state; }; struct arvo_key_mask { uint8_t command; uint8_t key_mask; }; enum { ISKU_PROFILE_NUM = 5, ISKU_USB_INTERFACE_PROTOCOL = 0, }; enum isku_commands { ISKU_COMMAND_CONTROL = 4, ISKU_COMMAND_ACTUAL_PROFILE = 5, ISKU_COMMAND_KEY_MASK = 7, ISKU_COMMAND_KEYS_FUNCTION = 8, ISKU_COMMAND_KEYS_EASYZONE = 9, ISKU_COMMAND_KEYS_MEDIA = 10, ISKU_COMMAND_KEYS_THUMBSTER = 11, ISKU_COMMAND_KEYS_MACRO = 13, ISKU_COMMAND_MACRO = 14, ISKU_COMMAND_INFO = 15, ISKU_COMMAND_LIGHT = 16, ISKU_COMMAND_RESET = 17, ISKU_COMMAND_KEYS_CAPSLOCK = 19, ISKU_COMMAND_LAST_SET = 20, ISKU_COMMAND_15 = 21, ISKU_COMMAND_TALK = 22, ISKU_COMMAND_TALKFX = 23, ISKU_COMMAND_FIRMWARE_WRITE = 27, ISKU_COMMAND_FIRMWARE_WRITE_CONTROL = 28, }; enum isku_report_numbers { ISKU_REPORT_NUMBER_BUTTON = 3, }; enum isku_report_button_events { ISKU_REPORT_BUTTON_EVENT_PROFILE = 2, }; enum { ISKU_SIZE_CONTROL = 3, ISKU_SIZE_INFO = 6, ISKU_SIZE_KEY_MASK = 6, ISKU_SIZE_KEYS_FUNCTION = 41, ISKU_SIZE_KEYS_EASYZONE = 65, ISKU_SIZE_KEYS_MEDIA = 29, ISKU_SIZE_KEYS_THUMBSTER = 23, ISKU_SIZE_KEYS_MACRO = 35, ISKU_SIZE_KEYS_CAPSLOCK = 6, ISKU_SIZE_LAST_SET = 20, ISKU_SIZE_LIGHT = 16, ISKU_SIZE_MACRO = 2083, ISKU_SIZE_RESET = 3, ISKU_SIZE_TALK = 16, ISKU_SIZE_TALKFX = 16, }; struct isku_report_button { uint8_t number; uint8_t zero; uint8_t event; uint8_t data1; uint8_t data2; }; struct isku_device { int roccat_claimed; int chrdev_minor; struct mutex isku_lock; int actual_profile; }; struct isku_actual_profile { uint8_t command; uint8_t size; uint8_t actual_profile; }; struct isku_roccat_report { uint8_t event; uint8_t data1; uint8_t data2; uint8_t profile; }; enum kone_commands { kone_command_profile = 90, kone_command_settings = 346, kone_command_firmware_version = 602, kone_command_weight = 1114, kone_command_calibrate = 1370, kone_command_confirm_write = 1626, kone_command_firmware = 3674, }; enum kone_mouse_events { kone_mouse_event_osd_dpi = 160, kone_mouse_event_osd_profile = 176, kone_mouse_event_calibration = 192, kone_mouse_event_call_overlong_macro = 224, kone_mouse_event_multimedia = 225, kone_mouse_event_switch_dpi = 240, kone_mouse_event_switch_profile = 241, }; enum kone_keystroke_actions { kone_keystroke_action_press = 0, kone_keystroke_action_release = 1, }; struct kone_mouse_event { uint8_t report_number; uint8_t button; uint16_t x; uint16_t y; uint8_t wheel; union { struct { uint8_t tilt; uint8_t unknown; 
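/*
 * Illustrative sketch: decoding a struct arvo_special_report into a
 * struct arvo_roccat_report using the masks defined above.  The high nibble
 * of the event byte carries the press/release action and the low nibble the
 * button number; profile handling is omitted and the function name is
 * hypothetical.
 *
 *   static void arvo_decode_special_report(const struct arvo_special_report *in,
 *                                          struct arvo_roccat_report *out)
 *   {
 *           out->button = in->event & ARVO_SPECIAL_REPORT_EVENT_MASK_BUTTON;
 *           if ((in->event & ARVO_SPECIAL_REPORT_EVENT_MASK_ACTION) ==
 *               ARVO_SPECIAL_REPORT_EVENT_ACTION_PRESS)
 *                   out->action = ARVO_ROCCAT_REPORT_ACTION_PRESS;
 *           else
 *                   out->action = ARVO_ROCCAT_REPORT_ACTION_RELEASE;
 *   }
 */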
uint8_t event; uint8_t value; uint8_t macro_key; }; struct { uint8_t tilt; uint8_t unknown; uint8_t event; uint8_t value; uint8_t macro_key; } wipe; }; }; struct kone_settings { uint16_t size; uint8_t startup_profile; uint8_t unknown1; uint8_t tcu; uint8_t unknown2[23]; uint8_t calibration_data[4]; uint8_t unknown3[2]; uint16_t checksum; }; struct kone_light_info { uint8_t number; uint8_t mod; uint8_t red; uint8_t green; uint8_t blue; }; struct kone_keystroke { uint8_t key; uint8_t action; uint16_t period; }; struct kone_button_info { uint8_t number; uint8_t type; uint8_t macro_type; uint8_t macro_set_name[16]; uint8_t macro_name[16]; uint8_t count; struct kone_keystroke keystrokes[20]; }; struct kone_profile { uint16_t size; uint16_t unused; uint8_t profile; uint16_t main_sensitivity; uint8_t xy_sensitivity_enabled; uint16_t x_sensitivity; uint16_t y_sensitivity; uint8_t dpi_rate; uint8_t startup_dpi; uint8_t polling_rate; uint8_t dcu_flag; uint8_t light_effect_1; uint8_t light_effect_2; uint8_t light_effect_3; uint8_t light_effect_speed; struct kone_light_info light_infos[5]; struct kone_button_info button_infos[8]; uint16_t checksum; } __attribute__((packed)); struct kone_device { int actual_profile; int actual_dpi; struct kone_mouse_event last_mouse_event; long: 0; struct mutex kone_lock; struct kone_profile profiles[5]; struct kone_settings settings; long: 0; int firmware_version; int roccat_claimed; int chrdev_minor; long: 0; } __attribute__((packed)); struct kone_roccat_report { uint8_t event; uint8_t value; uint8_t key; }; enum koneplus_commands { KONEPLUS_COMMAND_ACTUAL_PROFILE = 5, KONEPLUS_COMMAND_CONTROL = 4, KONEPLUS_COMMAND_PROFILE_SETTINGS = 6, KONEPLUS_COMMAND_PROFILE_BUTTONS = 7, KONEPLUS_COMMAND_MACRO = 8, KONEPLUS_COMMAND_INFO = 9, KONEPLUS_COMMAND_TCU = 12, KONEPLUS_COMMAND_TCU_IMAGE = 12, KONEPLUS_COMMAND_E = 14, KONEPLUS_COMMAND_SENSOR = 15, KONEPLUS_COMMAND_TALK = 16, KONEPLUS_COMMAND_FIRMWARE_WRITE = 27, KONEPLUS_COMMAND_FIRMWARE_WRITE_CONTROL = 28, }; enum { KONEPLUS_SIZE_ACTUAL_PROFILE = 3, KONEPLUS_SIZE_CONTROL = 3, KONEPLUS_SIZE_FIRMWARE_WRITE = 1026, KONEPLUS_SIZE_INFO = 6, KONEPLUS_SIZE_MACRO = 2082, KONEPLUS_SIZE_PROFILE_SETTINGS = 43, KONEPLUS_SIZE_PROFILE_BUTTONS = 77, KONEPLUS_SIZE_SENSOR = 6, KONEPLUS_SIZE_TALK = 16, KONEPLUS_SIZE_TCU = 4, KONEPLUS_SIZE_TCU_IMAGE = 1028, }; enum koneplus_mouse_report_numbers { KONEPLUS_MOUSE_REPORT_NUMBER_HID = 1, KONEPLUS_MOUSE_REPORT_NUMBER_AUDIO = 2, KONEPLUS_MOUSE_REPORT_NUMBER_BUTTON = 3, }; enum koneplus_mouse_report_button_types { KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE = 32, KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_QUICKLAUNCH = 96, KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_TIMER = 128, KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI = 176, KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_SENSITIVITY = 192, KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_MULTIMEDIA = 240, KONEPLUS_MOUSE_REPORT_TALK = 255, }; enum koneplus_mouse_report_button_action { KONEPLUS_MOUSE_REPORT_BUTTON_ACTION_PRESS = 0, KONEPLUS_MOUSE_REPORT_BUTTON_ACTION_RELEASE = 1, }; enum koneplus_control_requests { KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS = 128, KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS = 144, }; struct koneplus_mouse_report_button { uint8_t report_number; uint8_t zero1; uint8_t type; uint8_t data1; uint8_t data2; uint8_t zero2; uint8_t unknown[2]; }; struct koneplus_device { int actual_profile; int roccat_claimed; int chrdev_minor; struct mutex koneplus_lock; }; struct koneplus_actual_profile { uint8_t command; uint8_t size; uint8_t actual_profile; }; struct koneplus_roccat_report { 
uint8_t type; uint8_t data1; uint8_t data2; uint8_t profile; }; struct koneplus_info { uint8_t command; uint8_t size; uint8_t firmware_version; uint8_t unknown[3]; }; enum { KONEPURE_MOUSE_REPORT_NUMBER_BUTTON = 3, }; enum kovaplus_control_requests { KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS = 16, KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS = 32, }; enum kovaplus_commands { KOVAPLUS_COMMAND_ACTUAL_PROFILE = 5, KOVAPLUS_COMMAND_CONTROL = 4, KOVAPLUS_COMMAND_PROFILE_SETTINGS = 6, KOVAPLUS_COMMAND_PROFILE_BUTTONS = 7, KOVAPLUS_COMMAND_INFO = 9, KOVAPLUS_COMMAND_A = 10, }; enum { KOVAPLUS_SIZE_CONTROL = 3, KOVAPLUS_SIZE_INFO = 6, KOVAPLUS_SIZE_PROFILE_SETTINGS = 16, KOVAPLUS_SIZE_PROFILE_BUTTONS = 23, }; enum kovaplus_mouse_report_numbers { KOVAPLUS_MOUSE_REPORT_NUMBER_MOUSE = 1, KOVAPLUS_MOUSE_REPORT_NUMBER_AUDIO = 2, KOVAPLUS_MOUSE_REPORT_NUMBER_BUTTON = 3, KOVAPLUS_MOUSE_REPORT_NUMBER_KBD = 4, }; enum kovaplus_mouse_report_button_types { KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_1 = 32, KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_2 = 48, KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_MACRO = 64, KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SHORTCUT = 80, KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_QUICKLAUNCH = 96, KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_TIMER = 128, KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI = 176, KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SENSITIVITY = 192, KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_MULTIMEDIA = 240, }; struct kovaplus_mouse_report_button { uint8_t report_number; uint8_t unknown1; uint8_t type; uint8_t data1; uint8_t data2; }; struct kovaplus_profile_settings { uint8_t command; uint8_t size; uint8_t profile_index; uint8_t unknown1; uint8_t sensitivity_x; uint8_t sensitivity_y; uint8_t cpi_levels_enabled; uint8_t cpi_startup_level; uint8_t data[8]; }; struct kovaplus_profile_buttons { uint8_t command; uint8_t size; uint8_t profile_index; uint8_t data[20]; }; struct kovaplus_device { int actual_profile; int actual_cpi; int actual_x_sensitivity; int actual_y_sensitivity; int roccat_claimed; int chrdev_minor; struct mutex kovaplus_lock; struct kovaplus_profile_settings profile_settings[5]; struct kovaplus_profile_buttons profile_buttons[5]; }; struct kovaplus_actual_profile { uint8_t command; uint8_t size; uint8_t actual_profile; }; struct kovaplus_roccat_report { uint8_t type; uint8_t profile; uint8_t button; uint8_t data1; uint8_t data2; }; struct kovaplus_info { uint8_t command; uint8_t size; uint8_t firmware_version; uint8_t unknown[3]; }; enum { LUA_SIZE_CONTROL = 8, }; enum lua_commands { LUA_COMMAND_CONTROL = 3, }; struct lua_device { struct mutex lua_lock; }; enum pyra_commands { PYRA_COMMAND_CONTROL = 4, PYRA_COMMAND_SETTINGS = 5, PYRA_COMMAND_PROFILE_SETTINGS = 6, PYRA_COMMAND_PROFILE_BUTTONS = 7, PYRA_COMMAND_INFO = 9, PYRA_COMMAND_B = 11, }; enum { PYRA_SIZE_CONTROL = 3, PYRA_SIZE_INFO = 6, PYRA_SIZE_PROFILE_SETTINGS = 13, PYRA_SIZE_PROFILE_BUTTONS = 19, PYRA_SIZE_SETTINGS = 3, }; enum pyra_control_requests { PYRA_CONTROL_REQUEST_PROFILE_SETTINGS = 16, PYRA_CONTROL_REQUEST_PROFILE_BUTTONS = 32, }; enum pyra_mouse_report_numbers { PYRA_MOUSE_REPORT_NUMBER_HID = 1, PYRA_MOUSE_REPORT_NUMBER_AUDIO = 2, PYRA_MOUSE_REPORT_NUMBER_BUTTON = 3, }; enum pyra_mouse_event_button_types { PYRA_MOUSE_EVENT_BUTTON_TYPE_TILT = 16, PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_1 = 32, PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_2 = 48, PYRA_MOUSE_EVENT_BUTTON_TYPE_MACRO = 64, PYRA_MOUSE_EVENT_BUTTON_TYPE_SHORTCUT = 80, PYRA_MOUSE_EVENT_BUTTON_TYPE_QUICKLAUNCH = 96, PYRA_MOUSE_EVENT_BUTTON_TYPE_CPI = 176, PYRA_MOUSE_EVENT_BUTTON_TYPE_SENSITIVITY = 
192, PYRA_MOUSE_EVENT_BUTTON_TYPE_MULTIMEDIA = 240, }; enum { PYRA_MOUSE_EVENT_BUTTON_PRESS = 0, PYRA_MOUSE_EVENT_BUTTON_RELEASE = 1, }; struct pyra_mouse_event_button { uint8_t report_number; uint8_t unknown; uint8_t type; uint8_t data1; uint8_t data2; }; struct pyra_settings { uint8_t command; uint8_t size; uint8_t startup_profile; }; struct pyra_profile_settings { uint8_t command; uint8_t size; uint8_t number; uint8_t xysync; uint8_t x_sensitivity; uint8_t y_sensitivity; uint8_t x_cpi; uint8_t y_cpi; uint8_t lightswitch; uint8_t light_effect; uint8_t handedness; uint16_t checksum; } __attribute__((packed)); struct pyra_device { int actual_profile; int actual_cpi; int roccat_claimed; int chrdev_minor; struct mutex pyra_lock; struct pyra_profile_settings profile_settings[5]; }; struct pyra_roccat_report { uint8_t type; uint8_t value; uint8_t key; }; struct pyra_info { uint8_t command; uint8_t size; uint8_t firmware_version; uint8_t unknown1; uint8_t unknown2; uint8_t unknown3; }; enum { RYOS_REPORT_NUMBER_SPECIAL = 3, RYOS_USB_INTERFACE_PROTOCOL = 0, }; enum { SAVU_MOUSE_REPORT_NUMBER_SPECIAL = 3, }; struct savu_mouse_report_special { uint8_t report_number; uint8_t zero; uint8_t type; uint8_t data[2]; }; struct savu_roccat_report { uint8_t type; uint8_t data[2]; }; struct sixaxis_rumble { u8 padding; u8 right_duration; u8 right_motor_on; u8 left_duration; u8 left_motor_force; }; struct sixaxis_led { u8 time_enabled; u8 duty_length; u8 enabled; u8 duty_off; u8 duty_on; }; struct sixaxis_output_report { u8 report_id; struct sixaxis_rumble rumble; u8 padding[4]; u8 leds_bitmap; struct sixaxis_led led[4]; struct sixaxis_led _reserved; }; union sixaxis_output_report_01 { struct sixaxis_output_report data; u8 buf[36]; }; enum sony_worker { SONY_WORKER_STATE = 0, }; struct sony_sc { spinlock_t lock; struct list_head list_node; struct hid_device *hdev; struct input_dev *touchpad; struct input_dev *sensor_dev; struct led_classdev *leds[4]; unsigned long quirks; struct work_struct state_worker; void (*send_output_report)(struct sony_sc *); struct power_supply *battery; struct power_supply_desc battery_desc; int device_id; u8 *output_report_dmabuf; u8 left; u8 right; u8 mac_address[6]; u8 state_worker_initialized; u8 defer_initialization; u8 battery_capacity; int battery_status; u8 led_state[4]; u8 led_delay_on[4]; u8 led_delay_off[4]; u8 led_count; struct urb *ghl_urb; struct timer_list ghl_poke_timer; }; struct motion_output_report_02 { u8 type; u8 zero; u8 r; u8 g; u8 b; u8 zero2; u8 rumble; }; struct steam_device { struct list_head list; spinlock_t lock; struct hid_device *hdev; struct hid_device *client_hdev; struct mutex mutex; bool client_opened; struct input_dev __attribute__((btf_type_tag("rcu"))) *input; unsigned long quirks; struct work_struct work_connect; bool connected; char serial_no[11]; struct power_supply_desc battery_desc; struct power_supply __attribute__((btf_type_tag("rcu"))) *battery; u8 battery_charge; u16 voltage; struct delayed_work heartbeat; struct work_struct rumble_work; u16 rumble_left; u16 rumble_right; }; enum uclogic_params_pen_inrange { UCLOGIC_PARAMS_PEN_INRANGE_NORMAL = 0, UCLOGIC_PARAMS_PEN_INRANGE_INVERTED = 1, UCLOGIC_PARAMS_PEN_INRANGE_NONE = 2, }; struct uclogic_params_pen_subreport { __u8 value; __u8 id; }; struct uclogic_params_pen { bool usage_invalid; __u8 *desc_ptr; unsigned int desc_size; unsigned int id; struct uclogic_params_pen_subreport subreport_list[3]; enum uclogic_params_pen_inrange inrange; bool fragmented_hires; bool tilt_y_flipped; }; struct 
uclogic_params_frame { __u8 *desc_ptr; unsigned int desc_size; unsigned int id; const char *suffix; unsigned int re_lsb; unsigned int dev_id_byte; unsigned int touch_byte; __s8 touch_flip_at; __s8 touch_max; unsigned int bitmap_dial_byte; }; struct uclogic_raw_event_hook; struct uclogic_params { bool invalid; __u8 *desc_ptr; unsigned int desc_size; struct uclogic_params_pen pen; struct uclogic_params_frame frame_list[3]; struct uclogic_raw_event_hook *event_hooks; }; struct uclogic_drvdata { struct uclogic_params params; __u8 *desc_ptr; unsigned int desc_size; struct input_dev *pen_input; struct timer_list inrange_timer; u8 re_state; unsigned long quirks; }; struct uclogic_raw_event_hook { struct hid_device *hdev; __u8 *event; size_t size; struct work_struct work; struct list_head list; }; enum uclogic_rdesc_ph_id { UCLOGIC_RDESC_PEN_PH_ID_X_LM = 0, UCLOGIC_RDESC_PEN_PH_ID_X_PM = 1, UCLOGIC_RDESC_PEN_PH_ID_Y_LM = 2, UCLOGIC_RDESC_PEN_PH_ID_Y_PM = 3, UCLOGIC_RDESC_PEN_PH_ID_PRESSURE_LM = 4, UCLOGIC_RDESC_FRAME_PH_ID_UM = 5, UCLOGIC_RDESC_PH_ID_NUM = 6, }; enum uclogic_params_frame_type { UCLOGIC_PARAMS_FRAME_BUTTONS = 0, UCLOGIC_PARAMS_FRAME_DIAL = 1, UCLOGIC_PARAMS_FRAME_MOUSE = 2, }; struct wacom_features { const char *name; int x_max; int y_max; int pressure_max; int distance_max; int type; int x_resolution; int y_resolution; int numbered_buttons; int offset_left; int offset_right; int offset_top; int offset_bottom; int device_type; int x_phy; int y_phy; unsigned int unit; int unitExpo; int x_fuzz; int y_fuzz; int pressure_fuzz; int distance_fuzz; int tilt_fuzz; unsigned int quirks; unsigned int touch_max; int oVid; int oPid; int pktlen; bool check_for_hid_type; int hid_type; }; enum { PENPARTNER = 0, GRAPHIRE = 1, GRAPHIRE_BT = 2, WACOM_G4 = 3, PTU = 4, PL = 5, DTU = 6, DTUS = 7, DTUSX = 8, INTUOS = 9, INTUOS3S = 10, INTUOS3 = 11, INTUOS3L = 12, INTUOS4S = 13, INTUOS4 = 14, INTUOS4WL = 15, INTUOS4L = 16, INTUOS5S = 17, INTUOS5 = 18, INTUOS5L = 19, INTUOSPS = 20, INTUOSPM = 21, INTUOSPL = 22, INTUOSP2_BT = 23, INTUOSP2S_BT = 24, INTUOSHT3_BT = 25, WACOM_21UX2 = 26, WACOM_22HD = 27, DTK = 28, WACOM_24HD = 29, WACOM_27QHD = 30, CINTIQ_HYBRID = 31, CINTIQ_COMPANION_2 = 32, CINTIQ = 33, WACOM_BEE = 34, WACOM_13HD = 35, WACOM_MO = 36, BAMBOO_PEN = 37, INTUOSHT = 38, INTUOSHT2 = 39, BAMBOO_TOUCH = 40, BAMBOO_PT = 41, WACOM_24HDT = 42, WACOM_27QHDT = 43, BAMBOO_PAD = 44, WIRELESS = 45, REMOTE = 46, TABLETPC = 47, TABLETPCE = 48, TABLETPC2FG = 49, MTSCREEN = 50, MTTPC = 51, MTTPC_B = 52, HID_GENERIC = 53, BOOTLOADER = 54, MAX_TYPE = 55, }; enum wacom_worker { WACOM_WORKER_WIRELESS = 0, WACOM_WORKER_BATTERY = 1, WACOM_WORKER_REMOTE = 2, WACOM_WORKER_MODE_CHANGE = 3, }; struct hid_data { __s16 inputmode; __s16 inputmode_index; bool sense_state; bool inrange_state; bool invert_state; bool tipswitch; bool barrelswitch; bool barrelswitch2; bool barrelswitch3; bool serialhi; bool confidence; int x; int y; int pressure; int width; int height; int id; int cc_report; int cc_index; int cc_value_index; int last_slot_field; int num_expected; int num_received; int bat_status; int battery_capacity; int bat_charging; int bat_connected; int ps_connected; bool pad_input_event_flag; unsigned short sequence_number; ktime_t time_delayed; }; struct wacom_shared; struct kfifo_rec_ptr_2; struct wacom_wac { char name[64]; char pen_name[64]; char touch_name[64]; char pad_name[64]; unsigned char data[361]; int tool[2]; int id[2]; __u64 serial[2]; bool probe_complete; bool reporting_data; struct wacom_features features; 
struct wacom_shared *shared; struct input_dev *pen_input; struct input_dev *touch_input; struct input_dev *pad_input; struct kfifo_rec_ptr_2 *pen_fifo; int pid; int num_contacts_left; u8 bt_features; u8 bt_high_speed; int mode_report; int mode_value; struct hid_data hid_data; bool has_mute_touch_switch; bool is_soft_touch_switch; bool has_mode_change; bool is_direct_mode; bool is_invalid_bt_frame; }; struct wacom_group_leds; struct wacom_leds { struct wacom_group_leds *groups; unsigned int count; u8 llv; u8 hlv; u8 img_lum; u8 max_llv; u8 max_hlv; }; struct wacom; struct wacom_battery { struct wacom *wacom; struct power_supply_desc bat_desc; struct power_supply *battery; char bat_name[64]; int bat_status; int battery_capacity; int bat_charging; int bat_connected; int ps_connected; }; struct wacom_remote; struct wacom { struct usb_device *usbdev; struct usb_interface *intf; struct wacom_wac wacom_wac; struct hid_device *hdev; struct mutex lock; struct work_struct wireless_work; struct work_struct battery_work; struct work_struct remote_work; struct delayed_work init_work; struct wacom_remote *remote; struct work_struct mode_change_work; struct timer_list idleprox_timer; bool generic_has_leds; struct wacom_leds led; struct wacom_battery battery; bool resources; }; struct wacom_shared { bool stylus_in_proximity; bool touch_down; unsigned int touch_max; int type; struct input_dev *touch_input; struct hid_device *pen; struct hid_device *touch; bool has_mute_touch_switch; bool is_touch_on; }; struct kfifo_rec_ptr_2 { union { struct __kfifo kfifo; unsigned char *type; const unsigned char *const_type; char (*rectype)[2]; void *ptr; const void *ptr_const; }; unsigned char buf[0]; }; struct wacom_remote { spinlock_t remote_lock; struct kfifo remote_fifo; struct kobject *remote_dir; struct { struct attribute_group group; u32 serial; struct input_dev *input; bool registered; struct wacom_battery battery; ktime_t active_time; } remotes[5]; }; struct wacom_led; struct wacom_group_leds { u8 select; struct wacom_led *leds; unsigned int count; struct device *dev; }; struct wacom_led { struct led_classdev cdev; struct led_trigger trigger; struct wacom *wacom; unsigned int group; unsigned int id; u8 llv; u8 hlv; bool held; }; struct wacom_remote_work_data { struct { u32 serial; } remote[5]; }; struct wacom_hdev_data { struct list_head list; struct kref kref; struct hid_device *dev; struct wacom_shared shared; }; struct wacom_sysfs_group_devres { const struct attribute_group *group; struct kobject *root; }; struct wiimote_data; struct wiiproto_handler { __u8 id; size_t size; void (*func)(struct wiimote_data *, const __u8 *); }; struct wiimote_buf { __u8 data[16384]; size_t size; }; struct wiimote_queue { spinlock_t lock; struct work_struct worker; __u8 head; __u8 tail; struct wiimote_buf outq[32]; }; struct wiimote_state { spinlock_t lock; __u32 flags; __u8 accel_split[2]; __u8 drm; __u8 devtype; __u8 exttype; __u8 mp; struct mutex sync; struct completion ready; int cmd; __u32 opt; __u8 cmd_battery; __u8 cmd_err; __u8 *cmd_read_buf; __u8 cmd_read_size; __u16 calib_bboard[12]; __s16 calib_pro_sticks[4]; __u8 pressure_drums[7]; __u8 cache_rumble; }; struct wiimote_debug; struct wiimote_data { struct hid_device *hdev; struct input_dev *input; struct work_struct rumble_worker; struct led_classdev *leds[4]; struct input_dev *accel; struct input_dev *ir; struct power_supply *battery; struct power_supply_desc battery_desc; struct input_dev *mp; struct timer_list timer; struct wiimote_debug *debug; union { struct 
input_dev *input; } extension; struct wiimote_queue queue; struct wiimote_state state; struct work_struct init_worker; }; enum wiiproto_reqs { WIIPROTO_REQ_NULL = 0, WIIPROTO_REQ_RUMBLE = 16, WIIPROTO_REQ_LED = 17, WIIPROTO_REQ_DRM = 18, WIIPROTO_REQ_IR1 = 19, WIIPROTO_REQ_SREQ = 21, WIIPROTO_REQ_WMEM = 22, WIIPROTO_REQ_RMEM = 23, WIIPROTO_REQ_IR2 = 26, WIIPROTO_REQ_STATUS = 32, WIIPROTO_REQ_DATA = 33, WIIPROTO_REQ_RETURN = 34, WIIPROTO_REQ_DRM_K = 48, WIIPROTO_REQ_DRM_KA = 49, WIIPROTO_REQ_DRM_KE = 50, WIIPROTO_REQ_DRM_KAI = 51, WIIPROTO_REQ_DRM_KEE = 52, WIIPROTO_REQ_DRM_KAE = 53, WIIPROTO_REQ_DRM_KIE = 54, WIIPROTO_REQ_DRM_KAIE = 55, WIIPROTO_REQ_DRM_E = 61, WIIPROTO_REQ_DRM_SKAI1 = 62, WIIPROTO_REQ_DRM_SKAI2 = 63, WIIPROTO_REQ_MAX = 64, }; enum wiimote_devtype { WIIMOTE_DEV_PENDING = 0, WIIMOTE_DEV_UNKNOWN = 1, WIIMOTE_DEV_GENERIC = 2, WIIMOTE_DEV_GEN10 = 3, WIIMOTE_DEV_GEN20 = 4, WIIMOTE_DEV_BALANCE_BOARD = 5, WIIMOTE_DEV_PRO_CONTROLLER = 6, WIIMOTE_DEV_NUM = 7, }; enum wiimote_exttype { WIIMOTE_EXT_NONE = 0, WIIMOTE_EXT_UNKNOWN = 1, WIIMOTE_EXT_NUNCHUK = 2, WIIMOTE_EXT_CLASSIC_CONTROLLER = 3, WIIMOTE_EXT_BALANCE_BOARD = 4, WIIMOTE_EXT_PRO_CONTROLLER = 5, WIIMOTE_EXT_DRUMS = 6, WIIMOTE_EXT_GUITAR = 7, WIIMOTE_EXT_TURNTABLE = 8, WIIMOTE_EXT_NUM = 9, }; enum wiimod_module { WIIMOD_KEYS = 0, WIIMOD_RUMBLE = 1, WIIMOD_BATTERY = 2, WIIMOD_LED1 = 3, WIIMOD_LED2 = 4, WIIMOD_LED3 = 5, WIIMOD_LED4 = 6, WIIMOD_ACCEL = 7, WIIMOD_IR = 8, WIIMOD_BUILTIN_MP = 9, WIIMOD_NO_MP = 10, WIIMOD_NUM = 11, WIIMOD_NULL = 11, }; enum wiimote_mptype { WIIMOTE_MP_NONE = 0, WIIMOTE_MP_UNKNOWN = 1, WIIMOTE_MP_SINGLE = 2, WIIMOTE_MP_PASSTHROUGH_NUNCHUK = 3, WIIMOTE_MP_PASSTHROUGH_CLASSIC = 4, }; struct wiimod_ops { __u16 flags; unsigned long arg; int (*probe)(const struct wiimod_ops *, struct wiimote_data *); void (*remove)(const struct wiimod_ops *, struct wiimote_data *); void (*in_keys)(struct wiimote_data *, const __u8 *); void (*in_accel)(struct wiimote_data *, const __u8 *); void (*in_ir)(struct wiimote_data *, const __u8 *, bool, unsigned int); void (*in_mp)(struct wiimote_data *, const __u8 *); void (*in_ext)(struct wiimote_data *, const __u8 *); }; enum wiiproto_keys { WIIPROTO_KEY_LEFT = 0, WIIPROTO_KEY_RIGHT = 1, WIIPROTO_KEY_UP = 2, WIIPROTO_KEY_DOWN = 3, WIIPROTO_KEY_PLUS = 4, WIIPROTO_KEY_MINUS = 5, WIIPROTO_KEY_ONE = 6, WIIPROTO_KEY_TWO = 7, WIIPROTO_KEY_A = 8, WIIPROTO_KEY_B = 9, WIIPROTO_KEY_HOME = 10, WIIPROTO_KEY_COUNT = 11, }; enum wiimod_nunchuk_keys { WIIMOD_NUNCHUK_KEY_C = 0, WIIMOD_NUNCHUK_KEY_Z = 1, WIIMOD_NUNCHUK_KEY_NUM = 2, }; enum wiimod_classic_keys { WIIMOD_CLASSIC_KEY_A = 0, WIIMOD_CLASSIC_KEY_B = 1, WIIMOD_CLASSIC_KEY_X = 2, WIIMOD_CLASSIC_KEY_Y = 3, WIIMOD_CLASSIC_KEY_ZL = 4, WIIMOD_CLASSIC_KEY_ZR = 5, WIIMOD_CLASSIC_KEY_PLUS = 6, WIIMOD_CLASSIC_KEY_MINUS = 7, WIIMOD_CLASSIC_KEY_HOME = 8, WIIMOD_CLASSIC_KEY_LEFT = 9, WIIMOD_CLASSIC_KEY_RIGHT = 10, WIIMOD_CLASSIC_KEY_UP = 11, WIIMOD_CLASSIC_KEY_DOWN = 12, WIIMOD_CLASSIC_KEY_LT = 13, WIIMOD_CLASSIC_KEY_RT = 14, WIIMOD_CLASSIC_KEY_NUM = 15, }; enum wiimod_pro_keys { WIIMOD_PRO_KEY_A = 0, WIIMOD_PRO_KEY_B = 1, WIIMOD_PRO_KEY_X = 2, WIIMOD_PRO_KEY_Y = 3, WIIMOD_PRO_KEY_PLUS = 4, WIIMOD_PRO_KEY_MINUS = 5, WIIMOD_PRO_KEY_HOME = 6, WIIMOD_PRO_KEY_LEFT = 7, WIIMOD_PRO_KEY_RIGHT = 8, WIIMOD_PRO_KEY_UP = 9, WIIMOD_PRO_KEY_DOWN = 10, WIIMOD_PRO_KEY_TL = 11, WIIMOD_PRO_KEY_TR = 12, WIIMOD_PRO_KEY_ZL = 13, WIIMOD_PRO_KEY_ZR = 14, WIIMOD_PRO_KEY_THUMBL = 15, WIIMOD_PRO_KEY_THUMBR = 16, WIIMOD_PRO_KEY_NUM = 17, }; enum wiimod_guitar_keys { 
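/*
 * Illustrative sketch: dispatching an incoming Wii Remote report through a
 * table of struct wiiproto_handler entries (declared earlier), matching on
 * the report id and a minimum payload size.  The handler table itself and
 * the exact size check are assumptions; only the dispatch pattern implied
 * by the structure is shown.
 *
 *   static void wiimote_dispatch(struct wiimote_data *wdata,
 *                                const struct wiiproto_handler *handlers,
 *                                size_t n_handlers,
 *                                const __u8 *raw, size_t size)
 *   {
 *           size_t i;
 *
 *           for (i = 0; i < n_handlers; i++) {
 *                   const struct wiiproto_handler *h = &handlers[i];
 *
 *                   if (size && h->id == raw[0] && h->size < size) {
 *                           h->func(wdata, &raw[1]);   // payload follows the id byte
 *                           break;
 *                   }
 *           }
 *   }
 */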
WIIMOD_GUITAR_KEY_G = 0, WIIMOD_GUITAR_KEY_R = 1, WIIMOD_GUITAR_KEY_Y = 2, WIIMOD_GUITAR_KEY_B = 3, WIIMOD_GUITAR_KEY_O = 4, WIIMOD_GUITAR_KEY_UP = 5, WIIMOD_GUITAR_KEY_DOWN = 6, WIIMOD_GUITAR_KEY_PLUS = 7, WIIMOD_GUITAR_KEY_MINUS = 8, WIIMOD_GUITAR_KEY_NUM = 9, }; enum wiimod_turntable_keys { WIIMOD_TURNTABLE_KEY_G_RIGHT = 0, WIIMOD_TURNTABLE_KEY_R_RIGHT = 1, WIIMOD_TURNTABLE_KEY_B_RIGHT = 2, WIIMOD_TURNTABLE_KEY_G_LEFT = 3, WIIMOD_TURNTABLE_KEY_R_LEFT = 4, WIIMOD_TURNTABLE_KEY_B_LEFT = 5, WIIMOD_TURNTABLE_KEY_EUPHORIA = 6, WIIMOD_TURNTABLE_KEY_PLUS = 7, WIIMOD_TURNTABLE_KEY_MINUS = 8, WIIMOD_TURNTABLE_KEY_NUM = 9, }; struct wiimote_debug { struct wiimote_data *wdata; struct dentry *eeprom; struct dentry *drm; }; struct hid_control_fifo { unsigned char dir; struct hid_report *report; char *raw_report; }; struct hid_output_fifo { struct hid_report *report; char *raw_report; }; struct usbhid_device { struct hid_device *hid; struct usb_interface *intf; int ifnum; unsigned int bufsize; struct urb *urbin; char *inbuf; dma_addr_t inbuf_dma; struct urb *urbctrl; struct usb_ctrlrequest *cr; struct hid_control_fifo ctrl[256]; unsigned char ctrlhead; unsigned char ctrltail; char *ctrlbuf; dma_addr_t ctrlbuf_dma; unsigned long last_ctrl; struct urb *urbout; struct hid_output_fifo out[256]; unsigned char outhead; unsigned char outtail; char *outbuf; dma_addr_t outbuf_dma; unsigned long last_out; struct mutex mutex; spinlock_t lock; unsigned long iofl; struct timer_list io_retry; unsigned long stop_retry; unsigned int retry_delay; struct work_struct reset_work; wait_queue_head_t wait; }; struct hiddev_usage_ref { __u32 report_type; __u32 report_id; __u32 field_index; __u32 usage_index; __u32 usage_code; __s32 value; }; struct hiddev_list { struct hiddev_usage_ref buffer[2048]; int head; int tail; unsigned int flags; struct fasync_struct *fasync; struct hiddev *hiddev; struct list_head node; struct mutex thread_lock; }; struct hiddev_report_info { __u32 report_type; __u32 report_id; __u32 num_fields; }; struct hiddev_event { unsigned int hid; int value; }; struct hiddev_collection_info { __u32 index; __u32 type; __u32 usage; __u32 level; }; struct hiddev_field_info { __u32 report_type; __u32 report_id; __u32 field_index; __u32 maxusage; __u32 flags; __u32 physical; __u32 logical; __u32 application; __s32 logical_minimum; __s32 logical_maximum; __s32 physical_minimum; __s32 physical_maximum; __u32 unit_exponent; __u32 unit; }; struct hiddev_devinfo { __u32 bustype; __u32 busnum; __u32 devnum; __u32 ifnum; __s16 vendor; __s16 product; __s16 version; __u32 num_applications; }; struct hiddev_usage_ref_multi { struct hiddev_usage_ref uref; __u32 num_values; __s32 values[1024]; }; struct alias_prop { struct list_head link; const char *alias; struct device_node *np; int id; char stem[0]; }; struct supplier_bindings { struct device_node * (*parse_prop)(struct device_node *, const char *, int); struct device_node * (*get_con_dev)(struct device_node *); bool optional; }; struct of_bus___2 { void (*count_cells)(const void *, int, int *, int *); u64 (*map)(__be32 *, const __be32 *, int, int, int); int (*translate)(__be32 *, u64, int); }; struct of_bus { const char *name; const char *addresses; int (*match)(struct device_node *); void (*count_cells)(struct device_node *, int *, int *); u64 (*map)(__be32 *, const __be32 *, int, int, int); int (*translate)(__be32 *, u64, int); bool has_flags; unsigned int (*get_flags)(const __be32 *); }; struct of_intc_desc { struct list_head list; of_irq_init_cb_t irq_init_cb; 
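/*
 * Illustrative sketch: the hiddev structures above mirror the
 * <linux/hiddev.h> user-space ABI.  Assuming that interface, a process with
 * a /dev/usb/hiddevN node open can read one usage value roughly as follows;
 * the report/field/usage indices are placeholders and error handling is
 * omitted.
 *
 *   struct hiddev_usage_ref uref = {
 *           .report_type = 1,        // HID_REPORT_TYPE_INPUT
 *           .report_id   = 0x01,     // placeholder report id
 *           .field_index = 0,
 *           .usage_index = 0,
 *   };
 *   if (ioctl(fd, HIDIOCGUSAGE, &uref) == 0)
 *           printf("usage 0x%x = %d\n", uref.usage_code, uref.value);
 */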
struct device_node *dev; struct device_node *interrupt_parent; }; struct rmem_assigned_device { struct device *dev; struct reserved_mem *rmem; struct list_head list; }; typedef int (*reservedmem_of_init_fn)(struct reserved_mem *); struct vsock_sock; struct sockaddr_vm; struct vsock_transport_recv_notify_data; struct vsock_transport_send_notify_data; struct vsock_transport { struct module *module; int (*init)(struct vsock_sock *, struct vsock_sock *); void (*destruct)(struct vsock_sock *); void (*release)(struct vsock_sock *); int (*cancel_pkt)(struct vsock_sock *); int (*connect)(struct vsock_sock *); int (*dgram_bind)(struct vsock_sock *, struct sockaddr_vm *); int (*dgram_dequeue)(struct vsock_sock *, struct msghdr *, size_t, int); int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *, struct msghdr *, size_t); bool (*dgram_allow)(u32, u32); ssize_t (*stream_dequeue)(struct vsock_sock *, struct msghdr *, size_t, int); ssize_t (*stream_enqueue)(struct vsock_sock *, struct msghdr *, size_t); s64 (*stream_has_data)(struct vsock_sock *); s64 (*stream_has_space)(struct vsock_sock *); u64 (*stream_rcvhiwat)(struct vsock_sock *); bool (*stream_is_active)(struct vsock_sock *); bool (*stream_allow)(u32, u32); ssize_t (*seqpacket_dequeue)(struct vsock_sock *, struct msghdr *, int); int (*seqpacket_enqueue)(struct vsock_sock *, struct msghdr *, size_t); bool (*seqpacket_allow)(u32); u32 (*seqpacket_has_data)(struct vsock_sock *); int (*notify_poll_in)(struct vsock_sock *, size_t, bool *); int (*notify_poll_out)(struct vsock_sock *, size_t, bool *); int (*notify_recv_init)(struct vsock_sock *, size_t, struct vsock_transport_recv_notify_data *); int (*notify_recv_pre_block)(struct vsock_sock *, size_t, struct vsock_transport_recv_notify_data *); int (*notify_recv_pre_dequeue)(struct vsock_sock *, size_t, struct vsock_transport_recv_notify_data *); int (*notify_recv_post_dequeue)(struct vsock_sock *, size_t, ssize_t, bool, struct vsock_transport_recv_notify_data *); int (*notify_send_init)(struct vsock_sock *, struct vsock_transport_send_notify_data *); int (*notify_send_pre_block)(struct vsock_sock *, struct vsock_transport_send_notify_data *); int (*notify_send_pre_enqueue)(struct vsock_sock *, struct vsock_transport_send_notify_data *); int (*notify_send_post_enqueue)(struct vsock_sock *, ssize_t, struct vsock_transport_send_notify_data *); void (*notify_buffer_size)(struct vsock_sock *, u64 *); int (*notify_set_rcvlowat)(struct vsock_sock *, int); int (*shutdown)(struct vsock_sock *, int); u32 (*get_local_cid)(); int (*read_skb)(struct vsock_sock *, skb_read_actor_t); }; struct virtio_transport { struct vsock_transport transport; int (*send_pkt)(struct sk_buff *); }; struct sockaddr_vm { __kernel_sa_family_t svm_family; unsigned short svm_reserved1; unsigned int svm_port; unsigned int svm_cid; __u8 svm_flags; unsigned char svm_zero[3]; }; struct vsock_sock { struct sock sk; const struct vsock_transport *transport; struct sockaddr_vm local_addr; struct sockaddr_vm remote_addr; struct list_head bound_table; struct list_head connected_table; bool trusted; bool cached_peer_allow_dgram; u32 cached_peer; const struct cred *owner; long connect_timeout; struct sock *listener; struct list_head pending_links; struct list_head accept_queue; bool rejected; struct delayed_work connect_work; struct delayed_work pending_work; struct delayed_work close_work; bool close_work_scheduled; u32 peer_shutdown; bool sent_request; bool ignore_connecting_rst; u64 buffer_size; u64 buffer_min_size; u64 
buffer_max_size; void *trans; }; struct vsock_transport_recv_notify_data { u64 data1; u64 data2; bool notify_on_block; }; struct vsock_transport_send_notify_data { u64 data1; u64 data2; }; enum { VHOST_VSOCK_FEATURES = 13908312066ULL, }; enum { VHOST_VSOCK_BACKEND_FEATURES = 2, }; enum { VSOCK_VQ_RX = 0, VSOCK_VQ_TX = 1, VSOCK_VQ_EVENT = 2, VSOCK_VQ_MAX = 3, }; enum virtio_vsock_rw { VIRTIO_VSOCK_SEQ_EOM = 1, VIRTIO_VSOCK_SEQ_EOR = 2, }; struct vhost_virtqueue; struct vhost_iotlb; struct vhost_iotlb_msg; struct vhost_dev { struct mm_struct *mm; struct mutex mutex; struct vhost_virtqueue **vqs; int nvqs; struct eventfd_ctx *log_ctx; struct vhost_iotlb *umem; struct vhost_iotlb *iotlb; spinlock_t iotlb_lock; struct list_head read_list; struct list_head pending_list; wait_queue_head_t wait; int iov_limit; int weight; int byte_weight; struct xarray worker_xa; bool use_worker; int (*msg_handler)(struct vhost_dev *, u32, struct vhost_iotlb_msg *); }; struct vhost_vring_call { struct eventfd_ctx *ctx; struct irq_bypass_producer producer; }; struct vhost_work; typedef void (*vhost_work_fn_t)(struct vhost_work *); struct vhost_work { struct llist_node node; vhost_work_fn_t fn; unsigned long flags; }; struct vhost_poll { poll_table table; wait_queue_head_t *wqh; wait_queue_entry_t wait; struct vhost_work work; __poll_t mask; struct vhost_dev *dev; struct vhost_virtqueue *vq; }; struct vhost_worker; struct vhost_iotlb_map; struct vhost_log; struct vhost_virtqueue { struct vhost_dev *dev; struct vhost_worker __attribute__((btf_type_tag("rcu"))) *worker; struct mutex mutex; unsigned int num; vring_desc_t __attribute__((btf_type_tag("user"))) *desc; vring_avail_t __attribute__((btf_type_tag("user"))) *avail; vring_used_t __attribute__((btf_type_tag("user"))) *used; const struct vhost_iotlb_map *meta_iotlb[3]; struct file *kick; struct vhost_vring_call call_ctx; struct eventfd_ctx *error_ctx; struct eventfd_ctx *log_ctx; struct vhost_poll poll; vhost_work_fn_t handle_kick; u16 last_avail_idx; u16 avail_idx; u16 last_used_idx; u16 used_flags; u16 signalled_used; bool signalled_used_valid; bool log_used; u64 log_addr; struct iovec iov[1024]; struct iovec iotlb_iov[64]; struct iovec *indirect; struct vring_used_elem *heads; struct vhost_iotlb *umem; struct vhost_iotlb *iotlb; void *private_data; u64 acked_features; u64 acked_backend_features; void __attribute__((btf_type_tag("user"))) *log_base; struct vhost_log *log; struct iovec log_iov[64]; bool is_le; u32 busyloop_timeout; }; struct vhost_vsock { struct vhost_dev dev; struct vhost_virtqueue vqs[2]; struct hlist_node hash; struct vhost_work send_pkt_work; struct sk_buff_head send_pkt_queue; atomic_t queued_replies; u32 guest_cid; bool seqpacket_allow; }; struct vhost_worker { struct vhost_task *vtsk; struct mutex mutex; struct llist_head work_list; u64 kcov_handle; u32 id; int attachment_cnt; }; struct vhost_iotlb_map { struct rb_node rb; struct list_head link; u64 start; u64 last; u64 size; u64 addr; u32 perm; u32 flags_padding; u64 __subtree_last; void *opaque; }; struct vhost_iotlb { struct rb_root_cached root; struct list_head list; unsigned int limit; unsigned int nmaps; unsigned int flags; }; struct vhost_log { u64 addr; u64 len; }; struct vhost_iotlb_msg { __u64 iova; __u64 size; __u64 uaddr; __u8 perm; __u8 type; }; struct virtio_vsock_hdr { __le64 src_cid; __le64 dst_cid; __le32 src_port; __le32 dst_port; __le32 len; __le16 type; __le16 op; __le32 flags; __le32 buf_alloc; __le32 fwd_cnt; } __attribute__((packed)); struct virtio_vsock_skb_cb { 
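/*
 * Illustrative sketch: struct virtio_vsock_hdr above stores every field in
 * little-endian byte order (__le16/__le32/__le64), so consumers convert
 * before comparing or doing arithmetic.  A minimal accessor, assuming the
 * kernel's le*_to_cpu helpers; the function name is hypothetical.
 *
 *   static bool virtio_vsock_hdr_is_for_port(const struct virtio_vsock_hdr *hdr,
 *                                            u32 cid, u32 port)
 *   {
 *           return le64_to_cpu(hdr->dst_cid) == cid &&
 *                  le32_to_cpu(hdr->dst_port) == port;
 *   }
 */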
bool reply; bool tap_delivered; }; enum vhost_uaddr_type { VHOST_ADDR_DESC = 0, VHOST_ADDR_AVAIL = 1, VHOST_ADDR_USED = 2, VHOST_NUM_ADDRS = 3, }; struct vhost_msg { int type; union { struct vhost_iotlb_msg iotlb; __u8 padding[64]; }; }; struct vhost_msg_v2 { __u32 type; __u32 asid; union { struct vhost_iotlb_msg iotlb; __u8 padding[64]; }; }; struct vhost_msg_node { union { struct vhost_msg msg; struct vhost_msg_v2 msg_v2; }; struct vhost_virtqueue *vq; struct list_head node; }; struct vhost_flush_struct { struct vhost_work work; struct completion wait_event; }; struct vhost_worker_state { unsigned int worker_id; }; struct vhost_vring_addr { unsigned int index; unsigned int flags; __u64 desc_user_addr; __u64 used_user_addr; __u64 avail_user_addr; __u64 log_guest_addr; }; struct vhost_vring_state { unsigned int index; unsigned int num; }; struct vhost_memory_region { __u64 guest_phys_addr; __u64 memory_size; __u64 userspace_addr; __u64 flags_padding; }; struct vhost_memory { __u32 nregions; __u32 padding; struct vhost_memory_region regions[0]; }; struct vhost_vring_worker { unsigned int index; unsigned int worker_id; }; struct vhost_vring_file { unsigned int index; int fd; }; struct ashmem_area; struct ashmem_range { struct list_head lru; struct list_head unpinned; struct ashmem_area *asma; size_t pgstart; size_t pgend; unsigned int purged; }; struct ashmem_area { char name[267]; struct list_head unpinned_list; struct file *file; size_t size; unsigned long prot_mask; }; struct ashmem_pin { __u32 offset; __u32 len; }; struct mbox_chan_ops; struct mbox_controller { struct device *dev; const struct mbox_chan_ops *ops; struct mbox_chan *chans; int num_chans; bool txdone_irq; bool txdone_poll; unsigned int txpoll_period; struct mbox_chan * (*of_xlate)(struct mbox_controller *, const struct of_phandle_args *); struct hrtimer poll_hrt; spinlock_t poll_hrt_lock; struct list_head node; }; struct mbox_chan_ops { int (*send_data)(struct mbox_chan *, void *); int (*flush)(struct mbox_chan *, unsigned long); int (*startup)(struct mbox_chan *); void (*shutdown)(struct mbox_chan *); bool (*last_tx_done)(struct mbox_chan *); bool (*peek_data)(struct mbox_chan *); }; struct mbox_chan { struct mbox_controller *mbox; unsigned int txdone_method; struct mbox_client *cl; struct completion tx_complete; void *active_req; unsigned int msg_count; unsigned int msg_free; void *msg_data[20]; spinlock_t lock; void *con_priv; }; struct hi3660_chan_info { unsigned int dst_irq; unsigned int ack_irq; }; struct hi3660_mbox { struct device *dev; void *base; struct mbox_chan chan[32]; struct hi3660_chan_info mchan[32]; struct mbox_controller controller; }; struct hi6220_mbox_chan; struct hi6220_mbox { struct device *dev; int irq; bool tx_irq_mode; void *ipc; void *base; unsigned int chan_num; struct hi6220_mbox_chan *mchan; void *irq_map_chan[32]; struct mbox_chan *chan; struct mbox_controller controller; }; struct hi6220_mbox_chan { unsigned int dir; unsigned int dst_irq; unsigned int ack_irq; unsigned int slot; struct hi6220_mbox *parent; }; struct tegra_hsp_channel; struct tegra_hsp_sm_ops { void (*send)(struct tegra_hsp_channel *, void *); void (*recv)(struct tegra_hsp_channel *); }; struct tegra_hsp; struct tegra_hsp_channel { struct tegra_hsp *hsp; struct mbox_chan *chan; void *regs; }; struct tegra_hsp_soc; struct tegra_hsp_mailbox; struct tegra_hsp { struct device *dev; const struct tegra_hsp_soc *soc; struct mbox_controller mbox_db; struct mbox_controller mbox_sm; void *regs; unsigned int doorbell_irq; unsigned int 
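/*
 * Illustrative sketch: how a mailbox driver wires struct mbox_chan_ops into
 * struct mbox_controller (both defined above) before handing it to the
 * mailbox framework.  The callback names are hypothetical and the
 * registration step assumes the kernel's mbox_controller_register() API.
 *
 *   static const struct mbox_chan_ops my_mbox_ops = {
 *           .send_data    = my_send_data,     // queue one message to hardware
 *           .startup      = my_startup,       // enable clocks/irqs for a channel
 *           .shutdown     = my_shutdown,
 *           .last_tx_done = my_last_tx_done,  // consulted when txdone_poll is set
 *   };
 *
 *   mbox->dev = dev;
 *   mbox->ops = &my_mbox_ops;
 *   mbox->chans = chans;                      // array of struct mbox_chan
 *   mbox->num_chans = nchans;                 // placeholder count
 *   mbox->txdone_poll = true;
 *   mbox->txpoll_period = 1;                  // poll interval in ms
 *   ret = mbox_controller_register(mbox);
 */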
*shared_irqs; unsigned int shared_irq; unsigned int num_sm; unsigned int num_as; unsigned int num_ss; unsigned int num_db; unsigned int num_si; spinlock_t lock; struct lock_class_key lock_key; struct list_head doorbells; struct tegra_hsp_mailbox *mailboxes; unsigned long mask; }; struct tegra_hsp_db_map; struct tegra_hsp_soc { const struct tegra_hsp_db_map *map; bool has_per_mb_ie; bool has_128_bit_mb; unsigned int reg_stride; }; struct tegra_hsp_db_map { const char *name; unsigned int master; unsigned int index; }; struct tegra_hsp_mailbox { struct tegra_hsp_channel channel; const struct tegra_hsp_sm_ops *ops; unsigned int index; bool producer; }; struct tegra_hsp_doorbell { struct tegra_hsp_channel channel; struct list_head list; const char *name; unsigned int master; unsigned int index; }; struct sun6i_msgbox { struct mbox_controller controller; struct clk *clk; spinlock_t lock; void *regs; }; struct hwspinlock_device; struct hwspinlock { struct hwspinlock_device *bank; spinlock_t lock; void *priv; }; struct hwspinlock_ops; struct hwspinlock_device { struct device *dev; const struct hwspinlock_ops *ops; int base_id; int num_locks; struct hwspinlock lock[0]; }; struct hwspinlock_ops { int (*trylock)(struct hwspinlock *); void (*unlock)(struct hwspinlock *); int (*bust)(struct hwspinlock *, unsigned int); void (*relax)(struct hwspinlock *); }; struct rproc; typedef int (*rproc_handle_resource_t)(struct rproc *, void *, int, int); enum rproc_dump_mechanism { RPROC_COREDUMP_DISABLED = 0, RPROC_COREDUMP_ENABLED = 1, RPROC_COREDUMP_INLINE = 2, }; struct rproc_ops; struct resource_table; struct rproc { struct list_head node; struct iommu_domain *domain; const char *name; const char *firmware; void *priv; struct rproc_ops *ops; struct device dev; atomic_t power; unsigned int state; enum rproc_dump_mechanism dump_conf; struct mutex lock; struct dentry *dbg_dir; struct list_head traces; int num_traces; struct list_head carveouts; struct list_head mappings; u64 bootaddr; struct list_head rvdevs; struct list_head subdevs; struct idr notifyids; int index; struct work_struct crash_handler; unsigned int crash_cnt; bool recovery_disabled; int max_notifyid; struct resource_table *table_ptr; struct resource_table *clean_table; struct resource_table *cached_table; size_t table_sz; bool has_iommu; bool auto_boot; bool sysfs_read_only; struct list_head dump_segments; int nb_vdev; u8 elf_class; u16 elf_machine; struct cdev cdev; bool cdev_put_on_release; unsigned long features[1]; }; struct rproc_ops { int (*prepare)(struct rproc *); int (*unprepare)(struct rproc *); int (*start)(struct rproc *); int (*stop)(struct rproc *); int (*attach)(struct rproc *); int (*detach)(struct rproc *); void (*kick)(struct rproc *, int); void * (*da_to_va)(struct rproc *, u64, size_t, bool *); int (*parse_fw)(struct rproc *, const struct firmware *); int (*handle_rsc)(struct rproc *, u32, void *, int, int); struct resource_table * (*find_loaded_rsc_table)(struct rproc *, const struct firmware *); struct resource_table * (*get_loaded_rsc_table)(struct rproc *, size_t *); int (*load)(struct rproc *, const struct firmware *); int (*sanity_check)(struct rproc *, const struct firmware *); u64 (*get_boot_addr)(struct rproc *, const struct firmware *); unsigned long (*panic)(struct rproc *); void (*coredump)(struct rproc *); }; struct resource_table { u32 ver; u32 num; u32 reserved[2]; u32 offset[0]; }; enum rproc_state { RPROC_OFFLINE = 0, RPROC_SUSPENDED = 1, RPROC_RUNNING = 2, RPROC_CRASHED = 3, RPROC_DELETED = 4, RPROC_ATTACHED 
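/*
 * Illustrative sketch: walking a remoteproc struct resource_table (defined
 * above).  Each entry of offset[] is a byte offset from the table start to a
 * struct fw_rsc_hdr (declared just below), whose type field, an
 * enum fw_resource_type value, selects the payload that follows the header.
 * Resource-specific handling is omitted.
 *
 *   static void walk_resource_table(struct resource_table *table)
 *   {
 *           u32 i;
 *
 *           for (i = 0; i < table->num; i++) {
 *                   struct fw_rsc_hdr *hdr = (void *)table + table->offset[i];
 *
 *                   switch (hdr->type) {
 *                   case RSC_CARVEOUT:   // struct fw_rsc_carveout follows hdr->data
 *                   case RSC_DEVMEM:     // struct fw_rsc_devmem
 *                   case RSC_TRACE:      // struct fw_rsc_trace
 *                   case RSC_VDEV:       // struct fw_rsc_vdev plus its vrings
 *                   default:
 *                           break;
 *                   }
 *           }
 *   }
 */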
= 5, RPROC_DETACHED = 6, RPROC_LAST = 7, }; enum rproc_features { RPROC_FEAT_ATTACH_ON_RECOVERY = 0, RPROC_MAX_FEATURES = 1, }; enum rproc_crash_type { RPROC_MMUFAULT = 0, RPROC_WATCHDOG = 1, RPROC_FATAL_ERROR = 2, }; enum fw_resource_type { RSC_CARVEOUT = 0, RSC_DEVMEM = 1, RSC_TRACE = 2, RSC_VDEV = 3, RSC_LAST = 4, RSC_VENDOR_START = 128, RSC_VENDOR_END = 512, }; enum rsc_handling_status { RSC_HANDLED = 0, RSC_IGNORED = 1, }; struct rproc_mem_entry { void *va; bool is_iomem; dma_addr_t dma; size_t len; u32 da; void *priv; char name[32]; struct list_head node; u32 rsc_offset; u32 flags; u32 of_resm_idx; int (*alloc)(struct rproc *, struct rproc_mem_entry *); int (*release)(struct rproc *, struct rproc_mem_entry *); }; struct rproc_debug_trace { struct rproc *rproc; struct dentry *tfile; struct list_head node; struct rproc_mem_entry trace_mem; }; struct rproc_subdev { struct list_head node; int (*prepare)(struct rproc_subdev *); int (*start)(struct rproc_subdev *); void (*stop)(struct rproc_subdev *, bool); void (*unprepare)(struct rproc_subdev *); }; struct rproc_vdev; struct rproc_vring { void *va; int num; u32 da; u32 align; int notifyid; struct rproc_vdev *rvdev; struct virtqueue *vq; }; struct rproc_vdev { struct rproc_subdev subdev; struct platform_device *pdev; unsigned int id; struct list_head node; struct rproc *rproc; struct rproc_vring vring[2]; u32 rsc_offset; u32 index; }; struct fw_rsc_vdev_vring { u32 da; u32 align; u32 num; u32 notifyid; u32 pa; }; struct fw_rsc_vdev { u32 id; u32 notifyid; u32 dfeatures; u32 gfeatures; u32 config_len; u8 status; u8 num_of_vrings; u8 reserved[2]; struct fw_rsc_vdev_vring vring[0]; }; struct fw_rsc_hdr { u32 type; u8 data[0]; }; struct fw_rsc_carveout { u32 da; u32 pa; u32 len; u32 flags; u32 reserved; u8 name[32]; }; struct fw_rsc_devmem { u32 da; u32 pa; u32 len; u32 flags; u32 reserved; u8 name[32]; }; struct fw_rsc_trace { u32 da; u32 len; u32 reserved; u8 name[32]; }; struct rproc_vdev_data { u32 rsc_offset; unsigned int id; u32 index; struct fw_rsc_vdev *rsc; }; struct rproc_dump_segment { struct list_head node; dma_addr_t da; size_t size; void *priv; void (*dump)(struct rproc *, struct rproc_dump_segment *, void *, size_t, size_t); loff_t offset; }; struct rproc_coredump_state { struct rproc *rproc; void *header; struct completion dump_done; }; struct rpmsg_device_id { char name[32]; kernel_ulong_t driver_data; }; struct rpmsg_endpoint; struct rpmsg_device_ops; struct rpmsg_device { struct device dev; struct rpmsg_device_id id; const char *driver_override; u32 src; u32 dst; struct rpmsg_endpoint *ept; bool announce; bool little_endian; const struct rpmsg_device_ops *ops; }; typedef int (*rpmsg_rx_cb_t)(struct rpmsg_device *, void *, int, void *, u32); typedef int (*rpmsg_flowcontrol_cb_t)(struct rpmsg_device *, void *, bool); struct rpmsg_endpoint_ops; struct rpmsg_endpoint { struct rpmsg_device *rpdev; struct kref refcount; rpmsg_rx_cb_t cb; rpmsg_flowcontrol_cb_t flow_cb; struct mutex cb_lock; u32 addr; void *priv; const struct rpmsg_endpoint_ops *ops; }; struct rpmsg_endpoint_ops { void (*destroy_ept)(struct rpmsg_endpoint *); int (*send)(struct rpmsg_endpoint *, void *, int); int (*sendto)(struct rpmsg_endpoint *, void *, int, u32); int (*send_offchannel)(struct rpmsg_endpoint *, u32, u32, void *, int); int (*trysend)(struct rpmsg_endpoint *, void *, int); int (*trysendto)(struct rpmsg_endpoint *, void *, int, u32); int (*trysend_offchannel)(struct rpmsg_endpoint *, u32, u32, void *, int); __poll_t (*poll)(struct rpmsg_endpoint *, 
struct file *, poll_table *); int (*set_flow_control)(struct rpmsg_endpoint *, bool, u32); ssize_t (*get_mtu)(struct rpmsg_endpoint *); }; struct rpmsg_channel_info; struct rpmsg_device_ops { struct rpmsg_device * (*create_channel)(struct rpmsg_device *, struct rpmsg_channel_info *); int (*release_channel)(struct rpmsg_device *, struct rpmsg_channel_info *); struct rpmsg_endpoint * (*create_ept)(struct rpmsg_device *, rpmsg_rx_cb_t, void *, struct rpmsg_channel_info); int (*announce_create)(struct rpmsg_device *); int (*announce_destroy)(struct rpmsg_device *); }; struct rpmsg_channel_info { char name[32]; u32 src; u32 dst; }; struct rpmsg_driver { struct device_driver drv; const struct rpmsg_device_id *id_table; int (*probe)(struct rpmsg_device *); void (*remove)(struct rpmsg_device *); int (*callback)(struct rpmsg_device *, void *, int, void *, u32); int (*flowcontrol)(struct rpmsg_device *, void *, bool); }; struct rpmsg_eptdev { struct device dev; struct cdev cdev; struct rpmsg_device *rpdev; struct rpmsg_channel_info chinfo; struct mutex ept_lock; struct rpmsg_endpoint *ept; struct rpmsg_endpoint *default_ept; spinlock_t queue_lock; struct sk_buff_head queue; wait_queue_head_t readq; bool remote_flow_restricted; bool remote_flow_updated; }; enum gunyah_resource_type { GUNYAH_RESOURCE_TYPE_BELL_TX = 0, GUNYAH_RESOURCE_TYPE_BELL_RX = 1, GUNYAH_RESOURCE_TYPE_MSGQ_TX = 2, GUNYAH_RESOURCE_TYPE_MSGQ_RX = 3, GUNYAH_RESOURCE_TYPE_VCPU = 4, GUNYAH_RESOURCE_TYPE_MEM_EXTENT = 9, GUNYAH_RESOURCE_TYPE_ADDR_SPACE = 10, }; enum gunyah_rm_error { GUNYAH_RM_ERROR_OK = 0, GUNYAH_RM_ERROR_UNIMPLEMENTED = 4294967295, GUNYAH_RM_ERROR_NOMEM = 1, GUNYAH_RM_ERROR_NORESOURCE = 2, GUNYAH_RM_ERROR_DENIED = 3, GUNYAH_RM_ERROR_INVALID = 4, GUNYAH_RM_ERROR_BUSY = 5, GUNYAH_RM_ERROR_ARGUMENT_INVALID = 6, GUNYAH_RM_ERROR_HANDLE_INVALID = 7, GUNYAH_RM_ERROR_VALIDATE_FAILED = 8, GUNYAH_RM_ERROR_MAP_FAILED = 9, GUNYAH_RM_ERROR_MEM_INVALID = 10, GUNYAH_RM_ERROR_MEM_INUSE = 11, GUNYAH_RM_ERROR_MEM_RELEASED = 12, GUNYAH_RM_ERROR_VMID_INVALID = 13, GUNYAH_RM_ERROR_LOOKUP_FAILED = 14, GUNYAH_RM_ERROR_IRQ_INVALID = 15, GUNYAH_RM_ERROR_IRQ_INUSE = 16, GUNYAH_RM_ERROR_IRQ_RELEASED = 17, }; struct gunyah_rm_rpc_hdr { u8 api; u8 type; __le16 seq; __le32 msg_id; }; struct gunyah_resource { enum gunyah_resource_type type; u64 capid; unsigned int irq; struct list_head list; u32 rm_label; }; struct gunyah_rm_message; struct gunyah_rm { struct device *dev; struct gunyah_resource tx_ghrsc; struct gunyah_resource rx_ghrsc; struct gunyah_rm_message *active_rx_message; struct xarray call_xarray; u32 next_seq; unsigned char recv_msg[240]; unsigned char send_msg[240]; struct mutex send_lock; struct completion send_ready; struct blocking_notifier_head nh; struct auxiliary_device adev; struct miscdevice miscdev; struct fwnode_handle *parent_fwnode; }; struct gunyah_rm_message { void *payload; size_t size; u32 msg_id; u8 type; u8 num_fragments; u8 fragments_received; struct { int ret; u16 seq; enum gunyah_rm_error rm_error; struct completion seq_done; } reply; }; struct gunyah_rm_rpc_reply_hdr { struct gunyah_rm_rpc_hdr hdr; __le32 err_code; }; struct gunyah_rm_hyp_resource { u8 type; u8 reserved; __le16 partner_vmid; __le32 resource_handle; __le32 resource_label; __le64 cap_id; __le32 virq_handle; __le32 virq; __le64 base; __le64 size; } __attribute__((packed)); enum gunyah_rm_mem_type { GUNYAH_RM_MEM_TYPE_NORMAL = 0, GUNYAH_RM_MEM_TYPE_IO = 1, }; enum gunyah_rm_vm_auth_mechanism { GUNYAH_RM_VM_AUTH_NONE = 0, GUNYAH_RM_VM_AUTH_QCOM_PIL_ELF = 
1, GUNYAH_RM_VM_AUTH_QCOM_ANDROID_PVM = 2, }; enum gunyah_rm_range_id { GUNYAH_RM_RANGE_ID_IMAGE = 0, GUNYAH_RM_RANGE_ID_FIRMWARE = 1, }; struct gunyah_rm_mem_acl_entry; struct gunyah_rm_mem_entry; struct gunyah_rm_mem_parcel { enum gunyah_rm_mem_type mem_type; u32 label; size_t n_acl_entries; struct gunyah_rm_mem_acl_entry *acl_entries; size_t n_mem_entries; struct gunyah_rm_mem_entry *mem_entries; u32 mem_handle; }; struct gunyah_rm_mem_acl_entry { __le16 vmid; u8 perms; u8 reserved; }; struct gunyah_rm_mem_entry { __le64 phys_addr; __le64 size; }; struct gunyah_rm_vm_common_vmid_req { __le16 vmid; __le16 _padding; }; struct gunyah_rm_mem_share_req_header { u8 mem_type; u8 _padding0; u8 flags; u8 _padding1; __le32 label; }; struct gunyah_rm_mem_share_req_mem_section { __le16 n_entries; __le16 _padding; struct gunyah_rm_mem_entry entries[0]; } __attribute__((packed)); struct gunyah_rm_mem_share_req_acl_section { __le16 n_entries; __le16 _padding; struct gunyah_rm_mem_acl_entry entries[0]; }; struct gunyah_rm_mem_append_req_header { __le32 mem_handle; u8 flags; u8 _padding0; __le16 _padding1; }; struct gunyah_rm_mem_release_req { __le32 mem_handle; u8 flags; u8 _padding0; __le16 _padding1; }; struct gunyah_rm_vm_alloc_vmid_resp { __le16 vmid; __le16 _padding; }; struct gunyah_rm_vm_stop_req { __le16 vmid; u8 flags; u8 _padding; __le32 stop_reason; }; struct gunyah_rm_vm_config_image_req { __le16 vmid; __le16 auth_mech; __le32 mem_handle; __le64 image_offset; __le64 image_size; __le64 dtb_offset; __le64 dtb_size; }; struct gunyah_rm_vm_set_boot_context_req { __le16 vmid; u8 reg_set; u8 reg_index; __le32 _padding; __le64 value; }; struct gunyah_rm_hyp_resources { __le32 n_entries; struct gunyah_rm_hyp_resource entries[0]; }; struct gunyah_rm_vm_set_demand_paging_req { __le16 vmid; __le16 _padding; __le32 range_count; struct { struct {} __empty_ranges; struct gunyah_rm_mem_entry ranges[0]; }; }; struct gunyah_rm_vm_set_address_layout_req { __le16 vmid; __le16 _padding; __le32 range_id; __le64 range_base; __le64 range_size; }; struct gunyah_vm_set_firmware_mem_req { __le16 vmid; __le16 reserved; __le32 mem_handle; __le64 fw_offset; __le64 fw_size; }; enum gunyah_rm_vm_status { GUNYAH_RM_VM_STATUS_NO_STATE = 0, GUNYAH_RM_VM_STATUS_INIT = 1, GUNYAH_RM_VM_STATUS_READY = 2, GUNYAH_RM_VM_STATUS_RUNNING = 3, GUNYAH_RM_VM_STATUS_PAUSED = 4, GUNYAH_RM_VM_STATUS_LOAD = 5, GUNYAH_RM_VM_STATUS_AUTH = 6, GUNYAH_RM_VM_STATUS_INIT_FAILED = 8, GUNYAH_RM_VM_STATUS_EXITED = 9, GUNYAH_RM_VM_STATUS_RESETTING = 10, GUNYAH_RM_VM_STATUS_RESET = 11, }; enum gunyah_vm_mem_share_type { VM_MEM_SHARE = 0, VM_MEM_LEND = 1, }; enum gunyah_map_flags { GUNYAH_MEM_ALLOW_READ = 1, GUNYAH_MEM_ALLOW_WRITE = 2, GUNYAH_MEM_ALLOW_EXEC = 4, GUNYAH_MEM_ALLOW_RWX = 7, GUNYAH_MEM_DEFAULT_ACCESS = 0, GUNYAH_MEM_FORCE_LEND = 16, GUNYAH_MEM_FORCE_SHARE = 32, GUNYAH_MEM_UNMAP = 256, }; enum gunyah_rm_notification_id { GUNYAH_RM_NOTIFICATION_VM_EXITED = 1443889153, GUNYAH_RM_NOTIFICATION_VM_STATUS = 1443889160, }; enum gunyah_vm_boot_context_reg { REG_SET_X = 0, REG_SET_PC = 1, REG_SET_SP = 2, }; struct gunyah_vm_resource_ticket { struct list_head vm_list; struct list_head resources; enum gunyah_resource_type resource_type; u32 label; struct module *owner; bool (*populate)(struct gunyah_vm_resource_ticket *, struct gunyah_resource *); void (*unpopulate)(struct gunyah_vm_resource_ticket *, struct gunyah_resource *); }; struct gunyah_vm_io_handler_ops; struct gunyah_vm_io_handler { struct rb_node node; u64 addr; bool datamatch; u8 len; u64 
data; struct gunyah_vm_io_handler_ops *ops; }; struct gunyah_vm_io_handler_ops { int (*read)(struct gunyah_vm_io_handler *, u64, u32, u64); int (*write)(struct gunyah_vm_io_handler *, u64, u32, u64); }; struct gunyah_vm_dtb_config { __u64 guest_phys_addr; __u64 size; }; struct gunyah_vm_firmware_config { __u64 guest_phys_addr; __u64 size; }; struct gunyah_vm_exit_info { __u16 type; __u16 padding; __u32 reason_size; __u8 reason[8]; }; struct gunyah_vm { u16 vmid; struct maple_tree mm; struct maple_tree bindings; struct rw_semaphore bindings_lock; struct mm_struct *mm_s; struct gunyah_vm_resource_ticket addrspace_ticket; struct gunyah_vm_resource_ticket host_private_extent_ticket; struct gunyah_vm_resource_ticket host_shared_extent_ticket; struct gunyah_vm_resource_ticket guest_private_extent_ticket; struct gunyah_vm_resource_ticket guest_shared_extent_ticket; struct rb_root mmio_handler_root; struct rw_semaphore mmio_handler_lock; struct gunyah_rm *rm; struct notifier_block nb; enum gunyah_rm_vm_status vm_status; wait_queue_head_t vm_status_wait; struct rw_semaphore status_lock; struct gunyah_vm_exit_info exit_info; struct kref kref; struct mutex fn_lock; struct list_head functions; struct mutex resources_lock; struct list_head resources; struct list_head resource_tickets; struct device *parent; enum gunyah_rm_vm_auth_mechanism auth; struct { struct gunyah_vm_dtb_config config; u64 parcel_start; u64 parcel_pages; struct gunyah_rm_mem_parcel parcel; } dtb; struct { struct gunyah_vm_firmware_config config; u64 parcel_start; u64 parcel_pages; struct gunyah_rm_mem_parcel parcel; } fw; struct xarray boot_context; }; struct gunyah_vm_function; struct gunyah_vm_function_instance { size_t arg_size; void *argp; struct gunyah_vm *ghvm; struct gunyah_rm *rm; struct gunyah_vm_function *fn; void *data; struct list_head vm_list; }; struct gunyah_vm_function { u32 type; const char *name; struct module *mod; long (*bind)(struct gunyah_vm_function_instance *); void (*unbind)(struct gunyah_vm_function_instance *); bool (*compare)(const struct gunyah_vm_function_instance *, const void *, size_t); }; struct gunyah_vm_gup_binding { enum gunyah_vm_mem_share_type share_type; u64 guest_phys_addr; u64 userspace_addr; u64 size; u32 flags; }; struct gunyah_fn_desc { __u32 type; __u32 arg_size; __u64 arg; }; struct gunyah_vm_boot_context { __u32 reg; __u32 reserved; __u64 value; }; struct gunyah_userspace_memory_region { __u32 label; __u32 flags; __u64 guest_phys_addr; __u64 memory_size; __u64 userspace_addr; }; struct gunyah_rm_vm_status_payload { __le16 vmid; u16 reserved; u8 vm_status; u8 os_status; __le16 app_status; }; struct gunyah_rm_vm_exited_payload { __le16 vmid; __le16 exit_type; __le32 exit_reason_size; u8 exit_reason[0]; }; enum gunyah_addrspace_map_flag_bits { GUNYAH_ADDRSPACE_MAP_FLAG_PARTIAL = 0, GUNYAH_ADDRSPACE_MAP_FLAG_PRIVATE = 1, GUNYAH_ADDRSPACE_MAP_FLAG_VMMIO = 2, GUNYAH_ADDRSPACE_MAP_FLAG_NOSYNC = 31, }; enum gunyah_pagetable_access { GUNYAH_PAGETABLE_ACCESS_NONE = 0, GUNYAH_PAGETABLE_ACCESS_X = 1, GUNYAH_PAGETABLE_ACCESS_W = 2, GUNYAH_PAGETABLE_ACCESS_R = 4, GUNYAH_PAGETABLE_ACCESS_RX = 5, GUNYAH_PAGETABLE_ACCESS_RW = 6, GUNYAH_PAGETABLE_ACCESS_RWX = 7, }; enum arch_gunyah_memtype { GUNYAH_MEMTYPE_DEVICE_nGnRnE = 0, GUNYAH_DEVICE_nGnRE = 1, GUNYAH_DEVICE_nGRE = 2, GUNYAH_DEVICE_GRE = 3, GUNYAH_NORMAL_NC = 5, GUNYAH_NORMAL_ONC_IWT = 6, GUNYAH_NORMAL_ONC_IWB = 7, GUNYAH_NORMAL_OWT_INC = 9, GUNYAH_NORMAL_WT = 10, GUNYAH_NORMAL_OWT_IWB = 11, GUNYAH_NORMAL_OWB_INC = 13, GUNYAH_NORMAL_OWB_IWT = 14, 
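/*
 * Illustrative sketch (not part of the generated header): the GUNYAH_MEM_ALLOW_*
 * flags and enum gunyah_pagetable_access above use different bit positions for
 * the same permissions (ALLOW_READ is bit 0, while PAGETABLE_ACCESS_R is bit 2),
 * so one value cannot simply be cast to the other. A hypothetical conversion
 * helper would remap each permission bit explicitly:
 *
 *   static enum gunyah_pagetable_access
 *   gunyah_map_flags_to_access(u32 map_flags)
 *   {
 *       u32 access = GUNYAH_PAGETABLE_ACCESS_NONE;
 *
 *       if (map_flags & GUNYAH_MEM_ALLOW_READ)
 *           access |= GUNYAH_PAGETABLE_ACCESS_R;
 *       if (map_flags & GUNYAH_MEM_ALLOW_WRITE)
 *           access |= GUNYAH_PAGETABLE_ACCESS_W;
 *       if (map_flags & GUNYAH_MEM_ALLOW_EXEC)
 *           access |= GUNYAH_PAGETABLE_ACCESS_X;
 *       return (enum gunyah_pagetable_access)access;
 *   }
 */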
GUNYAH_NORMAL_WB = 15, }; enum gunyah_memextent_donate_type { GUNYAH_MEMEXTENT_DONATE_TO_CHILD = 0, GUNYAH_MEMEXTENT_DONATE_TO_PARENT = 1, GUNYAH_MEMEXTENT_DONATE_TO_SIBLING = 2, GUNYAH_MEMEXTENT_DONATE_TO_PROTECTED = 3, GUNYAH_MEMEXTENT_DONATE_FROM_PROTECTED = 4, }; enum gunyah_vm_status { GUNYAH_VM_STATUS_LOAD_FAILED = 1, GUNYAH_VM_STATUS_EXITED = 2, GUNYAH_VM_STATUS_CRASHED = 3, }; struct gunyah_vcpu_run; struct gunyah_vcpu { struct gunyah_vm_function_instance *f; struct gunyah_resource *rsc; struct mutex run_lock; struct gunyah_vm *ghvm; struct gunyah_vcpu_run *vcpu_run; enum { GUNYAH_VCPU_RUN_STATE_UNKNOWN = 0, GUNYAH_VCPU_RUN_STATE_READY = 1, GUNYAH_VCPU_RUN_STATE_MMIO_READ = 2, GUNYAH_VCPU_RUN_STATE_MMIO_WRITE = 3, GUNYAH_VCPU_RUN_STATE_SYSTEM_DOWN = 4, } state; u8 mmio_read_len; u64 mmio_addr; struct completion ready; struct notifier_block nb; struct gunyah_vm_resource_ticket ticket; struct kref kref; }; struct gunyah_vcpu_run { __u8 immediate_exit; __u8 padding[7]; __u32 exit_reason; union { struct { __u64 phys_addr; __u8 data[8]; __u32 len; __u8 is_write; __u8 resume_action; } mmio; struct { enum gunyah_vm_status status; struct gunyah_vm_exit_info exit_info; } status; struct { __u64 phys_addr; __s32 attempt; __u8 resume_action; } page_fault; }; }; enum gunyah_vcpu_exit { GUNYAH_VCPU_EXIT_UNKNOWN = 0, GUNYAH_VCPU_EXIT_MMIO = 1, GUNYAH_VCPU_EXIT_STATUS = 2, GUNYAH_VCPU_EXIT_PAGE_FAULT = 3, }; enum gunyah_vcpu_resume_action { GUNYAH_VCPU_RESUME_HANDLED = 0, GUNYAH_VCPU_RESUME_FAULT = 1, GUNYAH_VCPU_RESUME_RETRY = 2, }; enum { GUNYAH_ADDRSPACE_VMMIO_ACTION_EMULATE = 0, GUNYAH_ADDRSPACE_VMMIO_ACTION_RETRY = 1, GUNYAH_ADDRSPACE_VMMIO_ACTION_FAULT = 2, }; struct gunyah_fn_vcpu_arg { __u32 id; }; struct gunyah_rm_platform_ops { int (*pre_mem_share)(struct gunyah_rm *, struct gunyah_rm_mem_parcel *); int (*post_mem_reclaim)(struct gunyah_rm *, struct gunyah_rm_mem_parcel *); int (*pre_demand_page)(struct gunyah_rm *, u16, enum gunyah_pagetable_access, struct folio *); int (*release_demand_page)(struct gunyah_rm *, u16, enum gunyah_pagetable_access, struct folio *); }; enum gunyah_irqfd_flags { GUNYAH_IRQFD_FLAGS_LEVEL = 1, }; struct gunyah_irqfd { struct gunyah_resource *ghrsc; struct gunyah_vm_resource_ticket ticket; struct gunyah_vm_function_instance *f; bool level; struct eventfd_ctx *ctx; wait_queue_entry_t wait; poll_table pt; }; struct gunyah_fn_irqfd_arg { __u32 fd; __u32 label; __u32 flags; __u32 padding; }; enum gunyah_ioeventfd_flags { GUNYAH_IOEVENTFD_FLAGS_DATAMATCH = 1, }; struct gunyah_ioeventfd { struct gunyah_vm_function_instance *f; struct gunyah_vm_io_handler io_handler; struct eventfd_ctx *ctx; }; struct gunyah_fn_ioeventfd_arg { __u64 datamatch; __u64 addr; __u32 len; __s32 fd; __u32 flags; __u32 padding; }; typedef void (*btf_trace_devfreq_frequency)(void *, struct devfreq *, unsigned long, unsigned long); typedef void (*btf_trace_devfreq_monitor)(void *, struct devfreq *); enum devfreq_parent_dev_type { DEVFREQ_PARENT_DEV = 0, CPUFREQ_PARENT_DEV = 1, }; struct trace_event_raw_devfreq_frequency { struct trace_entry ent; u32 __data_loc_dev_name; unsigned long freq; unsigned long prev_freq; unsigned long busy_time; unsigned long total_time; char __data[0]; }; struct trace_event_raw_devfreq_monitor { struct trace_entry ent; unsigned long freq; unsigned long busy_time; unsigned long total_time; unsigned int polling_ms; u32 __data_loc_dev_name; char __data[0]; }; struct trace_event_data_offsets_devfreq_frequency { u32 dev_name; }; struct 
trace_event_data_offsets_devfreq_monitor { u32 dev_name; }; struct devfreq_freqs { unsigned long old; unsigned long new; }; struct devfreq_notifier_devres { struct devfreq *devfreq; struct notifier_block *nb; unsigned int list; }; struct devfreq_passive_data { struct devfreq *parent; int (*get_target_freq)(struct devfreq *, unsigned long *); enum devfreq_parent_dev_type parent_type; struct devfreq *this; struct notifier_block nb; struct list_head cpu_data_list; }; struct devfreq_event_desc; struct devfreq_event_dev { struct list_head node; struct device dev; struct mutex lock; u32 enable_count; const struct devfreq_event_desc *desc; }; struct devfreq_event_ops; struct devfreq_event_desc { const char *name; u32 event_type; void *driver_data; const struct devfreq_event_ops *ops; }; struct devfreq_event_data; struct devfreq_event_ops { int (*enable)(struct devfreq_event_dev *); int (*disable)(struct devfreq_event_dev *); int (*reset)(struct devfreq_event_dev *); int (*set_event)(struct devfreq_event_dev *); int (*get_event)(struct devfreq_event_dev *, struct devfreq_event_data *); }; struct devfreq_event_data { unsigned long load_count; unsigned long total_count; }; struct userspace_data { unsigned long user_frequency; bool valid; }; struct devfreq_cpu_data { struct list_head node; struct device *dev; unsigned int first_cpu; struct opp_table *opp_table; unsigned int cur_freq; unsigned int min_freq; unsigned int max_freq; }; struct __extcon_info { unsigned int type; unsigned int id; const char *name; }; struct extcon_cable; struct extcon_dev { const char *name; const unsigned int *supported_cable; const u32 *mutually_exclusive; struct device dev; unsigned int id; struct raw_notifier_head nh_all; struct raw_notifier_head *nh; struct list_head entry; int max_supported; spinlock_t lock; u32 state; struct device_type extcon_dev_type; struct extcon_cable *cables; struct attribute_group attr_g_muex; struct attribute **attrs_muex; struct device_attribute *d_attrs_muex; }; union extcon_property_value { int intval; }; struct extcon_cable { struct extcon_dev *edev; int cable_index; struct attribute_group attr_g; struct device_attribute attr_name; struct device_attribute attr_state; struct attribute *attrs[3]; union extcon_property_value usb_propval[3]; union extcon_property_value chg_propval[1]; union extcon_property_value jack_propval[1]; union extcon_property_value disp_propval[2]; unsigned long usb_bits[1]; unsigned long chg_bits[1]; unsigned long jack_bits[1]; unsigned long disp_bits[1]; }; struct extcon_dev_notifier_devres { struct extcon_dev *edev; unsigned int id; struct notifier_block *nb; }; struct tegra_mc; struct tegra_mc_reset; struct tegra_mc_reset_ops { int (*hotreset_assert)(struct tegra_mc *, const struct tegra_mc_reset *); int (*hotreset_deassert)(struct tegra_mc *, const struct tegra_mc_reset *); int (*block_dma)(struct tegra_mc *, const struct tegra_mc_reset *); bool (*dma_idling)(struct tegra_mc *, const struct tegra_mc_reset *); int (*unblock_dma)(struct tegra_mc *, const struct tegra_mc_reset *); int (*reset_status)(struct tegra_mc *, const struct tegra_mc_reset *); }; struct tegra_smmu; struct gart_device; struct icc_node; struct icc_node_data; struct icc_provider { struct list_head provider_list; struct list_head nodes; int (*set)(struct icc_node *, struct icc_node *); int (*aggregate)(struct icc_node *, u32, u32, u32, u32 *, u32 *); void (*pre_aggregate)(struct icc_node *); int (*get_bw)(struct icc_node *, u32 *, u32 *); struct icc_node * (*xlate)(struct of_phandle_args *, void 
*); struct icc_node_data * (*xlate_extended)(struct of_phandle_args *, void *); struct device *dev; int users; bool inter_set; void *data; }; struct tegra_mc_soc; struct tegra_mc_timing; struct tegra_mc { struct tegra_bpmp *bpmp; struct device *dev; struct tegra_smmu *smmu; struct gart_device *gart; void *regs; void *bcast_ch_regs; void **ch_regs; struct clk *clk; int irq; const struct tegra_mc_soc *soc; unsigned long tick; struct tegra_mc_timing *timings; unsigned int num_timings; unsigned int num_channels; bool bwmgr_mrq_supported; struct reset_controller_dev reset; struct icc_provider provider; spinlock_t lock; struct { struct dentry *root; } debugfs; }; struct tegra_mc_client; struct tegra_smmu_soc; struct tegra_mc_icc_ops; struct tegra_mc_ops; struct tegra_mc_soc { const struct tegra_mc_client *clients; unsigned int num_clients; const unsigned long *emem_regs; unsigned int num_emem_regs; unsigned int num_address_bits; unsigned int atom_size; unsigned int num_carveouts; u16 client_id_mask; u8 num_channels; const struct tegra_smmu_soc *smmu; u32 intmask; u32 ch_intmask; u32 global_intstatus_channel_shift; bool has_addr_hi_reg; const struct tegra_mc_reset_ops *reset_ops; const struct tegra_mc_reset *resets; unsigned int num_resets; const struct tegra_mc_icc_ops *icc_ops; const struct tegra_mc_ops *ops; }; enum tegra_icc_client_type { TEGRA_ICC_NONE = 0, TEGRA_ICC_NISO = 1, TEGRA_ICC_ISO_DISPLAY = 2, TEGRA_ICC_ISO_VI = 3, TEGRA_ICC_ISO_AUDIO = 4, TEGRA_ICC_ISO_VIFAL = 5, }; struct tegra_mc_client { unsigned int id; unsigned int bpmp_id; enum tegra_icc_client_type type; const char *name; union { unsigned int swgroup; unsigned int sid; }; unsigned int fifo_size; struct { struct { unsigned int reg; unsigned int bit; } smmu; struct { unsigned int reg; unsigned int shift; unsigned int mask; unsigned int def; } la; struct { unsigned int override; unsigned int security; } sid; } regs; }; struct tegra_smmu_swgroup; struct tegra_smmu_group_soc; struct tegra_smmu_soc { const struct tegra_mc_client *clients; unsigned int num_clients; const struct tegra_smmu_swgroup *swgroups; unsigned int num_swgroups; const struct tegra_smmu_group_soc *groups; unsigned int num_groups; bool supports_round_robin_arbitration; bool supports_request_limit; unsigned int num_tlb_lines; unsigned int num_asids; }; struct tegra_smmu_swgroup { const char *name; unsigned int swgroup; unsigned int reg; }; struct tegra_smmu_group_soc { const char *name; const unsigned int *swgroups; unsigned int num_swgroups; }; struct tegra_mc_reset { const char *name; unsigned long id; unsigned int control; unsigned int status; unsigned int reset; unsigned int bit; }; struct tegra_mc_icc_ops { int (*set)(struct icc_node *, struct icc_node *); int (*aggregate)(struct icc_node *, u32, u32, u32, u32 *, u32 *); struct icc_node * (*xlate)(struct of_phandle_args *, void *); struct icc_node_data * (*xlate_extended)(struct of_phandle_args *, void *); int (*get_bw)(struct icc_node *, u32 *, u32 *); }; struct icc_node { int id; const char *name; struct icc_node **links; size_t num_links; struct icc_provider *provider; struct list_head node_list; struct list_head search_list; struct icc_node *reverse; u8 is_traversed: 1; struct hlist_head req_list; u32 avg_bw; u32 peak_bw; u32 init_avg; u32 init_peak; void *data; }; struct icc_node_data { struct icc_node *node; u32 tag; }; struct tegra_mc_ops { int (*probe)(struct tegra_mc *); void (*remove)(struct tegra_mc *); int (*suspend)(struct tegra_mc *); int (*resume)(struct tegra_mc *); irqreturn_t 
(*handle_irq)(int, void *); int (*probe_device)(struct tegra_mc *, struct device *); }; struct tegra_mc_timing { unsigned long rate; u32 *emem_data; }; enum mrq_bwmgr_int_cmd { CMD_BWMGR_INT_QUERY_ABI = 1, CMD_BWMGR_INT_CALC_AND_SET = 2, CMD_BWMGR_INT_CAP_SET = 3, }; struct cmd_bwmgr_int_calc_and_set_response { uint64_t rate; }; struct mrq_bwmgr_int_response { union { struct cmd_bwmgr_int_calc_and_set_response bwmgr_calc_set_resp; }; }; struct cmd_bwmgr_int_query_abi_request { uint32_t type; }; struct cmd_bwmgr_int_calc_and_set_request { uint32_t client_id; uint32_t niso_bw; uint32_t iso_bw; uint32_t mc_floor; uint8_t floor_unit; } __attribute__((packed)); struct cmd_bwmgr_int_cap_set_request { uint64_t rate; }; struct mrq_bwmgr_int_request { uint32_t cmd; union { struct cmd_bwmgr_int_query_abi_request query_abi; struct cmd_bwmgr_int_calc_and_set_request bwmgr_calc_set_req; struct cmd_bwmgr_int_cap_set_request bwmgr_cap_set_req; }; } __attribute__((packed)); struct tegra186_emc_dvfs; struct tegra186_emc { struct tegra_bpmp *bpmp; struct device *dev; struct clk *clk; struct tegra186_emc_dvfs *dvfs; unsigned int num_dvfs; struct { struct dentry *root; unsigned long min_rate; unsigned long max_rate; } debugfs; struct icc_provider provider; }; struct tegra186_emc_dvfs { unsigned long latency; unsigned long rate; }; struct emc_dvfs_latency { uint32_t freq; uint32_t latency; }; struct mrq_emc_dvfs_latency_response { uint32_t num_pairs; struct emc_dvfs_latency pairs[14]; }; struct iio_dev; struct iio_buffer_setup_ops { int (*preenable)(struct iio_dev *); int (*postenable)(struct iio_dev *); int (*predisable)(struct iio_dev *); int (*postdisable)(struct iio_dev *); bool (*validate_scan_mask)(struct iio_dev *, const unsigned long *); }; struct iio_buffer; struct iio_trigger; struct iio_poll_func; struct iio_chan_spec; struct iio_info; struct iio_dev { int modes; struct device dev; struct iio_buffer *buffer; int scan_bytes; const unsigned long *available_scan_masks; unsigned int masklength; const unsigned long *active_scan_mask; bool scan_timestamp; struct iio_trigger *trig; struct iio_poll_func *pollfunc; struct iio_poll_func *pollfunc_event; const struct iio_chan_spec *channels; int num_channels; const char *name; const char *label; const struct iio_info *info; const struct iio_buffer_setup_ops *setup_ops; void *priv; }; enum iio_buffer_direction { IIO_BUFFER_DIRECTION_IN = 0, IIO_BUFFER_DIRECTION_OUT = 1, }; struct iio_buffer_access_funcs; struct iio_dev_attr; struct iio_buffer { unsigned int length; unsigned long flags; size_t bytes_per_datum; enum iio_buffer_direction direction; const struct iio_buffer_access_funcs *access; long *scan_mask; struct list_head demux_list; wait_queue_head_t pollq; unsigned int watermark; bool scan_timestamp; struct list_head buffer_attr_list; struct attribute_group buffer_group; const struct iio_dev_attr **attrs; void *demux_bounce; struct list_head attached_entry; struct list_head buffer_list; struct kref ref; }; struct iio_buffer_access_funcs { int (*store_to)(struct iio_buffer *, const void *); int (*read)(struct iio_buffer *, size_t, char __attribute__((btf_type_tag("user"))) *); size_t (*data_available)(struct iio_buffer *); int (*remove_from)(struct iio_buffer *, void *); int (*write)(struct iio_buffer *, size_t, const char __attribute__((btf_type_tag("user"))) *); size_t (*space_available)(struct iio_buffer *); int (*request_update)(struct iio_buffer *); int (*set_bytes_per_datum)(struct iio_buffer *, size_t); int (*set_length)(struct iio_buffer *, unsigned 
int); int (*enable)(struct iio_buffer *, struct iio_dev *); int (*disable)(struct iio_buffer *, struct iio_dev *); void (*release)(struct iio_buffer *); unsigned int modes; unsigned int flags; }; struct iio_dev_attr { struct device_attribute dev_attr; u64 address; struct list_head l; const struct iio_chan_spec *c; struct iio_buffer *buffer; }; enum iio_chan_type { IIO_VOLTAGE = 0, IIO_CURRENT = 1, IIO_POWER = 2, IIO_ACCEL = 3, IIO_ANGL_VEL = 4, IIO_MAGN = 5, IIO_LIGHT = 6, IIO_INTENSITY = 7, IIO_PROXIMITY = 8, IIO_TEMP = 9, IIO_INCLI = 10, IIO_ROT = 11, IIO_ANGL = 12, IIO_TIMESTAMP = 13, IIO_CAPACITANCE = 14, IIO_ALTVOLTAGE = 15, IIO_CCT = 16, IIO_PRESSURE = 17, IIO_HUMIDITYRELATIVE = 18, IIO_ACTIVITY = 19, IIO_STEPS = 20, IIO_ENERGY = 21, IIO_DISTANCE = 22, IIO_VELOCITY = 23, IIO_CONCENTRATION = 24, IIO_RESISTANCE = 25, IIO_PH = 26, IIO_UVINDEX = 27, IIO_ELECTRICALCONDUCTIVITY = 28, IIO_COUNT = 29, IIO_INDEX = 30, IIO_GRAVITY = 31, IIO_POSITIONRELATIVE = 32, IIO_PHASE = 33, IIO_MASSCONCENTRATION = 34, }; enum iio_endian { IIO_CPU = 0, IIO_BE = 1, IIO_LE = 2, }; struct iio_event_spec; struct iio_chan_spec_ext_info; struct iio_chan_spec { enum iio_chan_type type; int channel; int channel2; unsigned long address; int scan_index; struct { char sign; u8 realbits; u8 storagebits; u8 shift; u8 repeat; enum iio_endian endianness; } scan_type; long info_mask_separate; long info_mask_separate_available; long info_mask_shared_by_type; long info_mask_shared_by_type_available; long info_mask_shared_by_dir; long info_mask_shared_by_dir_available; long info_mask_shared_by_all; long info_mask_shared_by_all_available; const struct iio_event_spec *event_spec; unsigned int num_event_specs; const struct iio_chan_spec_ext_info *ext_info; const char *extend_name; const char *datasheet_name; unsigned int modified: 1; unsigned int indexed: 1; unsigned int output: 1; unsigned int differential: 1; }; enum iio_event_type { IIO_EV_TYPE_THRESH = 0, IIO_EV_TYPE_MAG = 1, IIO_EV_TYPE_ROC = 2, IIO_EV_TYPE_THRESH_ADAPTIVE = 3, IIO_EV_TYPE_MAG_ADAPTIVE = 4, IIO_EV_TYPE_CHANGE = 5, IIO_EV_TYPE_MAG_REFERENCED = 6, IIO_EV_TYPE_GESTURE = 7, }; enum iio_event_direction { IIO_EV_DIR_EITHER = 0, IIO_EV_DIR_RISING = 1, IIO_EV_DIR_FALLING = 2, IIO_EV_DIR_NONE = 3, IIO_EV_DIR_SINGLETAP = 4, IIO_EV_DIR_DOUBLETAP = 5, }; struct iio_event_spec { enum iio_event_type type; enum iio_event_direction dir; unsigned long mask_separate; unsigned long mask_shared_by_type; unsigned long mask_shared_by_dir; unsigned long mask_shared_by_all; }; enum iio_shared_by { IIO_SEPARATE = 0, IIO_SHARED_BY_TYPE = 1, IIO_SHARED_BY_DIR = 2, IIO_SHARED_BY_ALL = 3, }; struct iio_chan_spec_ext_info { const char *name; enum iio_shared_by shared; ssize_t (*read)(struct iio_dev *, uintptr_t, const struct iio_chan_spec *, char *); ssize_t (*write)(struct iio_dev *, uintptr_t, const struct iio_chan_spec *, const char *, size_t); uintptr_t private; }; enum iio_event_info { IIO_EV_INFO_ENABLE = 0, IIO_EV_INFO_VALUE = 1, IIO_EV_INFO_HYSTERESIS = 2, IIO_EV_INFO_PERIOD = 3, IIO_EV_INFO_HIGH_PASS_FILTER_3DB = 4, IIO_EV_INFO_LOW_PASS_FILTER_3DB = 5, IIO_EV_INFO_TIMEOUT = 6, IIO_EV_INFO_RESET_TIMEOUT = 7, IIO_EV_INFO_TAP2_MIN_DELAY = 8, IIO_EV_INFO_RUNNING_PERIOD = 9, IIO_EV_INFO_RUNNING_COUNT = 10, }; struct iio_info { const struct attribute_group *event_attrs; const struct attribute_group *attrs; int (*read_raw)(struct iio_dev *, const struct iio_chan_spec *, int *, int *, long); int (*read_raw_multi)(struct iio_dev *, const struct iio_chan_spec *, int, int *, int *, 
long); int (*read_avail)(struct iio_dev *, const struct iio_chan_spec *, const int **, int *, int *, long); int (*write_raw)(struct iio_dev *, const struct iio_chan_spec *, int, int, long); int (*read_label)(struct iio_dev *, const struct iio_chan_spec *, char *); int (*write_raw_get_fmt)(struct iio_dev *, const struct iio_chan_spec *, long); int (*read_event_config)(struct iio_dev *, const struct iio_chan_spec *, enum iio_event_type, enum iio_event_direction); int (*write_event_config)(struct iio_dev *, const struct iio_chan_spec *, enum iio_event_type, enum iio_event_direction, int); int (*read_event_value)(struct iio_dev *, const struct iio_chan_spec *, enum iio_event_type, enum iio_event_direction, enum iio_event_info, int *, int *); int (*write_event_value)(struct iio_dev *, const struct iio_chan_spec *, enum iio_event_type, enum iio_event_direction, enum iio_event_info, int, int); int (*validate_trigger)(struct iio_dev *, struct iio_trigger *); int (*update_scan_mode)(struct iio_dev *, const unsigned long *); int (*debugfs_reg_access)(struct iio_dev *, unsigned int, unsigned int, unsigned int *); int (*fwnode_xlate)(struct iio_dev *, const struct fwnode_reference_args *); int (*hwfifo_set_watermark)(struct iio_dev *, unsigned int); int (*hwfifo_flush_to_buffer)(struct iio_dev *, unsigned int); }; struct iio_mount_matrix { const char *rotation[9]; }; enum iio_available_type { IIO_AVAIL_LIST = 0, IIO_AVAIL_RANGE = 1, }; struct iio_event_interface; struct iio_ioctl_handler; struct iio_dev_opaque { struct iio_dev indio_dev; int currentmode; int id; struct module *driver_module; struct mutex mlock; struct lock_class_key mlock_key; struct mutex info_exist_lock; bool trig_readonly; struct iio_event_interface *event_interface; struct iio_buffer **attached_buffers; unsigned int attached_buffers_cnt; struct iio_ioctl_handler *buffer_ioctl_handler; struct list_head buffer_list; struct list_head channel_attr_list; struct attribute_group chan_attr_group; struct list_head ioctl_handlers; const struct attribute_group **groups; int groupcounter; struct attribute_group legacy_scan_el_group; struct attribute_group legacy_buffer_group; void *bounce_buffer; size_t bounce_buffer_size; unsigned int scan_index_timestamp; clockid_t clock_id; struct cdev chrdev; unsigned long flags; struct dentry *debugfs_dentry; unsigned int cached_reg_addr; char read_buf[20]; unsigned int read_buf_len; }; struct iio_ioctl_handler { struct list_head entry; long (*ioctl)(struct iio_dev *, struct file *, unsigned int, unsigned long); }; struct iio_const_attr { const char *string; struct device_attribute dev_attr; }; struct iio_enum { const char * const *items; unsigned int num_items; int (*set)(struct iio_dev *, const struct iio_chan_spec *, unsigned int); int (*get)(struct iio_dev *, const struct iio_chan_spec *); }; typedef const struct iio_mount_matrix *iio_get_mount_matrix_t(const struct iio_dev *, const struct iio_chan_spec *); struct iio_event_data { __u64 id; __s64 timestamp; }; struct iio_event_interface { wait_queue_head_t wait; struct { union { struct __kfifo kfifo; struct iio_event_data *type; const struct iio_event_data *const_type; char (*rectype)[0]; struct iio_event_data *ptr; const struct iio_event_data *ptr_const; }; struct iio_event_data buf[16]; } det_events; struct list_head dev_attr_list; unsigned long flags; struct attribute_group group; struct mutex read_lock; struct iio_ioctl_handler ioctl_handler; }; struct iio_dev_buffer_pair { struct iio_dev *indio_dev; struct iio_buffer *buffer; }; enum 
iio_chan_info_enum { IIO_CHAN_INFO_RAW = 0, IIO_CHAN_INFO_PROCESSED = 1, IIO_CHAN_INFO_SCALE = 2, IIO_CHAN_INFO_OFFSET = 3, IIO_CHAN_INFO_CALIBSCALE = 4, IIO_CHAN_INFO_CALIBBIAS = 5, IIO_CHAN_INFO_PEAK = 6, IIO_CHAN_INFO_PEAK_SCALE = 7, IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW = 8, IIO_CHAN_INFO_AVERAGE_RAW = 9, IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY = 10, IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY = 11, IIO_CHAN_INFO_SAMP_FREQ = 12, IIO_CHAN_INFO_FREQUENCY = 13, IIO_CHAN_INFO_PHASE = 14, IIO_CHAN_INFO_HARDWAREGAIN = 15, IIO_CHAN_INFO_HYSTERESIS = 16, IIO_CHAN_INFO_HYSTERESIS_RELATIVE = 17, IIO_CHAN_INFO_INT_TIME = 18, IIO_CHAN_INFO_ENABLE = 19, IIO_CHAN_INFO_CALIBHEIGHT = 20, IIO_CHAN_INFO_CALIBWEIGHT = 21, IIO_CHAN_INFO_DEBOUNCE_COUNT = 22, IIO_CHAN_INFO_DEBOUNCE_TIME = 23, IIO_CHAN_INFO_CALIBEMISSIVITY = 24, IIO_CHAN_INFO_OVERSAMPLING_RATIO = 25, IIO_CHAN_INFO_THERMOCOUPLE_TYPE = 26, IIO_CHAN_INFO_CALIBAMBIENT = 27, IIO_CHAN_INFO_ZEROPOINT = 28, }; struct iio_map; struct iio_map_internal { struct iio_dev *indio_dev; struct iio_map *map; struct list_head l; }; struct iio_map { const char *adc_channel_label; const char *consumer_dev_name; const char *consumer_channel; void *consumer_data; }; struct iio_channel { struct iio_dev *indio_dev; const struct iio_chan_spec *channel; void *data; }; struct iio_demux_table { unsigned int from; unsigned int to; unsigned int length; struct list_head l; }; struct iio_device_config { unsigned int mode; unsigned int watermark; const unsigned long *scan_mask; unsigned int scan_bytes; bool scan_timestamp; }; struct iio_subirq { bool enabled; }; struct iio_trigger_ops; struct iio_trigger { const struct iio_trigger_ops *ops; struct module *owner; int id; const char *name; struct device dev; struct list_head list; struct list_head alloc_list; atomic_t use_count; struct irq_chip subirq_chip; int subirq_base; struct iio_subirq subirqs[2]; unsigned long pool[1]; struct mutex pool_lock; bool attached_own_device; struct work_struct reenable_work; }; struct iio_trigger_ops { int (*set_trigger_state)(struct iio_trigger *, bool); void (*reenable)(struct iio_trigger *); int (*validate_device)(struct iio_trigger *, struct iio_dev *); }; struct iio_poll_func { struct iio_dev *indio_dev; irqreturn_t (*h)(int, void *); irqreturn_t (*thread)(int, void *); int type; char *name; int irq; s64 timestamp; }; struct dtpm; struct dtpm_subsys_ops { const char *name; int (*init)(); void (*exit)(); int (*setup)(struct dtpm *, struct device_node *); }; struct powercap_zone_ops; struct powercap_zone_constraint; struct powercap_zone { int id; char *name; void *control_type_inst; const struct powercap_zone_ops *ops; struct device dev; int const_id_cnt; struct idr idr; struct idr *parent_idr; void *private_data; struct attribute **zone_dev_attrs; int zone_attr_count; struct attribute_group dev_zone_attr_group; const struct attribute_group *dev_attr_groups[2]; bool allocated; struct powercap_zone_constraint *constraints; }; struct dtpm_ops; struct dtpm { struct powercap_zone zone; struct dtpm *parent; struct list_head sibling; struct list_head children; struct dtpm_ops *ops; unsigned long flags; u64 power_limit; u64 power_max; u64 power_min; int weight; }; struct powercap_zone_ops { int (*get_max_energy_range_uj)(struct powercap_zone *, u64 *); int (*get_energy_uj)(struct powercap_zone *, u64 *); int (*reset_energy_uj)(struct powercap_zone *); int (*get_max_power_range_uw)(struct powercap_zone *, u64 *); int (*get_power_uw)(struct powercap_zone *, u64 *); int (*set_enable)(struct 
powercap_zone *, bool); int (*get_enable)(struct powercap_zone *, bool *); int (*release)(struct powercap_zone *); }; struct powercap_zone_constraint_ops; struct powercap_zone_constraint { int id; struct powercap_zone *power_zone; const struct powercap_zone_constraint_ops *ops; }; struct powercap_zone_constraint_ops { int (*set_power_limit_uw)(struct powercap_zone *, int, u64); int (*get_power_limit_uw)(struct powercap_zone *, int, u64 *); int (*set_time_window_us)(struct powercap_zone *, int, u64); int (*get_time_window_us)(struct powercap_zone *, int, u64 *); int (*get_max_power_uw)(struct powercap_zone *, int, u64 *); int (*get_min_power_uw)(struct powercap_zone *, int, u64 *); int (*get_max_time_window_us)(struct powercap_zone *, int, u64 *); int (*get_min_time_window_us)(struct powercap_zone *, int, u64 *); const char * (*get_name)(struct powercap_zone *, int); }; struct dtpm_ops { u64 (*set_power_uw)(struct dtpm *, u64); u64 (*get_power_uw)(struct dtpm *); int (*update_power_uw)(struct dtpm *); void (*release)(struct dtpm *); }; struct powercap_control_type_ops; struct powercap_control_type { struct device dev; struct idr idr; int nr_zones; const struct powercap_control_type_ops *ops; struct mutex lock; bool allocated; struct list_head node; }; struct powercap_control_type_ops { int (*set_enable)(struct powercap_control_type *, bool); int (*get_enable)(struct powercap_control_type *, bool *); int (*release)(struct powercap_control_type *); }; struct dtpm_node; typedef struct dtpm * (*dtpm_node_callback_t)(const struct dtpm_node *, struct dtpm *); enum DTPM_NODE_TYPE { DTPM_NODE_VIRTUAL = 0, DTPM_NODE_DT = 1, }; struct dtpm_node { enum DTPM_NODE_TYPE type; const char *name; struct dtpm_node *parent; }; struct dtpm_cpu { struct dtpm dtpm; struct freq_qos_request qos_req; int cpu; }; struct dtpm_devfreq { struct dtpm dtpm; struct dev_pm_qos_request qos_req; struct devfreq *devfreq; }; struct powercap_constraint_attr { struct device_attribute power_limit_attr; struct device_attribute time_window_attr; struct device_attribute max_power_attr; struct device_attribute min_power_attr; struct device_attribute max_time_window_attr; struct device_attribute min_time_window_attr; struct device_attribute name_attr; }; struct idle_inject_thread { struct task_struct *tsk; int should_run; }; struct idle_inject_device { struct hrtimer timer; unsigned int idle_duration_us; unsigned int run_duration_us; unsigned int latency_us; bool (*update)(); unsigned long cpumask[0]; }; struct pmu_irq_ops { void (*enable_pmuirq)(unsigned int); void (*disable_pmuirq)(unsigned int); void (*free_pmuirq)(unsigned int, int, void __attribute__((btf_type_tag("percpu"))) *); }; enum armpmu_attr_groups { ARMPMU_ATTR_GROUP_COMMON = 0, ARMPMU_ATTR_GROUP_EVENTS = 1, ARMPMU_ATTR_GROUP_FORMATS = 2, ARMPMU_ATTR_GROUP_CAPS = 3, ARMPMU_NR_ATTR_GROUPS = 4, }; enum perf_hw_id { PERF_COUNT_HW_CPU_CYCLES = 0, PERF_COUNT_HW_INSTRUCTIONS = 1, PERF_COUNT_HW_CACHE_REFERENCES = 2, PERF_COUNT_HW_CACHE_MISSES = 3, PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, PERF_COUNT_HW_BRANCH_MISSES = 5, PERF_COUNT_HW_BUS_CYCLES = 6, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, PERF_COUNT_HW_REF_CPU_CYCLES = 9, PERF_COUNT_HW_MAX = 10, }; enum perf_hw_cache_id { PERF_COUNT_HW_CACHE_L1D = 0, PERF_COUNT_HW_CACHE_L1I = 1, PERF_COUNT_HW_CACHE_LL = 2, PERF_COUNT_HW_CACHE_DTLB = 3, PERF_COUNT_HW_CACHE_ITLB = 4, PERF_COUNT_HW_CACHE_BPU = 5, PERF_COUNT_HW_CACHE_NODE = 6, PERF_COUNT_HW_CACHE_MAX = 7, }; enum perf_hw_cache_op_id { 
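/*
 * Illustrative note (not emitted by the BTF dump): perf encodes a hardware
 * cache event as a combination of the three cache enums around this point,
 * packed into perf_event_attr.config as
 * (cache_id) | (op_id << 8) | (result_id << 16), with perf_event_attr.type set
 * to the UAPI PERF_TYPE_HW_CACHE constant. A hypothetical helper building such
 * a config value:
 *
 *   static u64 hw_cache_config(enum perf_hw_cache_id id,
 *                              enum perf_hw_cache_op_id op,
 *                              enum perf_hw_cache_op_result_id result)
 *   {
 *       return (u64)id | ((u64)op << 8) | ((u64)result << 16);
 *   }
 *
 * For example, L1 data-cache read misses would be
 *   hw_cache_config(PERF_COUNT_HW_CACHE_L1D, PERF_COUNT_HW_CACHE_OP_READ,
 *                   PERF_COUNT_HW_CACHE_RESULT_MISS)
 */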
PERF_COUNT_HW_CACHE_OP_READ = 0, PERF_COUNT_HW_CACHE_OP_WRITE = 1, PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, PERF_COUNT_HW_CACHE_OP_MAX = 3, }; enum perf_hw_cache_op_result_id { PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, PERF_COUNT_HW_CACHE_RESULT_MISS = 1, PERF_COUNT_HW_CACHE_RESULT_MAX = 2, }; typedef int (*armpmu_init_fn)(struct arm_pmu *); struct pmu_probe_info { unsigned int cpuid; unsigned int mask; armpmu_init_fn init; }; struct armv8pmu_probe_info { struct arm_pmu *pmu; bool present; }; typedef void (*btf_trace_mc_event)(void *, const unsigned int, const char *, const char *, const int, const u8, const s8, const s8, const s8, unsigned long, const u8, unsigned long, const char *); struct cper_sec_proc_arm; typedef void (*btf_trace_arm_event)(void *, const struct cper_sec_proc_arm *); struct cper_sec_proc_arm { u32 validation_bits; u16 err_info_num; u16 context_info_num; u32 section_length; u8 affinity_level; u8 reserved[3]; u64 mpidr; u64 midr; u32 running_state; u32 psci_state; }; typedef void (*btf_trace_non_standard_event)(void *, const guid_t *, const guid_t *, const char *, const u8, const u8 *, const u32); typedef void (*btf_trace_aer_event)(void *, const char *, const u32, const u8, const u8, struct aer_header_log_regs *); struct trace_event_raw_mc_event { struct trace_entry ent; unsigned int error_type; u32 __data_loc_msg; u32 __data_loc_label; u16 error_count; u8 mc_index; s8 top_layer; s8 middle_layer; s8 lower_layer; long address; u8 grain_bits; long syndrome; u32 __data_loc_driver_detail; char __data[0]; }; struct trace_event_raw_arm_event { struct trace_entry ent; u64 mpidr; u64 midr; u32 running_state; u32 psci_state; u8 affinity; char __data[0]; }; struct trace_event_raw_non_standard_event { struct trace_entry ent; char sec_type[16]; char fru_id[16]; u32 __data_loc_fru_text; u8 sev; u32 len; u32 __data_loc_buf; char __data[0]; }; struct trace_event_raw_aer_event { struct trace_entry ent; u32 __data_loc_dev_name; u32 status; u8 severity; u8 tlp_header_valid; u32 tlp_header[4]; char __data[0]; }; struct trace_event_data_offsets_non_standard_event { u32 fru_text; u32 buf; }; struct trace_event_data_offsets_aer_event { u32 dev_name; }; struct trace_event_data_offsets_mc_event { u32 msg; u32 label; u32 driver_detail; }; struct trace_event_data_offsets_arm_event {}; struct binder_features { bool oneway_spam_detection; bool extended_error; }; enum binderfs_stats_mode { binderfs_stats_mode_unset = 0, binderfs_stats_mode_global = 1, }; enum binderfs_param { Opt_max___2 = 0, Opt_stats_mode = 1, }; enum binder_work_type { BINDER_WORK_TRANSACTION = 1, BINDER_WORK_TRANSACTION_COMPLETE = 2, BINDER_WORK_TRANSACTION_PENDING = 3, BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT = 4, BINDER_WORK_RETURN_ERROR = 5, BINDER_WORK_NODE = 6, BINDER_WORK_DEAD_BINDER = 7, BINDER_WORK_DEAD_BINDER_AND_CLEAR = 8, BINDER_WORK_CLEAR_DEATH_NOTIFICATION = 9, }; struct binderfs_device { char name[256]; __u32 major; __u32 minor; }; struct binderfs_mount_opts { int max; int stats_mode; }; struct binderfs_info { struct ipc_namespace *ipc_ns; struct dentry *control_dentry; kuid_t root_uid; kgid_t root_gid; struct binderfs_mount_opts mount_opts; int device_count; struct dentry *proc_log_dir; }; struct binder_node; struct binder_context { struct binder_node *binder_context_mgr_node; struct mutex context_mgr_node_lock; kuid_t binder_context_mgr_uid; const char *name; }; struct binder_device { struct hlist_node hlist; struct miscdevice miscdev; struct binder_context context; struct inode *binderfs_inode; refcount_t ref; }; 
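/*
 * Illustrative helpers, not part of the BTF-generated declarations: several of
 * the raw trace-event records in this header (trace_event_raw_mc_event,
 * trace_event_raw_aer_event and trace_event_raw_devfreq_frequency above, the
 * binder and icc records below) store variable-length strings behind u32
 * __data_loc_* members. Ftrace packs the string's offset from the start of the
 * record into the low 16 bits and its length into the high 16 bits, so a
 * consumer (for example a BPF program reading the raw record) can locate the
 * text roughly as follows.
 */
static inline unsigned int trace_data_loc_offset(u32 loc) { return loc & 0xffff; }
static inline unsigned int trace_data_loc_len(u32 loc) { return loc >> 16; }
/*
 * Example, assuming rec points at a complete trace_event_raw_aer_event:
 *   const char *dev = (const char *)rec +
 *                     trace_data_loc_offset(rec->__data_loc_dev_name);
 */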
struct binder_work { struct list_head entry; enum binder_work_type type; u64 android_oem_data1; }; typedef __u64 binder_uintptr_t; struct binder_proc; struct binder_node { int debug_id; spinlock_t lock; struct binder_work work; union { struct rb_node rb_node; struct hlist_node dead_node; }; struct binder_proc *proc; struct hlist_head refs; int internal_strong_refs; int local_weak_refs; int local_strong_refs; int tmp_refs; binder_uintptr_t ptr; binder_uintptr_t cookie; struct { u8 has_strong_ref: 1; u8 pending_strong_ref: 1; u8 has_weak_ref: 1; u8 pending_weak_ref: 1; }; struct { u8 sched_policy: 2; u8 inherit_rt: 1; u8 accept_fds: 1; u8 txn_security_ctx: 1; u8 min_priority; }; bool has_async_transaction; struct list_head async_todo; }; struct binder_stats { atomic_t br[21]; atomic_t bc[19]; atomic_t obj_created[7]; atomic_t obj_deleted[7]; }; struct binder_priority { unsigned int sched_policy; int prio; }; struct binder_lru_page; struct binder_alloc { spinlock_t lock; struct vm_area_struct *vma; struct mm_struct *mm; unsigned long buffer; struct list_head buffers; struct rb_root free_buffers; struct rb_root allocated_buffers; size_t free_async_space; struct binder_lru_page *pages; size_t buffer_size; int pid; size_t pages_high; bool oneway_spam_detected; u64 android_oem_data1; }; struct binder_proc { struct hlist_node proc_node; struct rb_root threads; struct rb_root nodes; struct rb_root refs_by_desc; struct rb_root refs_by_node; struct list_head waiting_threads; int pid; struct task_struct *tsk; const struct cred *cred; struct hlist_node deferred_work_node; int deferred_work; int outstanding_txns; bool is_dead; bool is_frozen; bool sync_recv; bool async_recv; wait_queue_head_t freeze_wait; struct list_head todo; struct binder_stats stats; struct list_head delivered_death; u32 max_threads; int requested_threads; int requested_threads_started; int tmp_ref; struct binder_priority default_priority; struct dentry *debugfs_entry; struct binder_alloc alloc; struct binder_context *context; spinlock_t inner_lock; spinlock_t outer_lock; struct dentry *binderfs_entry; bool oneway_spam_detection_enabled; u64 android_oem_data1; }; struct binder_lru_page { struct list_head lru; struct page *page_ptr; struct binder_alloc *alloc; }; struct binder_debugfs_entry { const char *name; umode_t mode; const struct file_operations *fops; void *data; }; typedef void (*btf_trace_binder_ioctl)(void *, unsigned int, unsigned long); typedef void (*btf_trace_binder_lock)(void *, const char *); typedef void (*btf_trace_binder_locked)(void *, const char *); typedef void (*btf_trace_binder_unlock)(void *, const char *); typedef void (*btf_trace_binder_ioctl_done)(void *, int); typedef void (*btf_trace_binder_write_done)(void *, int); typedef void (*btf_trace_binder_read_done)(void *, int); typedef void (*btf_trace_binder_set_priority)(void *, int, int, unsigned int, unsigned int, unsigned int); typedef void (*btf_trace_binder_wait_for_work)(void *, bool, bool, bool); struct binder_transaction; typedef void (*btf_trace_binder_txn_latency_free)(void *, struct binder_transaction *, int, int, int, int); struct binder_thread; struct binder_buffer; struct binder_transaction { int debug_id; struct binder_work work; struct binder_thread *from; pid_t from_pid; pid_t from_tid; struct binder_transaction *from_parent; struct binder_proc *to_proc; struct binder_thread *to_thread; struct binder_transaction *to_parent; unsigned int need_reply: 1; struct binder_buffer *buffer; unsigned int code; unsigned int flags; struct 
binder_priority priority; struct binder_priority saved_priority; bool set_priority_called; bool is_nested; kuid_t sender_euid; ktime_t start_time; struct list_head fd_fixups; binder_uintptr_t security_ctx; spinlock_t lock; u64 android_vendor_data1; u64 android_oem_data1; }; struct binder_error { struct binder_work work; uint32_t cmd; }; struct binder_extended_error { __u32 id; __u32 command; __s32 param; }; enum binder_prio_state { BINDER_PRIO_SET = 0, BINDER_PRIO_PENDING = 1, BINDER_PRIO_ABORT = 2, }; struct binder_thread { struct binder_proc *proc; struct rb_node rb_node; struct list_head waiting_thread_node; int pid; int looper; bool looper_need_return; struct binder_transaction *transaction_stack; struct list_head todo; bool process_todo; struct binder_error return_error; struct binder_error reply_error; struct binder_extended_error ee; wait_queue_head_t wait; struct binder_stats stats; atomic_t tmp_ref; bool is_dead; struct task_struct *task; spinlock_t prio_lock; struct binder_priority prio_next; enum binder_prio_state prio_state; }; struct binder_buffer { struct list_head entry; struct rb_node rb_node; unsigned int free: 1; unsigned int clear_on_free: 1; unsigned int allow_user_free: 1; unsigned int async_transaction: 1; unsigned int oneway_spam_suspect: 1; unsigned int debug_id: 27; struct binder_transaction *transaction; struct binder_node *target_node; size_t data_size; size_t offsets_size; size_t extra_buffers_size; unsigned long user_data; int pid; }; typedef void (*btf_trace_binder_transaction)(void *, bool, struct binder_transaction *, struct binder_node *); typedef void (*btf_trace_binder_transaction_received)(void *, struct binder_transaction *); struct binder_ref_data; typedef void (*btf_trace_binder_transaction_node_to_ref)(void *, struct binder_transaction *, struct binder_node *, struct binder_ref_data *); struct binder_ref_data { int debug_id; uint32_t desc; int strong; int weak; }; typedef void (*btf_trace_binder_transaction_ref_to_node)(void *, struct binder_transaction *, struct binder_node *, struct binder_ref_data *); typedef void (*btf_trace_binder_transaction_ref_to_ref)(void *, struct binder_transaction *, struct binder_node *, struct binder_ref_data *, struct binder_ref_data *); typedef void (*btf_trace_binder_transaction_fd_send)(void *, struct binder_transaction *, int, size_t); typedef void (*btf_trace_binder_transaction_fd_recv)(void *, struct binder_transaction *, int, size_t); typedef void (*btf_trace_binder_transaction_alloc_buf)(void *, struct binder_buffer *); typedef void (*btf_trace_binder_transaction_buffer_release)(void *, struct binder_buffer *); typedef void (*btf_trace_binder_transaction_failed_buffer_release)(void *, struct binder_buffer *); typedef void (*btf_trace_binder_transaction_update_buffer_release)(void *, struct binder_buffer *); typedef void (*btf_trace_binder_update_page_range)(void *, struct binder_alloc *, bool, unsigned long, unsigned long); typedef void (*btf_trace_binder_alloc_lru_start)(void *, const struct binder_alloc *, size_t); typedef void (*btf_trace_binder_alloc_lru_end)(void *, const struct binder_alloc *, size_t); typedef void (*btf_trace_binder_free_lru_start)(void *, const struct binder_alloc *, size_t); typedef void (*btf_trace_binder_free_lru_end)(void *, const struct binder_alloc *, size_t); typedef void (*btf_trace_binder_alloc_page_start)(void *, const struct binder_alloc *, size_t); typedef void (*btf_trace_binder_alloc_page_end)(void *, const struct binder_alloc *, size_t); typedef void 
(*btf_trace_binder_unmap_user_start)(void *, const struct binder_alloc *, size_t); typedef void (*btf_trace_binder_unmap_user_end)(void *, const struct binder_alloc *, size_t); typedef void (*btf_trace_binder_unmap_kernel_start)(void *, const struct binder_alloc *, size_t); typedef void (*btf_trace_binder_unmap_kernel_end)(void *, const struct binder_alloc *, size_t); typedef void (*btf_trace_binder_command)(void *, uint32_t); typedef void (*btf_trace_binder_return)(void *, uint32_t); struct binder_transaction_log_entry { int debug_id; int debug_id_done; int call_type; int from_proc; int from_thread; int target_handle; int to_proc; int to_thread; int to_node; int data_size; int offsets_size; int return_error_line; uint32_t return_error; uint32_t return_error_param; char context_name[256]; }; struct binder_transaction_log { atomic_t cur; bool full; struct binder_transaction_log_entry entry[32]; }; enum { BINDER_LOOPER_STATE_REGISTERED = 1, BINDER_LOOPER_STATE_ENTERED = 2, BINDER_LOOPER_STATE_EXITED = 4, BINDER_LOOPER_STATE_INVALID = 8, BINDER_LOOPER_STATE_WAITING = 16, BINDER_LOOPER_STATE_POLL = 32, }; enum binder_stat_types { BINDER_STAT_PROC = 0, BINDER_STAT_THREAD = 1, BINDER_STAT_NODE = 2, BINDER_STAT_REF = 3, BINDER_STAT_DEATH = 4, BINDER_STAT_TRANSACTION = 5, BINDER_STAT_TRANSACTION_COMPLETE = 6, BINDER_STAT_COUNT = 7, }; enum binder_driver_return_protocol { BR_ERROR = 2147774976, BR_OK = 29185, BR_TRANSACTION_SEC_CTX = 2152231426, BR_TRANSACTION = 2151707138, BR_REPLY = 2151707139, BR_ACQUIRE_RESULT = 2147774980, BR_DEAD_REPLY = 29189, BR_TRANSACTION_COMPLETE = 29190, BR_INCREFS = 2148561415, BR_ACQUIRE = 2148561416, BR_RELEASE = 2148561417, BR_DECREFS = 2148561418, BR_ATTEMPT_ACQUIRE = 2149085707, BR_NOOP = 29196, BR_SPAWN_LOOPER = 29197, BR_FINISHED = 29198, BR_DEAD_BINDER = 2148037135, BR_CLEAR_DEATH_NOTIFICATION_DONE = 2148037136, BR_FAILED_REPLY = 29201, BR_FROZEN_REPLY = 29202, BR_ONEWAY_SPAM_SUSPECT = 29203, BR_TRANSACTION_PENDING_FROZEN = 29204, }; enum { BINDER_DEBUG_USER_ERROR = 1, BINDER_DEBUG_FAILED_TRANSACTION = 2, BINDER_DEBUG_DEAD_TRANSACTION = 4, BINDER_DEBUG_OPEN_CLOSE = 8, BINDER_DEBUG_DEAD_BINDER = 16, BINDER_DEBUG_DEATH_NOTIFICATION = 32, BINDER_DEBUG_READ_WRITE = 64, BINDER_DEBUG_USER_REFS = 128, BINDER_DEBUG_THREADS = 256, BINDER_DEBUG_TRANSACTION = 512, BINDER_DEBUG_TRANSACTION_COMPLETE = 1024, BINDER_DEBUG_FREE_BUFFER = 2048, BINDER_DEBUG_INTERNAL_REFS = 4096, BINDER_DEBUG_PRIORITY_CAP = 8192, BINDER_DEBUG_SPINLOCKS = 16384, }; enum { BINDER_WRITE_READ = 3224396289, BINDER_SET_IDLE_TIMEOUT = 1074291203, BINDER_SET_MAX_THREADS = 1074029061, BINDER_SET_IDLE_PRIORITY = 1074029062, BINDER_SET_CONTEXT_MGR = 1074029063, BINDER_THREAD_EXIT = 1074029064, BINDER_VERSION = 3221512713, BINDER_GET_NODE_DEBUG_INFO = 3222823435, BINDER_GET_NODE_INFO_FOR_REF = 3222823436, BINDER_SET_CONTEXT_MGR_EXT = 1075339789, BINDER_FREEZE = 1074553358, BINDER_GET_FROZEN_INFO = 3222037007, BINDER_ENABLE_ONEWAY_SPAM_DETECTION = 1074029072, BINDER_GET_EXTENDED_ERROR = 3222037009, }; enum binder_driver_command_protocol { BC_TRANSACTION = 1077961472, BC_REPLY = 1077961473, BC_ACQUIRE_RESULT = 1074029314, BC_FREE_BUFFER = 1074291459, BC_INCREFS = 1074029316, BC_ACQUIRE = 1074029317, BC_RELEASE = 1074029318, BC_DECREFS = 1074029319, BC_INCREFS_DONE = 1074815752, BC_ACQUIRE_DONE = 1074815753, BC_ATTEMPT_ACQUIRE = 1074291466, BC_REGISTER_LOOPER = 25355, BC_ENTER_LOOPER = 25356, BC_EXIT_LOOPER = 25357, BC_REQUEST_DEATH_NOTIFICATION = 1074553614, BC_CLEAR_DEATH_NOTIFICATION = 1074553615, 
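/*
 * Illustrative note (not part of the generated header): the large numeric
 * values in the BR_*, BC_* and BINDER_* enums are ordinary Linux ioctl
 * encodings. Bits 0-7 hold the command number, bits 8-15 the type character,
 * bits 16-29 the argument size and bits 30-31 the direction, so they can be
 * decoded with hypothetical helpers such as:
 *
 *   static unsigned int ioc_nr(u32 cmd)   { return cmd & 0xff; }
 *   static unsigned int ioc_type(u32 cmd) { return (cmd >> 8) & 0xff; }
 *   static unsigned int ioc_size(u32 cmd) { return (cmd >> 16) & 0x3fff; }
 *   static unsigned int ioc_dir(u32 cmd)  { return cmd >> 30; }
 *
 * For example BINDER_WRITE_READ (3224396289 == 0xc0306201) decodes to
 * direction 3 (read/write), size 0x30 (sizeof(struct binder_write_read)),
 * type 'b' and command number 1.
 */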
BC_DEAD_BINDER_DONE = 1074291472, BC_TRANSACTION_SG = 1078485777, BC_REPLY_SG = 1078485778, }; enum { BINDER_TYPE_BINDER = 1935813253, BINDER_TYPE_WEAK_BINDER = 2002922117, BINDER_TYPE_HANDLE = 1936206469, BINDER_TYPE_WEAK_HANDLE = 2003315333, BINDER_TYPE_FD = 1717840517, BINDER_TYPE_FDA = 1717854597, BINDER_TYPE_PTR = 1886661253, }; enum transaction_flags { TF_ONE_WAY = 1, TF_ROOT_OBJECT = 4, TF_STATUS_CODE = 8, TF_ACCEPT_FDS = 16, TF_CLEAR_BUF = 32, TF_UPDATE_TXN = 64, }; enum flat_binder_object_flags { FLAT_BINDER_FLAG_PRIORITY_MASK = 255, FLAT_BINDER_FLAG_ACCEPTS_FDS = 256, FLAT_BINDER_FLAG_SCHED_POLICY_MASK = 1536, FLAT_BINDER_FLAG_INHERIT_RT = 2048, FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 4096, }; enum flat_binder_object_shifts { FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT = 9, }; enum { BINDER_BUFFER_FLAG_HAS_PARENT = 1, }; enum binder_deferred_state { BINDER_DEFERRED_FLUSH = 1, BINDER_DEFERRED_RELEASE = 2, }; struct binder_ref_death { struct binder_work work; binder_uintptr_t cookie; }; struct binder_ref { struct binder_ref_data data; struct rb_node rb_node_desc; struct rb_node rb_node_node; struct hlist_node node_entry; struct binder_proc *proc; struct binder_node *node; struct binder_ref_death *death; }; struct dbitmap { unsigned int nbits; unsigned long *map; }; struct binder_proc_wrap { struct binder_proc proc; struct dbitmap dmap; }; struct binder_object_header { __u32 type; }; struct flat_binder_object { struct binder_object_header hdr; __u32 flags; union { binder_uintptr_t binder; __u32 handle; }; binder_uintptr_t cookie; }; typedef __u64 binder_size_t; struct binder_fd_array_object { struct binder_object_header hdr; __u32 pad; binder_size_t num_fds; binder_size_t parent; binder_size_t parent_offset; }; struct binder_task_work_cb { struct callback_head twork; struct file *file; }; struct binder_fd_object { struct binder_object_header hdr; __u32 pad_flags; union { binder_uintptr_t pad_binder; __u32 fd; }; binder_uintptr_t cookie; }; struct binder_buffer_object { struct binder_object_header hdr; __u32 flags; binder_uintptr_t buffer; binder_size_t length; binder_size_t parent; binder_size_t parent_offset; }; struct binder_ptr_fixup { binder_size_t offset; size_t skip_size; binder_uintptr_t fixup_data; struct list_head node; }; struct binder_sg_copy { binder_size_t offset; const void __attribute__((btf_type_tag("user"))) *sender_uaddr; size_t length; struct list_head node; }; struct binder_txn_fd_fixup { struct list_head fixup_entry; struct file *file; size_t offset; int target_fd; }; struct trace_event_raw_binder_ioctl { struct trace_entry ent; unsigned int cmd; unsigned long arg; char __data[0]; }; struct trace_event_raw_binder_lock_class { struct trace_entry ent; const char *tag; char __data[0]; }; struct trace_event_raw_binder_function_return_class { struct trace_entry ent; int ret; char __data[0]; }; struct trace_event_raw_binder_set_priority { struct trace_entry ent; int proc; int thread; unsigned int old_prio; unsigned int new_prio; unsigned int desired_prio; char __data[0]; }; struct trace_event_raw_binder_wait_for_work { struct trace_entry ent; bool proc_work; bool transaction_stack; bool thread_todo; char __data[0]; }; struct trace_event_raw_binder_txn_latency_free { struct trace_entry ent; int debug_id; int from_proc; int from_thread; int to_proc; int to_thread; unsigned int code; unsigned int flags; char __data[0]; }; struct trace_event_raw_binder_transaction { struct trace_entry ent; int debug_id; int target_node; int to_proc; int to_thread; int reply; unsigned int code; 
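/*
 * Hedged sketch of how a BPF CO-RE program built against this header might
 * observe the binder_transaction tracepoint whose raw record is declared
 * around this comment. It assumes libbpf's bpf_helpers.h, bpf_tracing.h and
 * bpf_core_read.h are included alongside this vmlinux.h; the section name
 * follows the usual tp_btf convention and the program name is illustrative.
 *
 *   SEC("tp_btf/binder_transaction")
 *   int BPF_PROG(on_binder_txn, bool reply, struct binder_transaction *t,
 *                struct binder_node *target_node)
 *   {
 *       unsigned int flags = BPF_CORE_READ(t, flags);
 *
 *       if (flags & TF_ONE_WAY)
 *           bpf_printk("oneway binder txn %d", BPF_CORE_READ(t, debug_id));
 *       return 0;
 *   }
 */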
unsigned int flags; char __data[0]; }; struct trace_event_raw_binder_transaction_received { struct trace_entry ent; int debug_id; char __data[0]; }; struct trace_event_raw_binder_transaction_node_to_ref { struct trace_entry ent; int debug_id; int node_debug_id; binder_uintptr_t node_ptr; int ref_debug_id; uint32_t ref_desc; char __data[0]; }; struct trace_event_raw_binder_transaction_ref_to_node { struct trace_entry ent; int debug_id; int ref_debug_id; uint32_t ref_desc; int node_debug_id; binder_uintptr_t node_ptr; char __data[0]; }; struct trace_event_raw_binder_transaction_ref_to_ref { struct trace_entry ent; int debug_id; int node_debug_id; int src_ref_debug_id; uint32_t src_ref_desc; int dest_ref_debug_id; uint32_t dest_ref_desc; char __data[0]; }; struct trace_event_raw_binder_transaction_fd_send { struct trace_entry ent; int debug_id; int fd; size_t offset; char __data[0]; }; struct trace_event_raw_binder_transaction_fd_recv { struct trace_entry ent; int debug_id; int fd; size_t offset; char __data[0]; }; struct trace_event_raw_binder_buffer_class { struct trace_entry ent; int debug_id; size_t data_size; size_t offsets_size; size_t extra_buffers_size; char __data[0]; }; struct trace_event_raw_binder_update_page_range { struct trace_entry ent; int proc; bool allocate; size_t offset; size_t size; char __data[0]; }; struct trace_event_raw_binder_lru_page_class { struct trace_entry ent; int proc; size_t page_index; char __data[0]; }; struct trace_event_raw_binder_command { struct trace_entry ent; uint32_t cmd; char __data[0]; }; struct trace_event_raw_binder_return { struct trace_entry ent; uint32_t cmd; char __data[0]; }; struct binder_freeze_info { __u32 pid; __u32 enable; __u32 timeout_ms; }; struct binder_transaction_data { union { __u32 handle; binder_uintptr_t ptr; } target; binder_uintptr_t cookie; __u32 code; __u32 flags; __kernel_pid_t sender_pid; __kernel_uid32_t sender_euid; binder_size_t data_size; binder_size_t offsets_size; union { struct { binder_uintptr_t buffer; binder_uintptr_t offsets; } ptr; __u8 buf[8]; } data; }; struct binder_transaction_data_sg { struct binder_transaction_data transaction_data; binder_size_t buffers_size; }; struct binder_transaction_data_secctx { struct binder_transaction_data transaction_data; binder_uintptr_t secctx; }; struct binder_object { union { struct binder_object_header hdr; struct flat_binder_object fbo; struct binder_fd_object fdo; struct binder_buffer_object bbo; struct binder_fd_array_object fdao; }; }; struct binder_node_info_for_ref { __u32 handle; __u32 strong_count; __u32 weak_count; __u32 reserved1; __u32 reserved2; __u32 reserved3; }; struct binder_node_debug_info { binder_uintptr_t ptr; binder_uintptr_t cookie; __u32 has_strong_ref; __u32 has_weak_ref; }; struct binder_frozen_status_info { __u32 pid; __u32 sync_recv; __u32 async_recv; }; struct binder_version { __s32 protocol_version; }; struct trace_event_data_offsets_binder_ioctl {}; struct trace_event_data_offsets_binder_lock_class {}; struct trace_event_data_offsets_binder_function_return_class {}; struct trace_event_data_offsets_binder_set_priority {}; struct trace_event_data_offsets_binder_wait_for_work {}; struct trace_event_data_offsets_binder_txn_latency_free {}; struct trace_event_data_offsets_binder_transaction {}; struct trace_event_data_offsets_binder_transaction_received {}; struct trace_event_data_offsets_binder_transaction_node_to_ref {}; struct trace_event_data_offsets_binder_transaction_ref_to_node {}; struct 
trace_event_data_offsets_binder_transaction_ref_to_ref {}; struct trace_event_data_offsets_binder_transaction_fd_send {}; struct trace_event_data_offsets_binder_transaction_fd_recv {}; struct trace_event_data_offsets_binder_buffer_class {}; struct trace_event_data_offsets_binder_update_page_range {}; struct trace_event_data_offsets_binder_lru_page_class {}; struct trace_event_data_offsets_binder_command {}; struct trace_event_data_offsets_binder_return {}; struct binder_write_read { binder_size_t write_size; binder_size_t write_consumed; binder_uintptr_t write_buffer; binder_size_t read_size; binder_size_t read_consumed; binder_uintptr_t read_buffer; }; enum { BINDER_DEBUG_USER_ERROR___2 = 1, BINDER_DEBUG_OPEN_CLOSE___2 = 2, BINDER_DEBUG_BUFFER_ALLOC = 4, BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 8, }; struct ads_entry { char *name; void *addr; }; enum android_debug_symbol { ADS_PER_CPU_START = 0, ADS_PER_CPU_END = 1, ADS_TEXT = 2, ADS_SEND = 3, ADS_MEM_BLOCK = 4, ADS_INIT_MM = 5, ADS_ITERATE_SUPERS = 6, ADS_DROP_SLAB = 7, ADS_FREE_PAGES = 8, ADS_COMPACT_PAGES = 9, ADS_SHOW_MEM = 10, ADS_TOTAL_CMA = 11, ADS_SLAB_CACHES = 12, ADS_SLAB_MUTEX = 13, ADS_END = 14, }; enum android_debug_per_cpu_symbol { ADS_IRQ_STACK_PTR = 0, ADS_DEBUG_PER_CPU_END = 1, }; struct packet_type { __be16 type; bool ignore_outgoing; struct net_device *dev; netdevice_tracker dev_tracker; int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); void (*list_func)(struct list_head *, struct packet_type *, struct net_device *); bool (*id_match)(struct packet_type *, struct sock *); struct net *af_packet_net; void *af_packet_priv; struct list_head list; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct gzvm_vcpu; struct kernel_info { __u8 enabled_all; __u8 enabled_base_relative; __u8 enabled_absolute_percpu; __u8 enabled_cfi_clang; __u32 num_syms; __u16 name_len; __u16 bit_per_long; __u16 module_name_len; __u16 symbol_len; __u64 _addresses_pa; __u64 _relative_pa; __u64 _stext_pa; __u64 _etext_pa; __u64 _sinittext_pa; __u64 _einittext_pa; __u64 _end_pa; __u64 _offsets_pa; __u64 _names_pa; __u64 _token_table_pa; __u64 _token_index_pa; __u64 _markers_pa; __u32 thread_size; __u64 swapper_pg_dir_pa; __u8 last_uts_release[64]; __u8 build_info[256]; __u32 enabled_modules_tree_lookup; __u32 mod_mem_offset; __u32 mod_kallsyms_offset; } __attribute__((packed)); struct kernel_all_info { __u32 magic_number; __u32 combined_checksum; struct kernel_info info; }; enum { NVMEM_ADD = 1, NVMEM_REMOVE = 2, NVMEM_CELL_ADD = 3, NVMEM_CELL_REMOVE = 4, NVMEM_LAYOUT_ADD = 5, NVMEM_LAYOUT_REMOVE = 6, }; struct nvmem_device { struct module *owner; struct device dev; int stride; int word_size; int id; struct kref refcnt; size_t size; bool read_only; bool root_only; int flags; enum nvmem_type type; struct bin_attribute eeprom; struct device *base_dev; struct list_head cells; const struct nvmem_keepout *keepout; unsigned int nkeepout; nvmem_reg_read_t reg_read; nvmem_reg_write_t reg_write; struct gpio_desc *wp_gpio; struct nvmem_layout *layout; void *priv; }; struct nvmem_cell_table { const char *nvmem_name; const struct nvmem_cell_info *cells; size_t ncells; struct list_head node; }; struct nvmem_cell_entry { const char *name; int offset; size_t raw_len; int bytes; int bit_offset; int nbits; nvmem_cell_post_process_t read_post_process; void *priv; struct device_node *np; struct nvmem_device *nvmem; struct list_head node; }; struct nvmem_cell { struct 
nvmem_cell_entry *entry; const char *id; int index; }; enum gnss_type { GNSS_TYPE_NMEA = 0, GNSS_TYPE_SIRF = 1, GNSS_TYPE_UBX = 2, GNSS_TYPE_MTK = 3, GNSS_TYPE_COUNT = 4, }; struct gnss_operations; struct gnss_device { struct device dev; struct cdev cdev; int id; enum gnss_type type; unsigned long flags; struct rw_semaphore rwsem; const struct gnss_operations *ops; unsigned int count; unsigned int disconnected: 1; struct mutex read_mutex; struct kfifo read_fifo; wait_queue_head_t read_queue; struct mutex write_mutex; char *write_buf; }; struct gnss_operations { int (*open)(struct gnss_device *); void (*close)(struct gnss_device *); int (*write_raw)(struct gnss_device *, const unsigned char *, size_t); }; typedef void (*btf_trace_icc_set_bw)(void *, struct icc_path *, struct icc_node *, int, u32, u32); struct icc_req { struct hlist_node req_node; struct icc_node *node; struct device *dev; bool enabled; u32 tag; u32 avg_bw; u32 peak_bw; }; struct icc_path { const char *name; size_t num_nodes; struct icc_req reqs[0]; }; typedef void (*btf_trace_icc_set_bw_end)(void *, struct icc_path *, int); struct trace_event_raw_icc_set_bw { struct trace_entry ent; u32 __data_loc_path_name; u32 __data_loc_dev; u32 __data_loc_node_name; u32 avg_bw; u32 peak_bw; u32 node_avg_bw; u32 node_peak_bw; char __data[0]; }; struct trace_event_raw_icc_set_bw_end { struct trace_entry ent; u32 __data_loc_path_name; u32 __data_loc_dev; int ret; char __data[0]; }; struct trace_event_data_offsets_icc_set_bw { u32 path_name; u32 dev; u32 node_name; }; struct trace_event_data_offsets_icc_set_bw_end { u32 path_name; u32 dev; }; struct icc_onecell_data { unsigned int num_nodes; struct icc_node *nodes[0]; }; struct icc_bulk_data { struct icc_path *path; const char *name; u32 avg_bw; u32 peak_bw; }; struct icc_bulk_devres { struct icc_bulk_data *paths; int num_paths; }; struct debugfs_path { const char *src; const char *dst; struct icc_path *path; struct list_head list; }; struct snd_minor { int type; int card; int device; const struct file_operations *f_ops; void *private_data; struct device *dev; struct snd_card *card_ptr; u64 android_kabi_reserved1; }; enum { SNDRV_DEVICE_TYPE_CONTROL = 0, SNDRV_DEVICE_TYPE_SEQUENCER = 1, SNDRV_DEVICE_TYPE_TIMER = 2, SNDRV_DEVICE_TYPE_HWDEP = 3, SNDRV_DEVICE_TYPE_RAWMIDI = 4, SNDRV_DEVICE_TYPE_PCM_PLAYBACK = 5, SNDRV_DEVICE_TYPE_PCM_CAPTURE = 6, SNDRV_DEVICE_TYPE_COMPRESS = 7, }; struct snd_monitor_file { struct file *file; const struct file_operations *disconnected_f_op; struct list_head shutdown_list; struct list_head list; }; struct snd_ctl_layer_ops { struct snd_ctl_layer_ops *next; const char *module_name; void (*lregister)(struct snd_card *); void (*ldisconnect)(struct snd_card *); void (*lnotify)(struct snd_card *, unsigned int, struct snd_kcontrol *, unsigned int); }; enum snd_ctl_add_mode { CTL_ADD_EXCLUSIVE = 0, CTL_REPLACE = 1, CTL_ADD_ON_REPLACE = 2, }; enum sndrv_ctl_event_type { SNDRV_CTL_EVENT_ELEM = 0, SNDRV_CTL_EVENT_LAST = 0, }; enum { SNDRV_CTL_TLV_OP_READ = 0, SNDRV_CTL_TLV_OP_WRITE = 1, SNDRV_CTL_TLV_OP_CMD = -1, }; enum { SNDRV_CTL_IOCTL_ELEM_LIST32 = 3225965840, SNDRV_CTL_IOCTL_ELEM_INFO32 = 3239073041, SNDRV_CTL_IOCTL_ELEM_READ32 = 3267908882, SNDRV_CTL_IOCTL_ELEM_WRITE32 = 3267908883, SNDRV_CTL_IOCTL_ELEM_ADD32 = 3239073047, SNDRV_CTL_IOCTL_ELEM_REPLACE32 = 3239073048, }; enum { SND_CTL_SUBDEV_PCM = 0, SND_CTL_SUBDEV_RAWMIDI = 1, SND_CTL_SUBDEV_ITEMS = 2, }; struct snd_kctl_event { struct list_head list; struct snd_ctl_elem_id id; unsigned int mask; }; typedef int 
(*snd_kctl_ioctl_func_t)(struct snd_card *, struct snd_ctl_file *, unsigned int, unsigned long); struct snd_kctl_ioctl { struct list_head list; snd_kctl_ioctl_func_t fioctl; }; struct snd_ctl_elem_list { unsigned int offset; unsigned int space; unsigned int used; unsigned int count; struct snd_ctl_elem_id __attribute__((btf_type_tag("user"))) *pids; unsigned char reserved[50]; }; struct snd_ctl_card_info { int card; int pad; unsigned char id[16]; unsigned char driver[16]; unsigned char name[32]; unsigned char longname[80]; unsigned char reserved_[16]; unsigned char mixername[80]; unsigned char components[128]; }; struct snd_ctl_elem_info32 { struct snd_ctl_elem_id id; s32 type; u32 access; u32 count; s32 owner; union { struct { s32 min; s32 max; s32 step; } integer; struct { u64 min; u64 max; u64 step; } integer64; struct { u32 items; u32 item; char name[64]; u64 names_ptr; u32 names_length; } enumerated; unsigned char reserved[128]; } value; unsigned char reserved[64]; }; struct snd_ctl_elem_value32 { struct snd_ctl_elem_id id; unsigned int indirect; union { s32 integer[128]; unsigned char data[512]; s64 integer64[64]; } value; unsigned char reserved[128]; }; struct snd_ctl_elem_list32 { u32 offset; u32 space; u32 used; u32 count; u32 pids; unsigned char reserved[50]; }; struct user_element { struct snd_ctl_elem_info info; struct snd_card *card; char *elem_data; unsigned long elem_data_size; void *tlv_data; unsigned long tlv_data_size; void *priv_data; }; struct snd_ctl_event { int type; union { struct { unsigned int mask; struct snd_ctl_elem_id id; } elem; unsigned char data8[60]; } data; }; struct snd_ctl_tlv { unsigned int numid; unsigned int length; unsigned int tlv[0]; }; struct snd_fasync { struct fasync_struct *fasync; int signal; int poll; int on; struct list_head list; }; struct snd_pci_quirk { unsigned short subvendor; unsigned short subdevice; unsigned short subdevice_mask; int value; }; struct snd_info_private_data { struct snd_info_buffer *rbuffer; struct snd_info_buffer *wbuffer; struct snd_info_entry *entry; void *file_private_data; }; struct link_ctl_info { snd_ctl_elem_type_t type; int count; int min_val; int max_val; }; struct link_master; struct link_follower { struct list_head list; struct link_master *master; struct link_ctl_info info; int vals[2]; unsigned int flags; struct snd_kcontrol *kctl; struct snd_kcontrol follower; }; struct link_master { struct list_head followers; struct link_ctl_info info; int val; unsigned int tlv[4]; void (*hook)(void *, int); void *hook_private_data; }; enum snd_jack_types { SND_JACK_HEADPHONE = 1, SND_JACK_MICROPHONE = 2, SND_JACK_HEADSET = 3, SND_JACK_LINEOUT = 4, SND_JACK_MECHANICAL = 8, SND_JACK_VIDEOOUT = 16, SND_JACK_AVOUT = 20, SND_JACK_LINEIN = 32, SND_JACK_BTN_0 = 16384, SND_JACK_BTN_1 = 8192, SND_JACK_BTN_2 = 4096, SND_JACK_BTN_3 = 2048, SND_JACK_BTN_4 = 1024, SND_JACK_BTN_5 = 512, }; struct snd_jack; struct snd_jack_kctl { struct snd_kcontrol *kctl; struct list_head list; unsigned int mask_bits; struct snd_jack *jack; bool sw_inject_enable; }; struct snd_jack { struct list_head kctl_list; struct snd_card *card; const char *id; struct input_dev *input_dev; struct mutex input_dev_lock; int registered; int type; char name[100]; unsigned int key[6]; int hw_status_cache; void *private_data; void (*private_free)(struct snd_jack *); u64 android_kabi_reserved1; }; enum { SNDRV_HWDEP_IOCTL_DSP_LOAD32 = 1079003139, }; struct snd_hwdep; struct snd_hwdep_dsp_status; struct snd_hwdep_dsp_image; struct snd_hwdep_ops { long long 
(*llseek)(struct snd_hwdep *, struct file *, long long, int); long (*read)(struct snd_hwdep *, char __attribute__((btf_type_tag("user"))) *, long, loff_t *); long (*write)(struct snd_hwdep *, const char __attribute__((btf_type_tag("user"))) *, long, loff_t *); int (*open)(struct snd_hwdep *, struct file *); int (*release)(struct snd_hwdep *, struct file *); __poll_t (*poll)(struct snd_hwdep *, struct file *, poll_table *); int (*ioctl)(struct snd_hwdep *, struct file *, unsigned int, unsigned long); int (*ioctl_compat)(struct snd_hwdep *, struct file *, unsigned int, unsigned long); int (*mmap)(struct snd_hwdep *, struct file *, struct vm_area_struct *); int (*dsp_status)(struct snd_hwdep *, struct snd_hwdep_dsp_status *); int (*dsp_load)(struct snd_hwdep *, struct snd_hwdep_dsp_image *); u64 android_kabi_reserved1; }; struct snd_hwdep { struct snd_card *card; struct list_head list; int device; char id[32]; char name[80]; int iface; struct snd_hwdep_ops ops; wait_queue_head_t open_wait; void *private_data; void (*private_free)(struct snd_hwdep *); struct device *dev; struct mutex open_mutex; int used; unsigned int dsp_loaded; unsigned int exclusive: 1; u64 android_kabi_reserved1; }; struct snd_hwdep_dsp_status { unsigned int version; unsigned char id[32]; unsigned int num_dsps; unsigned int dsp_loaded; unsigned int chip_ready; unsigned char reserved[16]; }; struct snd_hwdep_dsp_image { unsigned int index; unsigned char name[64]; unsigned char __attribute__((btf_type_tag("user"))) *image; size_t length; unsigned long driver_data; }; struct snd_hwdep_info { unsigned int device; int card; unsigned char id[64]; unsigned char name[80]; int iface; unsigned char reserved[64]; }; struct snd_hwdep_dsp_image32 { u32 index; unsigned char name[64]; u32 image; u32 length; u32 driver_data; }; struct snd_timer_hardware { unsigned int flags; unsigned long resolution; unsigned long resolution_min; unsigned long resolution_max; unsigned long ticks; int (*open)(struct snd_timer *); int (*close)(struct snd_timer *); unsigned long (*c_resolution)(struct snd_timer *); int (*start)(struct snd_timer *); int (*stop)(struct snd_timer *); int (*set_period)(struct snd_timer *, unsigned long, unsigned long); int (*precise_resolution)(struct snd_timer *, unsigned long *, unsigned long *); u64 android_kabi_reserved1; }; struct snd_timer { int tmr_class; struct snd_card *card; struct module *module; int tmr_device; int tmr_subdevice; char id[64]; char name[80]; unsigned int flags; int running; unsigned long sticks; void *private_data; void (*private_free)(struct snd_timer *); struct snd_timer_hardware hw; spinlock_t lock; struct list_head device_list; struct list_head open_list_head; struct list_head active_list_head; struct list_head ack_list_head; struct list_head sack_list_head; struct work_struct task_work; int max_instances; int num_instances; u64 android_kabi_reserved1; }; enum { SNDRV_TIMER_CLASS_NONE = -1, SNDRV_TIMER_CLASS_SLAVE = 0, SNDRV_TIMER_CLASS_GLOBAL = 1, SNDRV_TIMER_CLASS_CARD = 2, SNDRV_TIMER_CLASS_PCM = 3, SNDRV_TIMER_CLASS_LAST = 3, }; enum { SNDRV_TIMER_SCLASS_NONE = 0, SNDRV_TIMER_SCLASS_APPLICATION = 1, SNDRV_TIMER_SCLASS_SEQUENCER = 2, SNDRV_TIMER_SCLASS_OSS_SEQUENCER = 3, SNDRV_TIMER_SCLASS_LAST = 3, }; enum { SNDRV_TIMER_EVENT_RESOLUTION = 0, SNDRV_TIMER_EVENT_TICK = 1, SNDRV_TIMER_EVENT_START = 2, SNDRV_TIMER_EVENT_STOP = 3, SNDRV_TIMER_EVENT_CONTINUE = 4, SNDRV_TIMER_EVENT_PAUSE = 5, SNDRV_TIMER_EVENT_EARLY = 6, SNDRV_TIMER_EVENT_SUSPEND = 7, SNDRV_TIMER_EVENT_RESUME = 8, 
SNDRV_TIMER_EVENT_MSTART = 12, SNDRV_TIMER_EVENT_MSTOP = 13, SNDRV_TIMER_EVENT_MCONTINUE = 14, SNDRV_TIMER_EVENT_MPAUSE = 15, SNDRV_TIMER_EVENT_MSUSPEND = 17, SNDRV_TIMER_EVENT_MRESUME = 18, }; enum timer_tread_format { TREAD_FORMAT_NONE = 0, TREAD_FORMAT_TIME64 = 1, TREAD_FORMAT_TIME32 = 2, }; enum { SNDRV_TIMER_IOCTL_GPARAMS32 = 1077695492, SNDRV_TIMER_IOCTL_INFO32 = 2162185233, SNDRV_TIMER_IOCTL_STATUS_COMPAT32 = 1079530516, SNDRV_TIMER_IOCTL_STATUS_COMPAT64 = 1080054804, }; struct snd_timer_instance { struct snd_timer *timer; char *owner; unsigned int flags; void *private_data; void (*private_free)(struct snd_timer_instance *); void (*callback)(struct snd_timer_instance *, unsigned long, unsigned long); void (*ccallback)(struct snd_timer_instance *, int, struct timespec64 *, unsigned long); void (*disconnect)(struct snd_timer_instance *); void *callback_data; unsigned long ticks; unsigned long cticks; unsigned long pticks; unsigned long resolution; unsigned long lost; int slave_class; unsigned int slave_id; struct list_head open_list; struct list_head active_list; struct list_head ack_list; struct list_head slave_list_head; struct list_head slave_active_head; struct snd_timer_instance *master; u64 android_kabi_reserved1; }; struct snd_timer_system_private { struct timer_list tlist; struct snd_timer *snd_timer; unsigned long last_expires; unsigned long last_jiffies; unsigned long correction; }; struct snd_timer_id { int dev_class; int dev_sclass; int card; int device; int subdevice; }; struct snd_timer_gparams32 { struct snd_timer_id tid; u32 period_num; u32 period_den; unsigned char reserved[32]; }; struct snd_timer_gparams { struct snd_timer_id tid; unsigned long period_num; unsigned long period_den; unsigned char reserved[32]; }; struct snd_timer_info32 { u32 flags; s32 card; unsigned char id[64]; unsigned char name[80]; u32 reserved0; u32 resolution; unsigned char reserved[64]; }; struct snd_timer_read; struct snd_timer_tread64; struct snd_timer_user { struct snd_timer_instance *timeri; int tread; unsigned long ticks; unsigned long overrun; int qhead; int qtail; int qused; int queue_size; bool disconnected; struct snd_timer_read *queue; struct snd_timer_tread64 *tqueue; spinlock_t qlock; unsigned long last_resolution; unsigned int filter; struct timespec64 tstamp; wait_queue_head_t qchange_sleep; struct snd_fasync *fasync; struct mutex ioctl_lock; }; struct snd_timer_read { unsigned int resolution; unsigned int ticks; }; struct snd_timer_tread64 { int event; u8 pad1[4]; s64 tstamp_sec; s64 tstamp_nsec; unsigned int val; u8 pad2[4]; }; struct snd_timer_select { struct snd_timer_id id; unsigned char reserved[32]; }; struct snd_timer_params { unsigned int flags; unsigned int ticks; unsigned int queue_size; unsigned int reserved0; unsigned int filter; unsigned char reserved[60]; }; struct snd_timer_ginfo { struct snd_timer_id tid; unsigned int flags; int card; unsigned char id[64]; unsigned char name[80]; unsigned long reserved0; unsigned long resolution; unsigned long resolution_min; unsigned long resolution_max; unsigned int clients; unsigned char reserved[32]; }; struct snd_timer_info { unsigned int flags; int card; unsigned char id[64]; unsigned char name[80]; unsigned long reserved0; unsigned long resolution; unsigned char reserved[64]; }; struct snd_timer_gstatus { struct snd_timer_id tid; unsigned long resolution; unsigned long resolution_num; unsigned long resolution_den; unsigned char reserved[32]; }; struct snd_timer_tread32 { int event; s32 tstamp_sec; s32 tstamp_nsec; 
unsigned int val; }; struct snd_timer_status32 { s32 tstamp_sec; s32 tstamp_nsec; unsigned int resolution; unsigned int lost; unsigned int overrun; unsigned int queue; unsigned char reserved[64]; }; struct snd_timer_status64 { s64 tstamp_sec; s64 tstamp_nsec; unsigned int resolution; unsigned int lost; unsigned int overrun; unsigned int queue; unsigned char reserved[64]; }; struct snd_hrtimer { struct snd_timer *timer; struct hrtimer hrt; bool in_callback; }; enum { SNDRV_PCM_CLASS_GENERIC = 0, SNDRV_PCM_CLASS_MULTI = 1, SNDRV_PCM_CLASS_MODEM = 2, SNDRV_PCM_CLASS_DIGITIZER = 3, SNDRV_PCM_CLASS_LAST = 3, }; struct snd_pcm_info { unsigned int device; unsigned int subdevice; int stream; int card; unsigned char id[64]; unsigned char name[80]; unsigned char subname[32]; int dev_class; int dev_subclass; unsigned int subdevices_count; unsigned int subdevices_avail; union snd_pcm_sync_id sync; unsigned char reserved[64]; }; struct snd_pcm_hw_constraint_list { const unsigned int *list; unsigned int count; unsigned int mask; }; struct action_ops { int (*pre_action)(struct snd_pcm_substream *, snd_pcm_state_t); int (*do_action)(struct snd_pcm_substream *, snd_pcm_state_t); void (*undo_action)(struct snd_pcm_substream *, snd_pcm_state_t); void (*post_action)(struct snd_pcm_substream *, snd_pcm_state_t); }; enum { SNDRV_PCM_AUDIO_TSTAMP_TYPE_COMPAT = 0, SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT = 1, SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK = 2, SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_ABSOLUTE = 3, SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_ESTIMATED = 4, SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED = 5, SNDRV_PCM_AUDIO_TSTAMP_TYPE_LAST = 5, }; enum { SNDRV_PCM_TSTAMP_NONE = 0, SNDRV_PCM_TSTAMP_ENABLE = 1, SNDRV_PCM_TSTAMP_LAST = 1, }; enum { SNDRV_PCM_TSTAMP_TYPE_GETTIMEOFDAY = 0, SNDRV_PCM_TSTAMP_TYPE_MONOTONIC = 1, SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW = 2, SNDRV_PCM_TSTAMP_TYPE_LAST = 2, }; enum snd_dma_sync_mode { SNDRV_DMA_SYNC_CPU = 0, SNDRV_DMA_SYNC_DEVICE = 1, }; enum { SNDRV_PCM_IOCTL_HW_REFINE32 = 3260825872, SNDRV_PCM_IOCTL_HW_PARAMS32 = 3260825873, SNDRV_PCM_IOCTL_SW_PARAMS32 = 3228057875, SNDRV_PCM_IOCTL_STATUS_COMPAT32 = 2154578208, SNDRV_PCM_IOCTL_STATUS_EXT_COMPAT32 = 3228320036, SNDRV_PCM_IOCTL_DELAY32 = 2147762465, SNDRV_PCM_IOCTL_CHANNEL_INFO32 = 2148548914, SNDRV_PCM_IOCTL_REWIND32 = 1074020678, SNDRV_PCM_IOCTL_FORWARD32 = 1074020681, SNDRV_PCM_IOCTL_WRITEI_FRAMES32 = 1074544976, SNDRV_PCM_IOCTL_READI_FRAMES32 = 2148286801, SNDRV_PCM_IOCTL_WRITEN_FRAMES32 = 1074544978, SNDRV_PCM_IOCTL_READN_FRAMES32 = 2148286803, SNDRV_PCM_IOCTL_STATUS_COMPAT64 = 2155888928, SNDRV_PCM_IOCTL_STATUS_EXT_COMPAT64 = 3229630756, }; enum { SNDRV_PCM_MMAP_OFFSET_DATA = 0, SNDRV_PCM_MMAP_OFFSET_STATUS_OLD = 2147483648, SNDRV_PCM_MMAP_OFFSET_CONTROL_OLD = 2164260864, SNDRV_PCM_MMAP_OFFSET_STATUS_NEW = 2181038080, SNDRV_PCM_MMAP_OFFSET_CONTROL_NEW = 2197815296, SNDRV_PCM_MMAP_OFFSET_STATUS = 2181038080, SNDRV_PCM_MMAP_OFFSET_CONTROL = 2197815296, }; typedef char __pad_before_u32[0]; struct __snd_pcm_mmap_control64_buggy { __pad_before_u32 __pad1; __u32 appl_ptr; __pad_before_u32 __pad2; __pad_before_u32 __pad3; __u32 avail_min; __pad_after_uframe __pad4; }; typedef u64 u_int64_t; struct snd_pcm_file { struct snd_pcm_substream *substream; int no_compat_mmap; unsigned int user_pversion; }; struct snd_pcm_status64 { snd_pcm_state_t state; u8 rsvd[4]; s64 trigger_tstamp_sec; s64 trigger_tstamp_nsec; s64 tstamp_sec; s64 tstamp_nsec; snd_pcm_uframes_t appl_ptr; snd_pcm_uframes_t hw_ptr; snd_pcm_sframes_t delay; snd_pcm_uframes_t avail; 
snd_pcm_uframes_t avail_max; snd_pcm_uframes_t overrange; snd_pcm_state_t suspended_state; __u32 audio_tstamp_data; s64 audio_tstamp_sec; s64 audio_tstamp_nsec; s64 driver_tstamp_sec; s64 driver_tstamp_nsec; __u32 audio_tstamp_accuracy; unsigned char reserved[20]; }; struct snd_pcm_sw_params { int tstamp_mode; unsigned int period_step; unsigned int sleep_min; snd_pcm_uframes_t avail_min; snd_pcm_uframes_t xfer_align; snd_pcm_uframes_t start_threshold; snd_pcm_uframes_t stop_threshold; snd_pcm_uframes_t silence_threshold; snd_pcm_uframes_t silence_size; snd_pcm_uframes_t boundary; unsigned int proto; unsigned int tstamp_type; unsigned char reserved[56]; }; struct snd_pcm_status32 { snd_pcm_state_t state; s32 trigger_tstamp_sec; s32 trigger_tstamp_nsec; s32 tstamp_sec; s32 tstamp_nsec; u32 appl_ptr; u32 hw_ptr; s32 delay; u32 avail; u32 avail_max; u32 overrange; snd_pcm_state_t suspended_state; u32 audio_tstamp_data; s32 audio_tstamp_sec; s32 audio_tstamp_nsec; s32 driver_tstamp_sec; s32 driver_tstamp_nsec; u32 audio_tstamp_accuracy; unsigned char reserved[36]; }; struct snd_pcm_channel_info { unsigned int channel; __kernel_off_t offset; unsigned int first; unsigned int step; }; struct snd_pcm_mmap_status32 { snd_pcm_state_t state; s32 pad1; u32 hw_ptr; s32 tstamp_sec; s32 tstamp_nsec; snd_pcm_state_t suspended_state; s32 audio_tstamp_sec; s32 audio_tstamp_nsec; }; struct snd_pcm_mmap_control32 { u32 appl_ptr; u32 avail_min; }; struct snd_pcm_sync_ptr32 { u32 flags; union { struct snd_pcm_mmap_status32 status; unsigned char reserved[64]; } s; union { struct snd_pcm_mmap_control32 control; unsigned char reserved[64]; } c; }; struct snd_pcm_sync_ptr { __u32 flags; __u32 pad1; union { struct snd_pcm_mmap_status status; unsigned char reserved[64]; } s; union { struct snd_pcm_mmap_control control; unsigned char reserved[64]; } c; }; struct snd_xferi { snd_pcm_sframes_t result; void __attribute__((btf_type_tag("user"))) *buf; snd_pcm_uframes_t frames; }; struct snd_xfern { snd_pcm_sframes_t result; void __attribute__((btf_type_tag("user"))) * __attribute__((btf_type_tag("user"))) *bufs; snd_pcm_uframes_t frames; }; struct snd_pcm_hw_params32 { u32 flags; struct snd_mask masks[3]; struct snd_mask mres[5]; struct snd_interval intervals[12]; struct snd_interval ires[9]; u32 rmask; u32 cmask; u32 info; u32 msbits; u32 rate_num; u32 rate_den; u32 fifo_size; unsigned char reserved[64]; }; struct snd_pcm_sw_params32 { s32 tstamp_mode; u32 period_step; u32 sleep_min; u32 avail_min; u32 xfer_align; u32 start_threshold; u32 stop_threshold; u32 silence_threshold; u32 silence_size; u32 boundary; u32 proto; u32 tstamp_type; unsigned char reserved[56]; }; struct snd_pcm_channel_info32 { u32 channel; u32 offset; u32 first; u32 step; }; struct snd_xferi32 { s32 result; u32 buf; u32 frames; }; struct snd_xfern32 { s32 result; u32 bufs; u32 frames; }; struct compat_snd_pcm_status64 { snd_pcm_state_t state; u8 rsvd[4]; s64 trigger_tstamp_sec; s64 trigger_tstamp_nsec; s64 tstamp_sec; s64 tstamp_nsec; u32 appl_ptr; u32 hw_ptr; s32 delay; u32 avail; u32 avail_max; u32 overrange; snd_pcm_state_t suspended_state; u32 audio_tstamp_data; s64 audio_tstamp_sec; s64 audio_tstamp_nsec; s64 driver_tstamp_sec; s64 driver_tstamp_nsec; u32 audio_tstamp_accuracy; unsigned char reserved[20]; }; struct snd_pcm_chmap_elem { unsigned char channels; unsigned char map[15]; }; enum { SNDRV_CHMAP_UNKNOWN = 0, SNDRV_CHMAP_NA = 1, SNDRV_CHMAP_MONO = 2, SNDRV_CHMAP_FL = 3, SNDRV_CHMAP_FR = 4, SNDRV_CHMAP_RL = 5, SNDRV_CHMAP_RR = 6, 
SNDRV_CHMAP_FC = 7, SNDRV_CHMAP_LFE = 8, SNDRV_CHMAP_SL = 9, SNDRV_CHMAP_SR = 10, SNDRV_CHMAP_RC = 11, SNDRV_CHMAP_FLC = 12, SNDRV_CHMAP_FRC = 13, SNDRV_CHMAP_RLC = 14, SNDRV_CHMAP_RRC = 15, SNDRV_CHMAP_FLW = 16, SNDRV_CHMAP_FRW = 17, SNDRV_CHMAP_FLH = 18, SNDRV_CHMAP_FCH = 19, SNDRV_CHMAP_FRH = 20, SNDRV_CHMAP_TC = 21, SNDRV_CHMAP_TFL = 22, SNDRV_CHMAP_TFR = 23, SNDRV_CHMAP_TFC = 24, SNDRV_CHMAP_TRL = 25, SNDRV_CHMAP_TRR = 26, SNDRV_CHMAP_TRC = 27, SNDRV_CHMAP_TFLC = 28, SNDRV_CHMAP_TFRC = 29, SNDRV_CHMAP_TSL = 30, SNDRV_CHMAP_TSR = 31, SNDRV_CHMAP_LLFE = 32, SNDRV_CHMAP_RLFE = 33, SNDRV_CHMAP_BC = 34, SNDRV_CHMAP_BLC = 35, SNDRV_CHMAP_BRC = 36, SNDRV_CHMAP_LAST = 36, }; typedef int (*pcm_transfer_f)(struct snd_pcm_substream *, int, unsigned long, struct iov_iter *, unsigned long); struct snd_ratden { unsigned int num_min; unsigned int num_max; unsigned int num_step; unsigned int den; }; struct snd_pcm_chmap { struct snd_pcm *pcm; int stream; struct snd_kcontrol *kctl; const struct snd_pcm_chmap_elem *chmap; unsigned int max_channels; unsigned int channel_mask; void *private_data; }; struct snd_ratnum { unsigned int num; unsigned int den_min; unsigned int den_max; unsigned int den_step; }; struct snd_pcm_hw_constraint_ranges { unsigned int count; const struct snd_interval *ranges; unsigned int mask; }; struct snd_pcm_hw_constraint_ratnums { int nrats; const struct snd_ratnum *rats; }; struct snd_pcm_hw_constraint_ratdens { int nrats; const struct snd_ratden *rats; }; typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *, snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f, bool); struct pcm_format_data { unsigned char width; unsigned char phys; signed char le; signed char signd; unsigned char silence[8]; }; struct snd_malloc_ops { void * (*alloc)(struct snd_dma_buffer *, size_t); void (*free)(struct snd_dma_buffer *); dma_addr_t (*get_addr)(struct snd_dma_buffer *, size_t); struct page * (*get_page)(struct snd_dma_buffer *, size_t); unsigned int (*get_chunk_size)(struct snd_dma_buffer *, unsigned int, unsigned int); int (*mmap)(struct snd_dma_buffer *, struct vm_area_struct *); void (*sync)(struct snd_dma_buffer *, enum snd_dma_sync_mode); }; struct dmaengine_pcm_runtime_data { struct dma_chan *dma_chan; dma_cookie_t cookie; unsigned int pos; }; struct snd_dmaengine_dai_dma_data { dma_addr_t addr; enum dma_slave_buswidth addr_width; u32 maxburst; void *filter_data; const char *chan_name; unsigned int fifo_size; unsigned int flags; void *peripheral_config; size_t peripheral_size; }; enum { SNDRV_RAWMIDI_IOCTL_PARAMS32 = 3223344912, SNDRV_RAWMIDI_IOCTL_STATUS_COMPAT32 = 3223607072, SNDRV_RAWMIDI_IOCTL_STATUS_COMPAT64 = 3224393504, }; struct snd_rawmidi_framing_tstamp { __u8 frame_type; __u8 length; __u8 reserved[2]; __u32 tv_nsec; __u64 tv_sec; __u8 data[16]; }; struct snd_rawmidi_info { unsigned int device; unsigned int subdevice; int stream; int card; unsigned int flags; unsigned char id[64]; unsigned char name[80]; unsigned char subname[32]; unsigned int subdevices_count; unsigned int subdevices_avail; unsigned char reserved[64]; }; struct snd_rawmidi_file { struct snd_rawmidi *rmidi; struct snd_rawmidi_substream *input; struct snd_rawmidi_substream *output; unsigned int user_pversion; }; struct snd_rawmidi_status64 { int stream; u8 rsvd[4]; s64 tstamp_sec; s64 tstamp_nsec; size_t avail; size_t xruns; unsigned char reserved[16]; }; struct snd_rawmidi_params32 { s32 stream; u32 buffer_size; u32 avail_min; unsigned int no_active_sensing; unsigned int mode; 
unsigned char reserved[12]; }; struct snd_rawmidi_params { int stream; size_t buffer_size; size_t avail_min; unsigned int no_active_sensing: 1; unsigned int mode; unsigned char reserved[12]; }; struct compat_snd_rawmidi_status64 { s32 stream; u8 rsvd[4]; s64 tstamp_sec; s64 tstamp_nsec; u32 avail; u32 xruns; unsigned char reserved[16]; }; struct snd_rawmidi_status32 { s32 stream; s32 tstamp_sec; s32 tstamp_nsec; u32 avail; u32 xruns; unsigned char reserved[16]; }; enum snd_compr_direction { SND_COMPRESS_PLAYBACK = 0, SND_COMPRESS_CAPTURE = 1, }; struct snd_compr_tstamp { __u32 byte_offset; __u32 copied_total; __u32 pcm_frames; __u32 pcm_io_frames; __u32 sampling_rate; }; struct snd_compr_ops; struct snd_compr_runtime; struct snd_compr; struct snd_compr_stream { const char *name; struct snd_compr_ops *ops; struct snd_compr_runtime *runtime; struct snd_compr *device; struct delayed_work error_work; enum snd_compr_direction direction; bool metadata_set; bool next_track; bool partial_drain; bool pause_in_draining; void *private_data; struct snd_dma_buffer dma_buffer; u64 android_kabi_reserved1; }; struct snd_compr_params; struct snd_codec; struct snd_compr_metadata; struct snd_compr_caps; struct snd_compr_codec_caps; struct snd_compr_ops { int (*open)(struct snd_compr_stream *); int (*free)(struct snd_compr_stream *); int (*set_params)(struct snd_compr_stream *, struct snd_compr_params *); int (*get_params)(struct snd_compr_stream *, struct snd_codec *); int (*set_metadata)(struct snd_compr_stream *, struct snd_compr_metadata *); int (*get_metadata)(struct snd_compr_stream *, struct snd_compr_metadata *); int (*trigger)(struct snd_compr_stream *, int); int (*pointer)(struct snd_compr_stream *, struct snd_compr_tstamp *); int (*copy)(struct snd_compr_stream *, char __attribute__((btf_type_tag("user"))) *, size_t); int (*mmap)(struct snd_compr_stream *, struct vm_area_struct *); int (*ack)(struct snd_compr_stream *, size_t); int (*get_caps)(struct snd_compr_stream *, struct snd_compr_caps *); int (*get_codec_caps)(struct snd_compr_stream *, struct snd_compr_codec_caps *); u64 android_kabi_reserved1; }; struct snd_compressed_buffer { __u32 fragment_size; __u32 fragments; }; struct snd_enc_wma { __u32 super_block_align; }; struct snd_enc_vorbis { __s32 quality; __u32 managed; __u32 max_bit_rate; __u32 min_bit_rate; __u32 downmix; }; struct snd_enc_real { __u32 quant_bits; __u32 start_region; __u32 num_regions; }; struct snd_enc_flac { __u32 num; __u32 gain; }; struct snd_enc_generic { __u32 bw; __s32 reserved[15]; }; struct snd_dec_flac { __u16 sample_size; __u16 min_blk_size; __u16 max_blk_size; __u16 min_frame_size; __u16 max_frame_size; __u16 reserved; }; struct snd_dec_wma { __u32 encoder_option; __u32 adv_encoder_option; __u32 adv_encoder_option2; __u32 reserved; }; struct snd_dec_alac { __u32 frame_length; __u8 compatible_version; __u8 pb; __u8 mb; __u8 kb; __u32 max_run; __u32 max_frame_bytes; }; struct snd_dec_ape { __u16 compatible_version; __u16 compression_level; __u32 format_flags; __u32 blocks_per_frame; __u32 final_frame_blocks; __u32 total_frames; __u32 seek_table_present; }; union snd_codec_options { struct snd_enc_wma wma; struct snd_enc_vorbis vorbis; struct snd_enc_real real; struct snd_enc_flac flac; struct snd_enc_generic generic; struct snd_dec_flac flac_d; struct snd_dec_wma wma_d; struct snd_dec_alac alac_d; struct snd_dec_ape ape_d; }; struct snd_codec { __u32 id; __u32 ch_in; __u32 ch_out; __u32 sample_rate; __u32 bit_rate; __u32 rate_control; __u32 profile; __u32 level; 
__u32 ch_mode; __u32 format; __u32 align; union snd_codec_options options; __u32 reserved[3]; }; struct snd_compr_params { struct snd_compressed_buffer buffer; struct snd_codec codec; __u8 no_wake_mode; }; struct snd_compr_metadata { __u32 key; __u32 value[8]; }; struct snd_compr_caps { __u32 num_codecs; __u32 direction; __u32 min_fragment_size; __u32 max_fragment_size; __u32 min_fragments; __u32 max_fragments; __u32 codecs[32]; __u32 reserved[11]; }; struct snd_codec_desc { __u32 max_ch; __u32 sample_rates[32]; __u32 num_sample_rates; __u32 bit_rate[32]; __u32 num_bitrates; __u32 rate_control; __u32 profiles; __u32 modes; __u32 formats; __u32 min_buffer; __u32 reserved[15]; }; struct snd_compr_codec_caps { __u32 codec; __u32 num_descriptors; struct snd_codec_desc descriptor[32]; }; struct snd_compr_runtime { snd_pcm_state_t state; struct snd_compr_ops *ops; void *buffer; u64 buffer_size; u32 fragment_size; u32 fragments; u64 total_bytes_available; u64 total_bytes_transferred; wait_queue_head_t sleep; void *private_data; unsigned char *dma_area; dma_addr_t dma_addr; size_t dma_bytes; struct snd_dma_buffer *dma_buffer_p; u64 android_kabi_reserved1; }; struct snd_compr { const char *name; struct device *dev; struct snd_compr_ops *ops; void *private_data; struct snd_card *card; unsigned int direction; struct mutex lock; int device; bool use_pause_in_draining; u64 android_kabi_reserved1; }; struct snd_compr_avail { __u64 avail; struct snd_compr_tstamp tstamp; } __attribute__((packed)); struct snd_compr_file { unsigned long caps; struct snd_compr_stream stream; }; struct snd_usb_audio; struct snd_usb_platform_ops { void (*connect_cb)(struct snd_usb_audio *); void (*disconnect_cb)(struct snd_usb_audio *); void (*suspend_cb)(struct usb_interface *, pm_message_t); void (*resume_cb)(struct usb_interface *); }; struct snd_usb_audio { int index; struct usb_device *dev; struct snd_card *card; struct usb_interface *intf[16]; u32 usb_id; uint16_t quirk_type; struct mutex mutex; unsigned int system_suspend; atomic_t active; atomic_t shutdown; atomic_t usage_count; wait_queue_head_t shutdown_wait; unsigned int quirk_flags; unsigned int need_delayed_register: 1; int num_interfaces; int last_iface; int num_suspended_intf; int sample_rate_read_error; int badd_profile; struct list_head pcm_list; struct list_head ep_list; struct list_head iface_ref_list; struct list_head clock_ref_list; int pcm_devs; unsigned int num_rawmidis; struct list_head midi_list; struct list_head midi_v2_list; struct list_head mixer_list; int setup; bool generic_implicit_fb; bool autoclock; bool lowlatency; struct usb_host_interface *ctrl_intf; struct media_device *media_dev; struct media_intf_devnode *ctl_intf_media_devnode; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct usb_audio_device_name { u32 id; const char *vendor_name; const char *product_name; const char *profile_name; }; struct snd_usb_audio_quirk { const char *vendor_name; const char *product_name; int16_t ifnum; uint16_t type; const void *data; }; struct snd_usb_stream; struct audioformat; struct snd_usb_power_domain; struct snd_usb_endpoint; struct media_ctl; struct snd_usb_substream { struct snd_usb_stream *stream; struct usb_device *dev; struct snd_pcm_substream *pcm_substream; int direction; int endpoint; const struct audioformat *cur_audiofmt; struct snd_usb_power_domain *str_pd; unsigned int channels_max; unsigned int txfr_quirk: 1; unsigned int tx_length_quirk: 1; unsigned int fmt_type; 
unsigned int pkt_offset_adj; unsigned int stream_offset_adj; unsigned int running: 1; unsigned int period_elapsed_pending; unsigned int buffer_bytes; unsigned int inflight_bytes; unsigned int hwptr_done; unsigned int transfer_done; unsigned int frame_limit; unsigned int ep_num; struct snd_usb_endpoint *data_endpoint; struct snd_usb_endpoint *sync_endpoint; unsigned long flags; unsigned int speed; u64 formats; unsigned int num_formats; struct list_head fmt_list; spinlock_t lock; unsigned int last_frame_number; struct { int marker; int channel; int byte_idx; } dsd_dop; bool trigger_tstamp_pending_update; bool lowlatency_playback; struct media_ctl *media_ctl; u64 android_kabi_reserved1; }; struct snd_usb_stream { struct snd_usb_audio *chip; struct snd_pcm *pcm; int pcm_index; unsigned int fmt_type; struct snd_usb_substream substream[2]; struct list_head list; }; struct audioformat { struct list_head list; u64 formats; unsigned int channels; unsigned int fmt_type; unsigned int fmt_bits; unsigned int frame_size; unsigned char iface; unsigned char altsetting; unsigned char ep_idx; unsigned char altset_idx; unsigned char attributes; unsigned char endpoint; unsigned char ep_attr; bool implicit_fb; unsigned char sync_ep; unsigned char sync_iface; unsigned char sync_altsetting; unsigned char sync_ep_idx; unsigned char datainterval; unsigned char protocol; unsigned int maxpacksize; unsigned int rates; unsigned int rate_min; unsigned int rate_max; unsigned int nr_rates; unsigned int *rate_table; unsigned char clock; struct snd_pcm_chmap_elem *chmap; bool dsd_dop; bool dsd_bitrev; bool dsd_raw; }; struct snd_usb_power_domain { int pd_id; int pd_d1d0_rec; int pd_d2d0_rec; }; struct snd_urb_ctx { struct urb *urb; unsigned int buffer_size; struct snd_usb_substream *subs; struct snd_usb_endpoint *ep; int index; int packets; int queued; int packet_size[48]; struct list_head ready_list; }; struct snd_usb_packet_info { uint32_t packet_size[48]; int packets; }; struct snd_usb_iface_ref; struct snd_usb_clock_ref; struct snd_usb_endpoint { struct snd_usb_audio *chip; struct snd_usb_iface_ref *iface_ref; struct snd_usb_clock_ref *clock_ref; int opened; atomic_t running; int ep_num; int type; unsigned char iface; unsigned char altsetting; unsigned char ep_idx; atomic_t state; int (*prepare_data_urb)(struct snd_usb_substream *, struct urb *, bool); void (*retire_data_urb)(struct snd_usb_substream *, struct urb *); struct snd_usb_substream *data_subs; struct snd_usb_endpoint *sync_source; struct snd_usb_endpoint *sync_sink; struct snd_urb_ctx urb[12]; struct snd_usb_packet_info next_packet[12]; unsigned int next_packet_head; unsigned int next_packet_queued; struct list_head ready_playback_urbs; unsigned int nurbs; unsigned long active_mask; unsigned long unlink_mask; atomic_t submitted_urbs; char *syncbuf; dma_addr_t sync_dma; unsigned int pipe; unsigned int packsize[2]; unsigned int sample_rem; unsigned int sample_accum; unsigned int pps; unsigned int freqn; unsigned int freqm; int freqshift; unsigned int freqmax; unsigned int phase; unsigned int maxpacksize; unsigned int maxframesize; unsigned int max_urb_frames; unsigned int curpacksize; unsigned int curframesize; unsigned int syncmaxsize; unsigned int fill_max: 1; unsigned int tenor_fb_quirk: 1; unsigned int datainterval; unsigned int syncinterval; unsigned char silence_value; unsigned int stride; int skip_packets; bool implicit_fb_sync; bool lowlatency_playback; bool need_setup; bool need_prepare; bool fixed_rate; const struct audioformat *cur_audiofmt; 
unsigned int cur_rate; snd_pcm_format_t cur_format; unsigned int cur_channels; unsigned int cur_frame_bytes; unsigned int cur_period_frames; unsigned int cur_period_bytes; unsigned int cur_buffer_periods; spinlock_t lock; struct list_head list; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct media_ctl { struct media_device *media_dev; struct media_entity media_entity; struct media_intf_devnode *intf_devnode; struct media_link *intf_link; struct media_pad media_pad; struct media_pipeline media_pipe; }; struct usb_mixer_elem_list; struct usbmix_connector_map; struct rc_config; struct media_mixer_ctl; struct usb_mixer_interface { struct snd_usb_audio *chip; struct usb_host_interface *hostif; struct list_head list; unsigned int ignore_ctl_error; struct urb *urb; struct usb_mixer_elem_list **id_elems; int protocol; const struct usbmix_connector_map *connector_map; const struct rc_config *rc_cfg; u32 rc_code; wait_queue_head_t rc_waitq; struct urb *rc_urb; struct usb_ctrlrequest *rc_setup_packet; u8 rc_buffer[6]; struct media_mixer_ctl *media_mixer_ctl; bool disconnected; void *private_data; void (*private_free)(struct usb_mixer_interface *); void (*private_suspend)(struct usb_mixer_interface *); }; typedef void (*usb_mixer_elem_dump_func_t)(struct snd_info_buffer *, struct usb_mixer_elem_list *); typedef int (*usb_mixer_elem_resume_func_t)(struct usb_mixer_elem_list *); struct usb_mixer_elem_list { struct usb_mixer_interface *mixer; struct usb_mixer_elem_list *next_id_elem; struct snd_kcontrol *kctl; unsigned int id; bool is_std_info; usb_mixer_elem_dump_func_t dump; usb_mixer_elem_resume_func_t resume; }; struct usbmix_connector_map { u8 id; u8 delegated_id; u8 control; u8 channel; }; struct rc_config { u32 usb_id; u8 offset; u8 length; u8 packet_length; u8 min_packet_length; u8 mute_mixer_id; u32 mute_code; }; struct media_mixer_ctl { struct media_device *media_dev; struct media_entity media_entity; struct media_intf_devnode *intf_devnode; struct media_link *intf_link; struct media_pad media_pad[3]; struct media_pipeline media_pipe; }; struct uac1_ac_header_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __le16 bcdADC; __le16 wTotalLength; __u8 bInCollection; __u8 baInterfaceNr[0]; } __attribute__((packed)); struct uac3_clock_source_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bClockID; __u8 bmAttributes; __le32 bmControls; __u8 bReferenceTerminal; __le16 wClockSourceStr; } __attribute__((packed)); union uac23_clock_source_desc { struct uac_clock_source_descriptor v2; struct uac3_clock_source_descriptor v3; }; struct uac_clock_selector_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bClockID; __u8 bNrInPins; __u8 baCSourceID[0]; }; struct uac3_clock_selector_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bClockID; __u8 bNrInPins; __u8 baCSourceID[0]; }; union uac23_clock_selector_desc { struct uac_clock_selector_descriptor v2; struct uac3_clock_selector_descriptor v3; }; struct uac_clock_multiplier_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bClockID; __u8 bCSourceID; __u8 bmControls; __u8 iClockMultiplier; }; union uac23_clock_multiplier_desc { struct uac_clock_multiplier_descriptor v2; struct uac_clock_multiplier_descriptor v3; }; enum { EP_STATE_STOPPED = 0, EP_STATE_RUNNING = 1, EP_STATE_STOPPING = 2, }; struct snd_usb_iface_ref { unsigned char iface; bool 
need_setup; int opened; int altset; struct list_head list; }; struct snd_usb_clock_ref { unsigned char clock; atomic_t locked; int opened; int rate; bool need_setup; struct list_head list; }; struct uac_format_type_ii_ext_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bFormatType; __le16 wMaxBitRate; __le16 wSamplesPerFrame; __u8 bHeaderLength; __u8 bSideBandProtocol; }; struct uac_format_type_ii_discrete_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bFormatType; __le16 wMaxBitRate; __le16 wSamplesPerFrame; __u8 bSamFreqType; __u8 tSamFreq[0]; } __attribute__((packed)); struct uac3_as_header_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalLink; __le32 bmControls; __le16 wClusterDescrID; __le64 bmFormats; __u8 bSubslotSize; __u8 bBitResolution; __le16 bmAuxProtocols; __u8 bControlSize; } __attribute__((packed)); struct uac_format_type_i_ext_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bFormatType; __u8 bSubslotSize; __u8 bBitResolution; __u8 bHeaderLength; __u8 bControlSize; __u8 bSideBandProtocol; }; struct uac_format_type_i_discrete_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bFormatType; __u8 bNrChannels; __u8 bSubframeSize; __u8 bBitResolution; __u8 bSamFreqType; __u8 tSamFreq[0]; }; struct uac_format_type_i_continuous_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bFormatType; __u8 bNrChannels; __u8 bSubframeSize; __u8 bBitResolution; __u8 bSamFreqType; __u8 tLowerSamFreq[3]; __u8 tUpperSamFreq[3]; }; struct snd_usb_implicit_fb_match { unsigned int id; unsigned int iface_class; unsigned int ep_num; unsigned int iface; int type; }; enum { IMPLICIT_FB_NONE = 0, IMPLICIT_FB_GENERIC = 1, IMPLICIT_FB_FIXED = 2, IMPLICIT_FB_BOTH = 3, }; struct usbmix_name_map; struct usbmix_selector_map; struct usbmix_ctl_map { u32 id; const struct usbmix_name_map *map; const struct usbmix_selector_map *selector_map; const struct usbmix_connector_map *connector_map; }; struct usbmix_dB_map; struct usbmix_name_map { int id; const char *name; int control; const struct usbmix_dB_map *dB; }; struct usbmix_dB_map { int min; int max; bool min_mute; }; struct usbmix_selector_map { int id; int count; const char **names; }; struct uac3_badd_profile { int subclass; const char *name; int c_chmask; int p_chmask; int st_chmask; }; struct iterm_name_combo { int type; char *name; }; struct usb_feature_control_info { int control; const char *name; int type; int type_uac2; }; struct procunit_value_info { int control; const char *suffix; int val_type; int min_value; }; struct procunit_info { int type; char *name; const struct procunit_value_info *values; }; enum { USB_MIXER_BOOLEAN = 0, USB_MIXER_INV_BOOLEAN = 1, USB_MIXER_S8 = 2, USB_MIXER_U8 = 3, USB_MIXER_S16 = 4, USB_MIXER_U16 = 5, USB_MIXER_S32 = 6, USB_MIXER_U32 = 7, USB_MIXER_BESPOKEN = 8, }; enum { USB_XU_CLOCK_RATE = 58113, USB_XU_CLOCK_SOURCE = 58114, USB_XU_DIGITAL_IO_STATUS = 58115, USB_XU_DEVICE_OPTIONS = 58116, USB_XU_DIRECT_MONITORING = 58117, USB_XU_METERING = 58118, }; struct usb_mixer_elem_info { struct usb_mixer_elem_list head; unsigned int control; unsigned int cmask; unsigned int idx_off; unsigned int ch_readonly; unsigned int master_readonly; int channels; int val_type; int min; int max; int res; int dBmin; int dBmax; int cached; int cache_val[16]; u8 initialized; u8 min_mute; void *private_data; }; struct usb_audio_term { int id; int type; int 
channels; unsigned int chconfig; int name; }; struct mixer_build { struct snd_usb_audio *chip; struct usb_mixer_interface *mixer; unsigned char *buffer; unsigned int buflen; unsigned long unitbitmap[4]; unsigned long termbitmap[4]; struct usb_audio_term oterm; const struct usbmix_name_map *map; const struct usbmix_selector_map *selector_map; }; struct uac3_output_terminal_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalID; __le16 wTerminalType; __u8 bAssocTerminal; __u8 bSourceID; __u8 bCSourceID; __le32 bmControls; __le16 wExTerminalDescrID; __le16 wConnectorsDescrID; __le16 wTerminalDescrStr; } __attribute__((packed)); struct uac1_output_terminal_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalID; __le16 wTerminalType; __u8 bAssocTerminal; __u8 bSourceID; __u8 iTerminal; } __attribute__((packed)); struct uac_feature_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bUnitID; __u8 bSourceID; __u8 bControlSize; __u8 bmaControls[0]; }; struct uac3_input_terminal_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalID; __le16 wTerminalType; __u8 bAssocTerminal; __u8 bCSourceID; __le32 bmControls; __le16 wClusterDescrID; __le16 wExTerminalDescrID; __le16 wConnectorsDescrID; __le16 wTerminalDescrStr; }; struct uac_selector_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bUintID; __u8 bNrInPins; __u8 baSourceID[0]; }; struct uac3_feature_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bUnitID; __u8 bSourceID; __u8 bmaControls[0]; }; struct uac_mixer_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bUnitID; __u8 bNrInPins; __u8 baSourceID[0]; }; struct uac_processing_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bUnitID; __le16 wProcessType; __u8 bNrInPins; __u8 baSourceID[0]; } __attribute__((packed)); struct uac2_effect_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bUnitID; __le16 wEffectType; __u8 bSourceID; __u8 bmaControls[0]; } __attribute__((packed)); struct uac_input_terminal_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalID; __le16 wTerminalType; __u8 bAssocTerminal; __u8 bNrChannels; __le16 wChannelConfig; __u8 iChannelNames; __u8 iTerminal; }; struct uac3_cluster_header_descriptor { __le16 wLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __le16 wDescriptorID; __u8 bNrChannels; } __attribute__((packed)); struct uac2_connectors_ctl_blk { __u8 bNrChannels; __le32 bmChannelConfig; __u8 iChannelNames; } __attribute__((packed)); struct uac3_insertion_ctl_blk { __u8 bSize; __u8 bmConInserted; }; struct uac1_status_word { __u8 bStatusType; __u8 bOriginator; }; struct sb_jack { int unitid; const char *name; }; struct std_mono_table { unsigned int unitid; unsigned int control; unsigned int cmask; int val_type; const char *name; snd_kcontrol_tlv_rw_t *tlv_callback; }; struct snd_djm_ctl; struct snd_djm_device { const char *name; const struct snd_djm_ctl *controls; size_t ncontrols; }; struct snd_djm_ctl { const char *name; const u16 *options; size_t noptions; u16 default_value; u16 wIndex; }; enum { SNDRV_HWDEP_IFACE_OPL2 = 0, SNDRV_HWDEP_IFACE_OPL3 = 1, SNDRV_HWDEP_IFACE_OPL4 = 2, SNDRV_HWDEP_IFACE_SB16CSP = 3, SNDRV_HWDEP_IFACE_EMU10K1 = 4, SNDRV_HWDEP_IFACE_YSS225 = 5, SNDRV_HWDEP_IFACE_ICS2115 = 6, 
SNDRV_HWDEP_IFACE_SSCAPE = 7, SNDRV_HWDEP_IFACE_VX = 8, SNDRV_HWDEP_IFACE_MIXART = 9, SNDRV_HWDEP_IFACE_USX2Y = 10, SNDRV_HWDEP_IFACE_EMUX_WAVETABLE = 11, SNDRV_HWDEP_IFACE_BLUETOOTH = 12, SNDRV_HWDEP_IFACE_USX2Y_PCM = 13, SNDRV_HWDEP_IFACE_PCXHR = 14, SNDRV_HWDEP_IFACE_SB_RC = 15, SNDRV_HWDEP_IFACE_HDA = 16, SNDRV_HWDEP_IFACE_USB_STREAM = 17, SNDRV_HWDEP_IFACE_FW_DICE = 18, SNDRV_HWDEP_IFACE_FW_FIREWORKS = 19, SNDRV_HWDEP_IFACE_FW_BEBOB = 20, SNDRV_HWDEP_IFACE_FW_OXFW = 21, SNDRV_HWDEP_IFACE_FW_DIGI00X = 22, SNDRV_HWDEP_IFACE_FW_TASCAM = 23, SNDRV_HWDEP_IFACE_LINE6 = 24, SNDRV_HWDEP_IFACE_FW_MOTU = 25, SNDRV_HWDEP_IFACE_FW_FIREFACE = 26, SNDRV_HWDEP_IFACE_LAST = 26, }; enum snd_rme_domain { SND_RME_DOMAIN_SYSTEM = 0, SND_RME_DOMAIN_AES = 1, SND_RME_DOMAIN_SPDIF = 2, }; enum snd_rme_clock_status { SND_RME_CLOCK_NOLOCK = 0, SND_RME_CLOCK_LOCK = 1, SND_RME_CLOCK_SYNC = 2, }; enum { SND_BBFPRO_CTL_REG1 = 0, SND_BBFPRO_CTL_REG2 = 1, }; struct scarlett_mixer_elem_enum_info { int start; int len; int offsets[5]; const char * const *names; }; struct scarlett_mixer_control { unsigned char num; unsigned char type; const char *name; }; struct scarlett_device_info { int matrix_in; int matrix_out; int input_len; int output_len; struct scarlett_mixer_elem_enum_info opt_master; struct scarlett_mixer_elem_enum_info opt_matrix; int matrix_mux_init[18]; int num_controls; const struct scarlett_mixer_control controls[10]; }; enum { SCARLETT_OUTPUTS = 0, SCARLETT_SWITCH_IMPEDANCE = 1, SCARLETT_SWITCH_PAD = 2, SCARLETT_SWITCH_GAIN = 3, }; enum { SCARLETT_OFFSET_PCM = 0, SCARLETT_OFFSET_ANALOG = 1, SCARLETT_OFFSET_SPDIF = 2, SCARLETT_OFFSET_ADAT = 3, SCARLETT_OFFSET_MIX = 4, }; struct scarlett2_device_info; struct scarlett2_device_entry { const u32 usb_id; const struct scarlett2_device_info *info; const char *series_name; }; struct scarlett2_mux_entry { u8 port_type; u8 start; u8 count; }; struct scarlett2_device_info { u8 has_msd_mode; u8 config_set; u8 line_out_hw_vol; u8 has_speaker_switching; u8 has_talkback; u8 level_input_count; u8 level_input_first; u8 pad_input_count; u8 air_input_count; u8 phantom_count; u8 inputs_per_phantom; u8 direct_monitor; u8 line_out_remap_enable; u8 line_out_remap[10]; const char * const line_out_descrs[10]; const int port_count[12]; struct scarlett2_mux_entry mux_assignment[30]; }; struct scarlett2_config { u8 offset; u8 size; u8 activate; }; struct scarlett2_port { u16 id; const char * const src_descr; int src_num_offset; const char * const dst_descr; }; enum { SCARLETT2_PORT_TYPE_NONE = 0, SCARLETT2_PORT_TYPE_ANALOGUE = 1, SCARLETT2_PORT_TYPE_SPDIF = 2, SCARLETT2_PORT_TYPE_ADAT = 3, SCARLETT2_PORT_TYPE_MIX = 4, SCARLETT2_PORT_TYPE_PCM = 5, SCARLETT2_PORT_TYPE_COUNT = 6, }; enum { SCARLETT2_PORT_IN = 0, SCARLETT2_PORT_OUT = 1, SCARLETT2_PORT_DIRNS = 2, }; enum { SCARLETT2_CONFIG_DIM_MUTE = 0, SCARLETT2_CONFIG_LINE_OUT_VOLUME = 1, SCARLETT2_CONFIG_MUTE_SWITCH = 2, SCARLETT2_CONFIG_SW_HW_SWITCH = 3, SCARLETT2_CONFIG_LEVEL_SWITCH = 4, SCARLETT2_CONFIG_PAD_SWITCH = 5, SCARLETT2_CONFIG_MSD_SWITCH = 6, SCARLETT2_CONFIG_AIR_SWITCH = 7, SCARLETT2_CONFIG_STANDALONE_SWITCH = 8, SCARLETT2_CONFIG_PHANTOM_SWITCH = 9, SCARLETT2_CONFIG_PHANTOM_PERSISTENCE = 10, SCARLETT2_CONFIG_DIRECT_MONITOR = 11, SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH = 12, SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE = 13, SCARLETT2_CONFIG_TALKBACK_MAP = 14, SCARLETT2_CONFIG_COUNT = 15, }; enum { SCARLETT2_CONFIG_SET_NO_MIXER = 0, SCARLETT2_CONFIG_SET_GEN_2 = 1, SCARLETT2_CONFIG_SET_GEN_3 = 2, SCARLETT2_CONFIG_SET_CLARETT = 3, 
SCARLETT2_CONFIG_SET_COUNT = 4, }; enum { SCARLETT2_BUTTON_MUTE = 0, SCARLETT2_BUTTON_DIM = 1, SCARLETT2_DIM_MUTE_COUNT = 2, }; struct scarlett2_data { struct usb_mixer_interface *mixer; struct mutex usb_mutex; struct mutex data_mutex; struct delayed_work work; const struct scarlett2_device_info *info; const char *series_name; __u8 bInterfaceNumber; __u8 bEndpointAddress; __u16 wMaxPacketSize; __u8 bInterval; int num_mux_srcs; int num_mux_dsts; u16 scarlett2_seq; u8 sync_updated; u8 vol_updated; u8 input_other_updated; u8 monitor_other_updated; u8 mux_updated; u8 speaker_switching_switched; u8 sync; u8 master_vol; u8 vol[10]; u8 vol_sw_hw_switch[10]; u8 mute_switch[10]; u8 level_switch[2]; u8 pad_switch[8]; u8 dim_mute[2]; u8 air_switch[8]; u8 phantom_switch[2]; u8 phantom_persistence; u8 direct_monitor_switch; u8 speaker_switching_switch; u8 talkback_switch; u8 talkback_map[12]; u8 msd_switch; u8 standalone_switch; struct snd_kcontrol *sync_ctl; struct snd_kcontrol *master_vol_ctl; struct snd_kcontrol *vol_ctls[10]; struct snd_kcontrol *sw_hw_ctls[10]; struct snd_kcontrol *mute_ctls[10]; struct snd_kcontrol *dim_mute_ctls[2]; struct snd_kcontrol *level_ctls[2]; struct snd_kcontrol *pad_ctls[8]; struct snd_kcontrol *air_ctls[8]; struct snd_kcontrol *phantom_ctls[2]; struct snd_kcontrol *mux_ctls[77]; struct snd_kcontrol *direct_monitor_ctl; struct snd_kcontrol *speaker_switching_ctl; struct snd_kcontrol *talkback_ctl; u8 mux[77]; u8 mix[300]; }; struct scarlett2_usb_volume_status { u8 dim_mute[2]; u8 pad1; s16 sw_vol[10]; s16 hw_vol[10]; u8 mute_switch[10]; u8 sw_hw_switch[10]; u8 pad3[6]; s16 master_vol; } __attribute__((packed)); struct scarlett2_usb_packet { __le32 cmd; __le16 size; __le16 seq; __le32 error; __le32 pad; u8 data[0]; }; struct snd_us16x08_control_params { const struct snd_kcontrol_new *kcontrol_new; int control_id; int type; int num_channels; const char *name; int default_val; }; struct snd_us16x08_comp_store { u8 val[96]; }; struct snd_us16x08_eq_store { u8 val[256]; }; struct snd_us16x08_meter_store { int meter_level[16]; int master_level[2]; int comp_index; int comp_active_index; int comp_level[16]; struct snd_us16x08_comp_store *comp_store; }; struct s1810c_ctl_packet { u32 a; u32 b; u32 fixed1; u32 fixed2; u32 c; u32 d; u32 e; }; struct s1810_mixer_state { uint16_t seqnum; struct mutex usb_mutex; struct mutex data_mutex; }; struct s1810c_state_packet { u32 fields[63]; }; enum { UAC3_PD_STATE_D0 = 0, UAC3_PD_STATE_D1 = 1, UAC3_PD_STATE_D2 = 2, }; struct uac3_power_domain_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bPowerDomainID; __le16 waRecoveryTime1; __le16 waRecoveryTime2; __u8 bNrEntities; __u8 baEntityID[0]; } __attribute__((packed)); typedef int (*quirk_func_t)(struct snd_usb_audio *, struct usb_interface *, struct usb_driver *, const struct snd_usb_audio_quirk *); typedef s8 int8_t; struct snd_usb_midi_endpoint_info { int8_t out_ep; uint8_t out_interval; int8_t in_ep; uint8_t in_interval; uint16_t out_cables; uint16_t in_cables; int16_t assoc_in_jacks[16]; int16_t assoc_out_jacks[16]; }; struct usb_audio_quirk_flags_table { u32 id; u32 flags; }; enum quirk_type { QUIRK_IGNORE_INTERFACE = 0, QUIRK_COMPOSITE = 1, QUIRK_AUTODETECT = 2, QUIRK_MIDI_STANDARD_INTERFACE = 3, QUIRK_MIDI_FIXED_ENDPOINT = 4, QUIRK_MIDI_YAMAHA = 5, QUIRK_MIDI_ROLAND = 6, QUIRK_MIDI_MIDIMAN = 7, QUIRK_MIDI_NOVATION = 8, QUIRK_MIDI_RAW_BYTES = 9, QUIRK_MIDI_EMAGIC = 10, QUIRK_MIDI_CME = 11, QUIRK_MIDI_AKAI = 12, QUIRK_MIDI_US122L = 13, QUIRK_MIDI_FTDI = 
14, QUIRK_MIDI_CH345 = 15, QUIRK_AUDIO_STANDARD_INTERFACE = 16, QUIRK_AUDIO_FIXED_ENDPOINT = 17, QUIRK_AUDIO_EDIROL_UAXX = 18, QUIRK_AUDIO_STANDARD_MIXER = 19, QUIRK_TYPE_COUNT = 20, }; enum { EMU_QUIRK_SR_44100HZ = 0, EMU_QUIRK_SR_48000HZ = 1, EMU_QUIRK_SR_88200HZ = 2, EMU_QUIRK_SR_96000HZ = 3, EMU_QUIRK_SR_176400HZ = 4, EMU_QUIRK_SR_192000HZ = 5, }; struct usb_ms_endpoint_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bNumEmbMIDIJack; __u8 baAssocJackID[0]; }; struct uac1_as_header_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalLink; __u8 bDelay; __le16 wFormatTag; } __attribute__((packed)); struct usb_midi_out_jack_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bJackType; __u8 bJackID; __u8 bNrInputPins; struct usb_midi_source_pin pins[0]; }; struct uac3_iso_endpoint_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __le32 bmControls; __u8 bLockDelayUnits; __le16 wLockDelay; } __attribute__((packed)); struct uac3_hc_descriptor_header { __le16 wLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __le16 wDescriptorID; }; struct uac3_cluster_segment_descriptor { __le16 wLength; __u8 bSegmentType; } __attribute__((packed)); struct uac3_cluster_information_segment_descriptor { __le16 wLength; __u8 bSegmentType; __u8 bChPurpose; __u8 bChRelationship; __u8 bChGroupID; }; struct uac_iso_endpoint_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bmAttributes; __u8 bLockDelayUnits; __le16 wLockDelay; } __attribute__((packed)); struct usb_desc_validator { unsigned char protocol; unsigned char type; bool (*func)(const void *, const struct usb_desc_validator *); size_t size; }; struct snd_usb_midi_in_endpoint; struct snd_usb_midi_out_endpoint; struct usb_protocol_ops { void (*input)(struct snd_usb_midi_in_endpoint *, uint8_t *, int); void (*output)(struct snd_usb_midi_out_endpoint *, struct urb *); void (*output_packet)(struct urb *, uint8_t, uint8_t, uint8_t, uint8_t); void (*init_out_endpoint)(struct snd_usb_midi_out_endpoint *); void (*finish_out_endpoint)(struct snd_usb_midi_out_endpoint *); }; struct usbmidi_in_port { struct snd_rawmidi_substream *substream; u8 running_status_length; }; struct snd_usb_midi; struct snd_usb_midi_in_endpoint { struct snd_usb_midi *umidi; struct urb *urbs[7]; struct usbmidi_in_port ports[16]; u8 seen_f5; bool in_sysex; u8 last_cin; u8 error_resubmit; int current_port; }; struct snd_usb_midi_endpoint { struct snd_usb_midi_out_endpoint *out; struct snd_usb_midi_in_endpoint *in; }; struct snd_usb_midi { struct usb_device *dev; struct snd_card *card; struct usb_interface *iface; const struct snd_usb_audio_quirk *quirk; struct snd_rawmidi *rmidi; const struct usb_protocol_ops *usb_protocol_ops; struct list_head list; struct timer_list error_timer; spinlock_t disc_lock; struct rw_semaphore disc_rwsem; struct mutex mutex; u32 usb_id; int next_midi_device; struct snd_usb_midi_endpoint endpoints[2]; unsigned long input_triggered; unsigned int opened[2]; unsigned char disconnected; unsigned char input_running; struct snd_kcontrol *roland_load_ctl; }; struct snd_seq_addr { unsigned char client; unsigned char port; }; struct snd_seq_port_info { struct snd_seq_addr addr; char name[64]; unsigned int capability; unsigned int type; int midi_channels; int midi_voices; int synth_voices; int read_use; int write_use; void *kernel; unsigned int flags; unsigned char time_queue; unsigned char direction; unsigned char ump_group; 
char reserved[57]; }; struct out_urb_context { struct urb *urb; struct snd_usb_midi_out_endpoint *ep; }; struct usbmidi_out_port { struct snd_usb_midi_out_endpoint *ep; struct snd_rawmidi_substream *substream; int active; uint8_t cable; uint8_t state; uint8_t data[2]; }; struct snd_usb_midi_out_endpoint { struct snd_usb_midi *umidi; struct out_urb_context urbs[7]; unsigned int active_urbs; unsigned int drain_urbs; int max_transfer; struct work_struct work; unsigned int next_urb; spinlock_t buffer_lock; struct usbmidi_out_port ports[16]; int current_port; wait_queue_head_t drain_wait; }; struct port_info { u32 id; short port; short voices; const char *name; unsigned int seq_flags; }; struct snd_soc_card; typedef void (*btf_trace_snd_soc_bias_level_start)(void *, struct snd_soc_card *, int); enum snd_soc_pcm_subclass { SND_SOC_PCM_CLASS_PCM = 0, SND_SOC_PCM_CLASS_BE = 1, }; enum snd_soc_bias_level { SND_SOC_BIAS_OFF = 0, SND_SOC_BIAS_STANDBY = 1, SND_SOC_BIAS_PREPARE = 2, SND_SOC_BIAS_ON = 3, }; struct snd_soc_component; struct snd_soc_dapm_widget; struct snd_soc_dapm_context { enum snd_soc_bias_level bias_level; unsigned int idle_bias_off: 1; unsigned int suspend_bias_off: 1; struct device *dev; struct snd_soc_component *component; struct snd_soc_card *card; enum snd_soc_bias_level target_bias_level; struct list_head list; struct snd_soc_dapm_widget *wcache_sink; struct snd_soc_dapm_widget *wcache_source; struct dentry *debugfs_dapm; }; struct snd_soc_dapm_stats { int power_checks; int path_checks; int neighbour_checks; }; struct snd_soc_dai_link; struct snd_soc_codec_conf; struct snd_soc_aux_dev; struct snd_soc_dapm_route; struct snd_soc_dapm_update; struct snd_soc_card { const char *name; const char *long_name; const char *driver_name; const char *components; unsigned short pci_subsystem_vendor; unsigned short pci_subsystem_device; bool pci_subsystem_set; char topology_shortname[32]; struct device *dev; struct snd_card *snd_card; struct module *owner; struct mutex mutex; struct mutex dapm_mutex; struct mutex pcm_mutex; enum snd_soc_pcm_subclass pcm_subclass; int (*probe)(struct snd_soc_card *); int (*late_probe)(struct snd_soc_card *); void (*fixup_controls)(struct snd_soc_card *); int (*remove)(struct snd_soc_card *); int (*suspend_pre)(struct snd_soc_card *); int (*suspend_post)(struct snd_soc_card *); int (*resume_pre)(struct snd_soc_card *); int (*resume_post)(struct snd_soc_card *); int (*set_bias_level)(struct snd_soc_card *, struct snd_soc_dapm_context *, enum snd_soc_bias_level); int (*set_bias_level_post)(struct snd_soc_card *, struct snd_soc_dapm_context *, enum snd_soc_bias_level); int (*add_dai_link)(struct snd_soc_card *, struct snd_soc_dai_link *); void (*remove_dai_link)(struct snd_soc_card *, struct snd_soc_dai_link *); long pmdown_time; struct snd_soc_dai_link *dai_link; int num_links; struct list_head rtd_list; int num_rtd; struct snd_soc_codec_conf *codec_conf; int num_configs; struct snd_soc_aux_dev *aux_dev; int num_aux_devs; struct list_head aux_comp_list; const struct snd_kcontrol_new *controls; int num_controls; const struct snd_soc_dapm_widget *dapm_widgets; int num_dapm_widgets; const struct snd_soc_dapm_route *dapm_routes; int num_dapm_routes; const struct snd_soc_dapm_widget *of_dapm_widgets; int num_of_dapm_widgets; const struct snd_soc_dapm_route *of_dapm_routes; int num_of_dapm_routes; struct list_head component_dev_list; struct list_head list; struct list_head widgets; struct list_head paths; struct list_head dapm_list; struct list_head dapm_dirty; struct 
list_head dobj_list; struct snd_soc_dapm_context dapm; struct snd_soc_dapm_stats dapm_stats; struct snd_soc_dapm_update *update; struct dentry *debugfs_card_root; struct work_struct deferred_resume_work; u32 pop_time; unsigned int instantiated: 1; unsigned int topology_shortname_created: 1; unsigned int fully_routed: 1; unsigned int disable_route_checks: 1; unsigned int probed: 1; unsigned int component_chaining: 1; void *drvdata; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; }; struct snd_soc_component_driver; struct snd_soc_component { const char *name; int id; const char *name_prefix; struct device *dev; struct snd_soc_card *card; unsigned int active; unsigned int suspended: 1; struct list_head list; struct list_head card_aux_list; struct list_head card_list; const struct snd_soc_component_driver *driver; struct list_head dai_list; int num_dai; struct regmap *regmap; int val_bytes; struct mutex io_mutex; struct list_head dobj_list; struct snd_soc_dapm_context dapm; int (*init)(struct snd_soc_component *); void *mark_module; struct snd_pcm_substream *mark_open; struct snd_pcm_substream *mark_hw_params; struct snd_pcm_substream *mark_trigger; struct snd_compr_stream *mark_compr_open; void *mark_pm; struct dentry *debugfs_root; const char *debugfs_prefix; }; enum snd_soc_dapm_type { snd_soc_dapm_input = 0, snd_soc_dapm_output = 1, snd_soc_dapm_mux = 2, snd_soc_dapm_demux = 3, snd_soc_dapm_mixer = 4, snd_soc_dapm_mixer_named_ctl = 5, snd_soc_dapm_pga = 6, snd_soc_dapm_out_drv = 7, snd_soc_dapm_adc = 8, snd_soc_dapm_dac = 9, snd_soc_dapm_micbias = 10, snd_soc_dapm_mic = 11, snd_soc_dapm_hp = 12, snd_soc_dapm_spk = 13, snd_soc_dapm_line = 14, snd_soc_dapm_switch = 15, snd_soc_dapm_vmid = 16, snd_soc_dapm_pre = 17, snd_soc_dapm_post = 18, snd_soc_dapm_supply = 19, snd_soc_dapm_pinctrl = 20, snd_soc_dapm_regulator_supply = 21, snd_soc_dapm_clock_supply = 22, snd_soc_dapm_aif_in = 23, snd_soc_dapm_aif_out = 24, snd_soc_dapm_siggen = 25, snd_soc_dapm_sink = 26, snd_soc_dapm_dai_in = 27, snd_soc_dapm_dai_out = 28, snd_soc_dapm_dai_link = 29, snd_soc_dapm_kcontrol = 30, snd_soc_dapm_buffer = 31, snd_soc_dapm_scheduler = 32, snd_soc_dapm_effect = 33, snd_soc_dapm_src = 34, snd_soc_dapm_asrc = 35, snd_soc_dapm_encoder = 36, snd_soc_dapm_decoder = 37, SND_SOC_DAPM_TYPE_COUNT = 38, }; enum snd_soc_trigger_order { SND_SOC_TRIGGER_ORDER_DEFAULT = 0, SND_SOC_TRIGGER_ORDER_LDC = 1, SND_SOC_TRIGGER_ORDER_MAX = 2, }; struct snd_soc_pcm_runtime; struct snd_soc_jack; struct snd_compress_ops; struct snd_soc_component_driver { const char *name; const struct snd_kcontrol_new *controls; unsigned int num_controls; const struct snd_soc_dapm_widget *dapm_widgets; unsigned int num_dapm_widgets; const struct snd_soc_dapm_route *dapm_routes; unsigned int num_dapm_routes; int (*probe)(struct snd_soc_component *); void (*remove)(struct snd_soc_component *); int (*suspend)(struct snd_soc_component *); int (*resume)(struct snd_soc_component *); unsigned int (*read)(struct snd_soc_component *, unsigned int); int (*write)(struct snd_soc_component *, unsigned int, unsigned int); int (*pcm_construct)(struct snd_soc_component *, struct snd_soc_pcm_runtime *); void (*pcm_destruct)(struct snd_soc_component *, struct snd_pcm *); int (*set_sysclk)(struct snd_soc_component *, int, int, unsigned int, int); int (*set_pll)(struct snd_soc_component *, int, int, unsigned int, unsigned int); int (*set_jack)(struct snd_soc_component *, struct snd_soc_jack *, void *); int 
(*get_jack_type)(struct snd_soc_component *); int (*of_xlate_dai_name)(struct snd_soc_component *, const struct of_phandle_args *, const char **); int (*of_xlate_dai_id)(struct snd_soc_component *, struct device_node *); void (*seq_notifier)(struct snd_soc_component *, enum snd_soc_dapm_type, int); int (*stream_event)(struct snd_soc_component *, int); int (*set_bias_level)(struct snd_soc_component *, enum snd_soc_bias_level); int (*open)(struct snd_soc_component *, struct snd_pcm_substream *); int (*close)(struct snd_soc_component *, struct snd_pcm_substream *); int (*ioctl)(struct snd_soc_component *, struct snd_pcm_substream *, unsigned int, void *); int (*hw_params)(struct snd_soc_component *, struct snd_pcm_substream *, struct snd_pcm_hw_params *); int (*hw_free)(struct snd_soc_component *, struct snd_pcm_substream *); int (*prepare)(struct snd_soc_component *, struct snd_pcm_substream *); int (*trigger)(struct snd_soc_component *, struct snd_pcm_substream *, int); int (*sync_stop)(struct snd_soc_component *, struct snd_pcm_substream *); snd_pcm_uframes_t (*pointer)(struct snd_soc_component *, struct snd_pcm_substream *); int (*get_time_info)(struct snd_soc_component *, struct snd_pcm_substream *, struct timespec64 *, struct timespec64 *, struct snd_pcm_audio_tstamp_config *, struct snd_pcm_audio_tstamp_report *); int (*copy)(struct snd_soc_component *, struct snd_pcm_substream *, int, unsigned long, struct iov_iter *, unsigned long); struct page * (*page)(struct snd_soc_component *, struct snd_pcm_substream *, unsigned long); int (*mmap)(struct snd_soc_component *, struct snd_pcm_substream *, struct vm_area_struct *); int (*ack)(struct snd_soc_component *, struct snd_pcm_substream *); snd_pcm_sframes_t (*delay)(struct snd_soc_component *, struct snd_pcm_substream *); const struct snd_compress_ops *compress_ops; int probe_order; int remove_order; enum snd_soc_trigger_order trigger_start; enum snd_soc_trigger_order trigger_stop; unsigned int module_get_upon_open: 1; unsigned int idle_bias_on: 1; unsigned int suspend_bias_off: 1; unsigned int use_pmdown_time: 1; unsigned int endianness: 1; unsigned int legacy_dai_naming: 1; const char *ignore_machine; const char *topology_name_prefix; int (*be_hw_params_fixup)(struct snd_soc_pcm_runtime *, struct snd_pcm_hw_params *); bool use_dai_pcm_id; int be_pcm_base; const char *debugfs_prefix; }; enum snd_soc_dobj_type { SND_SOC_DOBJ_NONE = 0, SND_SOC_DOBJ_MIXER = 1, SND_SOC_DOBJ_BYTES = 2, SND_SOC_DOBJ_ENUM = 3, SND_SOC_DOBJ_GRAPH = 4, SND_SOC_DOBJ_WIDGET = 5, SND_SOC_DOBJ_DAI_LINK = 6, SND_SOC_DOBJ_PCM = 7, SND_SOC_DOBJ_CODEC_LINK = 8, SND_SOC_DOBJ_BACKEND_LINK = 9, }; struct snd_soc_dobj_control { struct snd_kcontrol *kcontrol; char **dtexts; unsigned long *dvalues; }; struct snd_soc_dobj_widget { unsigned int *kcontrol_type; }; struct snd_soc_dobj { enum snd_soc_dobj_type type; unsigned int index; struct list_head list; int (*unload)(struct snd_soc_component *, struct snd_soc_dobj *); union { struct snd_soc_dobj_control control; struct snd_soc_dobj_widget widget; }; void *private; }; struct snd_soc_dapm_widget { enum snd_soc_dapm_type id; const char *name; const char *sname; struct list_head list; struct snd_soc_dapm_context *dapm; void *priv; struct regulator *regulator; struct pinctrl *pinctrl; int reg; unsigned char shift; unsigned int mask; unsigned int on_val; unsigned int off_val; unsigned char power: 1; unsigned char active: 1; unsigned char connected: 1; unsigned char new: 1; unsigned char force: 1; unsigned char ignore_suspend: 1; 
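/*
 * Illustrative sketch: the btf_trace_snd_soc_bias_level_start typedef above
 * (args: card, level) is what a BPF tp_btf program attaches to. A minimal
 * example, assuming this header plus libbpf's <bpf/bpf_helpers.h>,
 * <bpf/bpf_tracing.h> and <bpf/bpf_core_read.h>; the program name and the
 * printed format are made up.
 *
 *   SEC("tp_btf/snd_soc_bias_level_start")
 *   int BPF_PROG(on_bias_level_start, struct snd_soc_card *card, int level)
 *   {
 *       char name[32] = {};
 *
 *       // CO-RE read of card->name, then copy the string so it can be printed
 *       bpf_probe_read_kernel_str(name, sizeof(name), BPF_CORE_READ(card, name));
 *       bpf_printk("snd_soc card %s -> bias level %d", name, level);
 *       return 0;
 *   }
 *   char LICENSE[] SEC("license") = "GPL";
 */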
unsigned char new_power: 1; unsigned char power_checked: 1; unsigned char is_supply: 1; unsigned char is_ep: 2; unsigned char no_wname_in_kcontrol_name: 1; int subseq; int (*power_check)(struct snd_soc_dapm_widget *); unsigned short event_flags; int (*event)(struct snd_soc_dapm_widget *, struct snd_kcontrol *, int); int num_kcontrols; const struct snd_kcontrol_new *kcontrol_news; struct snd_kcontrol **kcontrols; struct snd_soc_dobj dobj; struct list_head edges[2]; struct list_head work_list; struct list_head power_list; struct list_head dirty; int endpoints[2]; struct clk *clk; int channel; }; struct snd_soc_dapm_route { const char *sink; const char *control; const char *source; int (*connected)(struct snd_soc_dapm_widget *, struct snd_soc_dapm_widget *); struct snd_soc_dobj dobj; }; enum snd_soc_dpcm_update { SND_SOC_DPCM_UPDATE_NO = 0, SND_SOC_DPCM_UPDATE_BE = 1, SND_SOC_DPCM_UPDATE_FE = 2, }; enum snd_soc_dpcm_state { SND_SOC_DPCM_STATE_NEW = 0, SND_SOC_DPCM_STATE_OPEN = 1, SND_SOC_DPCM_STATE_HW_PARAMS = 2, SND_SOC_DPCM_STATE_PREPARE = 3, SND_SOC_DPCM_STATE_START = 4, SND_SOC_DPCM_STATE_STOP = 5, SND_SOC_DPCM_STATE_PAUSED = 6, SND_SOC_DPCM_STATE_SUSPEND = 7, SND_SOC_DPCM_STATE_HW_FREE = 8, SND_SOC_DPCM_STATE_CLOSE = 9, }; struct snd_soc_dpcm_runtime { struct list_head be_clients; struct list_head fe_clients; int users; struct snd_pcm_hw_params hw_params; enum snd_soc_dpcm_update runtime_update; enum snd_soc_dpcm_state state; int trigger_pending; int be_start; int be_pause; bool fe_pause; }; struct snd_soc_dai; struct snd_soc_pcm_runtime { struct device *dev; struct snd_soc_card *card; struct snd_soc_dai_link *dai_link; struct snd_pcm_ops ops; unsigned int c2c_params_select; struct snd_soc_dpcm_runtime dpcm[2]; struct snd_soc_dapm_widget *c2c_widget[2]; long pmdown_time; struct snd_pcm *pcm; struct snd_compr *compr; struct snd_soc_dai **dais; struct delayed_work delayed_work; void (*close_delayed_work_func)(struct snd_soc_pcm_runtime *); struct dentry *debugfs_dpcm_root; unsigned int num; struct list_head list; struct snd_pcm_substream *mark_startup; struct snd_pcm_substream *mark_hw_params; struct snd_pcm_substream *mark_trigger; struct snd_compr_stream *mark_compr_startup; unsigned int pop_wait: 1; unsigned int fe_compr: 1; bool initialized; int num_components; u64 android_kabi_reserved1; struct snd_soc_component *components[0]; }; enum snd_soc_dpcm_trigger { SND_SOC_DPCM_TRIGGER_PRE = 0, SND_SOC_DPCM_TRIGGER_POST = 1, SND_SOC_DPCM_TRIGGER_BESPOKE = 2, }; struct snd_soc_dai_link_component; struct snd_soc_dai_link_codec_ch_map; struct snd_soc_pcm_stream; struct snd_soc_ops; struct snd_soc_compr_ops; struct snd_soc_dai_link { const char *name; const char *stream_name; struct snd_soc_dai_link_component *cpus; unsigned int num_cpus; struct snd_soc_dai_link_component *codecs; unsigned int num_codecs; struct snd_soc_dai_link_codec_ch_map *codec_ch_maps; struct snd_soc_dai_link_component *platforms; unsigned int num_platforms; int id; const struct snd_soc_pcm_stream *c2c_params; unsigned int num_c2c_params; unsigned int dai_fmt; enum snd_soc_dpcm_trigger trigger[2]; int (*init)(struct snd_soc_pcm_runtime *); void (*exit)(struct snd_soc_pcm_runtime *); int (*be_hw_params_fixup)(struct snd_soc_pcm_runtime *, struct snd_pcm_hw_params *); const struct snd_soc_ops *ops; const struct snd_soc_compr_ops *compr_ops; enum snd_soc_trigger_order trigger_start; enum snd_soc_trigger_order trigger_stop; unsigned int nonatomic: 1; unsigned int playback_only: 1; unsigned int capture_only: 1; unsigned int 
ignore_suspend: 1; unsigned int symmetric_rate: 1; unsigned int symmetric_channels: 1; unsigned int symmetric_sample_bits: 1; unsigned int no_pcm: 1; unsigned int dynamic: 1; unsigned int dpcm_capture: 1; unsigned int dpcm_playback: 1; unsigned int dpcm_merged_format: 1; unsigned int dpcm_merged_chan: 1; unsigned int dpcm_merged_rate: 1; unsigned int ignore_pmdown_time: 1; unsigned int ignore: 1; struct snd_soc_dobj dobj; u64 android_kabi_reserved1; }; struct snd_soc_dai_link_component { const char *name; struct device_node *of_node; const char *dai_name; struct of_phandle_args *dai_args; }; struct snd_soc_dai_link_codec_ch_map { unsigned int connected_cpu_id; unsigned int ch_mask; }; struct snd_soc_pcm_stream { const char *stream_name; u64 formats; unsigned int rates; unsigned int rate_min; unsigned int rate_max; unsigned int channels_min; unsigned int channels_max; unsigned int sig_bits; }; struct snd_soc_ops { int (*startup)(struct snd_pcm_substream *); void (*shutdown)(struct snd_pcm_substream *); int (*hw_params)(struct snd_pcm_substream *, struct snd_pcm_hw_params *); int (*hw_free)(struct snd_pcm_substream *); int (*prepare)(struct snd_pcm_substream *); int (*trigger)(struct snd_pcm_substream *, int); }; struct snd_soc_compr_ops { int (*startup)(struct snd_compr_stream *); void (*shutdown)(struct snd_compr_stream *); int (*set_params)(struct snd_compr_stream *); }; struct snd_soc_dai_stream { struct snd_soc_dapm_widget *widget; unsigned int active; unsigned int tdm_mask; void *dma_data; }; struct snd_soc_dai_driver; struct snd_soc_dai { const char *name; int id; struct device *dev; struct snd_soc_dai_driver *driver; struct snd_soc_dai_stream stream[2]; unsigned int rate; unsigned int channels; unsigned int sample_bits; struct snd_soc_component *component; struct list_head list; struct snd_pcm_substream *mark_startup; struct snd_pcm_substream *mark_hw_params; struct snd_pcm_substream *mark_trigger; struct snd_compr_stream *mark_compr_startup; unsigned int probed: 1; }; struct snd_soc_dai_ops; struct snd_soc_cdai_ops; struct snd_soc_dai_driver { const char *name; unsigned int id; unsigned int base; struct snd_soc_dobj dobj; struct of_phandle_args *dai_args; const struct snd_soc_dai_ops *ops; const struct snd_soc_cdai_ops *cops; struct snd_soc_pcm_stream capture; struct snd_soc_pcm_stream playback; unsigned int symmetric_rate: 1; unsigned int symmetric_channels: 1; unsigned int symmetric_sample_bits: 1; }; struct snd_soc_dai_ops { int (*probe)(struct snd_soc_dai *); int (*remove)(struct snd_soc_dai *); int (*compress_new)(struct snd_soc_pcm_runtime *, int); int (*pcm_new)(struct snd_soc_pcm_runtime *, struct snd_soc_dai *); int (*set_sysclk)(struct snd_soc_dai *, int, unsigned int, int); int (*set_pll)(struct snd_soc_dai *, int, int, unsigned int, unsigned int); int (*set_clkdiv)(struct snd_soc_dai *, int, int); int (*set_bclk_ratio)(struct snd_soc_dai *, unsigned int); int (*set_fmt)(struct snd_soc_dai *, unsigned int); int (*xlate_tdm_slot_mask)(unsigned int, unsigned int *, unsigned int *); int (*set_tdm_slot)(struct snd_soc_dai *, unsigned int, unsigned int, int, int); int (*set_channel_map)(struct snd_soc_dai *, unsigned int, unsigned int *, unsigned int, unsigned int *); int (*get_channel_map)(struct snd_soc_dai *, unsigned int *, unsigned int *, unsigned int *, unsigned int *); int (*set_tristate)(struct snd_soc_dai *, int); int (*set_stream)(struct snd_soc_dai *, void *, int); void * (*get_stream)(struct snd_soc_dai *, int); int (*mute_stream)(struct snd_soc_dai *, int, int); 
int (*startup)(struct snd_pcm_substream *, struct snd_soc_dai *); void (*shutdown)(struct snd_pcm_substream *, struct snd_soc_dai *); int (*hw_params)(struct snd_pcm_substream *, struct snd_pcm_hw_params *, struct snd_soc_dai *); int (*hw_free)(struct snd_pcm_substream *, struct snd_soc_dai *); int (*prepare)(struct snd_pcm_substream *, struct snd_soc_dai *); int (*trigger)(struct snd_pcm_substream *, int, struct snd_soc_dai *); int (*bespoke_trigger)(struct snd_pcm_substream *, int, struct snd_soc_dai *); snd_pcm_sframes_t (*delay)(struct snd_pcm_substream *, struct snd_soc_dai *); u64 *auto_selectable_formats; int num_auto_selectable_formats; int probe_order; int remove_order; unsigned int no_capture_mute: 1; unsigned int mute_unmute_on_trigger: 1; }; struct snd_soc_cdai_ops { int (*startup)(struct snd_compr_stream *, struct snd_soc_dai *); int (*shutdown)(struct snd_compr_stream *, struct snd_soc_dai *); int (*set_params)(struct snd_compr_stream *, struct snd_compr_params *, struct snd_soc_dai *); int (*get_params)(struct snd_compr_stream *, struct snd_codec *, struct snd_soc_dai *); int (*set_metadata)(struct snd_compr_stream *, struct snd_compr_metadata *, struct snd_soc_dai *); int (*get_metadata)(struct snd_compr_stream *, struct snd_compr_metadata *, struct snd_soc_dai *); int (*trigger)(struct snd_compr_stream *, int, struct snd_soc_dai *); int (*pointer)(struct snd_compr_stream *, struct snd_compr_tstamp *, struct snd_soc_dai *); int (*ack)(struct snd_compr_stream *, size_t, struct snd_soc_dai *); }; struct snd_soc_jack { struct mutex mutex; struct snd_jack *jack; struct snd_soc_card *card; struct list_head pins; int status; struct blocking_notifier_head notifier; struct list_head jack_zones; }; struct snd_compress_ops { int (*open)(struct snd_soc_component *, struct snd_compr_stream *); int (*free)(struct snd_soc_component *, struct snd_compr_stream *); int (*set_params)(struct snd_soc_component *, struct snd_compr_stream *, struct snd_compr_params *); int (*get_params)(struct snd_soc_component *, struct snd_compr_stream *, struct snd_codec *); int (*set_metadata)(struct snd_soc_component *, struct snd_compr_stream *, struct snd_compr_metadata *); int (*get_metadata)(struct snd_soc_component *, struct snd_compr_stream *, struct snd_compr_metadata *); int (*trigger)(struct snd_soc_component *, struct snd_compr_stream *, int); int (*pointer)(struct snd_soc_component *, struct snd_compr_stream *, struct snd_compr_tstamp *); int (*copy)(struct snd_soc_component *, struct snd_compr_stream *, char __attribute__((btf_type_tag("user"))) *, size_t); int (*mmap)(struct snd_soc_component *, struct snd_compr_stream *, struct vm_area_struct *); int (*ack)(struct snd_soc_component *, struct snd_compr_stream *, size_t); int (*get_caps)(struct snd_soc_component *, struct snd_compr_stream *, struct snd_compr_caps *); int (*get_codec_caps)(struct snd_soc_component *, struct snd_compr_stream *, struct snd_compr_codec_caps *); }; struct snd_soc_codec_conf { struct snd_soc_dai_link_component dlc; const char *name_prefix; }; struct snd_soc_aux_dev { struct snd_soc_dai_link_component dlc; int (*init)(struct snd_soc_component *); }; struct snd_soc_dapm_update { struct snd_kcontrol *kcontrol; int reg; int mask; int val; int reg2; int mask2; int val2; bool has_second_set; }; typedef void (*btf_trace_snd_soc_bias_level_done)(void *, struct snd_soc_card *, int); typedef void (*btf_trace_snd_soc_dapm_start)(void *, struct snd_soc_card *); typedef void (*btf_trace_snd_soc_dapm_done)(void *, struct 
snd_soc_card *); typedef void (*btf_trace_snd_soc_dapm_widget_power)(void *, struct snd_soc_dapm_widget *, int); typedef void (*btf_trace_snd_soc_dapm_widget_event_start)(void *, struct snd_soc_dapm_widget *, int); typedef void (*btf_trace_snd_soc_dapm_widget_event_done)(void *, struct snd_soc_dapm_widget *, int); typedef void (*btf_trace_snd_soc_dapm_walk_done)(void *, struct snd_soc_card *); enum snd_soc_dapm_direction { SND_SOC_DAPM_DIR_IN = 0, SND_SOC_DAPM_DIR_OUT = 1, }; struct snd_soc_dapm_path; typedef void (*btf_trace_snd_soc_dapm_path)(void *, struct snd_soc_dapm_widget *, enum snd_soc_dapm_direction, struct snd_soc_dapm_path *); struct snd_soc_dapm_path { const char *name; union { struct { struct snd_soc_dapm_widget *source; struct snd_soc_dapm_widget *sink; }; struct snd_soc_dapm_widget *node[2]; }; u32 connect: 1; u32 walking: 1; u32 weak: 1; u32 is_supply: 1; int (*connected)(struct snd_soc_dapm_widget *, struct snd_soc_dapm_widget *); struct list_head list_node[2]; struct list_head list_kcontrol; struct list_head list; }; typedef void (*btf_trace_snd_soc_dapm_connected)(void *, int, int); typedef void (*btf_trace_snd_soc_jack_irq)(void *, const char *); typedef void (*btf_trace_snd_soc_jack_report)(void *, struct snd_soc_jack *, int, int); typedef void (*btf_trace_snd_soc_jack_notify)(void *, struct snd_soc_jack *, int); struct trace_event_raw_snd_soc_card { struct trace_entry ent; u32 __data_loc_name; int val; char __data[0]; }; struct trace_event_raw_snd_soc_dapm_basic { struct trace_entry ent; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_snd_soc_dapm_widget { struct trace_entry ent; u32 __data_loc_name; int val; char __data[0]; }; struct trace_event_raw_snd_soc_dapm_walk_done { struct trace_entry ent; u32 __data_loc_name; int power_checks; int path_checks; int neighbour_checks; char __data[0]; }; struct trace_event_raw_snd_soc_dapm_path { struct trace_entry ent; u32 __data_loc_wname; u32 __data_loc_pname; u32 __data_loc_pnname; int path_node; int path_connect; int path_dir; char __data[0]; }; struct trace_event_raw_snd_soc_dapm_connected { struct trace_entry ent; int paths; int stream; char __data[0]; }; struct trace_event_raw_snd_soc_jack_irq { struct trace_entry ent; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_snd_soc_jack_report { struct trace_entry ent; u32 __data_loc_name; int mask; int val; char __data[0]; }; struct trace_event_raw_snd_soc_jack_notify { struct trace_entry ent; u32 __data_loc_name; int val; char __data[0]; }; struct trace_event_data_offsets_snd_soc_card { u32 name; }; struct trace_event_data_offsets_snd_soc_dapm_basic { u32 name; }; struct trace_event_data_offsets_snd_soc_dapm_widget { u32 name; }; struct trace_event_data_offsets_snd_soc_dapm_walk_done { u32 name; }; struct trace_event_data_offsets_snd_soc_jack_irq { u32 name; }; struct trace_event_data_offsets_snd_soc_jack_report { u32 name; }; struct trace_event_data_offsets_snd_soc_jack_notify { u32 name; }; struct trace_event_data_offsets_snd_soc_dapm_path { u32 wname; u32 pname; u32 pnname; }; struct trace_event_data_offsets_snd_soc_dapm_connected {}; struct soc_mixer_control { int min; int max; int platform_max; int reg; int rreg; unsigned int shift; unsigned int rshift; unsigned int sign_bit; unsigned int invert: 1; unsigned int autodisable: 1; struct snd_soc_dobj dobj; u64 android_kabi_reserved1; }; struct soc_enum { int reg; unsigned char shift_l; unsigned char shift_r; unsigned int items; unsigned int mask; const char * const *texts; const unsigned int 
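/*
 * Illustrative sketch: the trace_event_raw_snd_soc_* records above store their
 * string as a "__data_loc" field, whose low 16 bits are the offset of the
 * string from the start of the record and whose high 16 bits are its length.
 * A minimal classic-tracepoint BPF program decoding it, assuming libbpf
 * headers and the "asoc" trace system; the program name is made up.
 *
 *   SEC("tracepoint/asoc/snd_soc_dapm_widget_power")
 *   int on_widget_power(struct trace_event_raw_snd_soc_dapm_widget *ctx)
 *   {
 *       char name[32] = {};
 *       unsigned int off = ctx->__data_loc_name & 0xffff;
 *
 *       // the widget name lives at "record start + offset"
 *       bpf_probe_read_kernel_str(name, sizeof(name), (void *)ctx + off);
 *       bpf_printk("widget %s power=%d", name, ctx->val);
 *       return 0;
 *   }
 */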
*values; unsigned int autodisable: 1; struct snd_soc_dobj dobj; u64 android_kabi_reserved1; }; struct snd_soc_dapm_widget_list { int num_widgets; struct snd_soc_dapm_widget *widgets[0]; }; struct dapm_kcontrol_data { unsigned int value; struct snd_soc_dapm_widget *widget; struct list_head paths; struct snd_soc_dapm_widget_list *wlist; }; struct snd_soc_dapm_pinctrl_priv { const char *active_state; const char *sleep_state; }; struct snd_soc_jack_pin { struct list_head list; const char *pin; int mask; bool invert; }; struct snd_soc_jack_zone { unsigned int min_mv; unsigned int max_mv; unsigned int jack_type; unsigned int debounce_time; struct list_head list; }; struct snd_soc_jack_gpio { unsigned int gpio; unsigned int idx; struct device *gpiod_dev; const char *name; int report; int invert; int debounce_time; bool wake; struct snd_soc_jack *jack; struct delayed_work work; struct notifier_block pm_notifier; struct gpio_desc *desc; void *data; int (*jack_status_check)(void *); }; struct jack_gpio_tbl { int count; struct snd_soc_jack *jack; struct snd_soc_jack_gpio *gpios; }; enum snd_soc_dpcm_link_state { SND_SOC_DPCM_LINK_STATE_NEW = 0, SND_SOC_DPCM_LINK_STATE_FREE = 1, }; struct snd_soc_dpcm { struct snd_soc_pcm_runtime *be; struct snd_soc_pcm_runtime *fe; enum snd_soc_dpcm_link_state state; struct list_head list_be; struct list_head list_fe; struct dentry *debugfs_state; }; struct snd_dmaengine_pcm_config { int (*prepare_slave_config)(struct snd_pcm_substream *, struct snd_pcm_hw_params *, struct dma_slave_config *); struct dma_chan * (*compat_request_channel)(struct snd_soc_pcm_runtime *, struct snd_pcm_substream *); int (*process)(struct snd_pcm_substream *, int, unsigned long, unsigned long); dma_filter_fn compat_filter_fn; struct device *dma_dev; const char *chan_names[2]; const struct snd_pcm_hardware *pcm_hardware; unsigned int prealloc_buffer_size; }; struct soc_mreg_control { long min; long max; unsigned int regbase; unsigned int regcount; unsigned int nbits; unsigned int invert; }; struct soc_bytes { int base; int num_regs; u32 mask; }; struct soc_bytes_ext { int max; struct snd_soc_dobj dobj; int (*get)(struct snd_kcontrol *, unsigned int __attribute__((btf_type_tag("user"))) *, unsigned int); int (*put)(struct snd_kcontrol *, const unsigned int __attribute__((btf_type_tag("user"))) *, unsigned int); }; struct snd_soc_tplg_kcontrol_ops { u32 id; int (*get)(struct snd_kcontrol *, struct snd_ctl_elem_value *); int (*put)(struct snd_kcontrol *, struct snd_ctl_elem_value *); int (*info)(struct snd_kcontrol *, struct snd_ctl_elem_info *); }; struct soc_tplg_map { int uid; int kid; }; struct snd_soc_tplg_hdr { __le32 magic; __le32 abi; __le32 version; __le32 type; __le32 size; __le32 vendor_type; __le32 payload_size; __le32 index; __le32 count; }; struct snd_soc_tplg_io_ops { __le32 get; __le32 put; __le32 info; }; struct snd_soc_tplg_tlv_dbscale { __le32 min; __le32 step; __le32 mute; }; struct snd_soc_tplg_ctl_tlv { __le32 size; __le32 type; union { __le32 data[32]; struct snd_soc_tplg_tlv_dbscale scale; }; }; struct snd_soc_tplg_ctl_hdr { __le32 size; __le32 type; char name[44]; __le32 access; struct snd_soc_tplg_io_ops ops; struct snd_soc_tplg_ctl_tlv tlv; }; struct snd_soc_tplg_channel { __le32 size; __le32 reg; __le32 shift; __le32 id; }; struct snd_soc_tplg_vendor_uuid_elem { __le32 token; char uuid[16]; }; struct snd_soc_tplg_vendor_value_elem { __le32 token; __le32 value; }; struct snd_soc_tplg_vendor_string_elem { __le32 token; char string[44]; }; struct 
snd_soc_tplg_vendor_array { __le32 size; __le32 type; __le32 num_elems; union { struct { struct {} __empty_uuid; struct snd_soc_tplg_vendor_uuid_elem uuid[0]; }; struct { struct {} __empty_value; struct snd_soc_tplg_vendor_value_elem value[0]; }; struct { struct {} __empty_string; struct snd_soc_tplg_vendor_string_elem string[0]; }; }; }; struct snd_soc_tplg_private { __le32 size; union { struct { struct {} __empty_data; char data[0]; }; struct { struct {} __empty_array; struct snd_soc_tplg_vendor_array array[0]; }; }; }; struct snd_soc_tplg_mixer_control { struct snd_soc_tplg_ctl_hdr hdr; __le32 size; __le32 min; __le32 max; __le32 platform_max; __le32 invert; __le32 num_channels; struct snd_soc_tplg_channel channel[8]; struct snd_soc_tplg_private priv; }; struct snd_soc_tplg_bytes_control { struct snd_soc_tplg_ctl_hdr hdr; __le32 size; __le32 max; __le32 mask; __le32 base; __le32 num_regs; struct snd_soc_tplg_io_ops ext_ops; struct snd_soc_tplg_private priv; }; struct snd_soc_tplg_enum_control { struct snd_soc_tplg_ctl_hdr hdr; __le32 size; __le32 num_channels; struct snd_soc_tplg_channel channel[8]; __le32 items; __le32 mask; __le32 count; char texts[704]; __le32 values[176]; struct snd_soc_tplg_private priv; }; struct snd_soc_tplg_dapm_graph_elem { char sink[44]; char control[44]; char source[44]; }; struct snd_soc_tplg_dapm_widget { __le32 size; __le32 id; char name[44]; char sname[44]; __le32 reg; __le32 shift; __le32 mask; __le32 subseq; __le32 invert; __le32 ignore_suspend; __le16 event_flags; __le16 event_type; __le32 num_kcontrols; struct snd_soc_tplg_private priv; }; struct snd_soc_tplg_stream { __le32 size; char name[44]; __le64 format; __le32 rate; __le32 period_bytes; __le32 buffer_bytes; __le32 channels; }; struct snd_soc_tplg_stream_caps { __le32 size; char name[44]; __le64 formats; __le32 rates; __le32 rate_min; __le32 rate_max; __le32 channels_min; __le32 channels_max; __le32 periods_min; __le32 periods_max; __le32 period_size_min; __le32 period_size_max; __le32 buffer_size_min; __le32 buffer_size_max; __le32 sig_bits; }; struct snd_soc_tplg_pcm { __le32 size; char pcm_name[44]; char dai_name[44]; __le32 pcm_id; __le32 dai_id; __le32 playback; __le32 capture; __le32 compress; struct snd_soc_tplg_stream stream[8]; __le32 num_streams; struct snd_soc_tplg_stream_caps caps[2]; __le32 flag_mask; __le32 flags; struct snd_soc_tplg_private priv; } __attribute__((packed)); struct snd_soc_tplg_stream_caps_v4 { __le32 size; char name[44]; __le64 formats; __le32 rates; __le32 rate_min; __le32 rate_max; __le32 channels_min; __le32 channels_max; __le32 periods_min; __le32 periods_max; __le32 period_size_min; __le32 period_size_max; __le32 buffer_size_min; __le32 buffer_size_max; } __attribute__((packed)); struct snd_soc_tplg_pcm_v4 { __le32 size; char pcm_name[44]; char dai_name[44]; __le32 pcm_id; __le32 dai_id; __le32 playback; __le32 capture; __le32 compress; struct snd_soc_tplg_stream stream[8]; __le32 num_streams; struct snd_soc_tplg_stream_caps_v4 caps[2]; } __attribute__((packed)); struct snd_soc_tplg_dai { __le32 size; char dai_name[44]; __le32 dai_id; __le32 playback; __le32 capture; struct snd_soc_tplg_stream_caps caps[2]; __le32 flag_mask; __le32 flags; struct snd_soc_tplg_private priv; } __attribute__((packed)); struct snd_soc_tplg_hw_config { __le32 size; __le32 id; __le32 fmt; __u8 clock_gated; __u8 invert_bclk; __u8 invert_fsync; __u8 bclk_provider; __u8 fsync_provider; __u8 mclk_direction; __le16 reserved; __le32 mclk_rate; __le32 bclk_rate; __le32 fsync_rate; __le32 
tdm_slots; __le32 tdm_slot_width; __le32 tx_slots; __le32 rx_slots; __le32 tx_channels; __le32 tx_chanmap[8]; __le32 rx_channels; __le32 rx_chanmap[8]; }; struct snd_soc_tplg_link_config { __le32 size; __le32 id; char name[44]; char stream_name[44]; struct snd_soc_tplg_stream stream[8]; __le32 num_streams; struct snd_soc_tplg_hw_config hw_config[8]; __le32 num_hw_configs; __le32 default_hw_config_id; __le32 flag_mask; __le32 flags; struct snd_soc_tplg_private priv; }; struct snd_soc_tplg_link_config_v4 { __le32 size; __le32 id; struct snd_soc_tplg_stream stream[8]; __le32 num_streams; } __attribute__((packed)); struct snd_soc_tplg_manifest { __le32 size; __le32 control_elems; __le32 widget_elems; __le32 graph_elems; __le32 pcm_elems; __le32 dai_link_elems; __le32 dai_elems; __le32 reserved[20]; struct snd_soc_tplg_private priv; }; struct snd_soc_tplg_manifest_v4 { __le32 size; __le32 control_elems; __le32 widget_elems; __le32 graph_elems; __le32 pcm_elems; __le32 dai_link_elems; struct snd_soc_tplg_private priv; }; struct snd_soc_tplg_bytes_ext_ops; struct snd_soc_tplg_ops; struct soc_tplg { const struct firmware *fw; const u8 *pos; const u8 *hdr_pos; unsigned int pass; struct device *dev; struct snd_soc_component *comp; u32 index; const struct snd_soc_tplg_kcontrol_ops *io_ops; int io_ops_count; const struct snd_soc_tplg_bytes_ext_ops *bytes_ext_ops; int bytes_ext_ops_count; struct snd_soc_tplg_ops *ops; }; struct snd_soc_tplg_bytes_ext_ops { u32 id; int (*get)(struct snd_kcontrol *, unsigned int __attribute__((btf_type_tag("user"))) *, unsigned int); int (*put)(struct snd_kcontrol *, const unsigned int __attribute__((btf_type_tag("user"))) *, unsigned int); }; struct snd_soc_tplg_ops { int (*control_load)(struct snd_soc_component *, int, struct snd_kcontrol_new *, struct snd_soc_tplg_ctl_hdr *); int (*control_unload)(struct snd_soc_component *, struct snd_soc_dobj *); int (*dapm_route_load)(struct snd_soc_component *, int, struct snd_soc_dapm_route *); int (*dapm_route_unload)(struct snd_soc_component *, struct snd_soc_dobj *); int (*widget_load)(struct snd_soc_component *, int, struct snd_soc_dapm_widget *, struct snd_soc_tplg_dapm_widget *); int (*widget_ready)(struct snd_soc_component *, int, struct snd_soc_dapm_widget *, struct snd_soc_tplg_dapm_widget *); int (*widget_unload)(struct snd_soc_component *, struct snd_soc_dobj *); int (*dai_load)(struct snd_soc_component *, int, struct snd_soc_dai_driver *, struct snd_soc_tplg_pcm *, struct snd_soc_dai *); int (*dai_unload)(struct snd_soc_component *, struct snd_soc_dobj *); int (*link_load)(struct snd_soc_component *, int, struct snd_soc_dai_link *, struct snd_soc_tplg_link_config *); int (*link_unload)(struct snd_soc_component *, struct snd_soc_dobj *); int (*vendor_load)(struct snd_soc_component *, int, struct snd_soc_tplg_hdr *); int (*vendor_unload)(struct snd_soc_component *, struct snd_soc_tplg_hdr *); int (*complete)(struct snd_soc_component *); int (*manifest)(struct snd_soc_component *, int, struct snd_soc_tplg_manifest *); const struct snd_soc_tplg_kcontrol_ops *io_ops; int io_ops_count; const struct snd_soc_tplg_bytes_ext_ops *bytes_ext_ops; int bytes_ext_ops_count; }; struct snd_soc_tplg_widget_events { u16 type; int (*event_handler)(struct snd_soc_dapm_widget *, struct snd_kcontrol *, int); }; struct dmaengine_pcm { struct dma_chan *chan[2]; const struct snd_dmaengine_pcm_config *config; struct snd_soc_component component; unsigned int flags; }; struct net_device_devres { struct net_device *ndev; }; struct net_proto_family 
{ int family; int (*create)(struct net *, struct socket *, int, int); struct module *owner; }; struct iw_request_info { __u16 cmd; __u16 flags; }; struct iw_point { void __attribute__((btf_type_tag("user"))) *pointer; __u16 length; __u16 flags; }; struct iw_param { __s32 value; __u8 fixed; __u8 disabled; __u16 flags; }; struct iw_freq { __s32 m; __s16 e; __u8 i; __u8 flags; }; struct iw_quality { __u8 qual; __u8 level; __u8 noise; __u8 updated; }; union iwreq_data { char name[16]; struct iw_point essid; struct iw_param nwid; struct iw_freq freq; struct iw_param sens; struct iw_param bitrate; struct iw_param txpower; struct iw_param rts; struct iw_param frag; __u32 mode; struct iw_param retry; struct iw_point encoding; struct iw_param power; struct iw_quality qual; struct sockaddr ap_addr; struct sockaddr addr; struct iw_param param; struct iw_point data; }; struct iw_priv_args { __u32 cmd; __u16 set_args; __u16 get_args; char name[16]; }; struct iw_discarded { __u32 nwid; __u32 code; __u32 fragment; __u32 retries; __u32 misc; }; struct iw_missed { __u32 beacon; }; struct iw_statistics { __u16 status; struct iw_quality qual; struct iw_discarded discard; struct iw_missed miss; }; struct libipw_device; struct iw_spy_data; struct iw_public_data { struct iw_spy_data *spy_data; struct libipw_device *libipw; }; typedef unsigned char u_char; struct iw_spy_data { int spy_number; u_char spy_address[48]; struct iw_quality spy_stat[8]; struct iw_quality spy_thr_low; struct iw_quality spy_thr_high; u_char spy_thr_under[8]; }; enum { SOF_TIMESTAMPING_TX_HARDWARE = 1, SOF_TIMESTAMPING_TX_SOFTWARE = 2, SOF_TIMESTAMPING_RX_HARDWARE = 4, SOF_TIMESTAMPING_RX_SOFTWARE = 8, SOF_TIMESTAMPING_SOFTWARE = 16, SOF_TIMESTAMPING_SYS_HARDWARE = 32, SOF_TIMESTAMPING_RAW_HARDWARE = 64, SOF_TIMESTAMPING_OPT_ID = 128, SOF_TIMESTAMPING_TX_SCHED = 256, SOF_TIMESTAMPING_TX_ACK = 512, SOF_TIMESTAMPING_OPT_CMSG = 1024, SOF_TIMESTAMPING_OPT_TSONLY = 2048, SOF_TIMESTAMPING_OPT_STATS = 4096, SOF_TIMESTAMPING_OPT_PKTINFO = 8192, SOF_TIMESTAMPING_OPT_TX_SWHW = 16384, SOF_TIMESTAMPING_BIND_PHC = 32768, SOF_TIMESTAMPING_OPT_ID_TCP = 65536, SOF_TIMESTAMPING_LAST = 65536, SOF_TIMESTAMPING_MASK = 131071, }; enum { SOCK_WAKE_IO = 0, SOCK_WAKE_WAITD = 1, SOCK_WAKE_SPACE = 2, SOCK_WAKE_URG = 3, }; enum sock_shutdown_cmd { SHUT_RD = 0, SHUT_WR = 1, SHUT_RDWR = 2, }; struct sock_ee_data_rfc4884 { __u16 len; __u8 flags; __u8 reserved; }; struct sock_extended_err { __u32 ee_errno; __u8 ee_origin; __u8 ee_type; __u8 ee_code; __u8 ee_pad; __u32 ee_info; union { __u32 ee_data; struct sock_ee_data_rfc4884 ee_rfc4884; }; }; struct sock_exterr_skb { union { struct inet_skb_parm h4; struct inet6_skb_parm h6; } header; struct sock_extended_err ee; u16 addr_offset; __be16 port; u8 opt_stats: 1; u8 unused: 7; }; struct compat_mmsghdr { struct compat_msghdr msg_hdr; compat_uint_t msg_len; }; struct compat_ifmap { compat_ulong_t mem_start; compat_ulong_t mem_end; unsigned short base_addr; unsigned char irq; unsigned char dma; unsigned char port; }; struct compat_if_settings { unsigned int type; unsigned int size; compat_uptr_t ifs_ifsu; }; struct compat_ifreq { union { char ifrn_name[16]; } ifr_ifrn; union { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct sockaddr ifru_netmask; struct sockaddr ifru_hwaddr; short ifru_flags; compat_int_t ifru_ivalue; compat_int_t ifru_mtu; struct compat_ifmap ifru_map; char ifru_slave[16]; char ifru_newname[16]; compat_caddr_t ifru_data; struct compat_if_settings 
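/*
 * Illustrative sketch: the SOF_TIMESTAMPING_* values above mirror the flags
 * userspace passes to SO_TIMESTAMPING (normally taken from
 * <linux/net_tstamp.h>). A minimal userspace example, assuming
 * <sys/socket.h> and an already-created socket fd; the helper name is made up.
 *
 *   static int enable_rx_timestamps(int fd)
 *   {
 *       // software RX timestamps, delivered via SCM_TIMESTAMPING control messages
 *       unsigned int val = SOF_TIMESTAMPING_RX_SOFTWARE |
 *                          SOF_TIMESTAMPING_SOFTWARE;
 *
 *       return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
 *   }
 */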
ifru_settings; } ifr_ifru; }; struct sock_skb_cb { u32 dropcount; }; struct mmsghdr { struct user_msghdr msg_hdr; unsigned int msg_len; }; typedef __kernel_long_t __kernel_old_time_t; struct __kernel_old_timespec { __kernel_old_time_t tv_sec; long tv_nsec; }; struct __kernel_sock_timeval { __s64 tv_sec; __s64 tv_usec; }; struct scm_ts_pktinfo { __u32 if_index; __u32 pkt_length; __u32 reserved[2]; }; struct scm_timestamping_internal { struct timespec64 ts[3]; }; struct ifconf { int ifc_len; union { char __attribute__((btf_type_tag("user"))) *ifcu_buf; struct ifreq __attribute__((btf_type_tag("user"))) *ifcu_req; } ifc_ifcu; }; struct used_address { struct __kernel_sockaddr_storage name; unsigned int name_len; }; struct netdev_name_node { struct hlist_node hlist; struct list_head list; struct net_device *dev; const char *name; }; enum { INET_FLAGS_PKTINFO = 0, INET_FLAGS_TTL = 1, INET_FLAGS_TOS = 2, INET_FLAGS_RECVOPTS = 3, INET_FLAGS_RETOPTS = 4, INET_FLAGS_PASSSEC = 5, INET_FLAGS_ORIGDSTADDR = 6, INET_FLAGS_CHECKSUM = 7, INET_FLAGS_RECVFRAGSIZE = 8, INET_FLAGS_RECVERR = 9, INET_FLAGS_RECVERR_RFC4884 = 10, INET_FLAGS_FREEBIND = 11, INET_FLAGS_HDRINCL = 12, INET_FLAGS_MC_LOOP = 13, INET_FLAGS_MC_ALL = 14, INET_FLAGS_TRANSPARENT = 15, INET_FLAGS_IS_ICSK = 16, INET_FLAGS_NODEFRAG = 17, INET_FLAGS_BIND_ADDRESS_NO_PORT = 18, INET_FLAGS_DEFER_CONNECT = 19, }; enum sk_pacing { SK_PACING_NONE = 0, SK_PACING_NEEDED = 1, SK_PACING_FQ = 2, }; enum txtime_flags { SOF_TXTIME_DEADLINE_MODE = 1, SOF_TXTIME_REPORT_ERRORS = 2, SOF_TXTIME_FLAGS_LAST = 2, SOF_TXTIME_FLAGS_MASK = 3, }; enum { SK_MEMINFO_RMEM_ALLOC = 0, SK_MEMINFO_RCVBUF = 1, SK_MEMINFO_WMEM_ALLOC = 2, SK_MEMINFO_SNDBUF = 3, SK_MEMINFO_FWD_ALLOC = 4, SK_MEMINFO_WMEM_QUEUED = 5, SK_MEMINFO_OPTMEM = 6, SK_MEMINFO_BACKLOG = 7, SK_MEMINFO_DROPS = 8, SK_MEMINFO_VARS = 9, }; enum sknetlink_groups { SKNLGRP_NONE = 0, SKNLGRP_INET_TCP_DESTROY = 1, SKNLGRP_INET_UDP_DESTROY = 2, SKNLGRP_INET6_TCP_DESTROY = 3, SKNLGRP_INET6_UDP_DESTROY = 4, __SKNLGRP_MAX = 5, }; enum { XFRM_POLICY_IN = 0, XFRM_POLICY_OUT = 1, XFRM_POLICY_FWD = 2, XFRM_POLICY_MASK = 3, XFRM_POLICY_MAX = 3, }; struct fastopen_queue { struct request_sock *rskq_rst_head; struct request_sock *rskq_rst_tail; spinlock_t lock; int qlen; int max_qlen; struct tcp_fastopen_context __attribute__((btf_type_tag("rcu"))) *ctx; }; struct request_sock_queue { spinlock_t rskq_lock; u8 rskq_defer_accept; u32 synflood_warned; atomic_t qlen; atomic_t young; struct request_sock *rskq_accept_head; struct request_sock *rskq_accept_tail; struct fastopen_queue fastopenq; }; struct inet_bind_bucket; struct inet_bind2_bucket; struct inet_connection_sock_af_ops; struct tcp_ulp_ops; struct inet_connection_sock { struct inet_sock icsk_inet; struct request_sock_queue icsk_accept_queue; struct inet_bind_bucket *icsk_bind_hash; struct inet_bind2_bucket *icsk_bind2_hash; unsigned long icsk_timeout; struct timer_list icsk_retransmit_timer; struct timer_list icsk_delack_timer; __u32 icsk_rto; __u32 icsk_rto_min; __u32 icsk_delack_max; __u32 icsk_pmtu_cookie; const struct tcp_congestion_ops *icsk_ca_ops; const struct inet_connection_sock_af_ops *icsk_af_ops; const struct tcp_ulp_ops *icsk_ulp_ops; void __attribute__((btf_type_tag("rcu"))) *icsk_ulp_data; void (*icsk_clean_acked)(struct sock *, u32); unsigned int (*icsk_sync_mss)(struct sock *, u32); __u8 icsk_ca_state: 5; __u8 icsk_ca_initialized: 1; __u8 icsk_ca_setsockopt: 1; __u8 icsk_ca_dst_locked: 1; __u8 icsk_retransmits; __u8 icsk_pending; __u8 icsk_backoff; __u8 
icsk_syn_retries; __u8 icsk_probes_out; __u16 icsk_ext_hdr_len; struct { __u8 pending; __u8 quick; __u8 pingpong; __u8 retry; __u32 ato; unsigned long timeout; __u32 lrcvtime; __u16 last_seg_size; __u16 rcv_mss; } icsk_ack; struct { int search_high; int search_low; u32 probe_size: 31; u32 enabled: 1; u32 probe_timestamp; } icsk_mtup; u32 icsk_probes_tstamp; u32 icsk_user_timeout; u64 android_kabi_reserved1; u64 icsk_ca_priv[13]; }; struct tcp_rack { u64 mstamp; u32 rtt_us; u32 end_seq; u32 last_delivered; u8 reo_wnd_steps; u8 reo_wnd_persist: 5; u8 dsack_seen: 1; u8 advanced: 1; }; struct minmax_sample { u32 t; u32 v; }; struct minmax { struct minmax_sample s[3]; }; struct tcp_options_received { int ts_recent_stamp; u32 ts_recent; u32 rcv_tsval; u32 rcv_tsecr; u16 saw_tstamp: 1; u16 tstamp_ok: 1; u16 dsack: 1; u16 wscale_ok: 1; u16 sack_ok: 3; u16 smc_ok: 1; u16 snd_wscale: 4; u16 rcv_wscale: 4; u8 saw_unknown: 1; u8 unused: 7; u8 num_sacks; u16 user_mss; u16 mss_clamp; }; struct tcp_sack_block { u32 start_seq; u32 end_seq; }; struct tcp_fastopen_request; struct tcp_sock { struct inet_connection_sock inet_conn; u16 tcp_header_len; u16 gso_segs; __be32 pred_flags; u64 bytes_received; u32 segs_in; u32 data_segs_in; u32 rcv_nxt; u32 copied_seq; u32 rcv_wup; u32 snd_nxt; u32 segs_out; u32 data_segs_out; u64 bytes_sent; u64 bytes_acked; u32 dsack_dups; u32 snd_una; u32 snd_sml; u32 rcv_tstamp; u32 lsndtime; u32 last_oow_ack_time; u32 compressed_ack_rcv_nxt; u32 tsoffset; struct list_head tsq_node; struct list_head tsorted_sent_queue; u32 snd_wl1; u32 snd_wnd; u32 max_window; u32 mss_cache; u32 window_clamp; u32 rcv_ssthresh; u8 scaling_ratio; struct tcp_rack rack; u16 advmss; u8 compressed_ack; u8 dup_ack_counter: 2; u8 tlp_retrans: 1; u8 unused: 5; u32 chrono_start; u32 chrono_stat[3]; u8 chrono_type: 2; u8 rate_app_limited: 1; u8 fastopen_connect: 1; u8 fastopen_no_cookie: 1; u8 is_sack_reneg: 1; u8 fastopen_client_fail: 2; u8 nonagle: 4; u8 thin_lto: 1; u8 recvmsg_inq: 1; u8 repair: 1; u8 frto: 1; u8 repair_queue; u8 save_syn: 2; u8 syn_data: 1; u8 syn_fastopen: 1; u8 syn_fastopen_exp: 1; u8 syn_fastopen_ch: 1; u8 syn_data_acked: 1; u8 is_cwnd_limited: 1; u32 tlp_high_seq; u32 tcp_tx_delay; u64 tcp_wstamp_ns; u64 tcp_clock_cache; u64 tcp_mstamp; u32 srtt_us; u32 mdev_us; u32 mdev_max_us; u32 rttvar_us; u32 rtt_seq; struct minmax rtt_min; u32 packets_out; u32 retrans_out; u32 max_packets_out; u32 cwnd_usage_seq; u16 urg_data; u8 ecn_flags; u8 keepalive_probes; u32 reordering; u32 reord_seen; u32 snd_up; struct tcp_options_received rx_opt; u32 snd_ssthresh; u32 snd_cwnd; u32 snd_cwnd_cnt; u32 snd_cwnd_clamp; u32 snd_cwnd_used; u32 snd_cwnd_stamp; u32 prior_cwnd; u32 prr_delivered; u32 prr_out; u32 delivered; u32 delivered_ce; u32 lost; u32 app_limited; u64 first_tx_mstamp; u64 delivered_mstamp; u32 rate_delivered; u32 rate_interval_us; u32 rcv_wnd; u32 write_seq; u32 notsent_lowat; u32 pushed_seq; u32 lost_out; u32 sacked_out; struct hrtimer pacing_timer; struct hrtimer compressed_ack_timer; struct sk_buff *lost_skb_hint; struct sk_buff *retransmit_skb_hint; struct rb_root out_of_order_queue; struct sk_buff *ooo_last_skb; struct tcp_sack_block duplicate_sack[1]; struct tcp_sack_block selective_acks[4]; struct tcp_sack_block recv_sack_cache[4]; struct sk_buff *highest_sack; int lost_cnt_hint; u32 prior_ssthresh; u32 high_seq; u32 retrans_stamp; u32 undo_marker; int undo_retrans; u64 bytes_retrans; u32 total_retrans; u32 urg_seq; unsigned int keepalive_time; unsigned int keepalive_intvl; int 
linger2; u8 bpf_sock_ops_cb_flags; u8 bpf_chg_cc_inprogress: 1; u16 timeout_rehash; u32 rcv_ooopack; u32 rcv_rtt_last_tsecr; struct { u32 rtt_us; u32 seq; u64 time; } rcv_rtt_est; struct { u32 space; u32 seq; u64 time; } rcvq_space; struct { u32 probe_seq_start; u32 probe_seq_end; } mtu_probe; u32 plb_rehash; u32 mtu_info; struct tcp_fastopen_request *fastopen_req; struct request_sock __attribute__((btf_type_tag("rcu"))) *fastopen_rsk; struct saved_syn *saved_syn; u64 android_oem_data1; u64 android_kabi_reserved1; }; struct inet_bind_bucket { possible_net_t ib_net; int l3mdev; unsigned short port; signed char fastreuse; signed char fastreuseport; kuid_t fastuid; struct in6_addr fast_v6_rcv_saddr; __be32 fast_rcv_saddr; unsigned short fast_sk_family; bool fast_ipv6_only; struct hlist_node node; struct hlist_head owners; }; struct inet_bind2_bucket { possible_net_t ib_net; int l3mdev; unsigned short port; unsigned short family; union { struct in6_addr v6_rcv_saddr; __be32 rcv_saddr; }; struct hlist_node node; struct hlist_head owners; struct hlist_head deathrow; }; struct inet_connection_sock_af_ops { int (*queue_xmit)(struct sock *, struct sk_buff *, struct flowi *); void (*send_check)(struct sock *, struct sk_buff *); int (*rebuild_header)(struct sock *); void (*sk_rx_dst_set)(struct sock *, const struct sk_buff *); int (*conn_request)(struct sock *, struct sk_buff *); struct sock * (*syn_recv_sock)(const struct sock *, struct sk_buff *, struct request_sock *, struct dst_entry *, struct request_sock *, bool *); u16 net_header_len; u16 net_frag_header_len; u16 sockaddr_len; int (*setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); int (*getsockopt)(struct sock *, int, int, char __attribute__((btf_type_tag("user"))) *, int __attribute__((btf_type_tag("user"))) *); void (*addr2sockaddr)(struct sock *, struct sockaddr *); void (*mtu_reduced)(struct sock *); u64 android_kabi_reserved1; }; struct tcp_ulp_ops { struct list_head list; int (*init)(struct sock *); void (*update)(struct sock *, struct proto *, void (*)(struct sock *)); void (*release)(struct sock *); int (*get_info)(struct sock *, struct sk_buff *); size_t (*get_info_size)(const struct sock *); void (*clone)(const struct request_sock *, struct sock *, const gfp_t); char name[16]; struct module *owner; }; struct tcp_fastopen_cookie { __le64 val[2]; s8 len; bool exp; }; struct tcp_fastopen_request { struct tcp_fastopen_cookie cookie; struct msghdr *data; size_t size; int copied; struct ubuf_info *uarg; }; struct cmsghdr { __kernel_size_t cmsg_len; int cmsg_level; int cmsg_type; }; struct net_protocol { int (*handler)(struct sk_buff *); int (*err_handler)(struct sk_buff *, u32); unsigned int no_policy: 1; unsigned int icmp_strict_tag_validation: 1; }; struct udp_sock { struct inet_sock inet; unsigned long udp_flags; int pending; __u8 encap_type; __u16 len; __u16 gso_size; __u16 pcslen; __u16 pcrlen; int (*encap_rcv)(struct sock *, struct sk_buff *); void (*encap_err_rcv)(struct sock *, struct sk_buff *, int, __be16, u32, u8 *); int (*encap_err_lookup)(struct sock *, struct sk_buff *); void (*encap_destroy)(struct sock *); struct sk_buff * (*gro_receive)(struct sock *, struct list_head *, struct sk_buff *); int (*gro_complete)(struct sock *, struct sk_buff *, int); long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct sk_buff_head reader_queue; int forward_deficit; int forward_threshold; long: 64; long: 64; long: 64; long: 64; }; typedef void (*android_dst_ops_negative_advice_new_t)(struct sock *, struct 
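/*
 * Illustrative sketch: a BPF program can observe TCP state through the
 * tcp_sock layout above, e.g. from the tcp:tcp_retransmit_skb tracepoint.
 * Assumes libbpf's <bpf/bpf_helpers.h>, <bpf/bpf_tracing.h> and
 * <bpf/bpf_core_read.h>; the program name is made up.
 *
 *   SEC("tp_btf/tcp_retransmit_skb")
 *   int BPF_PROG(on_tcp_retransmit, const struct sock *sk, const struct sk_buff *skb)
 *   {
 *       const struct tcp_sock *tp = (const struct tcp_sock *)sk;
 *
 *       // CO-RE reads keep working even if field offsets differ on the target kernel
 *       bpf_printk("retransmit: cwnd=%u srtt_us=%u",
 *                  BPF_CORE_READ(tp, snd_cwnd), BPF_CORE_READ(tp, srtt_us));
 *       return 0;
 *   }
 */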
dst_entry *); struct ucred { __u32 pid; __u32 uid; __u32 gid; }; struct linger { int l_onoff; int l_linger; }; struct sock_txtime { __kernel_clockid_t clockid; __u32 flags; }; struct so_timestamping { int flags; int bind_phc; }; typedef unsigned short mifi_t; struct sioc_mif_req6 { mifi_t mifi; unsigned long icount; unsigned long ocount; unsigned long ibytes; unsigned long obytes; }; struct sioc_sg_req6 { struct sockaddr_in6 src; struct sockaddr_in6 grp; unsigned long pktcnt; unsigned long bytecnt; unsigned long wrong_if; }; struct sockcm_cookie { u64 transmit_time; u32 mark; u32 tsflags; }; struct inet_request_sock { struct request_sock req; u16 snd_wscale: 4; u16 rcv_wscale: 4; u16 tstamp_ok: 1; u16 sack_ok: 1; u16 wscale_ok: 1; u16 ecn_ok: 1; u16 acked: 1; u16 no_srccheck: 1; u16 smc_ok: 1; u32 ir_mark; union { struct ip_options_rcu __attribute__((btf_type_tag("rcu"))) *ireq_opt; struct { struct ipv6_txoptions *ipv6_opt; struct sk_buff *pktopts; }; }; }; struct tcp_request_sock_ops; struct tcp_request_sock { struct inet_request_sock req; const struct tcp_request_sock_ops *af_specific; u64 snt_synack; bool tfo_listener; bool is_mptcp; u32 txhash; u32 rcv_isn; u32 snt_isn; u32 ts_off; u32 last_oow_ack_time; u32 rcv_nxt; u8 syn_tos; }; enum tcp_synack_type { TCP_SYNACK_NORMAL = 0, TCP_SYNACK_FASTOPEN = 1, TCP_SYNACK_COOKIE = 2, }; struct tcp_request_sock_ops { u16 mss_clamp; struct dst_entry * (*route_req)(const struct sock *, struct sk_buff *, struct flowi *, struct request_sock *); u32 (*init_seq)(const struct sk_buff *); u32 (*init_ts_off)(const struct net *, const struct sk_buff *); int (*send_synack)(const struct sock *, struct dst_entry *, struct flowi *, struct request_sock *, struct tcp_fastopen_cookie *, enum tcp_synack_type, struct sk_buff *); }; struct drop_reason_list { const char * const *reasons; size_t n_reasons; }; struct skb_checksum_ops { __wsum (*update)(const void *, int, __wsum); __wsum (*combine)(__wsum, __wsum, int, int); }; struct page_frag_1k { void *va; u16 offset; bool pfmemalloc; }; struct napi_alloc_cache { struct page_frag_cache page; struct page_frag_1k page_small; unsigned int skb_count; void *skb_cache[64]; }; enum skb_drop_reason_subsys { SKB_DROP_REASON_SUBSYS_CORE = 0, SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE = 1, SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR = 2, SKB_DROP_REASON_SUBSYS_OPENVSWITCH = 3, SKB_DROP_REASON_SUBSYS_NUM = 4, }; enum { SKB_FCLONE_UNAVAILABLE = 0, SKB_FCLONE_ORIG = 1, SKB_FCLONE_CLONE = 2, }; enum { SCM_TSTAMP_SND = 0, SCM_TSTAMP_SCHED = 1, SCM_TSTAMP_ACK = 2, }; enum skb_ext_id { SKB_EXT_SEC_PATH = 0, SKB_EXT_NUM = 1, }; struct sk_buff_fclones { struct sk_buff skb1; struct sk_buff skb2; refcount_t fclone_ref; }; struct mmpin { struct user_struct *user; unsigned int num_pg; }; struct ubuf_info_msgzc { struct ubuf_info ubuf; union { struct { unsigned long desc; void *ctx; }; struct { u32 id; u16 len; u16 zerocopy: 1; u32 bytelen; }; }; struct mmpin mmp; }; struct skb_seq_state { __u32 lower_offset; __u32 upper_offset; __u32 frag_idx; __u32 stepped_offset; struct sk_buff *root_skb; struct sk_buff *cur_skb; __u8 *frag_data; __u32 frag_off; }; struct skb_gso_cb { union { int mac_offset; int data_offset; }; int encap_level; __wsum csum; __u16 csum_start; }; struct sd_flow_limit; struct softnet_data { struct list_head poll_list; struct sk_buff_head process_queue; unsigned int processed; unsigned int time_squeeze; struct softnet_data *rps_ipi_list; bool in_net_rx_action; bool in_napi_threaded_poll; struct sd_flow_limit 
__attribute__((btf_type_tag("rcu"))) *flow_limit; struct Qdisc *output_queue; struct Qdisc **output_queue_tailp; struct sk_buff *completion_queue; struct { u16 recursion; u8 more; u8 skip_txqueue; } xmit; long: 64; long: 64; long: 64; unsigned int input_queue_head; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; call_single_data_t csd; struct softnet_data *rps_ipi_next; unsigned int cpu; unsigned int input_queue_tail; unsigned int received_rps; unsigned int dropped; struct sk_buff_head input_pkt_queue; struct napi_struct backlog; spinlock_t defer_lock; int defer_count; int defer_ipi_scheduled; struct sk_buff *defer_list; long: 64; call_single_data_t defer_csd; }; struct sd_flow_limit { u64 count; unsigned int num_buckets; unsigned int history_head; u16 history[128]; u8 buckets[0]; }; struct ip_auth_hdr { __u8 nexthdr; __u8 hdrlen; __be16 reserved; __be32 spi; __be32 seq_no; __u8 auth_data[0]; }; struct frag_hdr { __u8 nexthdr; __u8 reserved; __be16 frag_off; __be32 identification; }; struct vlan_ethhdr { union { struct { unsigned char h_dest[6]; unsigned char h_source[6]; }; struct { unsigned char h_dest[6]; unsigned char h_source[6]; } addrs; }; __be16 h_vlan_proto; __be16 h_vlan_TCI; __be16 h_vlan_encapsulated_proto; }; struct mpls_shim_hdr { __be32 label_stack_entry; }; struct skb_free_array { unsigned int skb_count; void *skb_array[16]; }; struct xfrm_offload { struct { __u32 low; __u32 hi; } seq; __u32 flags; __u32 status; __u8 proto; __u8 inner_ipproto; }; struct sec_path { int len; int olen; int verified_cnt; struct xfrm_state *xvec[6]; struct xfrm_offload ovec[1]; }; typedef int (*sendmsg_func)(struct sock *, struct msghdr *); struct scm_fp_list { short count; short max; struct user_struct *user; struct file *fp[253]; }; struct scm_cookie { struct pid *pid; struct scm_fp_list *fp; struct scm_creds creds; u32 secid; }; struct scm_timestamping64 { struct __kernel_timespec ts[3]; }; struct scm_timestamping { struct __kernel_old_timespec ts[3]; }; enum { TCA_STATS_UNSPEC = 0, TCA_STATS_BASIC = 1, TCA_STATS_RATE_EST = 2, TCA_STATS_QUEUE = 3, TCA_STATS_APP = 4, TCA_STATS_RATE_EST64 = 5, TCA_STATS_PAD = 6, TCA_STATS_BASIC_HW = 7, TCA_STATS_PKT64 = 8, __TCA_STATS_MAX = 9, }; struct gnet_stats_rate_est64 { __u64 bps; __u64 pps; }; struct gnet_stats_basic { __u64 bytes; __u32 packets; }; struct gnet_stats_rate_est { __u32 bps; __u32 pps; }; struct gnet_estimator { signed char interval; unsigned char ewma_log; }; struct pcpu_gen_cookie; struct gen_cookie { struct pcpu_gen_cookie __attribute__((btf_type_tag("percpu"))) *local; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; atomic64_t forward_last; atomic64_t reverse_last; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct pcpu_gen_cookie { local_t nesting; u64 last; }; enum rtnl_link_flags { RTNL_FLAG_DOIT_UNLOCKED = 1, RTNL_FLAG_BULK_DEL_SUPPORTED = 2, }; enum rtnetlink_groups { RTNLGRP_NONE = 0, RTNLGRP_LINK = 1, RTNLGRP_NOTIFY = 2, RTNLGRP_NEIGH = 3, RTNLGRP_TC = 4, RTNLGRP_IPV4_IFADDR = 5, RTNLGRP_IPV4_MROUTE = 6, RTNLGRP_IPV4_ROUTE = 7, RTNLGRP_IPV4_RULE = 8, RTNLGRP_IPV6_IFADDR = 9, RTNLGRP_IPV6_MROUTE = 10, RTNLGRP_IPV6_ROUTE = 11, RTNLGRP_IPV6_IFINFO = 12, RTNLGRP_DECnet_IFADDR = 13, RTNLGRP_NOP2 = 14, RTNLGRP_DECnet_ROUTE = 15, RTNLGRP_DECnet_RULE = 16, RTNLGRP_NOP4 = 17, RTNLGRP_IPV6_PREFIX = 18, RTNLGRP_IPV6_RULE = 19, RTNLGRP_ND_USEROPT = 20, RTNLGRP_PHONET_IFADDR = 21, RTNLGRP_PHONET_ROUTE = 22, RTNLGRP_DCB = 23, RTNLGRP_IPV4_NETCONF = 24, RTNLGRP_IPV6_NETCONF = 25, 
RTNLGRP_MDB = 26, RTNLGRP_MPLS_ROUTE = 27, RTNLGRP_NSID = 28, RTNLGRP_MPLS_NETCONF = 29, RTNLGRP_IPV4_MROUTE_R = 30, RTNLGRP_IPV6_MROUTE_R = 31, RTNLGRP_NEXTHOP = 32, RTNLGRP_BRVLAN = 33, RTNLGRP_MCTP_IFADDR = 34, RTNLGRP_TUNNEL = 35, RTNLGRP_STATS = 36, __RTNLGRP_MAX = 37, }; enum { NETNSA_NONE = 0, NETNSA_NSID = 1, NETNSA_PID = 2, NETNSA_FD = 3, NETNSA_TARGET_NSID = 4, NETNSA_CURRENT_NSID = 5, __NETNSA_MAX = 6, }; struct net_fill_args { u32 portid; u32 seq; int flags; int cmd; int nsid; bool add_ref; int ref_nsid; }; struct rtnl_net_dump_cb { struct net *tgt_net; struct net *ref_net; struct sk_buff *skb; struct net_fill_args fillargs; int idx; int s_idx; }; typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, struct netlink_ext_ack *); typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *); struct rtgenmsg { unsigned char rtgen_family; }; struct pppoe_tag { __be16 tag_type; __be16 tag_len; char tag_data[0]; }; struct pppoe_hdr { __u8 type: 4; __u8 ver: 4; __u8 code; __be16 sid; __be16 length; struct pppoe_tag tag[0]; }; enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_CONTROL = 0, FLOW_DISSECTOR_KEY_BASIC = 1, FLOW_DISSECTOR_KEY_IPV4_ADDRS = 2, FLOW_DISSECTOR_KEY_IPV6_ADDRS = 3, FLOW_DISSECTOR_KEY_PORTS = 4, FLOW_DISSECTOR_KEY_PORTS_RANGE = 5, FLOW_DISSECTOR_KEY_ICMP = 6, FLOW_DISSECTOR_KEY_ETH_ADDRS = 7, FLOW_DISSECTOR_KEY_TIPC = 8, FLOW_DISSECTOR_KEY_ARP = 9, FLOW_DISSECTOR_KEY_VLAN = 10, FLOW_DISSECTOR_KEY_FLOW_LABEL = 11, FLOW_DISSECTOR_KEY_GRE_KEYID = 12, FLOW_DISSECTOR_KEY_MPLS_ENTROPY = 13, FLOW_DISSECTOR_KEY_ENC_KEYID = 14, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS = 15, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS = 16, FLOW_DISSECTOR_KEY_ENC_CONTROL = 17, FLOW_DISSECTOR_KEY_ENC_PORTS = 18, FLOW_DISSECTOR_KEY_MPLS = 19, FLOW_DISSECTOR_KEY_TCP = 20, FLOW_DISSECTOR_KEY_IP = 21, FLOW_DISSECTOR_KEY_CVLAN = 22, FLOW_DISSECTOR_KEY_ENC_IP = 23, FLOW_DISSECTOR_KEY_ENC_OPTS = 24, FLOW_DISSECTOR_KEY_META = 25, FLOW_DISSECTOR_KEY_CT = 26, FLOW_DISSECTOR_KEY_HASH = 27, FLOW_DISSECTOR_KEY_NUM_OF_VLANS = 28, FLOW_DISSECTOR_KEY_PPPOE = 29, FLOW_DISSECTOR_KEY_L2TPV3 = 30, FLOW_DISSECTOR_KEY_CFM = 31, FLOW_DISSECTOR_KEY_IPSEC = 32, FLOW_DISSECTOR_KEY_MAX = 33, }; struct flow_dissector_key { enum flow_dissector_key_id key_id; size_t offset; }; struct nf_conn; struct nf_ct_event { struct nf_conn *ct; u32 portid; int report; }; union nf_inet_addr { __u32 all[4]; __be32 ip; __be32 ip6[4]; struct in_addr in; struct in6_addr in6; }; union nf_conntrack_man_proto { __be16 all; struct { __be16 port; } tcp; struct { __be16 port; } udp; struct { __be16 id; } icmp; struct { __be16 port; } dccp; struct { __be16 port; } sctp; struct { __be16 key; } gre; }; typedef u16 u_int16_t; struct nf_conntrack_man { union nf_inet_addr u3; union nf_conntrack_man_proto u; u_int16_t l3num; }; struct nf_conntrack_tuple { struct nf_conntrack_man src; struct { union nf_inet_addr u3; union { __be16 all; struct { __be16 port; } tcp; struct { __be16 port; } udp; struct { u_int8_t type; u_int8_t code; } icmp; struct { __be16 port; } dccp; struct { __be16 port; } sctp; struct { __be16 key; } gre; } u; u_int8_t protonum; struct {} __nfct_hash_offsetend; u_int8_t dir; } dst; }; struct nf_conntrack_tuple_hash { struct hlist_nulls_node hnnode; struct nf_conntrack_tuple tuple; }; struct nf_ct_dccp { u_int8_t role[2]; u_int8_t state; u_int8_t last_pkt; u_int8_t last_dir; u_int64_t handshake_seq; }; enum sctp_conntrack { SCTP_CONNTRACK_NONE = 0, SCTP_CONNTRACK_CLOSED = 1, SCTP_CONNTRACK_COOKIE_WAIT = 2, 
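/*
 * Illustrative sketch: the rtnetlink_groups values above are the multicast
 * groups a userspace NETLINK_ROUTE socket can join. A minimal example,
 * assuming <sys/socket.h>, <linux/netlink.h> and <linux/rtnetlink.h>; error
 * handling is omitted and the function name is made up.
 *
 *   static int watch_link_events(void)
 *   {
 *       int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *       unsigned int grp = RTNLGRP_LINK;
 *
 *       // join RTNLGRP_LINK to receive interface state-change notifications
 *       setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &grp, sizeof(grp));
 *       return fd;
 *   }
 */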
SCTP_CONNTRACK_COOKIE_ECHOED = 3, SCTP_CONNTRACK_ESTABLISHED = 4, SCTP_CONNTRACK_SHUTDOWN_SENT = 5, SCTP_CONNTRACK_SHUTDOWN_RECD = 6, SCTP_CONNTRACK_SHUTDOWN_ACK_SENT = 7, SCTP_CONNTRACK_HEARTBEAT_SENT = 8, SCTP_CONNTRACK_HEARTBEAT_ACKED = 9, SCTP_CONNTRACK_MAX = 10, }; struct ip_ct_sctp { enum sctp_conntrack state; __be32 vtag[2]; u8 init[2]; u8 last_dir; u8 flags; }; struct ip_ct_tcp_state { u_int32_t td_end; u_int32_t td_maxend; u_int32_t td_maxwin; u_int32_t td_maxack; u_int8_t td_scale; u_int8_t flags; }; struct ip_ct_tcp { struct ip_ct_tcp_state seen[2]; u_int8_t state; u_int8_t last_dir; u_int8_t retrans; u_int8_t last_index; u_int32_t last_seq; u_int32_t last_ack; u_int32_t last_end; u_int16_t last_win; u_int8_t last_wscale; u_int8_t last_flags; }; struct nf_ct_udp { unsigned long stream_ts; }; struct nf_ct_gre { unsigned int stream_timeout; unsigned int timeout; }; union nf_conntrack_proto { struct nf_ct_dccp dccp; struct ip_ct_sctp sctp; struct ip_ct_tcp tcp; struct nf_ct_udp udp; struct nf_ct_gre gre; unsigned int tmpl_padto; }; struct nf_ct_ext; struct nf_conn { struct nf_conntrack ct_general; spinlock_t lock; u32 timeout; struct nf_conntrack_tuple_hash tuplehash[2]; unsigned long status; possible_net_t ct_net; struct hlist_node nat_bysource; struct {} __nfct_init_offset; struct nf_conn *master; u_int32_t mark; u_int32_t secmark; struct nf_ct_ext *ext; union nf_conntrack_proto proto; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_oem_data1; }; struct nf_ct_ext { u8 offset[5]; u8 len; unsigned int gen_id; long: 0; char data[0]; }; struct nf_conntrack_expect; struct nf_exp_event { struct nf_conntrack_expect *exp; u32 portid; int report; }; struct nf_conntrack_tuple_mask { struct { union nf_inet_addr u3; union nf_conntrack_man_proto u; } src; }; enum ip_conntrack_dir { IP_CT_DIR_ORIGINAL = 0, IP_CT_DIR_REPLY = 1, IP_CT_DIR_MAX = 2, }; struct nf_conntrack_helper; struct nf_conntrack_expect { struct hlist_node lnode; struct hlist_node hnode; struct nf_conntrack_tuple tuple; struct nf_conntrack_tuple_mask mask; refcount_t use; unsigned int flags; unsigned int class; void (*expectfn)(struct nf_conn *, struct nf_conntrack_expect *); struct nf_conntrack_helper *helper; struct nf_conn *master; struct timer_list timeout; union nf_inet_addr saved_addr; union nf_conntrack_man_proto saved_proto; enum ip_conntrack_dir dir; struct callback_head rcu; }; struct tcf_walker { int stop; int skip; int count; bool nonempty; unsigned long cookie; int (*fn)(struct tcf_proto *, void *, struct tcf_walker *); }; struct tc_action; struct tcf_exts_miss_cookie_node; struct tcf_exts { __u32 type; int nr_actions; struct tc_action **actions; struct net *net; netns_tracker ns_tracker; struct tcf_exts_miss_cookie_node *miss_cookie_node; int action; int police; }; struct tcf_t { __u64 install; __u64 lastuse; __u64 expires; __u64 firstuse; }; struct tc_action_ops; struct tcf_idrinfo; struct tc_cookie; struct tc_action { const struct tc_action_ops *ops; __u32 type; struct tcf_idrinfo *idrinfo; u32 tcfa_index; refcount_t tcfa_refcnt; atomic_t tcfa_bindcnt; int tcfa_action; struct tcf_t tcfa_tm; long: 64; struct gnet_stats_basic_sync tcfa_bstats; struct gnet_stats_basic_sync tcfa_bstats_hw; struct gnet_stats_queue tcfa_qstats; struct net_rate_estimator __attribute__((btf_type_tag("rcu"))) *tcfa_rate_est; spinlock_t tcfa_lock; struct gnet_stats_basic_sync __attribute__((btf_type_tag("percpu"))) *cpu_bstats; struct gnet_stats_basic_sync __attribute__((btf_type_tag("percpu"))) *cpu_bstats_hw; struct 
gnet_stats_queue __attribute__((btf_type_tag("percpu"))) *cpu_qstats; struct tc_cookie __attribute__((btf_type_tag("rcu"))) *user_cookie; struct tcf_chain __attribute__((btf_type_tag("rcu"))) *goto_chain; u32 tcfa_flags; u8 hw_stats; u8 used_hw_stats; bool used_hw_stats_valid; u32 in_hw_count; }; enum tca_id { TCA_ID_UNSPEC = 0, TCA_ID_POLICE = 1, TCA_ID_GACT = 5, TCA_ID_IPT = 6, TCA_ID_PEDIT = 7, TCA_ID_MIRRED = 8, TCA_ID_NAT = 9, TCA_ID_XT = 10, TCA_ID_SKBEDIT = 11, TCA_ID_VLAN = 12, TCA_ID_BPF = 13, TCA_ID_CONNMARK = 14, TCA_ID_SKBMOD = 15, TCA_ID_CSUM = 16, TCA_ID_TUNNEL_KEY = 17, TCA_ID_SIMP = 22, TCA_ID_IFE = 25, TCA_ID_SAMPLE = 26, TCA_ID_CTINFO = 27, TCA_ID_MPLS = 28, TCA_ID_CT = 29, TCA_ID_GATE = 30, __TCA_ID_MAX = 255, }; typedef void (*tc_action_priv_destructor)(void *); struct psample_group; struct tc_action_ops { struct list_head head; char kind[16]; enum tca_id id; unsigned int net_id; size_t size; struct module *owner; int (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *); int (*dump)(struct sk_buff *, struct tc_action *, int, int); void (*cleanup)(struct tc_action *); int (*lookup)(struct net *, struct tc_action **, u32); int (*init)(struct net *, struct nlattr *, struct nlattr *, struct tc_action **, struct tcf_proto *, u32, struct netlink_ext_ack *); int (*walk)(struct net *, struct sk_buff *, struct netlink_callback *, int, const struct tc_action_ops *, struct netlink_ext_ack *); void (*stats_update)(struct tc_action *, u64, u64, u64, u64, bool); size_t (*get_fill_size)(const struct tc_action *); struct net_device * (*get_dev)(const struct tc_action *, tc_action_priv_destructor *); struct psample_group * (*get_psample_group)(const struct tc_action *, tc_action_priv_destructor *); int (*offload_act_setup)(struct tc_action *, void *, u32 *, bool, struct netlink_ext_ack *); }; struct tcf_idrinfo { struct mutex lock; struct idr action_idr; struct net *net; }; struct tc_cookie { u8 *data; u32 len; struct callback_head rcu; }; enum devlink_port_type { DEVLINK_PORT_TYPE_NOTSET = 0, DEVLINK_PORT_TYPE_AUTO = 1, DEVLINK_PORT_TYPE_ETH = 2, DEVLINK_PORT_TYPE_IB = 3, }; enum devlink_port_flavour { DEVLINK_PORT_FLAVOUR_PHYSICAL = 0, DEVLINK_PORT_FLAVOUR_CPU = 1, DEVLINK_PORT_FLAVOUR_DSA = 2, DEVLINK_PORT_FLAVOUR_PCI_PF = 3, DEVLINK_PORT_FLAVOUR_PCI_VF = 4, DEVLINK_PORT_FLAVOUR_VIRTUAL = 5, DEVLINK_PORT_FLAVOUR_UNUSED = 6, DEVLINK_PORT_FLAVOUR_PCI_SF = 7, }; struct devlink_port_phys_attrs { u32 port_number; u32 split_subport_number; }; struct devlink_port_pci_pf_attrs { u32 controller; u16 pf; u8 external: 1; }; struct devlink_port_pci_vf_attrs { u32 controller; u16 pf; u16 vf; u8 external: 1; }; struct devlink_port_pci_sf_attrs { u32 controller; u32 sf; u16 pf; u8 external: 1; }; struct devlink_port_attrs { u8 split: 1; u8 splittable: 1; u32 lanes; enum devlink_port_flavour flavour; struct netdev_phys_item_id switch_id; union { struct devlink_port_phys_attrs phys; struct devlink_port_pci_pf_attrs pci_pf; struct devlink_port_pci_vf_attrs pci_vf; struct devlink_port_pci_sf_attrs pci_sf; }; }; struct devlink; struct devlink_port_ops; struct ib_device; struct devlink_rate; struct devlink_linecard; struct devlink_port { struct list_head list; struct list_head region_list; struct devlink *devlink; const struct devlink_port_ops *ops; unsigned int index; spinlock_t type_lock; enum devlink_port_type type; enum devlink_port_type desired_type; union { struct { struct net_device *netdev; int ifindex; char ifname[16]; } type_eth; struct { struct ib_device *ibdev; } type_ib; 
}; struct devlink_port_attrs attrs; u8 attrs_set: 1; u8 switch_port: 1; u8 registered: 1; u8 initialized: 1; struct delayed_work type_warn_dw; struct list_head reporter_list; struct devlink_rate *devlink_rate; struct devlink_linecard *linecard; }; enum devlink_port_fn_state { DEVLINK_PORT_FN_STATE_INACTIVE = 0, DEVLINK_PORT_FN_STATE_ACTIVE = 1, }; enum devlink_port_fn_opstate { DEVLINK_PORT_FN_OPSTATE_DETACHED = 0, DEVLINK_PORT_FN_OPSTATE_ATTACHED = 1, }; struct devlink_port_ops { int (*port_split)(struct devlink *, struct devlink_port *, unsigned int, struct netlink_ext_ack *); int (*port_unsplit)(struct devlink *, struct devlink_port *, struct netlink_ext_ack *); int (*port_type_set)(struct devlink_port *, enum devlink_port_type); int (*port_del)(struct devlink *, struct devlink_port *, struct netlink_ext_ack *); int (*port_fn_hw_addr_get)(struct devlink_port *, u8 *, int *, struct netlink_ext_ack *); int (*port_fn_hw_addr_set)(struct devlink_port *, const u8 *, int, struct netlink_ext_ack *); int (*port_fn_roce_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); int (*port_fn_roce_set)(struct devlink_port *, bool, struct netlink_ext_ack *); int (*port_fn_migratable_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); int (*port_fn_migratable_set)(struct devlink_port *, bool, struct netlink_ext_ack *); int (*port_fn_state_get)(struct devlink_port *, enum devlink_port_fn_state *, enum devlink_port_fn_opstate *, struct netlink_ext_ack *); int (*port_fn_state_set)(struct devlink_port *, enum devlink_port_fn_state, struct netlink_ext_ack *); int (*port_fn_ipsec_crypto_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); int (*port_fn_ipsec_crypto_set)(struct devlink_port *, bool, struct netlink_ext_ack *); int (*port_fn_ipsec_packet_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); int (*port_fn_ipsec_packet_set)(struct devlink_port *, bool, struct netlink_ext_ack *); }; enum devlink_rate_type { DEVLINK_RATE_TYPE_LEAF = 0, DEVLINK_RATE_TYPE_NODE = 1, }; struct devlink_rate { struct list_head list; enum devlink_rate_type type; struct devlink *devlink; void *priv; u64 tx_share; u64 tx_max; struct devlink_rate *parent; union { struct devlink_port *devlink_port; struct { char *name; refcount_t refcnt; }; }; u32 tx_priority; u32 tx_weight; }; enum ip_conntrack_info { IP_CT_ESTABLISHED = 0, IP_CT_RELATED = 1, IP_CT_NEW = 2, IP_CT_IS_REPLY = 3, IP_CT_ESTABLISHED_REPLY = 3, IP_CT_RELATED_REPLY = 4, IP_CT_NUMBER = 5, IP_CT_UNTRACKED = 7, }; enum { TCA_FLOWER_KEY_CT_FLAGS_NEW = 1, TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED = 2, TCA_FLOWER_KEY_CT_FLAGS_RELATED = 4, TCA_FLOWER_KEY_CT_FLAGS_TRACKED = 8, TCA_FLOWER_KEY_CT_FLAGS_INVALID = 16, TCA_FLOWER_KEY_CT_FLAGS_REPLY = 32, __TCA_FLOWER_KEY_CT_FLAGS_MAX = 33, }; enum flow_dissect_ret { FLOW_DISSECT_RET_OUT_GOOD = 0, FLOW_DISSECT_RET_OUT_BAD = 1, FLOW_DISSECT_RET_PROTO_AGAIN = 2, FLOW_DISSECT_RET_IPPROTO_AGAIN = 3, FLOW_DISSECT_RET_CONTINUE = 4, }; enum bpf_ret_code { BPF_OK = 0, BPF_DROP = 2, BPF_REDIRECT = 7, BPF_LWT_REROUTE = 128, BPF_FLOW_DISSECTOR_CONTINUE = 129, }; enum lwtunnel_encap_types { LWTUNNEL_ENCAP_NONE = 0, LWTUNNEL_ENCAP_MPLS = 1, LWTUNNEL_ENCAP_IP = 2, LWTUNNEL_ENCAP_ILA = 3, LWTUNNEL_ENCAP_IP6 = 4, LWTUNNEL_ENCAP_SEG6 = 5, LWTUNNEL_ENCAP_BPF = 6, LWTUNNEL_ENCAP_SEG6_LOCAL = 7, LWTUNNEL_ENCAP_RPL = 8, LWTUNNEL_ENCAP_IOAM6 = 9, LWTUNNEL_ENCAP_XFRM = 10, __LWTUNNEL_ENCAP_MAX = 11, }; enum batadv_packettype { BATADV_IV_OGM = 0, BATADV_BCAST = 1, BATADV_CODED = 2, BATADV_ELP = 3, BATADV_OGM2 = 4, 
BATADV_MCAST = 5, BATADV_UNICAST = 64, BATADV_UNICAST_FRAG = 65, BATADV_UNICAST_4ADDR = 66, BATADV_ICMP = 67, BATADV_UNICAST_TVLV = 68, }; struct _flow_keys_digest_data { __be16 n_proto; u8 ip_proto; u8 padding; __be32 ports; __be32 src; __be32 dst; }; union tcp_word_hdr { struct tcphdr hdr; __be32 words[5]; }; struct mpls_label { __be32 entry; }; struct flow_dissector_mpls_lse { u32 mpls_ttl: 8; u32 mpls_bos: 1; u32 mpls_tc: 3; u32 mpls_label: 20; }; struct flow_dissector_key_mpls { struct flow_dissector_mpls_lse ls[7]; u8 used_lses; }; struct flow_dissector_key_keyid { __be32 keyid; }; struct flow_dissector_key_ip { __u8 tos; __u8 ttl; }; struct batadv_unicast_packet { __u8 packet_type; __u8 version; __u8 ttl; __u8 ttvn; __u8 dest[6]; }; struct arphdr { __be16 ar_hrd; __be16 ar_pro; unsigned char ar_hln; unsigned char ar_pln; __be16 ar_op; }; struct flow_dissector_key_arp { __u32 sip; __u32 tip; __u8 op; unsigned char sha[6]; unsigned char tha[6]; }; struct flow_dissector_key_cfm { u8 mdl_ver; u8 opcode; }; struct tipc_basic_hdr { __be32 w[4]; }; struct flow_dissector_key_icmp { struct { u8 type; u8 code; }; u16 id; }; struct icmphdr { __u8 type; __u8 code; __sum16 checksum; union { struct { __be16 id; __be16 sequence; } echo; __be32 gateway; struct { __be16 __unused; __be16 mtu; } frag; __u8 reserved[4]; } un; }; struct gre_base_hdr { __be16 flags; __be16 protocol; }; struct ip_esp_hdr { __be32 spi; __be32 seq_no; __u8 enc_data[0]; }; struct flow_dissector_key_ipsec { __be32 spi; }; struct flow_dissector_key_tcp { __be16 flags; }; struct flow_dissector_key_l2tpv3 { __be32 session_id; }; struct flow_dissector_key_ports { union { __be32 ports; struct { __be16 src; __be16 dst; }; }; }; struct flow_dissector_key_ipv4_addrs { __be32 src; __be32 dst; }; struct flow_dissector_key_ipv6_addrs { struct in6_addr src; struct in6_addr dst; }; struct flow_dissector_key_tipc { __be32 key; }; struct flow_dissector_key_addrs { union { struct flow_dissector_key_ipv4_addrs v4addrs; struct flow_dissector_key_ipv6_addrs v6addrs; struct flow_dissector_key_tipc tipckey; }; }; struct flow_dissector_key_tags { u32 flow_label; }; struct flow_dissector_key_vlan { union { struct { u16 vlan_id: 12; u16 vlan_dei: 1; u16 vlan_priority: 3; }; __be16 vlan_tci; }; __be16 vlan_tpid; __be16 vlan_eth_type; u16 padding; }; struct flow_keys { struct flow_dissector_key_control control; struct flow_dissector_key_basic basic; struct flow_dissector_key_tags tags; struct flow_dissector_key_vlan vlan; struct flow_dissector_key_vlan cvlan; struct flow_dissector_key_keyid keyid; struct flow_dissector_key_ports ports; struct flow_dissector_key_icmp icmp; struct flow_dissector_key_addrs addrs; long: 0; }; struct flow_dissector_key_meta { int ingress_ifindex; u16 ingress_iftype; u8 l2_miss; }; struct nf_conn_labels { unsigned long bits[2]; }; struct flow_dissector_key_ct { u16 ct_state; u16 ct_zone; u32 ct_mark; u32 ct_labels[4]; }; struct flow_dissector_key_enc_opts { u8 data[255]; u8 len; __be16 dst_opt_type; }; struct flow_dissector_key_hash { u32 hash; }; struct clock_identity { u8 id[8]; }; struct port_identity { struct clock_identity clock_identity; __be16 port_number; }; struct ptp_header { u8 tsmt; u8 ver; __be16 message_length; u8 domain_number; u8 reserved1; u8 flag_field[2]; __be64 correction; __be32 reserved2; struct port_identity source_port_identity; __be16 sequence_id; u8 control; u8 log_message_interval; } __attribute__((packed)); struct hsr_tag { __be16 path_and_LSDU_size; __be16 sequence_nr; __be16 encap_proto; }; 
struct flow_dissector_key_eth_addrs { unsigned char dst[6]; unsigned char src[6]; }; struct flow_dissector_key_num_of_vlans { u8 num_of_vlans; }; struct flow_dissector_key_pppoe { __be16 session_id; __be16 ppp_proto; __be16 type; }; struct flow_keys_digest { u8 data[16]; }; struct rps_map; struct rps_dev_flow_table; struct netdev_rx_queue { struct xdp_rxq_info xdp_rxq; struct rps_map __attribute__((btf_type_tag("rcu"))) *rps_map; struct rps_dev_flow_table __attribute__((btf_type_tag("rcu"))) *rps_flow_table; struct kobject kobj; struct net_device *dev; netdevice_tracker dev_tracker; struct xsk_buff_pool *pool; u64 android_kabi_reserved1; u64 android_kabi_reserved2; u64 android_kabi_reserved3; u64 android_kabi_reserved4; long: 64; long: 64; long: 64; long: 64; }; struct rps_map { unsigned int len; struct callback_head rcu; u16 cpus[0]; }; struct rps_dev_flow { u16 cpu; u16 filter; unsigned int last_qtail; }; struct rps_dev_flow_table { unsigned int mask; struct callback_head rcu; struct rps_dev_flow flows[0]; }; struct bpf_xdp_link { struct bpf_link link; struct net_device *dev; int flags; }; enum xps_map_type { XPS_CPUS = 0, XPS_RXQS = 1, XPS_MAPS_MAX = 2, }; enum qdisc_state_t { __QDISC_STATE_SCHED = 0, __QDISC_STATE_DEACTIVATED = 1, __QDISC_STATE_MISSED = 2, __QDISC_STATE_DRAINING = 3, }; enum { NAPIF_STATE_SCHED = 1, NAPIF_STATE_MISSED = 2, NAPIF_STATE_DISABLE = 4, NAPIF_STATE_NPSVC = 8, NAPIF_STATE_LISTED = 16, NAPIF_STATE_NO_BUSY_POLL = 32, NAPIF_STATE_IN_BUSY_POLL = 64, NAPIF_STATE_PREFER_BUSY_POLL = 128, NAPIF_STATE_THREADED = 256, NAPIF_STATE_SCHED_THREADED = 512, }; enum { LINUX_MIB_NUM = 0, LINUX_MIB_SYNCOOKIESSENT = 1, LINUX_MIB_SYNCOOKIESRECV = 2, LINUX_MIB_SYNCOOKIESFAILED = 3, LINUX_MIB_EMBRYONICRSTS = 4, LINUX_MIB_PRUNECALLED = 5, LINUX_MIB_RCVPRUNED = 6, LINUX_MIB_OFOPRUNED = 7, LINUX_MIB_OUTOFWINDOWICMPS = 8, LINUX_MIB_LOCKDROPPEDICMPS = 9, LINUX_MIB_ARPFILTER = 10, LINUX_MIB_TIMEWAITED = 11, LINUX_MIB_TIMEWAITRECYCLED = 12, LINUX_MIB_TIMEWAITKILLED = 13, LINUX_MIB_PAWSACTIVEREJECTED = 14, LINUX_MIB_PAWSESTABREJECTED = 15, LINUX_MIB_DELAYEDACKS = 16, LINUX_MIB_DELAYEDACKLOCKED = 17, LINUX_MIB_DELAYEDACKLOST = 18, LINUX_MIB_LISTENOVERFLOWS = 19, LINUX_MIB_LISTENDROPS = 20, LINUX_MIB_TCPHPHITS = 21, LINUX_MIB_TCPPUREACKS = 22, LINUX_MIB_TCPHPACKS = 23, LINUX_MIB_TCPRENORECOVERY = 24, LINUX_MIB_TCPSACKRECOVERY = 25, LINUX_MIB_TCPSACKRENEGING = 26, LINUX_MIB_TCPSACKREORDER = 27, LINUX_MIB_TCPRENOREORDER = 28, LINUX_MIB_TCPTSREORDER = 29, LINUX_MIB_TCPFULLUNDO = 30, LINUX_MIB_TCPPARTIALUNDO = 31, LINUX_MIB_TCPDSACKUNDO = 32, LINUX_MIB_TCPLOSSUNDO = 33, LINUX_MIB_TCPLOSTRETRANSMIT = 34, LINUX_MIB_TCPRENOFAILURES = 35, LINUX_MIB_TCPSACKFAILURES = 36, LINUX_MIB_TCPLOSSFAILURES = 37, LINUX_MIB_TCPFASTRETRANS = 38, LINUX_MIB_TCPSLOWSTARTRETRANS = 39, LINUX_MIB_TCPTIMEOUTS = 40, LINUX_MIB_TCPLOSSPROBES = 41, LINUX_MIB_TCPLOSSPROBERECOVERY = 42, LINUX_MIB_TCPRENORECOVERYFAIL = 43, LINUX_MIB_TCPSACKRECOVERYFAIL = 44, LINUX_MIB_TCPRCVCOLLAPSED = 45, LINUX_MIB_TCPDSACKOLDSENT = 46, LINUX_MIB_TCPDSACKOFOSENT = 47, LINUX_MIB_TCPDSACKRECV = 48, LINUX_MIB_TCPDSACKOFORECV = 49, LINUX_MIB_TCPABORTONDATA = 50, LINUX_MIB_TCPABORTONCLOSE = 51, LINUX_MIB_TCPABORTONMEMORY = 52, LINUX_MIB_TCPABORTONTIMEOUT = 53, LINUX_MIB_TCPABORTONLINGER = 54, LINUX_MIB_TCPABORTFAILED = 55, LINUX_MIB_TCPMEMORYPRESSURES = 56, LINUX_MIB_TCPMEMORYPRESSURESCHRONO = 57, LINUX_MIB_TCPSACKDISCARD = 58, LINUX_MIB_TCPDSACKIGNOREDOLD = 59, LINUX_MIB_TCPDSACKIGNOREDNOUNDO = 60, LINUX_MIB_TCPSPURIOUSRTOS = 61, 
LINUX_MIB_TCPMD5NOTFOUND = 62, LINUX_MIB_TCPMD5UNEXPECTED = 63, LINUX_MIB_TCPMD5FAILURE = 64, LINUX_MIB_SACKSHIFTED = 65, LINUX_MIB_SACKMERGED = 66, LINUX_MIB_SACKSHIFTFALLBACK = 67, LINUX_MIB_TCPBACKLOGDROP = 68, LINUX_MIB_PFMEMALLOCDROP = 69, LINUX_MIB_TCPMINTTLDROP = 70, LINUX_MIB_TCPDEFERACCEPTDROP = 71, LINUX_MIB_IPRPFILTER = 72, LINUX_MIB_TCPTIMEWAITOVERFLOW = 73, LINUX_MIB_TCPREQQFULLDOCOOKIES = 74, LINUX_MIB_TCPREQQFULLDROP = 75, LINUX_MIB_TCPRETRANSFAIL = 76, LINUX_MIB_TCPRCVCOALESCE = 77, LINUX_MIB_TCPBACKLOGCOALESCE = 78, LINUX_MIB_TCPOFOQUEUE = 79, LINUX_MIB_TCPOFODROP = 80, LINUX_MIB_TCPOFOMERGE = 81, LINUX_MIB_TCPCHALLENGEACK = 82, LINUX_MIB_TCPSYNCHALLENGE = 83, LINUX_MIB_TCPFASTOPENACTIVE = 84, LINUX_MIB_TCPFASTOPENACTIVEFAIL = 85, LINUX_MIB_TCPFASTOPENPASSIVE = 86, LINUX_MIB_TCPFASTOPENPASSIVEFAIL = 87, LINUX_MIB_TCPFASTOPENLISTENOVERFLOW = 88, LINUX_MIB_TCPFASTOPENCOOKIEREQD = 89, LINUX_MIB_TCPFASTOPENBLACKHOLE = 90, LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES = 91, LINUX_MIB_BUSYPOLLRXPACKETS = 92, LINUX_MIB_TCPAUTOCORKING = 93, LINUX_MIB_TCPFROMZEROWINDOWADV = 94, LINUX_MIB_TCPTOZEROWINDOWADV = 95, LINUX_MIB_TCPWANTZEROWINDOWADV = 96, LINUX_MIB_TCPSYNRETRANS = 97, LINUX_MIB_TCPORIGDATASENT = 98, LINUX_MIB_TCPHYSTARTTRAINDETECT = 99, LINUX_MIB_TCPHYSTARTTRAINCWND = 100, LINUX_MIB_TCPHYSTARTDELAYDETECT = 101, LINUX_MIB_TCPHYSTARTDELAYCWND = 102, LINUX_MIB_TCPACKSKIPPEDSYNRECV = 103, LINUX_MIB_TCPACKSKIPPEDPAWS = 104, LINUX_MIB_TCPACKSKIPPEDSEQ = 105, LINUX_MIB_TCPACKSKIPPEDFINWAIT2 = 106, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT = 107, LINUX_MIB_TCPACKSKIPPEDCHALLENGE = 108, LINUX_MIB_TCPWINPROBE = 109, LINUX_MIB_TCPKEEPALIVE = 110, LINUX_MIB_TCPMTUPFAIL = 111, LINUX_MIB_TCPMTUPSUCCESS = 112, LINUX_MIB_TCPDELIVERED = 113, LINUX_MIB_TCPDELIVEREDCE = 114, LINUX_MIB_TCPACKCOMPRESSED = 115, LINUX_MIB_TCPZEROWINDOWDROP = 116, LINUX_MIB_TCPRCVQDROP = 117, LINUX_MIB_TCPWQUEUETOOBIG = 118, LINUX_MIB_TCPFASTOPENPASSIVEALTKEY = 119, LINUX_MIB_TCPTIMEOUTREHASH = 120, LINUX_MIB_TCPDUPLICATEDATAREHASH = 121, LINUX_MIB_TCPDSACKRECVSEGS = 122, LINUX_MIB_TCPDSACKIGNOREDDUBIOUS = 123, LINUX_MIB_TCPMIGRATEREQSUCCESS = 124, LINUX_MIB_TCPMIGRATEREQFAILURE = 125, LINUX_MIB_TCPPLBREHASH = 126, __LINUX_MIB_MAX = 127, }; enum netdev_offload_xstats_type { NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1, }; enum bpf_xdp_mode { XDP_MODE_SKB = 0, XDP_MODE_DRV = 1, XDP_MODE_HW = 2, __MAX_XDP_MODE = 3, }; enum { IF_OPER_UNKNOWN = 0, IF_OPER_NOTPRESENT = 1, IF_OPER_DOWN = 2, IF_OPER_LOWERLAYERDOWN = 3, IF_OPER_TESTING = 4, IF_OPER_DORMANT = 5, IF_OPER_UP = 6, }; enum { NFPROTO_UNSPEC = 0, NFPROTO_INET = 1, NFPROTO_IPV4 = 2, NFPROTO_ARP = 3, NFPROTO_NETDEV = 5, NFPROTO_BRIDGE = 7, NFPROTO_IPV6 = 10, NFPROTO_NUMPROTO = 11, }; enum nf_dev_hooks { NF_NETDEV_INGRESS = 0, NF_NETDEV_EGRESS = 1, NF_NETDEV_NUMHOOKS = 2, }; enum tcx_action_base { TCX_NEXT = -1, TCX_PASS = 0, TCX_DROP = 2, TCX_REDIRECT = 7, }; enum qdisc_state2_t { __QDISC_STATE2_RUNNING = 0, }; struct netdev_adjacent { struct net_device *dev; netdevice_tracker dev_tracker; bool master; bool ignore; u16 ref_nr; void *private; struct list_head list; struct callback_head rcu; }; struct dev_kfree_skb_cb { enum skb_drop_reason reason; }; struct tc_skb_cb { struct qdisc_skb_cb qdisc_cb; u16 mru; u8 post_ct: 1; u8 post_ct_snat: 1; u8 post_ct_dnat: 1; u16 zone; }; struct netdev_net_notifier { struct list_head list; struct notifier_block *nb; }; struct net_device_path_stack { int num_paths; struct net_device_path path[5]; }; struct netdev_nested_priv { unsigned char flags; void 
*data; }; struct netdev_notifier_offload_xstats_rd; struct netdev_notifier_offload_xstats_ru; struct netdev_notifier_offload_xstats_info { struct netdev_notifier_info info; enum netdev_offload_xstats_type type; union { struct netdev_notifier_offload_xstats_rd *report_delta; struct netdev_notifier_offload_xstats_ru *report_used; }; }; struct netdev_notifier_offload_xstats_rd { struct rtnl_hw_stats64 stats; bool used; }; struct netdev_notifier_offload_xstats_ru { bool used; }; struct netdev_notifier_pre_changeaddr_info { struct netdev_notifier_info info; const unsigned char *dev_addr; }; typedef int (*bpf_op_t)(struct net_device *, struct netdev_bpf *); struct netdev_notifier_change_info { struct netdev_notifier_info info; unsigned int flags_changed; }; struct netdev_notifier_changeupper_info { struct netdev_notifier_info info; struct net_device *upper_dev; bool master; bool linking; void *upper_info; }; struct ifslave { __s32 slave_id; char slave_name[16]; __s8 link; __s8 state; __u32 link_failure_count; }; typedef struct ifslave ifslave; struct ifbond { __s32 bond_mode; __s32 num_slaves; __s32 miimon; }; typedef struct ifbond ifbond; struct netdev_bonding_info { ifslave slave; ifbond master; }; struct netdev_notifier_bonding_info { struct netdev_notifier_info info; struct netdev_bonding_info bonding_info; }; struct netdev_notifier_changelowerstate_info { struct netdev_notifier_info info; void *lower_state_info; }; struct netdev_notifier_info_ext { struct netdev_notifier_info info; union { u32 mtu; } ext; }; struct netdev_hw_addr { struct list_head list; struct rb_node node; unsigned char addr[32]; unsigned char type; bool global_use; int sync_cnt; int refcount; int synced; struct callback_head callback_head; }; struct xfrm_dst { union { struct dst_entry dst; struct rtable rt; struct rt6_info rt6; } u; struct dst_entry *route; struct dst_entry *child; struct dst_entry *path; struct xfrm_policy *pols[2]; int num_pols; int num_xfrms; u32 xfrm_genid; u32 policy_genid; u32 route_mtu_cached; u32 child_mtu_cached; u32 route_cookie; u32 path_cookie; }; struct neigh_sysctl_table { struct ctl_table_header *sysctl_header; struct ctl_table neigh_vars[22]; }; enum { NEIGH_VAR_MCAST_PROBES = 0, NEIGH_VAR_UCAST_PROBES = 1, NEIGH_VAR_APP_PROBES = 2, NEIGH_VAR_MCAST_REPROBES = 3, NEIGH_VAR_RETRANS_TIME = 4, NEIGH_VAR_BASE_REACHABLE_TIME = 5, NEIGH_VAR_DELAY_PROBE_TIME = 6, NEIGH_VAR_INTERVAL_PROBE_TIME_MS = 7, NEIGH_VAR_GC_STALETIME = 8, NEIGH_VAR_QUEUE_LEN_BYTES = 9, NEIGH_VAR_PROXY_QLEN = 10, NEIGH_VAR_ANYCAST_DELAY = 11, NEIGH_VAR_PROXY_DELAY = 12, NEIGH_VAR_LOCKTIME = 13, NEIGH_VAR_QUEUE_LEN = 14, NEIGH_VAR_RETRANS_TIME_MS = 15, NEIGH_VAR_BASE_REACHABLE_TIME_MS = 16, NEIGH_VAR_GC_INTERVAL = 17, NEIGH_VAR_GC_THRESH1 = 18, NEIGH_VAR_GC_THRESH2 = 19, NEIGH_VAR_GC_THRESH3 = 20, NEIGH_VAR_MAX = 21, }; enum { NEIGH_ARP_TABLE = 0, NEIGH_ND_TABLE = 1, NEIGH_DN_TABLE = 2, NEIGH_NR_TABLES = 3, NEIGH_LINK_TABLE = 3, }; enum netevent_notif_type { NETEVENT_NEIGH_UPDATE = 1, NETEVENT_REDIRECT = 2, NETEVENT_DELAY_PROBE_TIME_UPDATE = 3, NETEVENT_IPV4_MPATH_HASH_UPDATE = 4, NETEVENT_IPV6_MPATH_HASH_UPDATE = 5, NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE = 6, }; enum { NDA_UNSPEC = 0, NDA_DST = 1, NDA_LLADDR = 2, NDA_CACHEINFO = 3, NDA_PROBES = 4, NDA_VLAN = 5, NDA_PORT = 6, NDA_VNI = 7, NDA_IFINDEX = 8, NDA_MASTER = 9, NDA_LINK_NETNSID = 10, NDA_SRC_VNI = 11, NDA_PROTOCOL = 12, NDA_NH_ID = 13, NDA_FDB_EXT_ATTRS = 14, NDA_FLAGS_EXT = 15, NDA_NDM_STATE_MASK = 16, NDA_NDM_FLAGS_MASK = 17, __NDA_MAX = 18, }; enum { RTN_UNSPEC 
= 0, RTN_UNICAST = 1, RTN_LOCAL = 2, RTN_BROADCAST = 3, RTN_ANYCAST = 4, RTN_MULTICAST = 5, RTN_BLACKHOLE = 6, RTN_UNREACHABLE = 7, RTN_PROHIBIT = 8, RTN_THROW = 9, RTN_NAT = 10, RTN_XRESOLVE = 11, __RTN_MAX = 12, }; enum { NDTA_UNSPEC = 0, NDTA_NAME = 1, NDTA_THRESH1 = 2, NDTA_THRESH2 = 3, NDTA_THRESH3 = 4, NDTA_CONFIG = 5, NDTA_PARMS = 6, NDTA_STATS = 7, NDTA_GC_INTERVAL = 8, NDTA_PAD = 9, __NDTA_MAX = 10, }; enum { NDTPA_UNSPEC = 0, NDTPA_IFINDEX = 1, NDTPA_REFCNT = 2, NDTPA_REACHABLE_TIME = 3, NDTPA_BASE_REACHABLE_TIME = 4, NDTPA_RETRANS_TIME = 5, NDTPA_GC_STALETIME = 6, NDTPA_DELAY_PROBE_TIME = 7, NDTPA_QUEUE_LEN = 8, NDTPA_APP_PROBES = 9, NDTPA_UCAST_PROBES = 10, NDTPA_MCAST_PROBES = 11, NDTPA_ANYCAST_DELAY = 12, NDTPA_PROXY_DELAY = 13, NDTPA_PROXY_QLEN = 14, NDTPA_LOCKTIME = 15, NDTPA_QUEUE_LENBYTES = 16, NDTPA_MCAST_REPROBES = 17, NDTPA_PAD = 18, NDTPA_INTERVAL_PROBE_TIME_MS = 19, __NDTPA_MAX = 20, }; struct neighbour_cb { unsigned long sched_next; unsigned int flags; }; struct neigh_seq_state { struct seq_net_private p; struct neigh_table *tbl; struct neigh_hash_table *nht; void * (*neigh_sub_iter)(struct neigh_seq_state *, struct neighbour *, loff_t *); unsigned int bucket; unsigned int flags; }; struct neigh_dump_filter { int master_idx; int dev_idx; }; struct ndtmsg { __u8 ndtm_family; __u8 ndtm_pad1; __u16 ndtm_pad2; }; struct ndt_config { __u16 ndtc_key_len; __u16 ndtc_entry_size; __u32 ndtc_entries; __u32 ndtc_last_flush; __u32 ndtc_last_rand; __u32 ndtc_hash_rnd; __u32 ndtc_hash_mask; __u32 ndtc_hash_chain_gc; __u32 ndtc_proxy_qlen; }; struct ndt_stats { __u64 ndts_allocs; __u64 ndts_destroys; __u64 ndts_hash_grows; __u64 ndts_res_failed; __u64 ndts_lookups; __u64 ndts_hits; __u64 ndts_rcv_probes_mcast; __u64 ndts_rcv_probes_ucast; __u64 ndts_periodic_gc_runs; __u64 ndts_forced_gc_runs; __u64 ndts_table_fulls; }; struct nda_cacheinfo { __u32 ndm_confirmed; __u32 ndm_used; __u32 ndm_updated; __u32 ndm_refcnt; }; struct rtnl_link { rtnl_doit_func doit; rtnl_dumpit_func dumpit; struct module *owner; unsigned int flags; struct callback_head rcu; }; enum rtattr_type_t { RTA_UNSPEC = 0, RTA_DST = 1, RTA_SRC = 2, RTA_IIF = 3, RTA_OIF = 4, RTA_GATEWAY = 5, RTA_PRIORITY = 6, RTA_PREFSRC = 7, RTA_METRICS = 8, RTA_MULTIPATH = 9, RTA_PROTOINFO = 10, RTA_FLOW = 11, RTA_CACHEINFO = 12, RTA_SESSION = 13, RTA_MP_ALGO = 14, RTA_TABLE = 15, RTA_MARK = 16, RTA_MFC_STATS = 17, RTA_VIA = 18, RTA_NEWDST = 19, RTA_PREF = 20, RTA_ENCAP_TYPE = 21, RTA_ENCAP = 22, RTA_EXPIRES = 23, RTA_PAD = 24, RTA_UID = 25, RTA_TTL_PROPAGATE = 26, RTA_IP_PROTO = 27, RTA_SPORT = 28, RTA_DPORT = 29, RTA_NH_ID = 30, __RTA_MAX = 31, }; enum { IFLA_BRIDGE_FLAGS = 0, IFLA_BRIDGE_MODE = 1, IFLA_BRIDGE_VLAN_INFO = 2, IFLA_BRIDGE_VLAN_TUNNEL_INFO = 3, IFLA_BRIDGE_MRP = 4, IFLA_BRIDGE_CFM = 5, IFLA_BRIDGE_MST = 6, __IFLA_BRIDGE_MAX = 7, }; enum { IFLA_BRPORT_UNSPEC = 0, IFLA_BRPORT_STATE = 1, IFLA_BRPORT_PRIORITY = 2, IFLA_BRPORT_COST = 3, IFLA_BRPORT_MODE = 4, IFLA_BRPORT_GUARD = 5, IFLA_BRPORT_PROTECT = 6, IFLA_BRPORT_FAST_LEAVE = 7, IFLA_BRPORT_LEARNING = 8, IFLA_BRPORT_UNICAST_FLOOD = 9, IFLA_BRPORT_PROXYARP = 10, IFLA_BRPORT_LEARNING_SYNC = 11, IFLA_BRPORT_PROXYARP_WIFI = 12, IFLA_BRPORT_ROOT_ID = 13, IFLA_BRPORT_BRIDGE_ID = 14, IFLA_BRPORT_DESIGNATED_PORT = 15, IFLA_BRPORT_DESIGNATED_COST = 16, IFLA_BRPORT_ID = 17, IFLA_BRPORT_NO = 18, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK = 19, IFLA_BRPORT_CONFIG_PENDING = 20, IFLA_BRPORT_MESSAGE_AGE_TIMER = 21, IFLA_BRPORT_FORWARD_DELAY_TIMER = 22, IFLA_BRPORT_HOLD_TIMER = 23, 
IFLA_BRPORT_FLUSH = 24, IFLA_BRPORT_MULTICAST_ROUTER = 25, IFLA_BRPORT_PAD = 26, IFLA_BRPORT_MCAST_FLOOD = 27, IFLA_BRPORT_MCAST_TO_UCAST = 28, IFLA_BRPORT_VLAN_TUNNEL = 29, IFLA_BRPORT_BCAST_FLOOD = 30, IFLA_BRPORT_GROUP_FWD_MASK = 31, IFLA_BRPORT_NEIGH_SUPPRESS = 32, IFLA_BRPORT_ISOLATED = 33, IFLA_BRPORT_BACKUP_PORT = 34, IFLA_BRPORT_MRP_RING_OPEN = 35, IFLA_BRPORT_MRP_IN_OPEN = 36, IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 37, IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 38, IFLA_BRPORT_LOCKED = 39, IFLA_BRPORT_MAB = 40, IFLA_BRPORT_MCAST_N_GROUPS = 41, IFLA_BRPORT_MCAST_MAX_GROUPS = 42, IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 43, IFLA_BRPORT_BACKUP_NHID = 44, __IFLA_BRPORT_MAX = 45, }; enum { IFLA_STATS_UNSPEC = 0, IFLA_STATS_LINK_64 = 1, IFLA_STATS_LINK_XSTATS = 2, IFLA_STATS_LINK_XSTATS_SLAVE = 3, IFLA_STATS_LINK_OFFLOAD_XSTATS = 4, IFLA_STATS_AF_SPEC = 5, __IFLA_STATS_MAX = 6, }; enum { IFLA_OFFLOAD_XSTATS_UNSPEC = 0, IFLA_OFFLOAD_XSTATS_CPU_HIT = 1, IFLA_OFFLOAD_XSTATS_HW_S_INFO = 2, IFLA_OFFLOAD_XSTATS_L3_STATS = 3, __IFLA_OFFLOAD_XSTATS_MAX = 4, }; enum rtnl_kinds { RTNL_KIND_NEW = 0, RTNL_KIND_DEL = 1, RTNL_KIND_GET = 2, RTNL_KIND_SET = 3, }; enum { IFLA_EVENT_NONE = 0, IFLA_EVENT_REBOOT = 1, IFLA_EVENT_FEATURES = 2, IFLA_EVENT_BONDING_FAILOVER = 3, IFLA_EVENT_NOTIFY_PEERS = 4, IFLA_EVENT_IGMP_RESEND = 5, IFLA_EVENT_BONDING_OPTIONS = 6, }; enum { IFLA_PROTO_DOWN_REASON_UNSPEC = 0, IFLA_PROTO_DOWN_REASON_MASK = 1, IFLA_PROTO_DOWN_REASON_VALUE = 2, __IFLA_PROTO_DOWN_REASON_CNT = 3, IFLA_PROTO_DOWN_REASON_MAX = 2, }; enum { IFLA_VF_INFO_UNSPEC = 0, IFLA_VF_INFO = 1, __IFLA_VF_INFO_MAX = 2, }; enum { IFLA_VF_UNSPEC = 0, IFLA_VF_MAC = 1, IFLA_VF_VLAN = 2, IFLA_VF_TX_RATE = 3, IFLA_VF_SPOOFCHK = 4, IFLA_VF_LINK_STATE = 5, IFLA_VF_RATE = 6, IFLA_VF_RSS_QUERY_EN = 7, IFLA_VF_STATS = 8, IFLA_VF_TRUST = 9, IFLA_VF_IB_NODE_GUID = 10, IFLA_VF_IB_PORT_GUID = 11, IFLA_VF_VLAN_LIST = 12, IFLA_VF_BROADCAST = 13, __IFLA_VF_MAX = 14, }; enum { IFLA_VF_VLAN_INFO_UNSPEC = 0, IFLA_VF_VLAN_INFO = 1, __IFLA_VF_VLAN_INFO_MAX = 2, }; enum { IFLA_VF_STATS_RX_PACKETS = 0, IFLA_VF_STATS_TX_PACKETS = 1, IFLA_VF_STATS_RX_BYTES = 2, IFLA_VF_STATS_TX_BYTES = 3, IFLA_VF_STATS_BROADCAST = 4, IFLA_VF_STATS_MULTICAST = 5, IFLA_VF_STATS_PAD = 6, IFLA_VF_STATS_RX_DROPPED = 7, IFLA_VF_STATS_TX_DROPPED = 8, __IFLA_VF_STATS_MAX = 9, }; enum { IFLA_VF_PORT_UNSPEC = 0, IFLA_VF_PORT = 1, __IFLA_VF_PORT_MAX = 2, }; enum { IFLA_PORT_UNSPEC = 0, IFLA_PORT_VF = 1, IFLA_PORT_PROFILE = 2, IFLA_PORT_VSI_TYPE = 3, IFLA_PORT_INSTANCE_UUID = 4, IFLA_PORT_HOST_UUID = 5, IFLA_PORT_REQUEST = 6, IFLA_PORT_RESPONSE = 7, __IFLA_PORT_MAX = 8, }; enum { XDP_ATTACHED_NONE = 0, XDP_ATTACHED_DRV = 1, XDP_ATTACHED_SKB = 2, XDP_ATTACHED_HW = 3, XDP_ATTACHED_MULTI = 4, }; enum { IFLA_XDP_UNSPEC = 0, IFLA_XDP_FD = 1, IFLA_XDP_ATTACHED = 2, IFLA_XDP_FLAGS = 3, IFLA_XDP_PROG_ID = 4, IFLA_XDP_DRV_PROG_ID = 5, IFLA_XDP_SKB_PROG_ID = 6, IFLA_XDP_HW_PROG_ID = 7, IFLA_XDP_EXPECTED_FD = 8, __IFLA_XDP_MAX = 9, }; enum { IFLA_INFO_UNSPEC = 0, IFLA_INFO_KIND = 1, IFLA_INFO_DATA = 2, IFLA_INFO_XSTATS = 3, IFLA_INFO_SLAVE_KIND = 4, IFLA_INFO_SLAVE_DATA = 5, __IFLA_INFO_MAX = 6, }; enum { IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 1, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 2, __IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX = 3, }; enum { IFLA_STATS_GETSET_UNSPEC = 0, IFLA_STATS_GET_FILTERS = 1, IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 2, __IFLA_STATS_GETSET_MAX = 3, }; enum { MDBA_SET_ENTRY_UNSPEC = 0, MDBA_SET_ENTRY = 1, MDBA_SET_ENTRY_ATTRS = 2, 
__MDBA_SET_ENTRY_MAX = 3, }; struct rtnl_af_ops { struct list_head list; int family; int (*fill_link_af)(struct sk_buff *, const struct net_device *, u32); size_t (*get_link_af_size)(const struct net_device *, u32); int (*validate_link_af)(const struct net_device *, const struct nlattr *, struct netlink_ext_ack *); int (*set_link_af)(struct net_device *, const struct nlattr *, struct netlink_ext_ack *); int (*fill_stats_af)(struct sk_buff *, const struct net_device *); size_t (*get_stats_af_size)(const struct net_device *); }; struct rtnl_offload_xstats_request_used { bool request; bool used; }; struct rtnl_newlink_tbs { struct nlattr *tb[65]; struct nlattr *attr[51]; struct nlattr *slave_attr[45]; }; struct if_stats_msg { __u8 family; __u8 pad1; __u16 pad2; __u32 ifindex; __u32 filter_mask; }; struct br_port_msg { __u8 family; __u32 ifindex; }; struct rtnl_link_stats { __u32 rx_packets; __u32 tx_packets; __u32 rx_bytes; __u32 tx_bytes; __u32 rx_errors; __u32 tx_errors; __u32 rx_dropped; __u32 tx_dropped; __u32 multicast; __u32 collisions; __u32 rx_length_errors; __u32 rx_over_errors; __u32 rx_crc_errors; __u32 rx_frame_errors; __u32 rx_fifo_errors; __u32 rx_missed_errors; __u32 tx_aborted_errors; __u32 tx_carrier_errors; __u32 tx_fifo_errors; __u32 tx_heartbeat_errors; __u32 tx_window_errors; __u32 rx_compressed; __u32 tx_compressed; __u32 rx_nohandler; }; struct netlink_dump_control { int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); struct netlink_ext_ack *extack; void *data; struct module *module; u32 min_dump_alloc; }; struct ifla_vf_mac { __u32 vf; __u8 mac[32]; }; struct ifla_vf_vlan { __u32 vf; __u32 vlan; __u32 qos; }; struct ifla_vf_vlan_info { __u32 vf; __u32 vlan; __u32 qos; __be16 vlan_proto; }; struct ifla_vf_tx_rate { __u32 vf; __u32 rate; }; struct ifla_vf_rate { __u32 vf; __u32 min_tx_rate; __u32 max_tx_rate; }; struct ifla_vf_spoofchk { __u32 vf; __u32 setting; }; struct ifla_vf_link_state { __u32 vf; __u32 link_state; }; struct ifla_vf_rss_query_en { __u32 vf; __u32 setting; }; struct ifla_vf_trust { __u32 vf; __u32 setting; }; struct rtnl_stats_dump_filters { u32 mask[6]; }; struct rta_cacheinfo { __u32 rta_clntref; __u32 rta_lastuse; __s32 rta_expires; __u32 rta_error; __u32 rta_used; __u32 rta_id; __u32 rta_ts; __u32 rta_tsage; }; struct rtnl_mdb_dump_ctx { long idx; }; struct rtnl_link_ifmap { __u64 mem_start; __u64 mem_end; __u64 base_addr; __u16 irq; __u8 dma; __u8 port; }; struct ifla_vf_broadcast { __u8 broadcast[32]; }; struct br_mdb_entry { __u32 ifindex; __u8 state; __u8 flags; __u16 vid; struct { union { __be32 ip4; struct in6_addr ip6; unsigned char mac_addr[6]; } u; __be16 proto; } addr; }; enum { IF_LINK_MODE_DEFAULT = 0, IF_LINK_MODE_DORMANT = 1, IF_LINK_MODE_TESTING = 2, }; enum lw_bits { LW_URGENT = 0, }; struct xdp_umem; struct xsk_queue; struct xdp_buff_xsk; struct xdp_desc; struct xsk_buff_pool { struct device *dev; struct net_device *netdev; struct list_head xsk_tx_list; spinlock_t xsk_tx_list_lock; refcount_t users; struct xdp_umem *umem; struct work_struct work; struct list_head free_list; struct list_head xskb_list; u32 heads_cnt; u16 queue_id; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct xsk_queue *fq; struct xsk_queue *cq; dma_addr_t *dma_pages; struct xdp_buff_xsk *heads; struct xdp_desc *tx_descs; u64 chunk_mask; u64 addrs_cnt; u32 free_list_cnt; u32 dma_pages_cnt; u32 free_heads_cnt; u32 headroom; u32 chunk_size; u32 
chunk_shift; u32 frame_len; u8 cached_need_wakeup; bool uses_need_wakeup; bool dma_need_sync; bool unaligned; void *addrs; spinlock_t cq_lock; struct xdp_buff_xsk *free_heads[0]; long: 64; long: 64; long: 64; }; struct xdp_umem { void *addrs; u64 size; u32 headroom; u32 chunk_size; u32 chunks; u32 npgs; struct user_struct *user; refcount_t users; u8 flags; bool zc; struct page **pgs; int id; struct list_head xsk_dma_list; struct work_struct work; }; struct xdp_buff_xsk { struct xdp_buff xdp; u8 cb[24]; dma_addr_t dma; dma_addr_t frame_dma; struct xsk_buff_pool *pool; u64 orig_addr; struct list_head free_list_node; struct list_head xskb_list_node; }; struct xdp_desc { __u64 addr; __u32 len; __u32 options; }; struct seg6_pernet_data { struct mutex lock; struct in6_addr __attribute__((btf_type_tag("rcu"))) *tun_src; }; struct ipv6_bpf_stub { int (*inet6_bind)(struct sock *, struct sockaddr *, int, u32); struct sock * (*udp6_lib_lookup)(struct net *, const struct in6_addr *, __be16, const struct in6_addr *, __be16, int, int, struct udp_table *, struct sk_buff *); int (*ipv6_setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); int (*ipv6_getsockopt)(struct sock *, int, int, sockptr_t, sockptr_t); int (*ipv6_dev_get_saddr)(struct net *, const struct net_device *, const struct in6_addr *, unsigned int, struct in6_addr *); }; struct bpf_scratchpad { union { __be32 diff[128]; u8 buff[512]; }; }; enum { BPF_F_NEIGH = 2, BPF_F_PEER = 4, BPF_F_NEXTHOP = 8, }; enum { BPF_F_RECOMPUTE_CSUM = 1, BPF_F_INVALIDATE_HASH = 2, }; enum bpf_hdr_start_off { BPF_HDR_START_MAC = 0, BPF_HDR_START_NET = 1, }; enum { BPF_F_HDR_FIELD_MASK = 15, }; enum { BPF_F_PSEUDO_HDR = 16, BPF_F_MARK_MANGLED_0 = 32, BPF_F_MARK_ENFORCE = 64, }; enum { BPF_CSUM_LEVEL_QUERY = 0, BPF_CSUM_LEVEL_INC = 1, BPF_CSUM_LEVEL_DEC = 2, BPF_CSUM_LEVEL_RESET = 3, }; enum { BPF_F_INGRESS = 1, }; enum { IPSTATS_MIB_NUM = 0, IPSTATS_MIB_INPKTS = 1, IPSTATS_MIB_INOCTETS = 2, IPSTATS_MIB_INDELIVERS = 3, IPSTATS_MIB_OUTFORWDATAGRAMS = 4, IPSTATS_MIB_OUTREQUESTS = 5, IPSTATS_MIB_OUTOCTETS = 6, IPSTATS_MIB_INHDRERRORS = 7, IPSTATS_MIB_INTOOBIGERRORS = 8, IPSTATS_MIB_INNOROUTES = 9, IPSTATS_MIB_INADDRERRORS = 10, IPSTATS_MIB_INUNKNOWNPROTOS = 11, IPSTATS_MIB_INTRUNCATEDPKTS = 12, IPSTATS_MIB_INDISCARDS = 13, IPSTATS_MIB_OUTDISCARDS = 14, IPSTATS_MIB_OUTNOROUTES = 15, IPSTATS_MIB_REASMTIMEOUT = 16, IPSTATS_MIB_REASMREQDS = 17, IPSTATS_MIB_REASMOKS = 18, IPSTATS_MIB_REASMFAILS = 19, IPSTATS_MIB_FRAGOKS = 20, IPSTATS_MIB_FRAGFAILS = 21, IPSTATS_MIB_FRAGCREATES = 22, IPSTATS_MIB_INMCASTPKTS = 23, IPSTATS_MIB_OUTMCASTPKTS = 24, IPSTATS_MIB_INBCASTPKTS = 25, IPSTATS_MIB_OUTBCASTPKTS = 26, IPSTATS_MIB_INMCASTOCTETS = 27, IPSTATS_MIB_OUTMCASTOCTETS = 28, IPSTATS_MIB_INBCASTOCTETS = 29, IPSTATS_MIB_OUTBCASTOCTETS = 30, IPSTATS_MIB_CSUMERRORS = 31, IPSTATS_MIB_NOECTPKTS = 32, IPSTATS_MIB_ECT1PKTS = 33, IPSTATS_MIB_ECT0PKTS = 34, IPSTATS_MIB_CEPKTS = 35, IPSTATS_MIB_REASM_OVERLAPS = 36, IPSTATS_MIB_OUTPKTS = 37, __IPSTATS_MIB_MAX = 38, }; enum { BPF_F_ADJ_ROOM_FIXED_GSO = 1, BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 2, BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 4, BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 8, BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 16, BPF_F_ADJ_ROOM_NO_CSUM_RESET = 32, BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 64, BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = 128, BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = 256, }; enum { BPF_ADJ_ROOM_ENCAP_L2_MASK = 255, BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, }; enum bpf_adj_room_mode { BPF_ADJ_ROOM_NET = 0, BPF_ADJ_ROOM_MAC = 1, }; struct xdp_sock { struct sock sk; long: 64; long: 64; long: 
64; long: 64; long: 64; struct xsk_queue *rx; struct net_device *dev; struct xdp_umem *umem; struct list_head flush_node; struct xsk_buff_pool *pool; u16 queue_id; bool zc; bool sg; enum { XSK_READY = 0, XSK_BOUND = 1, XSK_UNBOUND = 2, } state; long: 64; struct xsk_queue *tx; struct list_head tx_list; spinlock_t rx_lock; u64 rx_dropped; u64 rx_queue_full; struct sk_buff *skb; struct list_head map_list; spinlock_t map_list_lock; struct mutex mutex; struct xsk_queue *fq_tmp; struct xsk_queue *cq_tmp; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; enum { BPF_F_TUNINFO_IPV6 = 1, }; enum { BPF_F_TUNINFO_FLAGS = 16, }; enum { BPF_F_ZERO_CSUM_TX = 2, BPF_F_DONT_FRAGMENT = 4, BPF_F_SEQ_NUMBER = 8, BPF_F_NO_TUNNEL_KEY = 16, }; enum { TCP_BPF_IW = 1001, TCP_BPF_SNDCWND_CLAMP = 1002, TCP_BPF_DELACK_MAX = 1003, TCP_BPF_RTO_MIN = 1004, TCP_BPF_SYN = 1005, TCP_BPF_SYN_IP = 1006, TCP_BPF_SYN_MAC = 1007, }; enum { BPF_SOCK_OPS_RTO_CB_FLAG = 1, BPF_SOCK_OPS_RETRANS_CB_FLAG = 2, BPF_SOCK_OPS_STATE_CB_FLAG = 4, BPF_SOCK_OPS_RTT_CB_FLAG = 8, BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = 16, BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = 32, BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = 64, BPF_SOCK_OPS_ALL_CB_FLAGS = 127, }; enum { BPF_FIB_LOOKUP_DIRECT = 1, BPF_FIB_LOOKUP_OUTPUT = 2, BPF_FIB_LOOKUP_SKIP_NEIGH = 4, BPF_FIB_LOOKUP_TBID = 8, BPF_FIB_LOOKUP_SRC = 16, }; enum { BPF_FIB_LKUP_RET_SUCCESS = 0, BPF_FIB_LKUP_RET_BLACKHOLE = 1, BPF_FIB_LKUP_RET_UNREACHABLE = 2, BPF_FIB_LKUP_RET_PROHIBIT = 3, BPF_FIB_LKUP_RET_NOT_FWDED = 4, BPF_FIB_LKUP_RET_FWD_DISABLED = 5, BPF_FIB_LKUP_RET_UNSUPP_LWT = 6, BPF_FIB_LKUP_RET_NO_NEIGH = 7, BPF_FIB_LKUP_RET_FRAG_NEEDED = 8, BPF_FIB_LKUP_RET_NO_SRC_ADDR = 9, }; enum rt_class_t { RT_TABLE_UNSPEC = 0, RT_TABLE_COMPAT = 252, RT_TABLE_DEFAULT = 253, RT_TABLE_MAIN = 254, RT_TABLE_LOCAL = 255, RT_TABLE_MAX = 4294967295, }; enum bpf_check_mtu_ret { BPF_MTU_CHK_RET_SUCCESS = 0, BPF_MTU_CHK_RET_FRAG_NEEDED = 1, BPF_MTU_CHK_RET_SEGS_TOOBIG = 2, }; enum bpf_check_mtu_flags { BPF_MTU_CHK_SEGS = 1, }; enum { BPF_LOAD_HDR_OPT_TCP_SYN = 1, }; enum { BPF_SOCK_OPS_VOID = 0, BPF_SOCK_OPS_TIMEOUT_INIT = 1, BPF_SOCK_OPS_RWND_INIT = 2, BPF_SOCK_OPS_TCP_CONNECT_CB = 3, BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 4, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 5, BPF_SOCK_OPS_NEEDS_ECN = 6, BPF_SOCK_OPS_BASE_RTT = 7, BPF_SOCK_OPS_RTO_CB = 8, BPF_SOCK_OPS_RETRANS_CB = 9, BPF_SOCK_OPS_STATE_CB = 10, BPF_SOCK_OPS_TCP_LISTEN_CB = 11, BPF_SOCK_OPS_RTT_CB = 12, BPF_SOCK_OPS_PARSE_HDR_OPT_CB = 13, BPF_SOCK_OPS_HDR_OPT_LEN_CB = 14, BPF_SOCK_OPS_WRITE_HDR_OPT_CB = 15, }; enum { BPF_SKB_TSTAMP_UNSPEC = 0, BPF_SKB_TSTAMP_DELIVERY_MONO = 1, }; enum { BPF_SK_LOOKUP_F_REPLACE = 1, BPF_SK_LOOKUP_F_NO_REUSEPORT = 2, }; typedef u64 (*btf_bpf_skb_get_pay_offset)(struct sk_buff *); typedef u64 (*btf_bpf_skb_get_nlattr)(struct sk_buff *, u32, u32); typedef u64 (*btf_bpf_skb_get_nlattr_nest)(struct sk_buff *, u32, u32); typedef u64 (*btf_bpf_skb_load_helper_8)(const struct sk_buff *, const void *, int, int); typedef u64 (*btf_bpf_skb_load_helper_8_no_cache)(const struct sk_buff *, int); typedef u64 (*btf_bpf_skb_load_helper_16)(const struct sk_buff *, const void *, int, int); typedef u64 (*btf_bpf_skb_load_helper_16_no_cache)(const struct sk_buff *, int); typedef u64 (*btf_bpf_skb_load_helper_32)(const struct sk_buff *, const void *, int, int); typedef u64 (*btf_bpf_skb_load_helper_32_no_cache)(const struct sk_buff *, int); typedef u64 (*btf_bpf_skb_store_bytes)(struct sk_buff *, u32, const void *, u32, u64); typedef u64 
(*btf_bpf_skb_load_bytes)(const struct sk_buff *, u32, void *, u32); typedef u64 (*btf_bpf_flow_dissector_load_bytes)(const struct bpf_flow_dissector *, u32, void *, u32); typedef u64 (*btf_bpf_skb_load_bytes_relative)(const struct sk_buff *, u32, void *, u32, u32); typedef u64 (*btf_bpf_skb_pull_data)(struct sk_buff *, u32); typedef u64 (*btf_bpf_sk_fullsock)(struct sock *); typedef u64 (*btf_sk_skb_pull_data)(struct sk_buff *, u32); typedef u64 (*btf_bpf_l3_csum_replace)(struct sk_buff *, u32, u64, u64, u64); typedef u64 (*btf_bpf_l4_csum_replace)(struct sk_buff *, u32, u64, u64, u64); typedef u64 (*btf_bpf_csum_diff)(__be32 *, u32, __be32 *, u32, __wsum); typedef u64 (*btf_bpf_csum_update)(struct sk_buff *, __wsum); typedef u64 (*btf_bpf_csum_level)(struct sk_buff *, u64); typedef u64 (*btf_bpf_clone_redirect)(struct sk_buff *, u32, u64); typedef u64 (*btf_bpf_redirect)(u32, u64); typedef u64 (*btf_bpf_redirect_peer)(u32, u64); struct bpf_redir_neigh; typedef u64 (*btf_bpf_redirect_neigh)(u32, struct bpf_redir_neigh *, int, u64); struct bpf_redir_neigh { __u32 nh_family; union { __be32 ipv4_nh; __u32 ipv6_nh[4]; }; }; typedef u64 (*btf_bpf_msg_apply_bytes)(struct sk_msg *, u32); typedef u64 (*btf_bpf_msg_cork_bytes)(struct sk_msg *, u32); typedef u64 (*btf_bpf_msg_pull_data)(struct sk_msg *, u32, u32, u64); typedef u64 (*btf_bpf_msg_push_data)(struct sk_msg *, u32, u32, u64); typedef u64 (*btf_bpf_msg_pop_data)(struct sk_msg *, u32, u32, u64); typedef u64 (*btf_bpf_get_cgroup_classid)(const struct sk_buff *); typedef u64 (*btf_bpf_get_route_realm)(const struct sk_buff *); typedef u64 (*btf_bpf_get_hash_recalc)(struct sk_buff *); typedef u64 (*btf_bpf_set_hash_invalid)(struct sk_buff *); typedef u64 (*btf_bpf_set_hash)(struct sk_buff *, u32); typedef u64 (*btf_bpf_skb_vlan_push)(struct sk_buff *, __be16, u16); typedef u64 (*btf_bpf_skb_vlan_pop)(struct sk_buff *); typedef u64 (*btf_bpf_skb_change_proto)(struct sk_buff *, __be16, u64); typedef u64 (*btf_bpf_skb_change_type)(struct sk_buff *, u32); typedef u64 (*btf_sk_skb_adjust_room)(struct sk_buff *, s32, u32, u64); typedef u64 (*btf_bpf_skb_adjust_room)(struct sk_buff *, s32, u32, u64); typedef u64 (*btf_bpf_skb_change_tail)(struct sk_buff *, u32, u64); typedef u64 (*btf_sk_skb_change_tail)(struct sk_buff *, u32, u64); typedef u64 (*btf_bpf_skb_change_head)(struct sk_buff *, u32, u64); typedef u64 (*btf_sk_skb_change_head)(struct sk_buff *, u32, u64); typedef u64 (*btf_bpf_xdp_get_buff_len)(struct xdp_buff *); typedef u64 (*btf_bpf_xdp_adjust_head)(struct xdp_buff *, int); typedef u64 (*btf_bpf_xdp_load_bytes)(struct xdp_buff *, u32, void *, u32); typedef u64 (*btf_bpf_xdp_store_bytes)(struct xdp_buff *, u32, void *, u32); typedef u64 (*btf_bpf_xdp_adjust_tail)(struct xdp_buff *, int); typedef u64 (*btf_bpf_xdp_adjust_meta)(struct xdp_buff *, int); typedef u64 (*btf_bpf_xdp_redirect)(u32, u64); typedef u64 (*btf_bpf_xdp_redirect_map)(struct bpf_map *, u64, u64); typedef u64 (*btf_bpf_skb_event_output)(struct sk_buff *, struct bpf_map *, u64, void *, u64); struct bpf_tunnel_key; typedef u64 (*btf_bpf_skb_get_tunnel_key)(struct sk_buff *, struct bpf_tunnel_key *, u32, u64); struct bpf_tunnel_key { __u32 tunnel_id; union { __u32 remote_ipv4; __u32 remote_ipv6[4]; }; __u8 tunnel_tos; __u8 tunnel_ttl; union { __u16 tunnel_ext; __be16 tunnel_flags; }; __u32 tunnel_label; union { __u32 local_ipv4; __u32 local_ipv6[4]; }; }; typedef u64 (*btf_bpf_skb_get_tunnel_opt)(struct sk_buff *, u8 *, u32); typedef u64 (*btf_bpf_skb_set_tunnel_key)(struct 
sk_buff *, const struct bpf_tunnel_key *, u32, u64); typedef u64 (*btf_bpf_skb_set_tunnel_opt)(struct sk_buff *, const u8 *, u32); typedef u64 (*btf_bpf_skb_under_cgroup)(struct sk_buff *, struct bpf_map *, u32); typedef u64 (*btf_bpf_skb_cgroup_id)(const struct sk_buff *); typedef u64 (*btf_bpf_skb_ancestor_cgroup_id)(const struct sk_buff *, int); typedef u64 (*btf_bpf_sk_cgroup_id)(struct sock *); typedef u64 (*btf_bpf_sk_ancestor_cgroup_id)(struct sock *, int); typedef u64 (*btf_bpf_xdp_event_output)(struct xdp_buff *, struct bpf_map *, u64, void *, u64); typedef u64 (*btf_bpf_get_socket_cookie)(struct sk_buff *); typedef u64 (*btf_bpf_get_socket_cookie_sock_addr)(struct bpf_sock_addr_kern *); typedef u64 (*btf_bpf_get_socket_cookie_sock)(struct sock *); typedef u64 (*btf_bpf_get_socket_ptr_cookie)(struct sock *); typedef u64 (*btf_bpf_get_socket_cookie_sock_ops)(struct bpf_sock_ops_kern *); typedef u64 (*btf_bpf_get_netns_cookie_sock)(struct sock *); typedef u64 (*btf_bpf_get_netns_cookie_sock_addr)(struct bpf_sock_addr_kern *); typedef u64 (*btf_bpf_get_netns_cookie_sock_ops)(struct bpf_sock_ops_kern *); typedef u64 (*btf_bpf_get_netns_cookie_sk_msg)(struct sk_msg *); typedef u64 (*btf_bpf_get_socket_uid)(struct sk_buff *); typedef u64 (*btf_bpf_sk_setsockopt)(struct sock *, int, int, char *, int); typedef u64 (*btf_bpf_sk_getsockopt)(struct sock *, int, int, char *, int); typedef u64 (*btf_bpf_unlocked_sk_setsockopt)(struct sock *, int, int, char *, int); typedef u64 (*btf_bpf_unlocked_sk_getsockopt)(struct sock *, int, int, char *, int); typedef u64 (*btf_bpf_sock_addr_setsockopt)(struct bpf_sock_addr_kern *, int, int, char *, int); typedef u64 (*btf_bpf_sock_addr_getsockopt)(struct bpf_sock_addr_kern *, int, int, char *, int); typedef u64 (*btf_bpf_sock_ops_setsockopt)(struct bpf_sock_ops_kern *, int, int, char *, int); typedef u64 (*btf_bpf_sock_ops_getsockopt)(struct bpf_sock_ops_kern *, int, int, char *, int); typedef u64 (*btf_bpf_sock_ops_cb_flags_set)(struct bpf_sock_ops_kern *, int); typedef u64 (*btf_bpf_bind)(struct bpf_sock_addr_kern *, struct sockaddr *, int); struct bpf_xfrm_state; typedef u64 (*btf_bpf_skb_get_xfrm_state)(struct sk_buff *, u32, struct bpf_xfrm_state *, u32, u64); struct bpf_xfrm_state { __u32 reqid; __u32 spi; __u16 family; __u16 ext; union { __u32 remote_ipv4; __u32 remote_ipv6[4]; }; }; struct bpf_fib_lookup; typedef u64 (*btf_bpf_xdp_fib_lookup)(struct xdp_buff *, struct bpf_fib_lookup *, int, u32); struct bpf_fib_lookup { __u8 family; __u8 l4_protocol; __be16 sport; __be16 dport; union { __u16 tot_len; __u16 mtu_result; }; __u32 ifindex; union { __u8 tos; __be32 flowinfo; __u32 rt_metric; }; union { __be32 ipv4_src; __u32 ipv6_src[4]; }; union { __be32 ipv4_dst; __u32 ipv6_dst[4]; }; union { struct { __be16 h_vlan_proto; __be16 h_vlan_TCI; }; __u32 tbid; }; __u8 smac[6]; __u8 dmac[6]; }; typedef u64 (*btf_bpf_skb_fib_lookup)(struct sk_buff *, struct bpf_fib_lookup *, int, u32); typedef u64 (*btf_bpf_skb_check_mtu)(struct sk_buff *, u32, u32 *, s32, u64); typedef u64 (*btf_bpf_xdp_check_mtu)(struct xdp_buff *, u32, u32 *, s32, u64); typedef u64 (*btf_bpf_lwt_in_push_encap)(struct sk_buff *, u32, void *, u32); typedef u64 (*btf_bpf_lwt_xmit_push_encap)(struct sk_buff *, u32, void *, u32); struct bpf_sock_tuple; typedef u64 (*btf_bpf_skc_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); struct bpf_sock_tuple { union { struct { __be32 saddr; __be32 daddr; __be16 sport; __be16 dport; } ipv4; struct { __be32 saddr[4]; __be32 
daddr[4]; __be16 sport; __be16 dport; } ipv6; }; }; typedef u64 (*btf_bpf_sk_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); typedef u64 (*btf_bpf_sk_lookup_udp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); typedef u64 (*btf_bpf_tc_skc_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); typedef u64 (*btf_bpf_tc_sk_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); typedef u64 (*btf_bpf_tc_sk_lookup_udp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); typedef u64 (*btf_bpf_sk_release)(struct sock *); typedef u64 (*btf_bpf_xdp_sk_lookup_udp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); typedef u64 (*btf_bpf_xdp_skc_lookup_tcp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); typedef u64 (*btf_bpf_xdp_sk_lookup_tcp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); typedef u64 (*btf_bpf_sock_addr_skc_lookup_tcp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); typedef u64 (*btf_bpf_sock_addr_sk_lookup_tcp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); typedef u64 (*btf_bpf_sock_addr_sk_lookup_udp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); struct bpf_tcp_sock { __u32 snd_cwnd; __u32 srtt_us; __u32 rtt_min; __u32 snd_ssthresh; __u32 rcv_nxt; __u32 snd_nxt; __u32 snd_una; __u32 mss_cache; __u32 ecn_flags; __u32 rate_delivered; __u32 rate_interval_us; __u32 packets_out; __u32 retrans_out; __u32 total_retrans; __u32 segs_in; __u32 data_segs_in; __u32 segs_out; __u32 data_segs_out; __u32 lost_out; __u32 sacked_out; __u64 bytes_received; __u64 bytes_acked; __u32 dsack_dups; __u32 delivered; __u32 delivered_ce; __u32 icsk_retransmits; }; typedef u64 (*btf_bpf_tcp_sock)(struct sock *); typedef u64 (*btf_bpf_get_listener_sock)(struct sock *); typedef u64 (*btf_bpf_skb_ecn_set_ce)(struct sk_buff *); typedef u64 (*btf_bpf_tcp_check_syncookie)(struct sock *, void *, u32, struct tcphdr *, u32); typedef u64 (*btf_bpf_tcp_gen_syncookie)(struct sock *, void *, u32, struct tcphdr *, u32); typedef u64 (*btf_bpf_sk_assign)(struct sk_buff *, struct sock *, u64); typedef u64 (*btf_bpf_sock_ops_load_hdr_opt)(struct bpf_sock_ops_kern *, void *, u32, u64); typedef u64 (*btf_bpf_sock_ops_store_hdr_opt)(struct bpf_sock_ops_kern *, const void *, u32, u64); typedef u64 (*btf_bpf_sock_ops_reserve_hdr_opt)(struct bpf_sock_ops_kern *, u32, u64); typedef u64 (*btf_bpf_skb_set_tstamp)(struct sk_buff *, u64, u32); typedef u64 (*btf_sk_select_reuseport)(struct sk_reuseport_kern *, struct bpf_map *, void *, u32); typedef u64 (*btf_sk_reuseport_load_bytes)(const struct sk_reuseport_kern *, u32, void *, u32); typedef u64 (*btf_sk_reuseport_load_bytes_relative)(const struct sk_reuseport_kern *, u32, void *, u32, u32); typedef u64 (*btf_bpf_sk_lookup_assign)(struct bpf_sk_lookup_kern *, struct sock *, u64); typedef u64 (*btf_bpf_skc_to_tcp6_sock)(struct sock *); typedef u64 (*btf_bpf_skc_to_tcp_sock)(struct sock *); typedef u64 (*btf_bpf_skc_to_tcp_timewait_sock)(struct sock *); typedef u64 (*btf_bpf_skc_to_tcp_request_sock)(struct sock *); typedef u64 (*btf_bpf_skc_to_udp6_sock)(struct sock *); typedef u64 (*btf_bpf_skc_to_unix_sock)(struct sock *); typedef u64 (*btf_bpf_skc_to_mptcp_sock)(struct sock *); typedef u64 (*btf_bpf_sock_from_file)(struct file *); struct strp_msg { int full_len; int offset; }; struct tls_strparser { struct sock *sk; u32 mark: 8; u32 stopped: 1; u32 copy_mode: 1; u32 mixed_decrypted: 1; bool msg_ready; struct 
strp_msg stm; struct sk_buff *anchor; struct work_struct work; }; struct tls_sw_context_rx { struct crypto_aead *aead_recv; struct crypto_wait async_wait; struct sk_buff_head rx_list; void (*saved_data_ready)(struct sock *); u8 reader_present; u8 async_capable: 1; u8 zc_capable: 1; u8 reader_contended: 1; struct tls_strparser strp; atomic_t decrypt_pending; struct sk_buff_head async_hold; struct wait_queue_head wq; u64 android_kabi_reserved1; }; struct tcp6_sock { struct tcp_sock tcp; struct ipv6_pinfo inet6; }; struct inet_timewait_sock { struct sock_common __tw_common; __u32 tw_mark; volatile unsigned char tw_substate; unsigned char tw_rcv_wscale; __be16 tw_sport; unsigned int tw_transparent: 1; unsigned int tw_flowlabel: 20; unsigned int tw_pad: 3; unsigned int tw_tos: 8; u32 tw_txhash; u32 tw_priority; struct timer_list tw_timer; struct inet_bind_bucket *tw_tb; struct inet_bind2_bucket *tw_tb2; struct hlist_node tw_bind2_node; }; struct tcp_timewait_sock { struct inet_timewait_sock tw_sk; u32 tw_rcv_wnd; u32 tw_ts_offset; u32 tw_ts_recent; u32 tw_last_oow_ack_time; int tw_ts_recent_stamp; u32 tw_tx_delay; }; struct udp6_sock { struct udp_sock udp; struct ipv6_pinfo inet6; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct mptcp_sock {}; struct tls_prot_info { u16 version; u16 cipher_type; u16 prepend_size; u16 tag_size; u16 overhead_size; u16 iv_size; u16 salt_size; u16 rec_seq_size; u16 aad_size; u16 tail_size; }; struct cipher_context { char *iv; char *rec_seq; }; struct tls_crypto_info { __u16 version; __u16 cipher_type; }; struct tls12_crypto_info_aes_gcm_128 { struct tls_crypto_info info; unsigned char iv[8]; unsigned char key[16]; unsigned char salt[4]; unsigned char rec_seq[8]; }; struct tls12_crypto_info_aes_gcm_256 { struct tls_crypto_info info; unsigned char iv[8]; unsigned char key[32]; unsigned char salt[4]; unsigned char rec_seq[8]; }; struct tls12_crypto_info_chacha20_poly1305 { struct tls_crypto_info info; unsigned char iv[12]; unsigned char key[32]; unsigned char salt[0]; unsigned char rec_seq[8]; }; struct tls12_crypto_info_sm4_gcm { struct tls_crypto_info info; unsigned char iv[8]; unsigned char key[16]; unsigned char salt[4]; unsigned char rec_seq[8]; }; struct tls12_crypto_info_sm4_ccm { struct tls_crypto_info info; unsigned char iv[8]; unsigned char key[16]; unsigned char salt[4]; unsigned char rec_seq[8]; }; union tls_crypto_context { struct tls_crypto_info info; union { struct tls12_crypto_info_aes_gcm_128 aes_gcm_128; struct tls12_crypto_info_aes_gcm_256 aes_gcm_256; struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305; struct tls12_crypto_info_sm4_gcm sm4_gcm; struct tls12_crypto_info_sm4_ccm sm4_ccm; }; }; struct tls_context { struct tls_prot_info prot_info; u8 tx_conf: 3; u8 rx_conf: 3; u8 zerocopy_sendfile: 1; u8 rx_no_pad: 1; int (*push_pending_record)(struct sock *, int); void (*sk_write_space)(struct sock *); void *priv_ctx_tx; void *priv_ctx_rx; struct net_device __attribute__((btf_type_tag("rcu"))) *netdev; struct cipher_context tx; struct cipher_context rx; struct scatterlist *partially_sent_record; u16 partially_sent_offset; bool splicing_pages; bool pending_open_record_frags; struct mutex tx_lock; unsigned long flags; struct proto *sk_proto; struct sock *sk; void (*sk_destruct)(struct sock *); union tls_crypto_context crypto_send; union tls_crypto_context crypto_recv; struct list_head list; refcount_t refcount; struct callback_head rcu; }; struct fib_result { __be32 prefix; unsigned char prefixlen; unsigned char nh_sel; unsigned char 
type; unsigned char scope; u32 tclassid; struct fib_nh_common *nhc; struct fib_info *fi; struct fib_table *table; struct hlist_head *fa_head; }; struct fib6_result { struct fib6_nh *nh; struct fib6_info *f6i; u32 fib6_flags; u8 fib6_type; struct rt6_info *rt6; }; struct sock_diag_handler { __u8 family; int (*dump)(struct sk_buff *, struct nlmsghdr *); int (*get_info)(struct sk_buff *, struct sock *); int (*destroy)(struct sk_buff *, struct nlmsghdr *); }; struct broadcast_sk { struct sock *sk; struct work_struct work; }; struct sock_diag_req { __u8 sdiag_family; __u8 sdiag_protocol; }; enum hwtstamp_tx_types { HWTSTAMP_TX_OFF = 0, HWTSTAMP_TX_ON = 1, HWTSTAMP_TX_ONESTEP_SYNC = 2, HWTSTAMP_TX_ONESTEP_P2P = 3, __HWTSTAMP_TX_CNT = 4, }; enum hwtstamp_rx_filters { HWTSTAMP_FILTER_NONE = 0, HWTSTAMP_FILTER_ALL = 1, HWTSTAMP_FILTER_SOME = 2, HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 3, HWTSTAMP_FILTER_PTP_V1_L4_SYNC = 4, HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ = 5, HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 6, HWTSTAMP_FILTER_PTP_V2_L4_SYNC = 7, HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ = 8, HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 9, HWTSTAMP_FILTER_PTP_V2_L2_SYNC = 10, HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ = 11, HWTSTAMP_FILTER_PTP_V2_EVENT = 12, HWTSTAMP_FILTER_PTP_V2_SYNC = 13, HWTSTAMP_FILTER_PTP_V2_DELAY_REQ = 14, HWTSTAMP_FILTER_NTP_ALL = 15, __HWTSTAMP_FILTER_CNT = 16, }; enum hwtstamp_flags { HWTSTAMP_FLAG_BONDED_PHC_INDEX = 1, HWTSTAMP_FLAG_LAST = 1, HWTSTAMP_FLAG_MASK = 1, }; struct hwtstamp_config { int flags; int tx_type; int rx_filter; }; struct compat_ifconf { compat_int_t ifc_len; compat_caddr_t ifcbuf; }; struct tso_t { int next_frag_idx; int size; void *data; u16 ip_id; u8 tlen; bool ipv6; u32 tcp_seq; }; enum fib_event_type { FIB_EVENT_ENTRY_REPLACE = 0, FIB_EVENT_ENTRY_APPEND = 1, FIB_EVENT_ENTRY_ADD = 2, FIB_EVENT_ENTRY_DEL = 3, FIB_EVENT_RULE_ADD = 4, FIB_EVENT_RULE_DEL = 5, FIB_EVENT_NH_ADD = 6, FIB_EVENT_NH_DEL = 7, FIB_EVENT_VIF_ADD = 8, FIB_EVENT_VIF_DEL = 9, }; struct fib_notifier_net { struct list_head fib_notifier_ops; struct atomic_notifier_head fib_chain; }; struct fib_notifier_info { int family; struct netlink_ext_ack *extack; }; struct xdp_frame_bulk { int count; void *xa; void *q[16]; }; struct xdp_attachment_info { struct bpf_prog *prog; u32 flags; }; enum flow_action_id { FLOW_ACTION_ACCEPT = 0, FLOW_ACTION_DROP = 1, FLOW_ACTION_TRAP = 2, FLOW_ACTION_GOTO = 3, FLOW_ACTION_REDIRECT = 4, FLOW_ACTION_MIRRED = 5, FLOW_ACTION_REDIRECT_INGRESS = 6, FLOW_ACTION_MIRRED_INGRESS = 7, FLOW_ACTION_VLAN_PUSH = 8, FLOW_ACTION_VLAN_POP = 9, FLOW_ACTION_VLAN_MANGLE = 10, FLOW_ACTION_TUNNEL_ENCAP = 11, FLOW_ACTION_TUNNEL_DECAP = 12, FLOW_ACTION_MANGLE = 13, FLOW_ACTION_ADD = 14, FLOW_ACTION_CSUM = 15, FLOW_ACTION_MARK = 16, FLOW_ACTION_PTYPE = 17, FLOW_ACTION_PRIORITY = 18, FLOW_ACTION_RX_QUEUE_MAPPING = 19, FLOW_ACTION_WAKE = 20, FLOW_ACTION_QUEUE = 21, FLOW_ACTION_SAMPLE = 22, FLOW_ACTION_POLICE = 23, FLOW_ACTION_CT = 24, FLOW_ACTION_CT_METADATA = 25, FLOW_ACTION_MPLS_PUSH = 26, FLOW_ACTION_MPLS_POP = 27, FLOW_ACTION_MPLS_MANGLE = 28, FLOW_ACTION_GATE = 29, FLOW_ACTION_PPPOE_PUSH = 30, FLOW_ACTION_JUMP = 31, FLOW_ACTION_PIPE = 32, FLOW_ACTION_VLAN_PUSH_ETH = 33, FLOW_ACTION_VLAN_POP_ETH = 34, FLOW_ACTION_CONTINUE = 35, NUM_FLOW_ACTIONS = 36, }; enum flow_action_hw_stats { FLOW_ACTION_HW_STATS_IMMEDIATE = 1, FLOW_ACTION_HW_STATS_DELAYED = 2, FLOW_ACTION_HW_STATS_ANY = 3, FLOW_ACTION_HW_STATS_DISABLED = 4, FLOW_ACTION_HW_STATS_DONT_CARE = 7, }; enum flow_action_mangle_base { FLOW_ACT_MANGLE_UNSPEC = 0, 
FLOW_ACT_MANGLE_HDR_TYPE_ETH = 1, FLOW_ACT_MANGLE_HDR_TYPE_IP4 = 2, FLOW_ACT_MANGLE_HDR_TYPE_IP6 = 3, FLOW_ACT_MANGLE_HDR_TYPE_TCP = 4, FLOW_ACT_MANGLE_HDR_TYPE_UDP = 5, }; enum offload_act_command { FLOW_ACT_REPLACE = 0, FLOW_ACT_DESTROY = 1, FLOW_ACT_STATS = 2, }; enum flow_block_binder_type { FLOW_BLOCK_BINDER_TYPE_UNSPEC = 0, FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS = 1, FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS = 2, FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP = 3, FLOW_BLOCK_BINDER_TYPE_RED_MARK = 4, }; enum flow_block_command { FLOW_BLOCK_BIND = 0, FLOW_BLOCK_UNBIND = 1, }; struct flow_block_cb; struct flow_block_indr { struct list_head list; struct net_device *dev; struct Qdisc *sch; enum flow_block_binder_type binder_type; void *data; void *cb_priv; void (*cleanup)(struct flow_block_cb *); }; struct flow_block_cb { struct list_head driver_list; struct list_head list; flow_setup_cb_t *cb; void *cb_ident; void *cb_priv; void (*release)(void *); struct flow_block_indr indr; unsigned int refcnt; }; typedef int flow_indr_block_bind_cb_t(struct net_device *, struct Qdisc *, void *, enum tc_setup_type, void *, void *, void (*)(struct flow_block_cb *)); struct flow_indr_dev { struct list_head list; flow_indr_block_bind_cb_t *cb; void *cb_priv; refcount_t refcnt; }; struct flow_indir_dev_info { void *data; struct net_device *dev; struct Qdisc *sch; enum tc_setup_type type; void (*cleanup)(struct flow_block_cb *); struct list_head list; enum flow_block_command command; enum flow_block_binder_type binder_type; struct list_head *cb_list; }; struct flow_block_offload { enum flow_block_command command; enum flow_block_binder_type binder_type; bool block_shared; bool unlocked_driver_cb; struct net *net; struct flow_block *block; struct list_head cb_list; struct list_head *driver_block_list; struct netlink_ext_ack *extack; struct Qdisc *sch; struct list_head *cb_list_head; }; struct flow_match { struct flow_dissector *dissector; void *mask; void *key; }; typedef void (*action_destr)(void *); struct nf_flowtable; struct action_gate_entry; struct flow_action_cookie; struct flow_action_entry { enum flow_action_id id; u32 hw_index; unsigned long cookie; u64 miss_cookie; enum flow_action_hw_stats hw_stats; action_destr destructor; void *destructor_priv; union { u32 chain_index; struct net_device *dev; struct { u16 vid; __be16 proto; u8 prio; } vlan; struct { unsigned char dst[6]; unsigned char src[6]; } vlan_push_eth; struct { enum flow_action_mangle_base htype; u32 offset; u32 mask; u32 val; } mangle; struct ip_tunnel_info *tunnel; u32 csum_flags; u32 mark; u16 ptype; u16 rx_queue; u32 priority; struct { u32 ctx; u32 index; u8 vf; } queue; struct { struct psample_group *psample_group; u32 rate; u32 trunc_size; bool truncate; } sample; struct { u32 burst; u64 rate_bytes_ps; u64 peakrate_bytes_ps; u32 avrate; u16 overhead; u64 burst_pkt; u64 rate_pkt_ps; u32 mtu; struct { enum flow_action_id act_id; u32 extval; } exceed; struct { enum flow_action_id act_id; u32 extval; } notexceed; } police; struct { int action; u16 zone; struct nf_flowtable *flow_table; } ct; struct { unsigned long cookie; u32 mark; u32 labels[4]; bool orig_dir; } ct_metadata; struct { u32 label; __be16 proto; u8 tc; u8 bos; u8 ttl; } mpls_push; struct { __be16 proto; } mpls_pop; struct { u32 label; u8 tc; u8 bos; u8 ttl; } mpls_mangle; struct { s32 prio; u64 basetime; u64 cycletime; u64 cycletimeext; u32 num_entries; struct action_gate_entry *entries; } gate; struct { u16 sid; } pppoe; }; struct flow_action_cookie *user_cookie; }; struct 
flow_action { unsigned int num_entries; struct flow_action_entry entries[0]; }; struct flow_rule { struct flow_match match; struct flow_action action; }; struct flow_action_cookie { u32 cookie_len; u8 cookie[0]; }; struct flow_stats { u64 pkts; u64 bytes; u64 drops; u64 lastused; enum flow_action_hw_stats used_hw_stats; bool used_hw_stats_valid; }; struct flow_offload_action { struct netlink_ext_ack *extack; enum offload_act_command command; enum flow_action_id id; u32 index; unsigned long cookie; struct flow_stats stats; struct flow_action action; }; struct flow_match_meta { struct flow_dissector_key_meta *key; struct flow_dissector_key_meta *mask; }; struct flow_match_basic { struct flow_dissector_key_basic *key; struct flow_dissector_key_basic *mask; }; struct flow_match_control { struct flow_dissector_key_control *key; struct flow_dissector_key_control *mask; }; struct flow_match_eth_addrs { struct flow_dissector_key_eth_addrs *key; struct flow_dissector_key_eth_addrs *mask; }; struct flow_match_vlan { struct flow_dissector_key_vlan *key; struct flow_dissector_key_vlan *mask; }; struct flow_match_arp { struct flow_dissector_key_arp *key; struct flow_dissector_key_arp *mask; }; struct flow_match_ipv4_addrs { struct flow_dissector_key_ipv4_addrs *key; struct flow_dissector_key_ipv4_addrs *mask; }; struct flow_match_ipv6_addrs { struct flow_dissector_key_ipv6_addrs *key; struct flow_dissector_key_ipv6_addrs *mask; }; struct flow_match_ip { struct flow_dissector_key_ip *key; struct flow_dissector_key_ip *mask; }; struct flow_match_ports { struct flow_dissector_key_ports *key; struct flow_dissector_key_ports *mask; }; struct flow_dissector_key_ports_range; struct flow_match_ports_range { struct flow_dissector_key_ports_range *key; struct flow_dissector_key_ports_range *mask; }; struct flow_dissector_key_ports_range { union { struct flow_dissector_key_ports tp; struct { struct flow_dissector_key_ports tp_min; struct flow_dissector_key_ports tp_max; }; }; }; struct flow_match_tcp { struct flow_dissector_key_tcp *key; struct flow_dissector_key_tcp *mask; }; struct flow_match_ipsec { struct flow_dissector_key_ipsec *key; struct flow_dissector_key_ipsec *mask; }; struct flow_match_icmp { struct flow_dissector_key_icmp *key; struct flow_dissector_key_icmp *mask; }; struct flow_match_mpls { struct flow_dissector_key_mpls *key; struct flow_dissector_key_mpls *mask; }; struct flow_match_enc_keyid { struct flow_dissector_key_keyid *key; struct flow_dissector_key_keyid *mask; }; struct flow_match_enc_opts { struct flow_dissector_key_enc_opts *key; struct flow_dissector_key_enc_opts *mask; }; struct flow_match_ct { struct flow_dissector_key_ct *key; struct flow_dissector_key_ct *mask; }; struct flow_match_pppoe { struct flow_dissector_key_pppoe *key; struct flow_dissector_key_pppoe *mask; }; struct flow_match_l2tpv3 { struct flow_dissector_key_l2tpv3 *key; struct flow_dissector_key_l2tpv3 *mask; }; struct offload_callbacks { struct sk_buff * (*gso_segment)(struct sk_buff *, netdev_features_t); struct sk_buff * (*gro_receive)(struct list_head *, struct sk_buff *); int (*gro_complete)(struct sk_buff *, int); }; struct packet_offload { __be16 type; u16 priority; struct offload_callbacks callbacks; struct list_head list; }; struct napi_gro_cb { union { struct { void *frag0; unsigned int frag0_len; }; struct { struct sk_buff *last; unsigned long age; }; }; int data_offset; u16 flush; u16 flush_id; u16 count; u16 proto; union { struct { u16 gro_remcsum_start; u8 same_flow: 1; u8 encap_mark: 1; u8 csum_valid: 
1; u8 csum_cnt: 3; u8 free: 2; u8 is_ipv6: 1; u8 is_fou: 1; u8 is_atomic: 1; u8 recursion_counter: 4; u8 is_flist: 1; }; struct { u16 gro_remcsum_start; u8 same_flow: 1; u8 encap_mark: 1; u8 csum_valid: 1; u8 csum_cnt: 3; u8 free: 2; u8 is_ipv6: 1; u8 is_fou: 1; u8 is_atomic: 1; u8 recursion_counter: 4; u8 is_flist: 1; } zeroed; }; __wsum csum; }; enum { NETDEV_A_DEV_IFINDEX = 1, NETDEV_A_DEV_PAD = 2, NETDEV_A_DEV_XDP_FEATURES = 3, NETDEV_A_DEV_XDP_ZC_MAX_SEGS = 4, __NETDEV_A_DEV_MAX = 5, NETDEV_A_DEV_MAX = 4, }; enum { NETDEV_CMD_DEV_GET = 1, NETDEV_CMD_DEV_ADD_NTF = 2, NETDEV_CMD_DEV_DEL_NTF = 3, NETDEV_CMD_DEV_CHANGE_NTF = 4, __NETDEV_CMD_MAX = 5, NETDEV_CMD_MAX = 4, }; enum { NETDEV_NLGRP_MGMT = 0, }; struct rx_queue_attribute { struct attribute attr; ssize_t (*show)(struct netdev_rx_queue *, char *); ssize_t (*store)(struct netdev_rx_queue *, const char *, size_t); }; struct netdev_queue_attribute { struct attribute attr; ssize_t (*show)(struct netdev_queue *, char *); ssize_t (*store)(struct netdev_queue *, const char *, size_t); }; enum { FR_ACT_UNSPEC = 0, FR_ACT_TO_TBL = 1, FR_ACT_GOTO = 2, FR_ACT_NOP = 3, FR_ACT_RES3 = 4, FR_ACT_RES4 = 5, FR_ACT_BLACKHOLE = 6, FR_ACT_UNREACHABLE = 7, FR_ACT_PROHIBIT = 8, __FR_ACT_MAX = 9, }; enum { FRA_UNSPEC = 0, FRA_DST = 1, FRA_SRC = 2, FRA_IIFNAME = 3, FRA_GOTO = 4, FRA_UNUSED2 = 5, FRA_PRIORITY = 6, FRA_UNUSED3 = 7, FRA_UNUSED4 = 8, FRA_UNUSED5 = 9, FRA_FWMARK = 10, FRA_FLOW = 11, FRA_TUN_ID = 12, FRA_SUPPRESS_IFGROUP = 13, FRA_SUPPRESS_PREFIXLEN = 14, FRA_TABLE = 15, FRA_FWMASK = 16, FRA_OIFNAME = 17, FRA_PAD = 18, FRA_L3MDEV = 19, FRA_UID_RANGE = 20, FRA_PROTOCOL = 21, FRA_IP_PROTO = 22, FRA_SPORT_RANGE = 23, FRA_DPORT_RANGE = 24, __FRA_MAX = 25, }; struct fib_rule_uid_range { __u32 start; __u32 end; }; struct fib_rule_notifier_info { struct fib_notifier_info info; struct fib_rule *rule; }; typedef void (*btf_trace_kfree_skb)(void *, struct sk_buff *, void *, enum skb_drop_reason); typedef void (*btf_trace_consume_skb)(void *, struct sk_buff *, void *); typedef void (*btf_trace_skb_copy_datagram_iovec)(void *, const struct sk_buff *, int); typedef void (*btf_trace_net_dev_start_xmit)(void *, const struct sk_buff *, const struct net_device *); typedef void (*btf_trace_net_dev_xmit)(void *, struct sk_buff *, int, struct net_device *, unsigned int); typedef void (*btf_trace_net_dev_xmit_timeout)(void *, struct net_device *, int); typedef void (*btf_trace_net_dev_queue)(void *, struct sk_buff *); typedef void (*btf_trace_netif_receive_skb)(void *, struct sk_buff *); typedef void (*btf_trace_netif_rx)(void *, struct sk_buff *); typedef void (*btf_trace_napi_gro_frags_entry)(void *, const struct sk_buff *); typedef void (*btf_trace_napi_gro_receive_entry)(void *, const struct sk_buff *); typedef void (*btf_trace_netif_receive_skb_entry)(void *, const struct sk_buff *); typedef void (*btf_trace_netif_receive_skb_list_entry)(void *, const struct sk_buff *); typedef void (*btf_trace_netif_rx_entry)(void *, const struct sk_buff *); typedef void (*btf_trace_napi_gro_frags_exit)(void *, int); typedef void (*btf_trace_napi_gro_receive_exit)(void *, int); typedef void (*btf_trace_netif_receive_skb_exit)(void *, int); typedef void (*btf_trace_netif_rx_exit)(void *, int); typedef void (*btf_trace_netif_receive_skb_list_exit)(void *, int); typedef void (*btf_trace_napi_poll)(void *, struct napi_struct *, int, int); typedef void (*btf_trace_sock_rcvqueue_full)(void *, struct sock *, struct sk_buff *); typedef void (*btf_trace_sock_exceed_buf_limit)(void *, 
struct sock *, struct proto *, long, int); typedef void (*btf_trace_inet_sock_set_state)(void *, const struct sock *, const int, const int); typedef void (*btf_trace_inet_sk_error_report)(void *, const struct sock *); typedef void (*btf_trace_sk_data_ready)(void *, const struct sock *); typedef void (*btf_trace_sock_send_length)(void *, struct sock *, int, int); typedef void (*btf_trace_sock_recv_length)(void *, struct sock *, int, int); typedef void (*btf_trace_udp_fail_queue_rcv_skb)(void *, int, struct sock *); typedef void (*btf_trace_tcp_retransmit_skb)(void *, const struct sock *, const struct sk_buff *); typedef void (*btf_trace_tcp_send_reset)(void *, const struct sock *, const struct sk_buff *); typedef void (*btf_trace_tcp_receive_reset)(void *, struct sock *); typedef void (*btf_trace_tcp_destroy_sock)(void *, struct sock *); typedef void (*btf_trace_tcp_rcv_space_adjust)(void *, struct sock *); typedef void (*btf_trace_tcp_retransmit_synack)(void *, const struct sock *, const struct request_sock *); typedef void (*btf_trace_tcp_probe)(void *, struct sock *, struct sk_buff *); typedef void (*btf_trace_tcp_bad_csum)(void *, const struct sk_buff *); typedef void (*btf_trace_tcp_cong_state_set)(void *, struct sock *, const u8); typedef void (*btf_trace_fib_table_lookup)(void *, u32, const struct flowi4 *, const struct fib_nh_common *, int); typedef void (*btf_trace_qdisc_dequeue)(void *, struct Qdisc *, const struct netdev_queue *, int, struct sk_buff *); typedef void (*btf_trace_qdisc_enqueue)(void *, struct Qdisc *, const struct netdev_queue *, struct sk_buff *); typedef void (*btf_trace_qdisc_reset)(void *, struct Qdisc *); typedef void (*btf_trace_qdisc_destroy)(void *, struct Qdisc *); typedef void (*btf_trace_qdisc_create)(void *, const struct Qdisc_ops *, struct net_device *, u32); typedef void (*btf_trace_br_fdb_add)(void *, struct ndmsg *, struct net_device *, const unsigned char *, u16, u16); struct net_bridge; struct net_bridge_port; typedef void (*btf_trace_br_fdb_external_learn_add)(void *, struct net_bridge *, struct net_bridge_port *, const unsigned char *, u16); struct bridge_id { unsigned char prio[2]; unsigned char addr[6]; }; typedef struct bridge_id bridge_id; struct bridge_mcast_other_query { struct timer_list timer; struct timer_list delay_timer; }; struct bridge_mcast_own_query { struct timer_list timer; u32 startup_sent; }; struct br_ip { union { __be32 ip4; struct in6_addr ip6; } src; union { __be32 ip4; struct in6_addr ip6; unsigned char mac_addr[6]; } dst; __be16 proto; __u16 vid; }; struct bridge_mcast_querier { struct br_ip addr; int port_ifidx; seqcount_spinlock_t seq; }; struct net_bridge_vlan; struct net_bridge_mcast { struct net_bridge *br; struct net_bridge_vlan *vlan; u32 multicast_last_member_count; u32 multicast_startup_query_count; u8 multicast_querier; u8 multicast_igmp_version; u8 multicast_router; u8 multicast_mld_version; unsigned long multicast_last_member_interval; unsigned long multicast_membership_interval; unsigned long multicast_querier_interval; unsigned long multicast_query_interval; unsigned long multicast_query_response_interval; unsigned long multicast_startup_query_interval; struct hlist_head ip4_mc_router_list; struct timer_list ip4_mc_router_timer; struct bridge_mcast_other_query ip4_other_query; struct bridge_mcast_own_query ip4_own_query; struct bridge_mcast_querier ip4_querier; struct hlist_head ip6_mc_router_list; struct timer_list ip6_mc_router_timer; struct bridge_mcast_other_query ip6_other_query; struct 
bridge_mcast_own_query ip6_own_query; struct bridge_mcast_querier ip6_querier; }; struct bridge_mcast_stats; struct net_bridge { spinlock_t lock; spinlock_t hash_lock; struct hlist_head frame_type_list; struct net_device *dev; unsigned long options; struct rhashtable fdb_hash_tbl; struct list_head port_list; u16 group_fwd_mask; u16 group_fwd_mask_required; bridge_id designated_root; bridge_id bridge_id; unsigned char topology_change; unsigned char topology_change_detected; u16 root_port; unsigned long max_age; unsigned long hello_time; unsigned long forward_delay; unsigned long ageing_time; unsigned long bridge_max_age; unsigned long bridge_hello_time; unsigned long bridge_forward_delay; unsigned long bridge_ageing_time; u32 root_path_cost; u8 group_addr[6]; enum { BR_NO_STP = 0, BR_KERNEL_STP = 1, BR_USER_STP = 2, } stp_enabled; struct net_bridge_mcast multicast_ctx; struct bridge_mcast_stats __attribute__((btf_type_tag("percpu"))) *mcast_stats; u32 hash_max; spinlock_t multicast_lock; struct rhashtable mdb_hash_tbl; struct rhashtable sg_port_tbl; struct hlist_head mcast_gc_list; struct hlist_head mdb_list; struct work_struct mcast_gc_work; struct timer_list hello_timer; struct timer_list tcn_timer; struct timer_list topology_change_timer; struct delayed_work gc_work; struct kobject *ifobj; u32 auto_cnt; struct hlist_head fdb_list; }; struct net_bridge_mcast_port { struct net_bridge_port *port; struct net_bridge_vlan *vlan; struct bridge_mcast_own_query ip4_own_query; struct timer_list ip4_mc_router_timer; struct hlist_node ip4_rlist; struct bridge_mcast_own_query ip6_own_query; struct timer_list ip6_mc_router_timer; struct hlist_node ip6_rlist; unsigned char multicast_router; u32 mdb_n_entries; u32 mdb_max_entries; }; struct br_tunnel_info { __be64 tunnel_id; struct metadata_dst __attribute__((btf_type_tag("rcu"))) *tunnel_dst; }; struct net_bridge_vlan { struct rhash_head vnode; struct rhash_head tnode; u16 vid; u16 flags; u16 priv_flags; u8 state; struct pcpu_sw_netstats __attribute__((btf_type_tag("percpu"))) *stats; union { struct net_bridge *br; struct net_bridge_port *port; }; union { refcount_t refcnt; struct net_bridge_vlan *brvlan; }; struct br_tunnel_info tinfo; union { struct net_bridge_mcast br_mcast_ctx; struct net_bridge_mcast_port port_mcast_ctx; }; u16 msti; struct list_head vlist; struct callback_head rcu; }; typedef __u16 port_id; struct bridge_stp_xstats { __u64 transition_blk; __u64 transition_fwd; __u64 rx_bpdu; __u64 tx_bpdu; __u64 rx_tcn; __u64 tx_tcn; }; struct net_bridge_port { struct net_bridge *br; struct net_device *dev; netdevice_tracker dev_tracker; struct list_head list; unsigned long flags; struct net_bridge_port __attribute__((btf_type_tag("rcu"))) *backup_port; u32 backup_nhid; u8 priority; u8 state; u16 port_no; unsigned char topology_change_ack; unsigned char config_pending; port_id port_id; port_id designated_port; bridge_id designated_root; bridge_id designated_bridge; u32 path_cost; u32 designated_cost; unsigned long designated_age; struct timer_list forward_delay_timer; struct timer_list hold_timer; struct timer_list message_age_timer; struct kobject kobj; struct callback_head rcu; struct net_bridge_mcast_port multicast_ctx; struct bridge_mcast_stats __attribute__((btf_type_tag("percpu"))) *mcast_stats; u32 multicast_eht_hosts_limit; u32 multicast_eht_hosts_cnt; struct hlist_head mglist; char sysfs_name[16]; u16 group_fwd_mask; u16 backup_redirected_cnt; struct bridge_stp_xstats stp_xstats; }; struct br_mcast_stats { __u64 igmp_v1queries[2]; __u64 
igmp_v2queries[2]; __u64 igmp_v3queries[2]; __u64 igmp_leaves[2]; __u64 igmp_v1reports[2]; __u64 igmp_v2reports[2]; __u64 igmp_v3reports[2]; __u64 igmp_parse_errors; __u64 mld_v1queries[2]; __u64 mld_v2queries[2]; __u64 mld_leaves[2]; __u64 mld_v1reports[2]; __u64 mld_v2reports[2]; __u64 mld_parse_errors; __u64 mcast_bytes[2]; __u64 mcast_packets[2]; }; struct bridge_mcast_stats { struct br_mcast_stats mstats; struct u64_stats_sync syncp; }; struct net_bridge_fdb_entry; typedef void (*btf_trace_fdb_delete)(void *, struct net_bridge *, struct net_bridge_fdb_entry *); struct mac_addr { unsigned char addr[6]; }; typedef struct mac_addr mac_addr; struct net_bridge_fdb_key { mac_addr addr; u16 vlan_id; }; struct net_bridge_fdb_entry { struct rhash_head rhnode; struct net_bridge_port *dst; struct net_bridge_fdb_key key; struct hlist_node fdb_node; unsigned long flags; long: 64; long: 64; unsigned long updated; unsigned long used; struct callback_head rcu; long: 64; long: 64; long: 64; long: 64; }; typedef void (*btf_trace_br_fdb_update)(void *, struct net_bridge *, struct net_bridge_port *, const unsigned char *, u16, unsigned long); typedef void (*btf_trace_br_mdb_full)(void *, const struct net_device *, const struct br_ip *); typedef void (*btf_trace_page_pool_release)(void *, const struct page_pool *, s32, u32, u32); typedef void (*btf_trace_page_pool_state_release)(void *, const struct page_pool *, const struct page *, u32); typedef void (*btf_trace_page_pool_state_hold)(void *, const struct page_pool *, const struct page *, u32); typedef void (*btf_trace_page_pool_update_nid)(void *, const struct page_pool *, int); typedef void (*btf_trace_neigh_create)(void *, struct neigh_table *, struct net_device *, const void *, const struct neighbour *, bool); typedef void (*btf_trace_neigh_update)(void *, struct neighbour *, const u8 *, u8, u32, u32); typedef void (*btf_trace_neigh_update_done)(void *, struct neighbour *, int); typedef void (*btf_trace_neigh_timer_handler)(void *, struct neighbour *, int); typedef void (*btf_trace_neigh_event_send_done)(void *, struct neighbour *, int); typedef void (*btf_trace_neigh_event_send_dead)(void *, struct neighbour *, int); typedef void (*btf_trace_neigh_cleanup_and_release)(void *, struct neighbour *, int); enum tcp_ca_state { TCP_CA_Open = 0, TCP_CA_Disorder = 1, TCP_CA_CWR = 2, TCP_CA_Recovery = 3, TCP_CA_Loss = 4, }; struct trace_event_raw_kfree_skb { struct trace_entry ent; void *skbaddr; void *location; unsigned short protocol; enum skb_drop_reason reason; char __data[0]; }; struct trace_event_raw_consume_skb { struct trace_entry ent; void *skbaddr; void *location; char __data[0]; }; struct trace_event_raw_skb_copy_datagram_iovec { struct trace_entry ent; const void *skbaddr; int len; char __data[0]; }; struct trace_event_raw_net_dev_start_xmit { struct trace_entry ent; u32 __data_loc_name; u16 queue_mapping; const void *skbaddr; bool vlan_tagged; u16 vlan_proto; u16 vlan_tci; u16 protocol; u8 ip_summed; unsigned int len; unsigned int data_len; int network_offset; bool transport_offset_valid; int transport_offset; u8 tx_flags; u16 gso_size; u16 gso_segs; u16 gso_type; char __data[0]; }; struct trace_event_raw_net_dev_xmit { struct trace_entry ent; void *skbaddr; unsigned int len; int rc; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_net_dev_xmit_timeout { struct trace_entry ent; u32 __data_loc_name; u32 __data_loc_driver; int queue_index; char __data[0]; }; struct trace_event_raw_net_dev_template { struct trace_entry ent; void 
*skbaddr; unsigned int len; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_net_dev_rx_verbose_template { struct trace_entry ent; u32 __data_loc_name; unsigned int napi_id; u16 queue_mapping; const void *skbaddr; bool vlan_tagged; u16 vlan_proto; u16 vlan_tci; u16 protocol; u8 ip_summed; u32 hash; bool l4_hash; unsigned int len; unsigned int data_len; unsigned int truesize; bool mac_header_valid; int mac_header; unsigned char nr_frags; u16 gso_size; u16 gso_type; char __data[0]; }; struct trace_event_raw_net_dev_rx_exit_template { struct trace_entry ent; int ret; char __data[0]; }; struct trace_event_raw_napi_poll { struct trace_entry ent; struct napi_struct *napi; u32 __data_loc_dev_name; int work; int budget; char __data[0]; }; struct trace_event_raw_sock_rcvqueue_full { struct trace_entry ent; int rmem_alloc; unsigned int truesize; int sk_rcvbuf; char __data[0]; }; struct trace_event_raw_sock_exceed_buf_limit { struct trace_entry ent; char name[32]; long sysctl_mem[3]; long allocated; int sysctl_rmem; int rmem_alloc; int sysctl_wmem; int wmem_alloc; int wmem_queued; int kind; char __data[0]; }; struct trace_event_raw_inet_sock_set_state { struct trace_entry ent; const void *skaddr; int oldstate; int newstate; __u16 sport; __u16 dport; __u16 family; __u16 protocol; __u8 saddr[4]; __u8 daddr[4]; __u8 saddr_v6[16]; __u8 daddr_v6[16]; char __data[0]; }; struct trace_event_raw_inet_sk_error_report { struct trace_entry ent; int error; __u16 sport; __u16 dport; __u16 family; __u16 protocol; __u8 saddr[4]; __u8 daddr[4]; __u8 saddr_v6[16]; __u8 daddr_v6[16]; char __data[0]; }; struct trace_event_raw_sk_data_ready { struct trace_entry ent; const void *skaddr; __u16 family; __u16 protocol; unsigned long ip; char __data[0]; }; struct trace_event_raw_sock_msg_length { struct trace_entry ent; void *sk; __u16 family; __u16 protocol; int ret; int flags; char __data[0]; }; struct trace_event_raw_udp_fail_queue_rcv_skb { struct trace_entry ent; int rc; __u16 lport; char __data[0]; }; struct trace_event_raw_tcp_event_sk_skb { struct trace_entry ent; const void *skbaddr; const void *skaddr; int state; __u16 sport; __u16 dport; __u16 family; __u8 saddr[4]; __u8 daddr[4]; __u8 saddr_v6[16]; __u8 daddr_v6[16]; char __data[0]; }; struct trace_event_raw_tcp_event_sk { struct trace_entry ent; const void *skaddr; __u16 sport; __u16 dport; __u16 family; __u8 saddr[4]; __u8 daddr[4]; __u8 saddr_v6[16]; __u8 daddr_v6[16]; __u64 sock_cookie; char __data[0]; }; struct trace_event_raw_tcp_retransmit_synack { struct trace_entry ent; const void *skaddr; const void *req; __u16 sport; __u16 dport; __u16 family; __u8 saddr[4]; __u8 daddr[4]; __u8 saddr_v6[16]; __u8 daddr_v6[16]; char __data[0]; }; struct trace_event_raw_tcp_probe { struct trace_entry ent; __u8 saddr[28]; __u8 daddr[28]; __u16 sport; __u16 dport; __u16 family; __u32 mark; __u16 data_len; __u32 snd_nxt; __u32 snd_una; __u32 snd_cwnd; __u32 ssthresh; __u32 snd_wnd; __u32 srtt; __u32 rcv_wnd; __u64 sock_cookie; char __data[0]; }; struct trace_event_raw_tcp_event_skb { struct trace_entry ent; const void *skbaddr; __u8 saddr[28]; __u8 daddr[28]; char __data[0]; }; struct trace_event_raw_tcp_cong_state_set { struct trace_entry ent; const void *skaddr; __u16 sport; __u16 dport; __u16 family; __u8 saddr[4]; __u8 daddr[4]; __u8 saddr_v6[16]; __u8 daddr_v6[16]; __u8 cong_state; char __data[0]; }; struct trace_event_raw_fib_table_lookup { struct trace_entry ent; u32 tb_id; int err; int oif; int iif; u8 proto; __u8 tos; __u8 scope; __u8 flags; __u8 
src[4]; __u8 dst[4]; __u8 gw4[4]; __u8 gw6[16]; u16 sport; u16 dport; char name[16]; char __data[0]; }; struct trace_event_raw_qdisc_dequeue { struct trace_entry ent; struct Qdisc *qdisc; const struct netdev_queue *txq; int packets; void *skbaddr; int ifindex; u32 handle; u32 parent; unsigned long txq_state; char __data[0]; }; struct trace_event_raw_qdisc_enqueue { struct trace_entry ent; struct Qdisc *qdisc; const struct netdev_queue *txq; void *skbaddr; int ifindex; u32 handle; u32 parent; char __data[0]; }; struct trace_event_raw_qdisc_reset { struct trace_entry ent; u32 __data_loc_dev; u32 __data_loc_kind; u32 parent; u32 handle; char __data[0]; }; struct trace_event_raw_qdisc_destroy { struct trace_entry ent; u32 __data_loc_dev; u32 __data_loc_kind; u32 parent; u32 handle; char __data[0]; }; struct trace_event_raw_qdisc_create { struct trace_entry ent; u32 __data_loc_dev; u32 __data_loc_kind; u32 parent; char __data[0]; }; struct trace_event_raw_br_fdb_add { struct trace_entry ent; u8 ndm_flags; u32 __data_loc_dev; unsigned char addr[6]; u16 vid; u16 nlh_flags; char __data[0]; }; struct trace_event_raw_br_fdb_external_learn_add { struct trace_entry ent; u32 __data_loc_br_dev; u32 __data_loc_dev; unsigned char addr[6]; u16 vid; char __data[0]; }; struct trace_event_raw_fdb_delete { struct trace_entry ent; u32 __data_loc_br_dev; u32 __data_loc_dev; unsigned char addr[6]; u16 vid; char __data[0]; }; struct trace_event_raw_br_fdb_update { struct trace_entry ent; u32 __data_loc_br_dev; u32 __data_loc_dev; unsigned char addr[6]; u16 vid; unsigned long flags; char __data[0]; }; struct trace_event_raw_br_mdb_full { struct trace_entry ent; u32 __data_loc_dev; int af; u16 vid; __u8 src[16]; __u8 grp[16]; __u8 grpmac[6]; char __data[0]; }; struct trace_event_raw_page_pool_release { struct trace_entry ent; const struct page_pool *pool; s32 inflight; u32 hold; u32 release; u64 cnt; char __data[0]; }; struct trace_event_raw_page_pool_state_release { struct trace_entry ent; const struct page_pool *pool; const struct page *page; u32 release; unsigned long pfn; char __data[0]; }; struct trace_event_raw_page_pool_state_hold { struct trace_entry ent; const struct page_pool *pool; const struct page *page; u32 hold; unsigned long pfn; char __data[0]; }; struct trace_event_raw_page_pool_update_nid { struct trace_entry ent; const struct page_pool *pool; int pool_nid; int new_nid; char __data[0]; }; struct trace_event_raw_neigh_create { struct trace_entry ent; u32 family; u32 __data_loc_dev; int entries; u8 created; u8 gc_exempt; u8 primary_key4[4]; u8 primary_key6[16]; char __data[0]; }; struct trace_event_raw_neigh_update { struct trace_entry ent; u32 family; u32 __data_loc_dev; u8 lladdr[32]; u8 lladdr_len; u8 flags; u8 nud_state; u8 type; u8 dead; int refcnt; __u8 primary_key4[4]; __u8 primary_key6[16]; unsigned long confirmed; unsigned long updated; unsigned long used; u8 new_lladdr[32]; u8 new_state; u32 update_flags; u32 pid; char __data[0]; }; struct trace_event_raw_neigh__update { struct trace_entry ent; u32 family; u32 __data_loc_dev; u8 lladdr[32]; u8 lladdr_len; u8 flags; u8 nud_state; u8 type; u8 dead; int refcnt; __u8 primary_key4[4]; __u8 primary_key6[16]; unsigned long confirmed; unsigned long updated; unsigned long used; u32 err; char __data[0]; }; struct trace_event_data_offsets_net_dev_start_xmit { u32 name; }; struct trace_event_data_offsets_net_dev_xmit { u32 name; }; struct trace_event_data_offsets_net_dev_template { u32 name; }; struct 
trace_event_data_offsets_net_dev_rx_verbose_template { u32 name; }; struct trace_event_data_offsets_napi_poll { u32 dev_name; }; struct trace_event_data_offsets_br_fdb_add { u32 dev; }; struct trace_event_data_offsets_br_mdb_full { u32 dev; }; struct trace_event_data_offsets_neigh_create { u32 dev; }; struct trace_event_data_offsets_neigh_update { u32 dev; }; struct trace_event_data_offsets_neigh__update { u32 dev; }; struct trace_event_data_offsets_kfree_skb {}; struct trace_event_data_offsets_consume_skb {}; struct trace_event_data_offsets_skb_copy_datagram_iovec {}; struct trace_event_data_offsets_net_dev_xmit_timeout { u32 name; u32 driver; }; struct trace_event_data_offsets_net_dev_rx_exit_template {}; struct trace_event_data_offsets_sock_rcvqueue_full {}; struct trace_event_data_offsets_sock_exceed_buf_limit {}; struct trace_event_data_offsets_inet_sock_set_state {}; struct trace_event_data_offsets_inet_sk_error_report {}; struct trace_event_data_offsets_sk_data_ready {}; struct trace_event_data_offsets_sock_msg_length {}; struct trace_event_data_offsets_udp_fail_queue_rcv_skb {}; struct trace_event_data_offsets_tcp_event_sk_skb {}; struct trace_event_data_offsets_tcp_event_sk {}; struct trace_event_data_offsets_tcp_retransmit_synack {}; struct trace_event_data_offsets_tcp_probe {}; struct trace_event_data_offsets_tcp_event_skb {}; struct trace_event_data_offsets_tcp_cong_state_set {}; struct trace_event_data_offsets_fib_table_lookup {}; struct trace_event_data_offsets_qdisc_dequeue {}; struct trace_event_data_offsets_qdisc_enqueue {}; struct trace_event_data_offsets_qdisc_reset { u32 dev; u32 kind; }; struct trace_event_data_offsets_qdisc_destroy { u32 dev; u32 kind; }; struct trace_event_data_offsets_qdisc_create { u32 dev; u32 kind; }; struct trace_event_data_offsets_br_fdb_external_learn_add { u32 br_dev; u32 dev; }; struct trace_event_data_offsets_fdb_delete { u32 br_dev; u32 dev; }; struct trace_event_data_offsets_br_fdb_update { u32 br_dev; u32 dev; }; struct trace_event_data_offsets_page_pool_release {}; struct trace_event_data_offsets_page_pool_state_release {}; struct trace_event_data_offsets_page_pool_state_hold {}; struct trace_event_data_offsets_page_pool_update_nid {}; struct net_test { char name[32]; int (*fn)(struct net_device *); }; enum ethtool_test_flags { ETH_TEST_FL_OFFLINE = 1, ETH_TEST_FL_FAILED = 2, ETH_TEST_FL_EXTERNAL_LB = 4, ETH_TEST_FL_EXTERNAL_LB_DONE = 8, }; struct netsfhdr { __be32 version; __be64 magic; u8 id; } __attribute__((packed)); struct net_packet_attrs { const unsigned char *src; const unsigned char *dst; u32 ip_src; u32 ip_dst; bool tcp; u16 sport; u16 dport; int timeout; int size; int max_size; u8 id; u16 queue_mapping; }; struct net_test_priv { struct net_packet_attrs *packet; struct packet_type pt; struct completion comp; int double_vlan; int vlan_id; int ok; }; struct gro_cell { struct sk_buff_head napi_skbs; struct napi_struct napi; }; struct percpu_free_defer { struct callback_head rcu; void __attribute__((btf_type_tag("percpu"))) *ptr; }; struct gro_cells { struct gro_cell __attribute__((btf_type_tag("percpu"))) *cells; }; enum __sk_action { __SK_DROP = 0, __SK_PASS = 1, __SK_REDIRECT = 2, __SK_NONE = 3, }; enum sk_psock_state_bits { SK_PSOCK_TX_ENABLED = 0, SK_PSOCK_RX_STRP_ENABLED = 1, }; struct sk_psock_link { struct list_head list; struct bpf_map *map; void *link_raw; }; typedef u64 (*btf_bpf_sock_map_update)(struct bpf_sock_ops_kern *, struct bpf_map *, void *, u64); typedef u64 (*btf_bpf_sk_redirect_map)(struct sk_buff *, struct 
bpf_map *, u32, u64); typedef u64 (*btf_bpf_msg_redirect_map)(struct sk_msg *, struct bpf_map *, u32, u64); typedef u64 (*btf_bpf_sock_hash_update)(struct bpf_sock_ops_kern *, struct bpf_map *, void *, u64); typedef u64 (*btf_bpf_sk_redirect_hash)(struct sk_buff *, struct bpf_map *, void *, u64); typedef u64 (*btf_bpf_msg_redirect_hash)(struct sk_msg *, struct bpf_map *, void *, u64); struct bpf_stab { struct bpf_map map; struct sock **sks; struct sk_psock_progs progs; spinlock_t lock; long: 64; long: 64; }; struct bpf_shtab_bucket; struct bpf_shtab { struct bpf_map map; struct bpf_shtab_bucket *buckets; u32 buckets_num; u32 elem_size; struct sk_psock_progs progs; atomic_t count; long: 64; }; struct bpf_shtab_bucket { struct hlist_head head; spinlock_t lock; }; struct bpf_shtab_elem { struct callback_head rcu; u32 hash; struct sock *sk; struct hlist_node node; u8 key[0]; }; struct sock_map_seq_info { struct bpf_map *map; struct sock *sk; u32 index; }; struct bpf_iter__sockmap { union { struct bpf_iter_meta *meta; }; union { struct bpf_map *map; }; union { void *key; }; union { struct sock *sk; }; }; struct sock_hash_seq_info { struct bpf_map *map; struct bpf_shtab *htab; u32 bucket_id; }; enum { SK_DIAG_BPF_STORAGE_REQ_NONE = 0, SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 1, __SK_DIAG_BPF_STORAGE_REQ_MAX = 2, }; enum { SK_DIAG_BPF_STORAGE_REP_NONE = 0, SK_DIAG_BPF_STORAGE = 1, __SK_DIAG_BPF_STORAGE_REP_MAX = 2, }; enum { SK_DIAG_BPF_STORAGE_NONE = 0, SK_DIAG_BPF_STORAGE_PAD = 1, SK_DIAG_BPF_STORAGE_MAP_ID = 2, SK_DIAG_BPF_STORAGE_MAP_VALUE = 3, __SK_DIAG_BPF_STORAGE_MAX = 4, }; typedef u64 (*btf_bpf_sk_storage_get)(struct bpf_map *, struct sock *, void *, u64, gfp_t); typedef u64 (*btf_bpf_sk_storage_delete)(struct bpf_map *, struct sock *); typedef u64 (*btf_bpf_sk_storage_get_tracing)(struct bpf_map *, struct sock *, void *, u64, gfp_t); typedef u64 (*btf_bpf_sk_storage_delete_tracing)(struct bpf_map *, struct sock *); struct bpf_sk_storage_diag { u32 nr_maps; struct bpf_map *maps[0]; }; struct bpf_iter_seq_sk_storage_map_info { struct bpf_map *map; unsigned int bucket_id; unsigned int skip_elems; }; struct bpf_iter__bpf_sk_storage_map { union { struct bpf_iter_meta *meta; }; union { struct bpf_map *map; }; union { struct sock *sk; }; union { void *value; }; }; struct compat_cmsghdr { compat_size_t cmsg_len; compat_int_t cmsg_level; compat_int_t cmsg_type; }; struct llc_addr { unsigned char lsap; unsigned char mac[6]; }; struct llc_sap { unsigned char state; unsigned char p_bit; unsigned char f_bit; refcount_t refcnt; int (*rcv_func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); struct llc_addr laddr; struct list_head node; spinlock_t sk_lock; int sk_count; struct hlist_nulls_head sk_laddr_hash[64]; struct hlist_head sk_dev_hash[64]; struct callback_head rcu; }; struct llc_pdu_un { u8 dsap; u8 ssap; u8 ctrl_1; }; struct llc_pdu_sn { u8 dsap; u8 ssap; u8 ctrl_1; u8 ctrl_2; }; struct datalink_proto { unsigned char type[8]; struct llc_sap *sap; unsigned short header_length; int (*rcvfunc)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); int (*request)(struct datalink_proto *, struct sk_buff *, const unsigned char *); struct list_head node; }; struct stp_proto { unsigned char group_address[6]; void (*rcv)(const struct stp_proto *, struct sk_buff *, struct net_device *); void *data; }; enum { TCA_UNSPEC = 0, TCA_KIND = 1, TCA_OPTIONS = 2, TCA_STATS = 3, TCA_XSTATS = 4, TCA_RATE = 5, TCA_FCNT = 6, TCA_STATS2 = 7, TCA_STAB = 8, TCA_PAD 
= 9, TCA_DUMP_INVISIBLE = 10, TCA_CHAIN = 11, TCA_HW_OFFLOAD = 12, TCA_INGRESS_BLOCK = 13, TCA_EGRESS_BLOCK = 14, TCA_DUMP_FLAGS = 15, TCA_EXT_WARN_MSG = 16, __TCA_MAX = 17, }; struct skb_array { struct ptr_ring ring; }; struct pfifo_fast_priv { struct skb_array q[3]; }; struct tc_prio_qopt { int bands; __u8 priomap[16]; }; struct psched_ratecfg { u64 rate_bytes_ps; u32 mult; u16 overhead; u16 mpu; u8 linklayer; u8 shift; }; struct tc_ratespec { unsigned char cell_log; __u8 linklayer; unsigned short overhead; short cell_align; unsigned short mpu; __u32 rate; }; struct psched_pktrate { u64 rate_pkts_ps; u32 mult; u8 shift; }; struct mini_Qdisc_pair { struct mini_Qdisc miniq1; struct mini_Qdisc miniq2; struct mini_Qdisc __attribute__((btf_type_tag("rcu"))) **p_miniq; }; enum tc_mq_command { TC_MQ_CREATE = 0, TC_MQ_DESTROY = 1, TC_MQ_STATS = 2, TC_MQ_GRAFT = 3, }; struct tc_qopt_offload_stats { struct gnet_stats_basic_sync *bstats; struct gnet_stats_queue *qstats; }; struct tc_mq_opt_offload_graft_params { unsigned long queue; u32 child_handle; }; struct tc_mq_qopt_offload { enum tc_mq_command command; u32 handle; union { struct tc_qopt_offload_stats stats; struct tc_mq_opt_offload_graft_params graft_params; }; }; struct mq_sched { struct Qdisc **qdiscs; }; struct sch_frag_data { unsigned long dst; struct qdisc_skb_cb cb; __be16 inner_protocol; u16 vlan_tci; __be16 vlan_proto; unsigned int l2_len; u8 l2_data[18]; int (*xmit)(struct sk_buff *); }; struct qdisc_rate_table { struct tc_ratespec rate; u32 data[256]; struct qdisc_rate_table *next; int refcnt; }; enum tc_link_layer { TC_LINKLAYER_UNAWARE = 0, TC_LINKLAYER_ETHERNET = 1, TC_LINKLAYER_ATM = 2, }; enum { TCA_STAB_UNSPEC = 0, TCA_STAB_BASE = 1, TCA_STAB_DATA = 2, __TCA_STAB_MAX = 3, }; enum tc_root_command { TC_ROOT_GRAFT = 0, }; struct Qdisc_class_common { u32 classid; unsigned int filter_cnt; struct hlist_node hnode; }; struct qdisc_watchdog { struct hrtimer timer; struct Qdisc *qdisc; }; struct check_loop_arg { struct qdisc_walker w; struct Qdisc *p; int depth; }; struct tc_bind_class_args { struct qdisc_walker w; unsigned long new_cl; u32 portid; u32 clid; }; struct qdisc_dump_args { struct qdisc_walker w; struct sk_buff *skb; struct netlink_callback *cb; }; struct tc_root_qopt_offload { enum tc_root_command command; u32 handle; bool ingress; }; struct Qdisc_class_hash { struct hlist_head *hash; unsigned int hashsize; unsigned int hashmask; unsigned int hashelems; }; struct tc_query_caps_base { enum tc_setup_type type; void *caps; }; struct tcf_bind_args { struct tcf_walker w; unsigned long base; unsigned long cl; u32 classid; }; enum net_xmit_qdisc_t { __NET_XMIT_STOLEN = 65536, __NET_XMIT_BYPASS = 131072, }; struct psample_group { struct list_head list; struct net *net; u32 group_num; u32 refcount; u32 seq; struct callback_head rcu; }; struct tcf_exts_miss_cookie_node { const struct tcf_chain *chain; const struct tcf_proto *tp; const struct tcf_exts *exts; u32 chain_index; u32 tp_prio; u32 handle; u32 miss_cookie_base; struct callback_head rcu; }; enum { TCA_ACT_UNSPEC = 0, TCA_ACT_KIND = 1, TCA_ACT_OPTIONS = 2, TCA_ACT_INDEX = 3, TCA_ACT_STATS = 4, TCA_ACT_PAD = 5, TCA_ACT_COOKIE = 6, TCA_ACT_FLAGS = 7, TCA_ACT_HW_STATS = 8, TCA_ACT_USED_HW_STATS = 9, TCA_ACT_IN_HW_COUNT = 10, __TCA_ACT_MAX = 11, }; enum pedit_header_type { TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK = 0, TCA_PEDIT_KEY_EX_HDR_TYPE_ETH = 1, TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 = 2, TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 = 3, TCA_PEDIT_KEY_EX_HDR_TYPE_TCP = 4, TCA_PEDIT_KEY_EX_HDR_TYPE_UDP = 
5, __PEDIT_HDR_TYPE_MAX = 6, }; enum pedit_cmd { TCA_PEDIT_KEY_EX_CMD_SET = 0, TCA_PEDIT_KEY_EX_CMD_ADD = 1, __PEDIT_CMD_MAX = 2, }; enum qdisc_class_ops_flags { QDISC_CLASS_OPS_DOIT_UNLOCKED = 1, }; enum tcf_proto_ops_flags { TCF_PROTO_OPS_DOIT_UNLOCKED = 1, }; struct tcf_block_owner_item { struct list_head list; struct Qdisc *q; enum flow_block_binder_type binder_type; }; typedef void tcf_chain_head_change_t(struct tcf_proto *, void *); struct tcf_filter_chain_list_item { struct list_head list; tcf_chain_head_change_t *chain_head_change; void *chain_head_change_priv; }; struct tc_pedit_key; struct tcf_pedit_key_ex; struct tcf_pedit_parms { struct tc_pedit_key *tcfp_keys; struct tcf_pedit_key_ex *tcfp_keys_ex; u32 tcfp_off_max_hint; unsigned char tcfp_nkeys; unsigned char tcfp_flags; struct callback_head rcu; }; struct tc_pedit_key { __u32 mask; __u32 val; __u32 off; __u32 at; __u32 offmask; __u32 shift; }; struct tcf_pedit_key_ex { enum pedit_header_type htype; enum pedit_cmd cmd; }; struct tcf_pedit { struct tc_action common; struct tcf_pedit_parms __attribute__((btf_type_tag("rcu"))) *parms; long: 64; }; struct tcf_block_ext_info { enum flow_block_binder_type binder_type; tcf_chain_head_change_t *chain_head_change; void *chain_head_change_priv; u32 block_index; }; struct tcf_net { spinlock_t idr_lock; struct idr idr; }; struct action_gate_entry { u8 gate_state; u32 interval; s32 ipv; s32 maxoctets; }; struct tcf_chain_info { struct tcf_proto __attribute__((btf_type_tag("rcu"))) **pprev; struct tcf_proto __attribute__((btf_type_tag("rcu"))) *next; }; struct tcf_dump_args { struct tcf_walker w; struct sk_buff *skb; struct netlink_callback *cb; struct tcf_block *block; struct Qdisc *q; u32 parent; bool terse_dump; }; struct tcf_qevent { struct tcf_block *block; struct tcf_block_ext_info info; struct tcf_proto __attribute__((btf_type_tag("rcu"))) *filter_chain; }; enum { TCA_ROOT_UNSPEC = 0, TCA_ROOT_TAB = 1, TCA_ROOT_FLAGS = 2, TCA_ROOT_COUNT = 3, TCA_ROOT_TIME_DELTA = 4, TCA_ROOT_EXT_WARN_MSG = 5, __TCA_ROOT_MAX = 6, }; struct tc_act_pernet_id { struct list_head list; unsigned int id; }; struct tcamsg { unsigned char tca_family; unsigned char tca__pad1; unsigned short tca__pad2; }; struct tc_action_net { struct tcf_idrinfo *idrinfo; const struct tc_action_ops *ops; }; enum { TCA_POLICE_UNSPEC = 0, TCA_POLICE_TBF = 1, TCA_POLICE_RATE = 2, TCA_POLICE_PEAKRATE = 3, TCA_POLICE_AVRATE = 4, TCA_POLICE_RESULT = 5, TCA_POLICE_TM = 6, TCA_POLICE_PAD = 7, TCA_POLICE_RATE64 = 8, TCA_POLICE_PEAKRATE64 = 9, TCA_POLICE_PKTRATE64 = 10, TCA_POLICE_PKTBURST64 = 11, __TCA_POLICE_MAX = 12, }; struct tcf_police_params; struct tcf_police { struct tc_action common; struct tcf_police_params __attribute__((btf_type_tag("rcu"))) *params; long: 64; long: 64; long: 64; long: 64; long: 64; spinlock_t tcfp_lock; s64 tcfp_toks; s64 tcfp_ptoks; s64 tcfp_pkttoks; s64 tcfp_t_c; long: 64; long: 64; long: 64; }; struct tcf_police_params { int tcfp_result; u32 tcfp_ewma_rate; s64 tcfp_burst; u32 tcfp_mtu; s64 tcfp_mtu_ptoks; s64 tcfp_pkt_burst; struct psched_ratecfg rate; bool rate_present; struct psched_ratecfg peak; bool peak_present; struct psched_pktrate ppsrate; bool pps_present; struct callback_head rcu; }; struct tc_police { __u32 index; int action; __u32 limit; __u32 burst; __u32 mtu; struct tc_ratespec rate; struct tc_ratespec peakrate; int refcnt; int bindcnt; __u32 capab; }; enum { TCA_GACT_UNSPEC = 0, TCA_GACT_TM = 1, TCA_GACT_PARMS = 2, TCA_GACT_PROB = 3, TCA_GACT_PAD = 4, __TCA_GACT_MAX = 5, }; struct 
tcf_gact { struct tc_action common; }; struct tc_gact { __u32 index; __u32 capab; int action; int refcnt; int bindcnt; }; enum { TCA_MIRRED_UNSPEC = 0, TCA_MIRRED_TM = 1, TCA_MIRRED_PARMS = 2, TCA_MIRRED_PAD = 3, __TCA_MIRRED_MAX = 4, }; struct tcf_mirred { struct tc_action common; int tcfm_eaction; bool tcfm_mac_header_xmit; struct net_device __attribute__((btf_type_tag("rcu"))) *tcfm_dev; netdevice_tracker tcfm_dev_tracker; struct list_head tcfm_list; }; struct tc_mirred { __u32 index; __u32 capab; int action; int refcnt; int bindcnt; int eaction; __u32 ifindex; }; enum { TCA_SKBEDIT_UNSPEC = 0, TCA_SKBEDIT_TM = 1, TCA_SKBEDIT_PARMS = 2, TCA_SKBEDIT_PRIORITY = 3, TCA_SKBEDIT_QUEUE_MAPPING = 4, TCA_SKBEDIT_MARK = 5, TCA_SKBEDIT_PAD = 6, TCA_SKBEDIT_PTYPE = 7, TCA_SKBEDIT_MASK = 8, TCA_SKBEDIT_FLAGS = 9, TCA_SKBEDIT_QUEUE_MAPPING_MAX = 10, __TCA_SKBEDIT_MAX = 11, }; struct tcf_skbedit_params; struct tcf_skbedit { struct tc_action common; struct tcf_skbedit_params __attribute__((btf_type_tag("rcu"))) *params; long: 64; }; struct tcf_skbedit_params { u32 flags; u32 priority; u32 mark; u32 mask; u16 queue_mapping; u16 mapping_mod; u16 ptype; struct callback_head rcu; }; struct tc_skbedit { __u32 index; __u32 capab; int action; int refcnt; int bindcnt; }; enum { TCA_ACT_BPF_UNSPEC = 0, TCA_ACT_BPF_TM = 1, TCA_ACT_BPF_PARMS = 2, TCA_ACT_BPF_OPS_LEN = 3, TCA_ACT_BPF_OPS = 4, TCA_ACT_BPF_FD = 5, TCA_ACT_BPF_NAME = 6, TCA_ACT_BPF_PAD = 7, TCA_ACT_BPF_TAG = 8, TCA_ACT_BPF_ID = 9, __TCA_ACT_BPF_MAX = 10, }; struct tcf_bpf { struct tc_action common; struct bpf_prog __attribute__((btf_type_tag("rcu"))) *filter; union { u32 bpf_fd; u16 bpf_num_ops; }; struct sock_filter *bpf_ops; const char *bpf_name; }; struct tcf_bpf_cfg { struct bpf_prog *filter; struct sock_filter *bpf_ops; const char *bpf_name; u16 bpf_num_ops; bool is_ebpf; }; struct tc_act_bpf { __u32 index; __u32 capab; int action; int refcnt; int bindcnt; }; enum tc_fifo_command { TC_FIFO_REPLACE = 0, TC_FIFO_DESTROY = 1, TC_FIFO_STATS = 2, }; struct tc_fifo_qopt { __u32 limit; }; struct tc_fifo_qopt_offload { enum tc_fifo_command command; u32 handle; u32 parent; union { struct tc_qopt_offload_stats stats; }; }; enum tc_htb_command { TC_HTB_CREATE = 0, TC_HTB_DESTROY = 1, TC_HTB_LEAF_ALLOC_QUEUE = 2, TC_HTB_LEAF_TO_INNER = 3, TC_HTB_LEAF_DEL = 4, TC_HTB_LEAF_DEL_LAST = 5, TC_HTB_LEAF_DEL_LAST_FORCE = 6, TC_HTB_NODE_MODIFY = 7, TC_HTB_LEAF_QUERY_QUEUE = 8, }; enum htb_cmode { HTB_CANT_SEND = 0, HTB_MAY_BORROW = 1, HTB_CAN_SEND = 2, }; enum { TCA_HTB_UNSPEC = 0, TCA_HTB_PARMS = 1, TCA_HTB_INIT = 2, TCA_HTB_CTAB = 3, TCA_HTB_RTAB = 4, TCA_HTB_DIRECT_QLEN = 5, TCA_HTB_RATE64 = 6, TCA_HTB_CEIL64 = 7, TCA_HTB_PAD = 8, TCA_HTB_OFFLOAD = 9, __TCA_HTB_MAX = 10, }; struct htb_class_leaf { int deficit[8]; struct Qdisc *q; struct netdev_queue *offload_queue; }; struct htb_prio { union { struct rb_root row; struct rb_root feed; }; struct rb_node *ptr; u32 last_ptr_id; }; struct htb_class_inner { struct htb_prio clprio[8]; }; struct tc_htb_xstats { __u32 lends; __u32 borrows; __u32 giants; __s32 tokens; __s32 ctokens; }; struct htb_class { struct Qdisc_class_common common; struct psched_ratecfg rate; struct psched_ratecfg ceil; s64 buffer; s64 cbuffer; s64 mbuffer; u32 prio; int quantum; struct tcf_proto __attribute__((btf_type_tag("rcu"))) *filter_list; struct tcf_block *block; int level; unsigned int children; struct htb_class *parent; struct net_rate_estimator __attribute__((btf_type_tag("rcu"))) *rate_est; struct gnet_stats_basic_sync bstats; struct 
gnet_stats_basic_sync bstats_bias; struct tc_htb_xstats xstats; s64 tokens; s64 ctokens; s64 t_c; union { struct htb_class_leaf leaf; struct htb_class_inner inner; }; s64 pq_key; int prio_activity; enum htb_cmode cmode; struct rb_node pq_node; struct rb_node node[8]; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; unsigned int drops; unsigned int overlimits; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct htb_level { struct rb_root wait_pq; struct htb_prio hprio[8]; }; struct htb_sched { struct Qdisc_class_hash clhash; int defcls; int rate2quantum; struct tcf_proto __attribute__((btf_type_tag("rcu"))) *filter_list; struct tcf_block *block; unsigned int warned; int direct_qlen; struct work_struct work; struct qdisc_skb_head direct_queue; u32 direct_pkts; u32 overlimits; struct qdisc_watchdog watchdog; s64 now; s64 near_ev_cache[8]; int row_mask[8]; struct htb_level hlevel[8]; struct Qdisc **direct_qdiscs; unsigned int num_direct_qdiscs; bool offload; }; struct tc_htb_qopt_offload { struct netlink_ext_ack *extack; enum tc_htb_command command; u32 parent_classid; u16 classid; u16 qid; u32 quantum; u64 rate; u64 ceil; u8 prio; }; struct tc_htb_glob { __u32 version; __u32 rate2quantum; __u32 defcls; __u32 debug; __u32 direct_pkts; }; struct tc_htb_opt { struct tc_ratespec rate; struct tc_ratespec ceil; __u32 buffer; __u32 cbuffer; __u32 quantum; __u32 level; __u32 prio; }; struct ingress_sched_data { struct tcf_block *block; struct tcf_block_ext_info block_info; struct mini_Qdisc_pair miniqp; }; struct clsact_sched_data { struct tcf_block *ingress_block; struct tcf_block *egress_block; struct tcf_block_ext_info ingress_block_info; struct tcf_block_ext_info egress_block_info; struct mini_Qdisc_pair miniqp_ingress; struct mini_Qdisc_pair miniqp_egress; }; enum { RED_DONT_MARK = 0, RED_PROB_MARK = 1, RED_HARD_MARK = 2, }; enum { RED_BELOW_MIN_THRESH = 0, RED_BETWEEN_TRESH = 1, RED_ABOVE_MAX_TRESH = 2, }; typedef u16 sfq_index; struct tc_sfqred_stats { __u32 prob_drop; __u32 forced_drop; __u32 prob_mark; __u32 forced_mark; __u32 prob_mark_head; __u32 forced_mark_head; }; struct sfq_head { sfq_index next; sfq_index prev; }; struct sfq_slot; struct red_parms; struct sfq_sched_data { int limit; unsigned int divisor; u8 headdrop; u8 maxdepth; siphash_key_t perturbation; u8 cur_depth; u8 flags; unsigned short scaled_quantum; struct tcf_proto __attribute__((btf_type_tag("rcu"))) *filter_list; struct tcf_block *block; sfq_index *ht; struct sfq_slot *slots; struct red_parms *red_parms; struct tc_sfqred_stats stats; struct sfq_slot *tail; struct sfq_head dep[128]; unsigned int maxflows; int perturb_period; unsigned int quantum; struct timer_list perturb_timer; struct Qdisc *sch; }; struct red_vars { int qcount; u32 qR; unsigned long qavg; ktime_t qidlestart; }; struct sfq_slot { struct sk_buff *skblist_next; struct sk_buff *skblist_prev; sfq_index qlen; sfq_index next; struct sfq_head dep; unsigned short hash; short allot; unsigned int backlog; struct red_vars vars; }; struct red_parms { u32 qth_min; u32 qth_max; u32 Scell_max; u32 max_P; struct reciprocal_value max_P_reciprocal; u32 qth_delta; u32 target_min; u32 target_max; u8 Scell_log; u8 Wlog; u8 Plog; u8 Stab[256]; }; struct tc_sfq_qopt { unsigned int quantum; int perturb_period; __u32 limit; unsigned int divisor; unsigned int flows; }; struct tc_sfq_qopt_v1 { struct tc_sfq_qopt v0; unsigned int depth; unsigned int headdrop; __u32 limit; __u32 qth_min; __u32 qth_max; unsigned char Wlog; unsigned 
char Plog; unsigned char Scell_log; unsigned char flags; __u32 max_P; struct tc_sfqred_stats stats; }; struct tc_sfq_xstats { __s32 allot; }; enum tc_tbf_command { TC_TBF_REPLACE = 0, TC_TBF_DESTROY = 1, TC_TBF_STATS = 2, TC_TBF_GRAFT = 3, }; enum { TCA_TBF_UNSPEC = 0, TCA_TBF_PARMS = 1, TCA_TBF_RTAB = 2, TCA_TBF_PTAB = 3, TCA_TBF_RATE64 = 4, TCA_TBF_PRATE64 = 5, TCA_TBF_BURST = 6, TCA_TBF_PBURST = 7, TCA_TBF_PAD = 8, __TCA_TBF_MAX = 9, }; struct tbf_sched_data { u32 limit; u32 max_size; s64 buffer; s64 mtu; struct psched_ratecfg rate; struct psched_ratecfg peak; s64 tokens; s64 ptokens; s64 t_c; struct Qdisc *qdisc; struct qdisc_watchdog watchdog; }; struct tc_tbf_qopt_offload_replace_params { struct psched_ratecfg rate; u32 max_size; struct gnet_stats_queue *qstats; }; struct tc_tbf_qopt_offload { enum tc_tbf_command command; u32 handle; u32 parent; union { struct tc_tbf_qopt_offload_replace_params replace_params; struct tc_qopt_offload_stats stats; u32 child_handle; }; }; struct tc_tbf_qopt { struct tc_ratespec rate; struct tc_ratespec peakrate; __u32 limit; __u32 buffer; __u32 mtu; }; enum tc_prio_command { TC_PRIO_REPLACE = 0, TC_PRIO_DESTROY = 1, TC_PRIO_STATS = 2, TC_PRIO_GRAFT = 3, }; struct prio_sched_data { int bands; struct tcf_proto __attribute__((btf_type_tag("rcu"))) *filter_list; struct tcf_block *block; u8 prio2band[16]; struct Qdisc *queues[16]; }; struct tc_prio_qopt_offload_params { int bands; u8 priomap[16]; struct gnet_stats_queue *qstats; }; struct tc_prio_qopt_offload_graft_params { u8 band; u32 child_handle; }; struct tc_prio_qopt_offload { enum tc_prio_command command; u32 handle; u32 parent; union { struct tc_prio_qopt_offload_params replace_params; struct tc_qopt_offload_stats stats; struct tc_prio_qopt_offload_graft_params graft_params; }; }; struct multiq_sched_data { u16 bands; u16 max_bands; u16 curband; struct tcf_proto __attribute__((btf_type_tag("rcu"))) *filter_list; struct tcf_block *block; struct Qdisc **queues; }; struct tc_multiq_qopt { __u16 bands; __u16 max_bands; }; struct crndstate { u32 last; u32 rho; }; struct prng { u64 seed; struct rnd_state prng_state; }; struct clgstate { u8 state; u32 a1; u32 a2; u32 a3; u32 a4; u32 a5; }; struct tc_netem_slot { __s64 min_delay; __s64 max_delay; __s32 max_packets; __s32 max_bytes; __s64 dist_delay; __s64 dist_jitter; }; struct slotstate { u64 slot_next; s32 packets_left; s32 bytes_left; }; struct disttable; struct netem_sched_data { struct rb_root t_root; struct sk_buff *t_head; struct sk_buff *t_tail; struct Qdisc *qdisc; struct qdisc_watchdog watchdog; s64 latency; s64 jitter; u32 loss; u32 ecn; u32 limit; u32 counter; u32 gap; u32 duplicate; u32 reorder; u32 corrupt; u64 rate; s32 packet_overhead; u32 cell_size; struct reciprocal_value cell_size_reciprocal; s32 cell_overhead; struct crndstate delay_cor; struct crndstate loss_cor; struct crndstate dup_cor; struct crndstate reorder_cor; struct crndstate corrupt_cor; struct prng prng; struct disttable *delay_dist; enum { CLG_RANDOM = 0, CLG_4_STATES = 1, CLG_GILB_ELL = 2, } loss_model; enum { TX_IN_GAP_PERIOD = 1, TX_IN_BURST_PERIOD = 2, LOST_IN_GAP_PERIOD = 3, LOST_IN_BURST_PERIOD = 4, } _4_state_model; enum { GOOD_STATE = 1, BAD_STATE = 2, } GE_state_model; struct clgstate clg; struct tc_netem_slot slot_config; struct slotstate slot; struct disttable *slot_dist; }; struct disttable { u32 size; s16 table[0]; }; enum { TCA_NETEM_UNSPEC = 0, TCA_NETEM_CORR = 1, TCA_NETEM_DELAY_DIST = 2, TCA_NETEM_REORDER = 3, TCA_NETEM_CORRUPT = 4, TCA_NETEM_LOSS = 5, 
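/*
 * The qdisc private-data types above are the kind of structure a CO-RE BPF
 * program typically inspects through this header.  A minimal sketch of
 * attaching to the qdisc_dequeue tracepoint, whose BTF signature is the
 * btf_trace_qdisc_dequeue typedef declared earlier in this file (the first
 * void * parameter is the tracepoint context and is not passed to the
 * program).  It assumes libbpf's bpf_helpers.h and bpf_tracing.h; the
 * program and object names are illustrative:
 *
 *   #include "vmlinux.h"
 *   #include <bpf/bpf_helpers.h>
 *   #include <bpf/bpf_tracing.h>
 *
 *   SEC("tp_btf/qdisc_dequeue")
 *   int BPF_PROG(on_qdisc_dequeue, struct Qdisc *qdisc,
 *                const struct netdev_queue *txq, int packets,
 *                struct sk_buff *skb)
 *   {
 *           // tp_btf arguments are typed BTF pointers, so fields may be
 *           // dereferenced directly and are relocated against kernel BTF
 *           u32 handle = qdisc->handle;
 *
 *           bpf_printk("dequeue qdisc=%x pkts=%d", handle, packets);
 *           return 0;
 *   }
 *
 *   char LICENSE[] SEC("license") = "GPL";
 */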
TCA_NETEM_RATE = 6, TCA_NETEM_ECN = 7, TCA_NETEM_RATE64 = 8, TCA_NETEM_PAD = 9, TCA_NETEM_LATENCY64 = 10, TCA_NETEM_JITTER64 = 11, TCA_NETEM_SLOT = 12, TCA_NETEM_SLOT_DIST = 13, TCA_NETEM_PRNG_SEED = 14, __TCA_NETEM_MAX = 15, }; enum { NETEM_LOSS_UNSPEC = 0, NETEM_LOSS_GI = 1, NETEM_LOSS_GE = 2, __NETEM_LOSS_MAX = 3, }; struct netem_skb_cb { u64 time_to_send; }; typedef u64 psched_time_t; struct tc_netem_gemodel { __u32 p; __u32 r; __u32 h; __u32 k1; }; struct tc_netem_gimodel { __u32 p13; __u32 p31; __u32 p32; __u32 p14; __u32 p23; }; struct tc_netem_corr { __u32 delay_corr; __u32 loss_corr; __u32 dup_corr; }; struct tc_netem_reorder { __u32 probability; __u32 correlation; }; struct tc_netem_corrupt { __u32 probability; __u32 correlation; }; struct tc_netem_rate { __u32 rate; __s32 packet_overhead; __u32 cell_size; __s32 cell_overhead; }; struct tc_netem_qopt { __u32 latency; __u32 limit; __u32 loss; __u32 gap; __u32 duplicate; __u32 jitter; }; enum { TCA_CODEL_UNSPEC = 0, TCA_CODEL_TARGET = 1, TCA_CODEL_LIMIT = 2, TCA_CODEL_INTERVAL = 3, TCA_CODEL_ECN = 4, TCA_CODEL_CE_THRESHOLD = 5, __TCA_CODEL_MAX = 6, }; typedef u32 codel_time_t; struct codel_skb_cb { codel_time_t enqueue_time; unsigned int mem_usage; }; struct codel_vars { u32 count; u32 lastcount; bool dropping; u16 rec_inv_sqrt; codel_time_t first_above_time; codel_time_t drop_next; codel_time_t ldelay; }; struct codel_params { codel_time_t target; codel_time_t ce_threshold; codel_time_t interval; u32 mtu; bool ecn; u8 ce_threshold_selector; u8 ce_threshold_mask; }; struct codel_stats { u32 maxpacket; u32 drop_count; u32 drop_len; u32 ecn_mark; u32 ce_mark; }; typedef u32 (*codel_skb_len_t)(const struct sk_buff *); typedef codel_time_t (*codel_skb_time_t)(const struct sk_buff *); typedef void (*codel_skb_drop_t)(struct sk_buff *, void *); typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *, void *); struct codel_sched_data { struct codel_params params; struct codel_vars vars; struct codel_stats stats; u32 drop_overlimit; }; struct tc_codel_xstats { __u32 maxpacket; __u32 count; __u32 lastcount; __u32 ldelay; __s32 drop_next; __u32 drop_overlimit; __u32 ecn_mark; __u32 dropping; __u32 ce_mark; }; typedef s32 codel_tdiff_t; enum { TCA_FQ_CODEL_XSTATS_QDISC = 0, TCA_FQ_CODEL_XSTATS_CLASS = 1, }; enum { TCA_FQ_CODEL_UNSPEC = 0, TCA_FQ_CODEL_TARGET = 1, TCA_FQ_CODEL_LIMIT = 2, TCA_FQ_CODEL_INTERVAL = 3, TCA_FQ_CODEL_ECN = 4, TCA_FQ_CODEL_FLOWS = 5, TCA_FQ_CODEL_QUANTUM = 6, TCA_FQ_CODEL_CE_THRESHOLD = 7, TCA_FQ_CODEL_DROP_BATCH_SIZE = 8, TCA_FQ_CODEL_MEMORY_LIMIT = 9, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR = 10, TCA_FQ_CODEL_CE_THRESHOLD_MASK = 11, __TCA_FQ_CODEL_MAX = 12, }; struct fq_codel_flow { struct sk_buff *head; struct sk_buff *tail; struct list_head flowchain; int deficit; struct codel_vars cvars; }; struct fq_codel_sched_data { struct tcf_proto __attribute__((btf_type_tag("rcu"))) *filter_list; struct tcf_block *block; struct fq_codel_flow *flows; u32 *backlogs; u32 flows_cnt; u32 quantum; u32 drop_batch_size; u32 memory_limit; struct codel_params cparams; struct codel_stats cstats; u32 memory_usage; u32 drop_overmemory; u32 drop_overlimit; u32 new_flow_count; struct list_head new_flows; struct list_head old_flows; }; struct tc_fq_codel_qd_stats { __u32 maxpacket; __u32 drop_overlimit; __u32 ecn_mark; __u32 new_flow_count; __u32 new_flows_len; __u32 old_flows_len; __u32 ce_mark; __u32 memory_usage; __u32 drop_overmemory; }; struct tc_fq_codel_cl_stats { __s32 deficit; __u32 ldelay; __u32 count; __u32 lastcount; 
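/*
 * codel_time_t values used by the codel/fq_codel types above are not
 * nanoseconds: the scheduler stores time as ns >> 10 (roughly microsecond
 * resolution), so a 5 ms CoDel target becomes 5,000,000 ns >> 10 = 4882
 * units.  This presumes the CODEL_SHIFT of 10 used by the kernel's codel
 * implementation; a sketch of the conversion with a hypothetical helper:
 *
 *   // mirrors the kernel's ms -> codel_time_t scaling (ns >> 10)
 *   static inline codel_time_t example_ms_to_codel_time(u64 ms)
 *   {
 *           return (codel_time_t)((ms * 1000000ULL) >> 10);
 *   }
 */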
__u32 dropping; __s32 drop_next; }; struct tc_fq_codel_xstats { __u32 type; union { struct tc_fq_codel_qd_stats qdisc_stats; struct tc_fq_codel_cl_stats class_stats; }; }; struct fq_flow { struct rb_root t_root; struct sk_buff *head; union { struct sk_buff *tail; unsigned long age; }; struct rb_node fq_node; struct sock *sk; u32 socket_hash; int qlen; int credit; struct fq_flow *next; struct rb_node rate_node; u64 time_next_packet; long: 64; long: 64; }; enum { TCA_FQ_UNSPEC = 0, TCA_FQ_PLIMIT = 1, TCA_FQ_FLOW_PLIMIT = 2, TCA_FQ_QUANTUM = 3, TCA_FQ_INITIAL_QUANTUM = 4, TCA_FQ_RATE_ENABLE = 5, TCA_FQ_FLOW_DEFAULT_RATE = 6, TCA_FQ_FLOW_MAX_RATE = 7, TCA_FQ_BUCKETS_LOG = 8, TCA_FQ_FLOW_REFILL_DELAY = 9, TCA_FQ_ORPHAN_MASK = 10, TCA_FQ_LOW_RATE_THRESHOLD = 11, TCA_FQ_CE_THRESHOLD = 12, TCA_FQ_TIMER_SLACK = 13, TCA_FQ_HORIZON = 14, TCA_FQ_HORIZON_DROP = 15, __TCA_FQ_MAX = 16, }; struct fq_skb_cb { u64 time_to_send; }; struct fq_flow_head { struct fq_flow *first; struct fq_flow *last; }; struct fq_sched_data { struct fq_flow_head new_flows; struct fq_flow_head old_flows; struct rb_root delayed; u64 time_next_delayed_flow; u64 ktime_cache; unsigned long unthrottle_latency_ns; struct fq_flow internal; u32 quantum; u32 initial_quantum; u32 flow_refill_delay; u32 flow_plimit; unsigned long flow_max_rate; u64 ce_threshold; u64 horizon; u32 orphan_mask; u32 low_rate_threshold; struct rb_root *fq_root; u8 rate_enable; u8 fq_trees_log; u8 horizon_drop; u32 flows; u32 inactive_flows; u32 throttled_flows; u64 stat_gc_flows; u64 stat_internal_packets; u64 stat_throttled; u64 stat_ce_mark; u64 stat_horizon_drops; u64 stat_horizon_caps; u64 stat_flows_plimit; u64 stat_pkts_too_long; u64 stat_allocation_errors; u32 timer_slack; struct qdisc_watchdog watchdog; long: 64; long: 64; long: 64; }; struct tc_fq_qd_stats { __u64 gc_flows; __u64 highprio_packets; __u64 tcp_retrans; __u64 throttled; __u64 flows_plimit; __u64 pkts_too_long; __u64 allocation_errors; __s64 time_next_delayed_flow; __u32 flows; __u32 inactive_flows; __u32 throttled_flows; __u32 unthrottle_latency_ns; __u64 ce_mark; __u64 horizon_drops; __u64 horizon_caps; }; enum tc_clsu32_command { TC_CLSU32_NEW_KNODE = 0, TC_CLSU32_REPLACE_KNODE = 1, TC_CLSU32_DELETE_KNODE = 2, TC_CLSU32_NEW_HNODE = 3, TC_CLSU32_REPLACE_HNODE = 4, TC_CLSU32_DELETE_HNODE = 5, }; enum { TCA_U32_UNSPEC = 0, TCA_U32_CLASSID = 1, TCA_U32_HASH = 2, TCA_U32_LINK = 3, TCA_U32_DIVISOR = 4, TCA_U32_SEL = 5, TCA_U32_POLICE = 6, TCA_U32_ACT = 7, TCA_U32_INDEV = 8, TCA_U32_PCNT = 9, TCA_U32_MARK = 10, TCA_U32_FLAGS = 11, TCA_U32_PAD = 12, __TCA_U32_MAX = 13, }; struct tc_u32_key { __be32 mask; __be32 val; int off; int offmask; }; struct tc_u32_sel { unsigned char flags; unsigned char offshift; unsigned char nkeys; __be16 offmask; __u16 off; short offoff; short hoff; __be32 hmask; struct tc_u32_key keys[0]; }; struct tc_u_hnode; struct tc_u_knode { struct tc_u_knode __attribute__((btf_type_tag("rcu"))) *next; u32 handle; struct tc_u_hnode __attribute__((btf_type_tag("rcu"))) *ht_up; struct tcf_exts exts; int ifindex; u8 fshift; struct tcf_result res; struct tc_u_hnode __attribute__((btf_type_tag("rcu"))) *ht_down; u32 flags; unsigned int in_hw_count; u32 val; u32 mask; u32 __attribute__((btf_type_tag("percpu"))) *pcpu_success; struct rcu_work rwork; struct tc_u32_sel sel; }; struct tc_u_hnode { struct tc_u_hnode __attribute__((btf_type_tag("rcu"))) *next; u32 handle; u32 prio; int refcnt; unsigned int divisor; struct idr handle_idr; bool is_root; struct callback_head rcu; u32 flags; 
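/*
 * The cls_u32 types above implement the u32 classifier: a tc_u_hnode is a
 * hash table of `divisor` buckets (the ht[] flexible array that follows),
 * each bucket chaining tc_u_knode filters, and every knode carries a
 * tc_u32_sel whose keys are compared against raw packet words.  A key
 * matches when the masked word at its offset equals the masked value; a
 * minimal sketch of that predicate, using a hypothetical helper name and
 * ignoring the offmask-based variable offsets:
 *
 *   static bool example_u32_key_matches(const struct tc_u32_key *key,
 *                                       const void *pkt_data)
 *   {
 *           __be32 word = *(const __be32 *)((const u8 *)pkt_data + key->off);
 *
 *           // i.e. (word & key->mask) == (key->val & key->mask)
 *           return ((word ^ key->val) & key->mask) == 0;
 *   }
 */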
struct tc_u_knode __attribute__((btf_type_tag("rcu"))) *ht[0]; }; struct tc_u_common { struct tc_u_hnode __attribute__((btf_type_tag("rcu"))) *hlist; void *ptr; int refcnt; struct idr handle_idr; struct hlist_node hnode; long knodes; }; struct tc_cls_u32_knode { struct tcf_exts *exts; struct tcf_result *res; struct tc_u32_sel *sel; u32 handle; u32 val; u32 mask; u32 link_handle; u8 fshift; }; struct tc_cls_u32_hnode { u32 handle; u32 prio; unsigned int divisor; }; struct flow_cls_common_offload { u32 chain_index; __be16 protocol; u32 prio; struct netlink_ext_ack *extack; }; struct tc_cls_u32_offload { struct flow_cls_common_offload common; enum tc_clsu32_command command; union { struct tc_cls_u32_knode knode; struct tc_cls_u32_hnode hnode; }; }; struct tc_u32_mark { __u32 val; __u32 mask; __u32 success; }; enum { TCA_FW_UNSPEC = 0, TCA_FW_CLASSID = 1, TCA_FW_POLICE = 2, TCA_FW_INDEV = 3, TCA_FW_ACT = 4, TCA_FW_MASK = 5, __TCA_FW_MAX = 6, }; struct fw_filter { struct fw_filter __attribute__((btf_type_tag("rcu"))) *next; u32 id; struct tcf_result res; int ifindex; struct tcf_exts exts; struct tcf_proto *tp; struct rcu_work rwork; }; struct fw_head { u32 mask; struct fw_filter __attribute__((btf_type_tag("rcu"))) *ht[256]; struct callback_head rcu; }; enum { TCA_BASIC_UNSPEC = 0, TCA_BASIC_CLASSID = 1, TCA_BASIC_EMATCHES = 2, TCA_BASIC_ACT = 3, TCA_BASIC_POLICE = 4, TCA_BASIC_PCNT = 5, TCA_BASIC_PAD = 6, __TCA_BASIC_MAX = 7, }; struct tcf_ematch_tree_hdr { __u16 nmatches; __u16 progid; }; struct tcf_ematch; struct tcf_ematch_tree { struct tcf_ematch_tree_hdr hdr; struct tcf_ematch *matches; }; struct tc_basic_pcnt; struct basic_filter { u32 handle; struct tcf_exts exts; struct tcf_ematch_tree ematches; struct tcf_result res; struct tcf_proto *tp; struct list_head link; struct tc_basic_pcnt __attribute__((btf_type_tag("percpu"))) *pf; struct rcu_work rwork; }; struct tcf_ematch_ops; struct tcf_ematch { struct tcf_ematch_ops *ops; unsigned long data; unsigned int datalen; u16 matchid; u16 flags; struct net *net; }; struct tcf_pkt_info; struct tcf_ematch_ops { int kind; int datalen; int (*change)(struct net *, void *, int, struct tcf_ematch *); int (*match)(struct sk_buff *, struct tcf_ematch *, struct tcf_pkt_info *); void (*destroy)(struct tcf_ematch *); int (*dump)(struct sk_buff *, struct tcf_ematch *); struct module *owner; struct list_head link; }; struct tcf_pkt_info { unsigned char *ptr; int nexthdr; }; struct tc_basic_pcnt { __u64 rcnt; __u64 rhit; }; struct basic_head { struct list_head flist; struct idr handle_idr; struct callback_head rcu; }; enum { FLOW_KEY_SRC = 0, FLOW_KEY_DST = 1, FLOW_KEY_PROTO = 2, FLOW_KEY_PROTO_SRC = 3, FLOW_KEY_PROTO_DST = 4, FLOW_KEY_IIF = 5, FLOW_KEY_PRIORITY = 6, FLOW_KEY_MARK = 7, FLOW_KEY_NFCT = 8, FLOW_KEY_NFCT_SRC = 9, FLOW_KEY_NFCT_DST = 10, FLOW_KEY_NFCT_PROTO_SRC = 11, FLOW_KEY_NFCT_PROTO_DST = 12, FLOW_KEY_RTCLASSID = 13, FLOW_KEY_SKUID = 14, FLOW_KEY_SKGID = 15, FLOW_KEY_VLAN_TAG = 16, FLOW_KEY_RXHASH = 17, __FLOW_KEY_MAX = 18, }; enum { FLOW_MODE_MAP = 0, FLOW_MODE_HASH = 1, }; enum { TCA_FLOW_UNSPEC = 0, TCA_FLOW_KEYS = 1, TCA_FLOW_MODE = 2, TCA_FLOW_BASECLASS = 3, TCA_FLOW_RSHIFT = 4, TCA_FLOW_ADDEND = 5, TCA_FLOW_MASK = 6, TCA_FLOW_XOR = 7, TCA_FLOW_DIVISOR = 8, TCA_FLOW_ACT = 9, TCA_FLOW_POLICE = 10, TCA_FLOW_EMATCHES = 11, TCA_FLOW_PERTURB = 12, __TCA_FLOW_MAX = 13, }; struct flow_filter { struct list_head list; struct tcf_exts exts; struct tcf_ematch_tree ematches; struct tcf_proto *tp; struct timer_list perturb_timer; u32 perturb_period; 
u32 handle; u32 nkeys; u32 keymask; u32 mode; u32 mask; u32 xor; u32 rshift; u32 addend; u32 divisor; u32 baseclass; u32 hashrnd; struct rcu_work rwork; }; struct flow_head { struct list_head filters; struct callback_head rcu; }; enum tc_clsbpf_command { TC_CLSBPF_OFFLOAD = 0, TC_CLSBPF_STATS = 1, }; enum { TCA_BPF_UNSPEC = 0, TCA_BPF_ACT = 1, TCA_BPF_POLICE = 2, TCA_BPF_CLASSID = 3, TCA_BPF_OPS_LEN = 4, TCA_BPF_OPS = 5, TCA_BPF_FD = 6, TCA_BPF_NAME = 7, TCA_BPF_FLAGS = 8, TCA_BPF_FLAGS_GEN = 9, TCA_BPF_TAG = 10, TCA_BPF_ID = 11, __TCA_BPF_MAX = 12, }; struct cls_bpf_prog { struct bpf_prog *filter; struct list_head link; struct tcf_result res; bool exts_integrated; u32 gen_flags; unsigned int in_hw_count; struct tcf_exts exts; u32 handle; u16 bpf_num_ops; struct sock_filter *bpf_ops; const char *bpf_name; struct tcf_proto *tp; struct rcu_work rwork; }; struct tc_cls_bpf_offload { struct flow_cls_common_offload common; enum tc_clsbpf_command command; struct tcf_exts *exts; struct bpf_prog *prog; struct bpf_prog *oldprog; const char *name; bool exts_integrated; }; struct cls_bpf_head { struct list_head plist; struct idr handle_idr; struct callback_head rcu; }; enum tc_matchall_command { TC_CLSMATCHALL_REPLACE = 0, TC_CLSMATCHALL_DESTROY = 1, TC_CLSMATCHALL_STATS = 2, }; enum { TCA_MATCHALL_UNSPEC = 0, TCA_MATCHALL_CLASSID = 1, TCA_MATCHALL_ACT = 2, TCA_MATCHALL_FLAGS = 3, TCA_MATCHALL_PCNT = 4, TCA_MATCHALL_PAD = 5, __TCA_MATCHALL_MAX = 6, }; struct tc_matchall_pcnt; struct cls_mall_head { struct tcf_exts exts; struct tcf_result res; u32 handle; u32 flags; unsigned int in_hw_count; struct tc_matchall_pcnt __attribute__((btf_type_tag("percpu"))) *pf; struct rcu_work rwork; bool deleting; }; struct tc_matchall_pcnt { __u64 rhit; }; struct tc_cls_matchall_offload { struct flow_cls_common_offload common; enum tc_matchall_command command; struct flow_rule *rule; struct flow_stats stats; bool use_act_stats; unsigned long cookie; }; enum { TCA_EMATCH_TREE_UNSPEC = 0, TCA_EMATCH_TREE_HDR = 1, TCA_EMATCH_TREE_LIST = 2, __TCA_EMATCH_TREE_MAX = 3, }; struct tcf_ematch_hdr { __u16 matchid; __u16 kind; __u16 flags; __u16 pad; }; enum { TCF_EM_ALIGN_U8 = 1, TCF_EM_ALIGN_U16 = 2, TCF_EM_ALIGN_U32 = 4, }; enum { TCF_EM_OPND_EQ = 0, TCF_EM_OPND_GT = 1, TCF_EM_OPND_LT = 2, }; enum { TCF_LAYER_LINK = 0, TCF_LAYER_NETWORK = 1, TCF_LAYER_TRANSPORT = 2, __TCF_LAYER_MAX = 3, }; struct tcf_em_cmp { __u32 val; __u32 mask; __u16 off; __u8 align: 4; __u8 flags: 4; __u8 layer: 4; __u8 opnd: 4; }; struct tcf_em_nbyte { __u16 off; __u16 len: 12; __u8 layer: 4; }; struct nbyte_data { struct tcf_em_nbyte hdr; char pattern[0]; }; struct meta_value; struct meta_obj; struct meta_ops { void (*get)(struct sk_buff *, struct tcf_pkt_info *, struct meta_value *, struct meta_obj *, int *); }; struct tcf_meta_val { __u16 kind; __u8 shift; __u8 op; }; struct meta_value { struct tcf_meta_val hdr; unsigned long val; unsigned int len; }; struct meta_obj { unsigned long value; unsigned int len; }; struct meta_type_ops { void (*destroy)(struct meta_value *); int (*compare)(struct meta_obj *, struct meta_obj *); int (*change)(struct meta_value *, struct nlattr *); void (*apply_extras)(struct meta_value *, struct meta_obj *); int (*dump)(struct sk_buff *, struct meta_value *, int); }; enum { TCA_EM_META_UNSPEC = 0, TCA_EM_META_HDR = 1, TCA_EM_META_LVALUE = 2, TCA_EM_META_RVALUE = 3, __TCA_EM_META_MAX = 4, }; enum { TCF_META_TYPE_VAR = 0, TCF_META_TYPE_INT = 1, __TCF_META_TYPE_MAX = 2, }; enum { TCF_META_ID_VALUE = 0, TCF_META_ID_RANDOM = 1, 
TCF_META_ID_LOADAVG_0 = 2, TCF_META_ID_LOADAVG_1 = 3, TCF_META_ID_LOADAVG_2 = 4, TCF_META_ID_DEV = 5, TCF_META_ID_PRIORITY = 6, TCF_META_ID_PROTOCOL = 7, TCF_META_ID_PKTTYPE = 8, TCF_META_ID_PKTLEN = 9, TCF_META_ID_DATALEN = 10, TCF_META_ID_MACLEN = 11, TCF_META_ID_NFMARK = 12, TCF_META_ID_TCINDEX = 13, TCF_META_ID_RTCLASSID = 14, TCF_META_ID_RTIIF = 15, TCF_META_ID_SK_FAMILY = 16, TCF_META_ID_SK_STATE = 17, TCF_META_ID_SK_REUSE = 18, TCF_META_ID_SK_BOUND_IF = 19, TCF_META_ID_SK_REFCNT = 20, TCF_META_ID_SK_SHUTDOWN = 21, TCF_META_ID_SK_PROTO = 22, TCF_META_ID_SK_TYPE = 23, TCF_META_ID_SK_RCVBUF = 24, TCF_META_ID_SK_RMEM_ALLOC = 25, TCF_META_ID_SK_WMEM_ALLOC = 26, TCF_META_ID_SK_OMEM_ALLOC = 27, TCF_META_ID_SK_WMEM_QUEUED = 28, TCF_META_ID_SK_RCV_QLEN = 29, TCF_META_ID_SK_SND_QLEN = 30, TCF_META_ID_SK_ERR_QLEN = 31, TCF_META_ID_SK_FORWARD_ALLOCS = 32, TCF_META_ID_SK_SNDBUF = 33, TCF_META_ID_SK_ALLOCS = 34, __TCF_META_ID_SK_ROUTE_CAPS = 35, TCF_META_ID_SK_HASH = 36, TCF_META_ID_SK_LINGERTIME = 37, TCF_META_ID_SK_ACK_BACKLOG = 38, TCF_META_ID_SK_MAX_ACK_BACKLOG = 39, TCF_META_ID_SK_PRIO = 40, TCF_META_ID_SK_RCVLOWAT = 41, TCF_META_ID_SK_RCVTIMEO = 42, TCF_META_ID_SK_SNDTIMEO = 43, TCF_META_ID_SK_SENDMSG_OFF = 44, TCF_META_ID_SK_WRITE_PENDING = 45, TCF_META_ID_VLAN_TAG = 46, TCF_META_ID_RXHASH = 47, __TCF_META_ID_MAX = 48, }; struct meta_match { struct meta_value lvalue; struct meta_value rvalue; }; struct tcf_meta_hdr { struct tcf_meta_val left; struct tcf_meta_val right; }; struct text_match { u16 from_offset; u16 to_offset; u8 from_layer; u8 to_layer; struct ts_config *config; }; struct tcf_em_text { char algo[16]; __u16 from_offset; __u16 to_offset; __u16 pattern_len; __u8 from_layer: 4; __u8 to_layer: 4; __u8 pad; }; typedef void (*btf_trace_netlink_extack)(void *, const char *); struct listeners; struct netlink_table { struct rhashtable hash; struct hlist_head mc_list; struct listeners __attribute__((btf_type_tag("rcu"))) *listeners; unsigned int flags; unsigned int groups; struct mutex *cb_mutex; struct module *module; int (*bind)(struct net *, int); void (*unbind)(struct net *, int); void (*release)(struct sock *, unsigned long *); int registered; }; struct listeners { struct callback_head rcu; unsigned long masks[0]; }; enum netlink_skb_flags { NETLINK_SKB_DST = 8, }; enum { NETLINK_F_KERNEL_SOCKET = 0, NETLINK_F_RECV_PKTINFO = 1, NETLINK_F_BROADCAST_SEND_ERROR = 2, NETLINK_F_RECV_NO_ENOBUFS = 3, NETLINK_F_LISTEN_ALL_NSID = 4, NETLINK_F_CAP_ACK = 5, NETLINK_F_EXT_ACK = 6, NETLINK_F_STRICT_CHK = 7, }; enum { NETLINK_UNCONNECTED = 0, NETLINK_CONNECTED = 1, }; enum nlmsgerr_attrs { NLMSGERR_ATTR_UNUSED = 0, NLMSGERR_ATTR_MSG = 1, NLMSGERR_ATTR_OFFS = 2, NLMSGERR_ATTR_COOKIE = 3, NLMSGERR_ATTR_POLICY = 4, NLMSGERR_ATTR_MISS_TYPE = 5, NLMSGERR_ATTR_MISS_NEST = 6, __NLMSGERR_ATTR_MAX = 7, NLMSGERR_ATTR_MAX = 6, }; struct trace_event_raw_netlink_extack { struct trace_entry ent; u32 __data_loc_msg; char __data[0]; }; struct netlink_tap { struct net_device *dev; struct module *module; struct list_head list; }; struct netlink_sock { struct sock sk; unsigned long flags; u32 portid; u32 dst_portid; u32 dst_group; u32 subscriptions; u32 ngroups; unsigned long *groups; unsigned long state; size_t max_recvmsg_len; wait_queue_head_t wait; bool bound; bool cb_running; int dump_done_errno; struct netlink_callback cb; struct mutex *cb_mutex; struct mutex cb_def_mutex; void (*netlink_rcv)(struct sk_buff *); int (*netlink_bind)(struct net *, int); void (*netlink_unbind)(struct net *, int); void 
(*netlink_release)(struct sock *, unsigned long *); struct module *module; struct rhash_head node; struct callback_head rcu; struct work_struct work; }; struct sockaddr_nl { __kernel_sa_family_t nl_family; unsigned short nl_pad; __u32 nl_pid; __u32 nl_groups; }; struct trace_event_data_offsets_netlink_extack { u32 msg; }; struct netlink_tap_net { struct list_head netlink_tap_all; struct mutex netlink_tap_lock; }; struct netlink_broadcast_data { struct sock *exclude_sk; struct net *net; u32 portid; u32 group; int failure; int delivery_failure; int congested; int delivered; gfp_t allocation; struct sk_buff *skb; struct sk_buff *skb2; int (*tx_filter)(struct sock *, struct sk_buff *, void *); void *tx_data; }; struct netlink_set_err_data { struct sock *exclude_sk; u32 portid; u32 group; int code; }; struct netlink_compare_arg { possible_net_t pnet; u32 portid; }; struct nl_pktinfo { __u32 group; }; struct nl_seq_iter { struct seq_net_private p; struct rhashtable_iter hti; int link; }; struct bpf_iter__netlink { union { struct bpf_iter_meta *meta; }; union { struct netlink_sock *sk; }; }; struct nlmsgerr { int error; struct nlmsghdr msg; }; struct netlink_notify { struct net *net; u32 portid; int protocol; }; enum { CTRL_CMD_UNSPEC = 0, CTRL_CMD_NEWFAMILY = 1, CTRL_CMD_DELFAMILY = 2, CTRL_CMD_GETFAMILY = 3, CTRL_CMD_NEWOPS = 4, CTRL_CMD_DELOPS = 5, CTRL_CMD_GETOPS = 6, CTRL_CMD_NEWMCAST_GRP = 7, CTRL_CMD_DELMCAST_GRP = 8, CTRL_CMD_GETMCAST_GRP = 9, CTRL_CMD_GETPOLICY = 10, __CTRL_CMD_MAX = 11, }; enum genl_validate_flags { GENL_DONT_VALIDATE_STRICT = 1, GENL_DONT_VALIDATE_DUMP = 2, GENL_DONT_VALIDATE_DUMP_STRICT = 4, }; enum { CTRL_ATTR_UNSPEC = 0, CTRL_ATTR_FAMILY_ID = 1, CTRL_ATTR_FAMILY_NAME = 2, CTRL_ATTR_VERSION = 3, CTRL_ATTR_HDRSIZE = 4, CTRL_ATTR_MAXATTR = 5, CTRL_ATTR_OPS = 6, CTRL_ATTR_MCAST_GROUPS = 7, CTRL_ATTR_POLICY = 8, CTRL_ATTR_OP_POLICY = 9, CTRL_ATTR_OP = 10, __CTRL_ATTR_MAX = 11, }; enum { CTRL_ATTR_OP_UNSPEC = 0, CTRL_ATTR_OP_ID = 1, CTRL_ATTR_OP_FLAGS = 2, __CTRL_ATTR_OP_MAX = 3, }; enum { CTRL_ATTR_MCAST_GRP_UNSPEC = 0, CTRL_ATTR_MCAST_GRP_NAME = 1, CTRL_ATTR_MCAST_GRP_ID = 2, __CTRL_ATTR_MCAST_GRP_MAX = 3, }; enum { CTRL_ATTR_POLICY_UNSPEC = 0, CTRL_ATTR_POLICY_DO = 1, CTRL_ATTR_POLICY_DUMP = 2, __CTRL_ATTR_POLICY_DUMP_MAX = 3, CTRL_ATTR_POLICY_DUMP_MAX = 2, }; struct genl_op_iter { const struct genl_family *family; struct genl_split_ops doit; struct genl_split_ops dumpit; int cmd_idx; int entry_idx; u32 cmd; u8 flags; }; struct netlink_policy_dump_state; struct ctrl_dump_policy_ctx { struct netlink_policy_dump_state *state; const struct genl_family *rt; struct genl_op_iter *op_iter; u32 op; u16 fam_id; u8 dump_map: 1; u8 single_op: 1; }; struct genl_start_context { const struct genl_family *family; struct nlmsghdr *nlh; struct netlink_ext_ack *extack; const struct genl_split_ops *ops; int hdrlen; }; enum netlink_attribute_type { NL_ATTR_TYPE_INVALID = 0, NL_ATTR_TYPE_FLAG = 1, NL_ATTR_TYPE_U8 = 2, NL_ATTR_TYPE_U16 = 3, NL_ATTR_TYPE_U32 = 4, NL_ATTR_TYPE_U64 = 5, NL_ATTR_TYPE_S8 = 6, NL_ATTR_TYPE_S16 = 7, NL_ATTR_TYPE_S32 = 8, NL_ATTR_TYPE_S64 = 9, NL_ATTR_TYPE_BINARY = 10, NL_ATTR_TYPE_STRING = 11, NL_ATTR_TYPE_NUL_STRING = 12, NL_ATTR_TYPE_NESTED = 13, NL_ATTR_TYPE_NESTED_ARRAY = 14, NL_ATTR_TYPE_BITFIELD32 = 15, }; enum netlink_policy_type_attr { NL_POLICY_TYPE_ATTR_UNSPEC = 0, NL_POLICY_TYPE_ATTR_TYPE = 1, NL_POLICY_TYPE_ATTR_MIN_VALUE_S = 2, NL_POLICY_TYPE_ATTR_MAX_VALUE_S = 3, NL_POLICY_TYPE_ATTR_MIN_VALUE_U = 4, NL_POLICY_TYPE_ATTR_MAX_VALUE_U = 5, 
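/*
 * Illustrative sketch (editorial comment, not generated content): the
 * trace_event_raw_netlink_extack record defined above is the context layout a
 * plain "tracepoint/netlink/netlink_extack" BPF program receives. Its
 * __data_loc_msg field follows the usual __data_loc convention: the low 16
 * bits hold the offset of the string from the start of the record, the high
 * 16 bits hold its length. SEC(), bpf_probe_read_kernel_str() and bpf_printk()
 * are assumed to come from libbpf's bpf_helpers.h; the example is kept inside
 * this comment so the header itself is unchanged.
 *
 * SEC("tracepoint/netlink/netlink_extack")
 * int log_extack(struct trace_event_raw_netlink_extack *ctx)
 * {
 *         char msg[64];
 *         unsigned int off = ctx->__data_loc_msg & 0xffff;  // low 16 bits: offset into the record
 *
 *         bpf_probe_read_kernel_str(msg, sizeof(msg), (void *)ctx + off);
 *         bpf_printk("netlink extack: %s", msg);
 *         return 0;
 * }
 */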
NL_POLICY_TYPE_ATTR_MIN_LENGTH = 6, NL_POLICY_TYPE_ATTR_MAX_LENGTH = 7, NL_POLICY_TYPE_ATTR_POLICY_IDX = 8, NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE = 9, NL_POLICY_TYPE_ATTR_BITFIELD32_MASK = 10, NL_POLICY_TYPE_ATTR_PAD = 11, NL_POLICY_TYPE_ATTR_MASK = 12, __NL_POLICY_TYPE_ATTR_MAX = 13, NL_POLICY_TYPE_ATTR_MAX = 12, }; struct netlink_policy_dump_state { unsigned int policy_idx; unsigned int attr_idx; unsigned int n_alloc; struct { const struct nla_policy *policy; unsigned int maxtype; } policies[0]; }; typedef void (*btf_trace_bpf_test_finish)(void *, int *); struct bpf_test_timer { enum { NO_PREEMPT = 0, NO_MIGRATE = 1, } mode; u32 i; u64 time_start; u64 time_spent; }; enum nf_inet_hooks { NF_INET_PRE_ROUTING = 0, NF_INET_LOCAL_IN = 1, NF_INET_FORWARD = 2, NF_INET_LOCAL_OUT = 3, NF_INET_POST_ROUTING = 4, NF_INET_NUMHOOKS = 5, NF_INET_INGRESS = 5, }; struct bpf_fentry_test_t { struct bpf_fentry_test_t *a; }; struct trace_event_raw_bpf_test_finish { struct trace_entry ent; int err; char __data[0]; }; struct xdp_test_data { struct xdp_buff *orig_ctx; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct xdp_rxq_info rxq; struct net_device *dev; struct page_pool *pp; struct xdp_frame **frames; struct sk_buff **skbs; struct xdp_mem_info mem; u32 batch_size; u32 frame_cnt; long: 64; long: 64; }; struct xdp_page_head { struct xdp_buff orig_ctx; struct xdp_buff ctx; union { struct { struct {} __empty_frame; struct xdp_frame frame[0]; }; struct { struct {} __empty_data; u8 data[0]; }; }; }; struct trace_event_data_offsets_bpf_test_finish {}; struct prog_test_member1 { int a; }; struct prog_test_member { struct prog_test_member1 m; int c; }; struct prog_test_ref_kfunc { int a; int b; struct prog_test_member memb; struct prog_test_ref_kfunc *next; refcount_t cnt; }; struct bpf_raw_tp_test_run_info { struct bpf_prog *prog; void *ctx; u32 retval; }; struct bpf_dummy_ops_test_args { u64 args[12]; struct bpf_dummy_ops_state state; }; typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *, ...); enum ethtool_flags { ETH_FLAG_TXVLAN = 128, ETH_FLAG_RXVLAN = 256, ETH_FLAG_LRO = 32768, ETH_FLAG_NTUPLE = 134217728, ETH_FLAG_RXHASH = 268435456, }; enum ethtool_sfeatures_retval_bits { ETHTOOL_F_UNSUPPORTED__BIT = 0, ETHTOOL_F_WISH__BIT = 1, ETHTOOL_F_COMPAT__BIT = 2, }; enum tunable_id { ETHTOOL_ID_UNSPEC = 0, ETHTOOL_RX_COPYBREAK = 1, ETHTOOL_TX_COPYBREAK = 2, ETHTOOL_PFC_PREVENTION_TOUT = 3, ETHTOOL_TX_COPYBREAK_BUF_SIZE = 4, __ETHTOOL_TUNABLE_COUNT = 5, }; enum tunable_type_id { ETHTOOL_TUNABLE_UNSPEC = 0, ETHTOOL_TUNABLE_U8 = 1, ETHTOOL_TUNABLE_U16 = 2, ETHTOOL_TUNABLE_U32 = 3, ETHTOOL_TUNABLE_U64 = 4, ETHTOOL_TUNABLE_STRING = 5, ETHTOOL_TUNABLE_S8 = 6, ETHTOOL_TUNABLE_S16 = 7, ETHTOOL_TUNABLE_S32 = 8, ETHTOOL_TUNABLE_S64 = 9, }; enum phy_tunable_id { ETHTOOL_PHY_ID_UNSPEC = 0, ETHTOOL_PHY_DOWNSHIFT = 1, ETHTOOL_PHY_FAST_LINK_DOWN = 2, ETHTOOL_PHY_EDPD = 3, __ETHTOOL_PHY_TUNABLE_COUNT = 4, }; enum ethtool_fec_config_bits { ETHTOOL_FEC_NONE_BIT = 0, ETHTOOL_FEC_AUTO_BIT = 1, ETHTOOL_FEC_OFF_BIT = 2, ETHTOOL_FEC_RS_BIT = 3, ETHTOOL_FEC_BASER_BIT = 4, ETHTOOL_FEC_LLRS_BIT = 5, }; struct ethtool_rx_flow_key { struct flow_dissector_key_basic basic; union { struct flow_dissector_key_ipv4_addrs ipv4; struct flow_dissector_key_ipv6_addrs ipv6; }; struct flow_dissector_key_ports tp; struct flow_dissector_key_ip ip; struct flow_dissector_key_vlan vlan; struct flow_dissector_key_eth_addrs eth_addrs; }; struct ethtool_rx_flow_match { struct flow_dissector dissector; struct 
ethtool_rx_flow_key key; struct ethtool_rx_flow_key mask; }; struct ethtool_devlink_compat { struct devlink *devlink; union { struct ethtool_flash efl; struct ethtool_drvinfo info; }; }; struct ethtool_value { __u32 cmd; __u32 data; }; struct ethtool_rx_flow_rule { struct flow_rule *rule; unsigned long priv[0]; }; struct ethtool_cmd { __u32 cmd; __u32 supported; __u32 advertising; __u16 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 transceiver; __u8 autoneg; __u8 mdio_support; __u32 maxtxpkt; __u32 maxrxpkt; __u16 speed_hi; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __u32 lp_advertising; __u32 reserved[2]; }; struct ethtool_link_usettings { struct ethtool_link_settings base; struct { __u32 supported[4]; __u32 advertising[4]; __u32 lp_advertising[4]; } link_modes; }; struct ethtool_rx_flow_spec_input { const struct ethtool_rx_flow_spec *fs; u32 rss_ctx; }; struct ethtool_gstrings { __u32 cmd; __u32 string_set; __u32 len; __u8 data[0]; }; struct ethtool_perm_addr { __u32 cmd; __u32 size; __u8 data[0]; }; struct ethtool_sset_info { __u32 cmd; __u32 reserved; __u64 sset_mask; __u32 data[0]; }; struct ethtool_rxfh { __u32 cmd; __u32 rss_context; __u32 indir_size; __u32 key_size; __u8 hfunc; __u8 rsvd8[3]; __u32 rsvd32; __u32 rss_config[0]; }; struct ethtool_get_features_block { __u32 available; __u32 requested; __u32 active; __u32 never_changed; }; struct ethtool_gfeatures { __u32 cmd; __u32 size; struct ethtool_get_features_block features[0]; }; struct ethtool_set_features_block { __u32 valid; __u32 requested; }; struct ethtool_sfeatures { __u32 cmd; __u32 size; struct ethtool_set_features_block features[0]; }; struct ethtool_per_queue_op { __u32 cmd; __u32 sub_command; __u32 queue_mask[128]; char data[0]; }; struct link_mode_info { int speed; u8 lanes; u8 duplex; }; typedef void (*ethnl_notify_handler_t)(struct net_device *, unsigned int, const void *); struct ethnl_req_info; struct ethnl_reply_data; struct ethnl_request_ops { u8 request_cmd; u8 reply_cmd; u16 hdr_attr; unsigned int req_info_size; unsigned int reply_data_size; bool allow_nodev_do; u8 set_ntf_cmd; int (*parse_request)(struct ethnl_req_info *, struct nlattr **, struct netlink_ext_ack *); int (*prepare_data)(const struct ethnl_req_info *, struct ethnl_reply_data *, const struct genl_info *); int (*reply_size)(const struct ethnl_req_info *, const struct ethnl_reply_data *); int (*fill_reply)(struct sk_buff *, const struct ethnl_req_info *, const struct ethnl_reply_data *); void (*cleanup_data)(struct ethnl_reply_data *); int (*set_validate)(struct ethnl_req_info *, struct genl_info *); int (*set)(struct ethnl_req_info *, struct genl_info *); }; struct ethnl_req_info { struct net_device *dev; netdevice_tracker dev_tracker; u32 flags; }; struct ethnl_reply_data { struct net_device *dev; }; enum { ETHTOOL_A_HEADER_UNSPEC = 0, ETHTOOL_A_HEADER_DEV_INDEX = 1, ETHTOOL_A_HEADER_DEV_NAME = 2, ETHTOOL_A_HEADER_FLAGS = 3, __ETHTOOL_A_HEADER_CNT = 4, ETHTOOL_A_HEADER_MAX = 3, }; enum ethtool_multicast_groups { ETHNL_MCGRP_MONITOR = 0, }; struct ethnl_dump_ctx { const struct ethnl_request_ops *ops; struct ethnl_req_info *req_info; struct ethnl_reply_data *reply_data; unsigned long pos_ifindex; }; enum { ETHTOOL_A_BITSET_UNSPEC = 0, ETHTOOL_A_BITSET_NOMASK = 1, ETHTOOL_A_BITSET_SIZE = 2, ETHTOOL_A_BITSET_BITS = 3, ETHTOOL_A_BITSET_VALUE = 4, ETHTOOL_A_BITSET_MASK = 5, __ETHTOOL_A_BITSET_CNT = 6, ETHTOOL_A_BITSET_MAX = 5, }; enum { ETHTOOL_A_BITSET_BITS_UNSPEC = 0, ETHTOOL_A_BITSET_BITS_BIT = 1, __ETHTOOL_A_BITSET_BITS_CNT = 2, 
ETHTOOL_A_BITSET_BITS_MAX = 1, }; enum { ETHTOOL_A_BITSET_BIT_UNSPEC = 0, ETHTOOL_A_BITSET_BIT_INDEX = 1, ETHTOOL_A_BITSET_BIT_NAME = 2, ETHTOOL_A_BITSET_BIT_VALUE = 3, __ETHTOOL_A_BITSET_BIT_CNT = 4, ETHTOOL_A_BITSET_BIT_MAX = 3, }; typedef const char (* const ethnl_string_array_t)[32]; struct strset_info { bool per_dev; bool free_strings; unsigned int count; const char (*strings)[32]; }; enum { ETHTOOL_A_STRSET_UNSPEC = 0, ETHTOOL_A_STRSET_HEADER = 1, ETHTOOL_A_STRSET_STRINGSETS = 2, ETHTOOL_A_STRSET_COUNTS_ONLY = 3, __ETHTOOL_A_STRSET_CNT = 4, ETHTOOL_A_STRSET_MAX = 3, }; enum { ETHTOOL_A_STRINGSETS_UNSPEC = 0, ETHTOOL_A_STRINGSETS_STRINGSET = 1, __ETHTOOL_A_STRINGSETS_CNT = 2, ETHTOOL_A_STRINGSETS_MAX = 1, }; enum { ETHTOOL_A_STRINGSET_UNSPEC = 0, ETHTOOL_A_STRINGSET_ID = 1, ETHTOOL_A_STRINGSET_COUNT = 2, ETHTOOL_A_STRINGSET_STRINGS = 3, __ETHTOOL_A_STRINGSET_CNT = 4, ETHTOOL_A_STRINGSET_MAX = 3, }; enum { ETHTOOL_A_STRINGS_UNSPEC = 0, ETHTOOL_A_STRINGS_STRING = 1, __ETHTOOL_A_STRINGS_CNT = 2, ETHTOOL_A_STRINGS_MAX = 1, }; enum { ETHTOOL_A_STRING_UNSPEC = 0, ETHTOOL_A_STRING_INDEX = 1, ETHTOOL_A_STRING_VALUE = 2, __ETHTOOL_A_STRING_CNT = 3, ETHTOOL_A_STRING_MAX = 2, }; struct strset_req_info { struct ethnl_req_info base; u32 req_ids; bool counts_only; }; struct strset_reply_data { struct ethnl_reply_data base; struct strset_info sets[21]; }; enum { ETHTOOL_A_LINKINFO_UNSPEC = 0, ETHTOOL_A_LINKINFO_HEADER = 1, ETHTOOL_A_LINKINFO_PORT = 2, ETHTOOL_A_LINKINFO_PHYADDR = 3, ETHTOOL_A_LINKINFO_TP_MDIX = 4, ETHTOOL_A_LINKINFO_TP_MDIX_CTRL = 5, ETHTOOL_A_LINKINFO_TRANSCEIVER = 6, __ETHTOOL_A_LINKINFO_CNT = 7, ETHTOOL_A_LINKINFO_MAX = 6, }; struct linkinfo_reply_data { struct ethnl_reply_data base; struct ethtool_link_ksettings ksettings; struct ethtool_link_settings *lsettings; }; enum { ETHTOOL_A_LINKMODES_UNSPEC = 0, ETHTOOL_A_LINKMODES_HEADER = 1, ETHTOOL_A_LINKMODES_AUTONEG = 2, ETHTOOL_A_LINKMODES_OURS = 3, ETHTOOL_A_LINKMODES_PEER = 4, ETHTOOL_A_LINKMODES_SPEED = 5, ETHTOOL_A_LINKMODES_DUPLEX = 6, ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG = 7, ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE = 8, ETHTOOL_A_LINKMODES_LANES = 9, ETHTOOL_A_LINKMODES_RATE_MATCHING = 10, __ETHTOOL_A_LINKMODES_CNT = 11, ETHTOOL_A_LINKMODES_MAX = 10, }; struct linkmodes_reply_data { struct ethnl_reply_data base; struct ethtool_link_ksettings ksettings; struct ethtool_link_settings *lsettings; bool peer_empty; }; enum { ETHTOOL_A_RSS_UNSPEC = 0, ETHTOOL_A_RSS_HEADER = 1, ETHTOOL_A_RSS_CONTEXT = 2, ETHTOOL_A_RSS_HFUNC = 3, ETHTOOL_A_RSS_INDIR = 4, ETHTOOL_A_RSS_HKEY = 5, __ETHTOOL_A_RSS_CNT = 6, ETHTOOL_A_RSS_MAX = 5, }; struct rss_req_info { struct ethnl_req_info base; u32 rss_context; }; struct rss_reply_data { struct ethnl_reply_data base; u32 indir_size; u32 hkey_size; u32 hfunc; u32 *indir_table; u8 *hkey; }; enum { ETHTOOL_A_LINKSTATE_UNSPEC = 0, ETHTOOL_A_LINKSTATE_HEADER = 1, ETHTOOL_A_LINKSTATE_LINK = 2, ETHTOOL_A_LINKSTATE_SQI = 3, ETHTOOL_A_LINKSTATE_SQI_MAX = 4, ETHTOOL_A_LINKSTATE_EXT_STATE = 5, ETHTOOL_A_LINKSTATE_EXT_SUBSTATE = 6, ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT = 7, __ETHTOOL_A_LINKSTATE_CNT = 8, ETHTOOL_A_LINKSTATE_MAX = 7, }; struct linkstate_reply_data { struct ethnl_reply_data base; int link; int sqi; int sqi_max; struct ethtool_link_ext_stats link_stats; bool link_ext_state_provided; struct ethtool_link_ext_state_info ethtool_link_ext_state_info; }; enum { ETHTOOL_A_DEBUG_UNSPEC = 0, ETHTOOL_A_DEBUG_HEADER = 1, ETHTOOL_A_DEBUG_MSGMASK = 2, __ETHTOOL_A_DEBUG_CNT = 3, ETHTOOL_A_DEBUG_MAX = 2, }; struct 
debug_reply_data { struct ethnl_reply_data base; u32 msg_mask; }; enum { ETHTOOL_A_WOL_UNSPEC = 0, ETHTOOL_A_WOL_HEADER = 1, ETHTOOL_A_WOL_MODES = 2, ETHTOOL_A_WOL_SOPASS = 3, __ETHTOOL_A_WOL_CNT = 4, ETHTOOL_A_WOL_MAX = 3, }; struct wol_reply_data { struct ethnl_reply_data base; struct ethtool_wolinfo wol; bool show_sopass; }; enum { ETHTOOL_A_FEATURES_UNSPEC = 0, ETHTOOL_A_FEATURES_HEADER = 1, ETHTOOL_A_FEATURES_HW = 2, ETHTOOL_A_FEATURES_WANTED = 3, ETHTOOL_A_FEATURES_ACTIVE = 4, ETHTOOL_A_FEATURES_NOCHANGE = 5, __ETHTOOL_A_FEATURES_CNT = 6, ETHTOOL_A_FEATURES_MAX = 5, }; struct features_reply_data { struct ethnl_reply_data base; u32 hw[2]; u32 wanted[2]; u32 active[2]; u32 nochange[2]; u32 all[2]; }; enum { ETHTOOL_A_PRIVFLAGS_UNSPEC = 0, ETHTOOL_A_PRIVFLAGS_HEADER = 1, ETHTOOL_A_PRIVFLAGS_FLAGS = 2, __ETHTOOL_A_PRIVFLAGS_CNT = 3, ETHTOOL_A_PRIVFLAGS_MAX = 2, }; struct privflags_reply_data { struct ethnl_reply_data base; const char (*priv_flag_names)[32]; unsigned int n_priv_flags; u32 priv_flags; }; enum { ETHTOOL_TCP_DATA_SPLIT_UNKNOWN = 0, ETHTOOL_TCP_DATA_SPLIT_DISABLED = 1, ETHTOOL_TCP_DATA_SPLIT_ENABLED = 2, }; enum { ETHTOOL_A_RINGS_UNSPEC = 0, ETHTOOL_A_RINGS_HEADER = 1, ETHTOOL_A_RINGS_RX_MAX = 2, ETHTOOL_A_RINGS_RX_MINI_MAX = 3, ETHTOOL_A_RINGS_RX_JUMBO_MAX = 4, ETHTOOL_A_RINGS_TX_MAX = 5, ETHTOOL_A_RINGS_RX = 6, ETHTOOL_A_RINGS_RX_MINI = 7, ETHTOOL_A_RINGS_RX_JUMBO = 8, ETHTOOL_A_RINGS_TX = 9, ETHTOOL_A_RINGS_RX_BUF_LEN = 10, ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 11, ETHTOOL_A_RINGS_CQE_SIZE = 12, ETHTOOL_A_RINGS_TX_PUSH = 13, ETHTOOL_A_RINGS_RX_PUSH = 14, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN = 15, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX = 16, __ETHTOOL_A_RINGS_CNT = 17, ETHTOOL_A_RINGS_MAX = 16, }; enum ethtool_supported_ring_param { ETHTOOL_RING_USE_RX_BUF_LEN = 1, ETHTOOL_RING_USE_CQE_SIZE = 2, ETHTOOL_RING_USE_TX_PUSH = 4, ETHTOOL_RING_USE_RX_PUSH = 8, ETHTOOL_RING_USE_TX_PUSH_BUF_LEN = 16, }; struct rings_reply_data { struct ethnl_reply_data base; struct ethtool_ringparam ringparam; struct kernel_ethtool_ringparam kernel_ringparam; u32 supported_ring_params; }; enum { ETHTOOL_A_CHANNELS_UNSPEC = 0, ETHTOOL_A_CHANNELS_HEADER = 1, ETHTOOL_A_CHANNELS_RX_MAX = 2, ETHTOOL_A_CHANNELS_TX_MAX = 3, ETHTOOL_A_CHANNELS_OTHER_MAX = 4, ETHTOOL_A_CHANNELS_COMBINED_MAX = 5, ETHTOOL_A_CHANNELS_RX_COUNT = 6, ETHTOOL_A_CHANNELS_TX_COUNT = 7, ETHTOOL_A_CHANNELS_OTHER_COUNT = 8, ETHTOOL_A_CHANNELS_COMBINED_COUNT = 9, __ETHTOOL_A_CHANNELS_CNT = 10, ETHTOOL_A_CHANNELS_MAX = 9, }; struct channels_reply_data { struct ethnl_reply_data base; struct ethtool_channels channels; }; enum { ETHTOOL_A_COALESCE_UNSPEC = 0, ETHTOOL_A_COALESCE_HEADER = 1, ETHTOOL_A_COALESCE_RX_USECS = 2, ETHTOOL_A_COALESCE_RX_MAX_FRAMES = 3, ETHTOOL_A_COALESCE_RX_USECS_IRQ = 4, ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ = 5, ETHTOOL_A_COALESCE_TX_USECS = 6, ETHTOOL_A_COALESCE_TX_MAX_FRAMES = 7, ETHTOOL_A_COALESCE_TX_USECS_IRQ = 8, ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ = 9, ETHTOOL_A_COALESCE_STATS_BLOCK_USECS = 10, ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX = 11, ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX = 12, ETHTOOL_A_COALESCE_PKT_RATE_LOW = 13, ETHTOOL_A_COALESCE_RX_USECS_LOW = 14, ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW = 15, ETHTOOL_A_COALESCE_TX_USECS_LOW = 16, ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW = 17, ETHTOOL_A_COALESCE_PKT_RATE_HIGH = 18, ETHTOOL_A_COALESCE_RX_USECS_HIGH = 19, ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH = 20, ETHTOOL_A_COALESCE_TX_USECS_HIGH = 21, ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH = 22, ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 23, 
ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 24, ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 25, ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES = 26, ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES = 27, ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS = 28, __ETHTOOL_A_COALESCE_CNT = 29, ETHTOOL_A_COALESCE_MAX = 28, }; struct coalesce_reply_data { struct ethnl_reply_data base; struct ethtool_coalesce coalesce; struct kernel_ethtool_coalesce kernel_coalesce; u32 supported_params; }; enum { ETHTOOL_A_PAUSE_UNSPEC = 0, ETHTOOL_A_PAUSE_HEADER = 1, ETHTOOL_A_PAUSE_AUTONEG = 2, ETHTOOL_A_PAUSE_RX = 3, ETHTOOL_A_PAUSE_TX = 4, ETHTOOL_A_PAUSE_STATS = 5, ETHTOOL_A_PAUSE_STATS_SRC = 6, __ETHTOOL_A_PAUSE_CNT = 7, ETHTOOL_A_PAUSE_MAX = 6, }; enum { ETHTOOL_A_PAUSE_STAT_UNSPEC = 0, ETHTOOL_A_PAUSE_STAT_PAD = 1, ETHTOOL_A_PAUSE_STAT_TX_FRAMES = 2, ETHTOOL_A_PAUSE_STAT_RX_FRAMES = 3, __ETHTOOL_A_PAUSE_STAT_CNT = 4, ETHTOOL_A_PAUSE_STAT_MAX = 3, }; struct pause_req_info { struct ethnl_req_info base; enum ethtool_mac_stats_src src; }; struct pause_reply_data { struct ethnl_reply_data base; struct ethtool_pauseparam pauseparam; struct ethtool_pause_stats pausestat; }; enum { ETHTOOL_A_EEE_UNSPEC = 0, ETHTOOL_A_EEE_HEADER = 1, ETHTOOL_A_EEE_MODES_OURS = 2, ETHTOOL_A_EEE_MODES_PEER = 3, ETHTOOL_A_EEE_ACTIVE = 4, ETHTOOL_A_EEE_ENABLED = 5, ETHTOOL_A_EEE_TX_LPI_ENABLED = 6, ETHTOOL_A_EEE_TX_LPI_TIMER = 7, __ETHTOOL_A_EEE_CNT = 8, ETHTOOL_A_EEE_MAX = 7, }; struct eee_reply_data { struct ethnl_reply_data base; struct ethtool_eee eee; }; enum { ETHTOOL_A_TSINFO_UNSPEC = 0, ETHTOOL_A_TSINFO_HEADER = 1, ETHTOOL_A_TSINFO_TIMESTAMPING = 2, ETHTOOL_A_TSINFO_TX_TYPES = 3, ETHTOOL_A_TSINFO_RX_FILTERS = 4, ETHTOOL_A_TSINFO_PHC_INDEX = 5, __ETHTOOL_A_TSINFO_CNT = 6, ETHTOOL_A_TSINFO_MAX = 5, }; struct tsinfo_reply_data { struct ethnl_reply_data base; struct ethtool_ts_info ts_info; }; enum { ETHTOOL_A_CABLE_TEST_UNSPEC = 0, ETHTOOL_A_CABLE_TEST_HEADER = 1, __ETHTOOL_A_CABLE_TEST_CNT = 2, ETHTOOL_A_CABLE_TEST_MAX = 1, }; enum { ETHTOOL_A_CABLE_TEST_NTF_UNSPEC = 0, ETHTOOL_A_CABLE_TEST_NTF_HEADER = 1, ETHTOOL_A_CABLE_TEST_NTF_STATUS = 2, ETHTOOL_A_CABLE_TEST_NTF_NEST = 3, __ETHTOOL_A_CABLE_TEST_NTF_CNT = 4, ETHTOOL_A_CABLE_TEST_NTF_MAX = 3, }; enum { ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0, ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 1, ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 2, }; enum { ETHTOOL_A_CABLE_NEST_UNSPEC = 0, ETHTOOL_A_CABLE_NEST_RESULT = 1, ETHTOOL_A_CABLE_NEST_FAULT_LENGTH = 2, __ETHTOOL_A_CABLE_NEST_CNT = 3, ETHTOOL_A_CABLE_NEST_MAX = 2, }; enum { ETHTOOL_A_CABLE_RESULT_UNSPEC = 0, ETHTOOL_A_CABLE_RESULT_PAIR = 1, ETHTOOL_A_CABLE_RESULT_CODE = 2, __ETHTOOL_A_CABLE_RESULT_CNT = 3, ETHTOOL_A_CABLE_RESULT_MAX = 2, }; enum { ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0, ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 1, ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 2, __ETHTOOL_A_CABLE_FAULT_LENGTH_CNT = 3, ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 2, }; enum { ETHTOOL_A_CABLE_TEST_TDR_UNSPEC = 0, ETHTOOL_A_CABLE_TEST_TDR_HEADER = 1, ETHTOOL_A_CABLE_TEST_TDR_CFG = 2, __ETHTOOL_A_CABLE_TEST_TDR_CNT = 3, ETHTOOL_A_CABLE_TEST_TDR_MAX = 2, }; enum { ETHTOOL_A_CABLE_TDR_NEST_UNSPEC = 0, ETHTOOL_A_CABLE_TDR_NEST_STEP = 1, ETHTOOL_A_CABLE_TDR_NEST_AMPLITUDE = 2, ETHTOOL_A_CABLE_TDR_NEST_PULSE = 3, __ETHTOOL_A_CABLE_TDR_NEST_CNT = 4, ETHTOOL_A_CABLE_TDR_NEST_MAX = 3, }; enum { ETHTOOL_A_CABLE_AMPLITUDE_UNSPEC = 0, ETHTOOL_A_CABLE_AMPLITUDE_PAIR = 1, ETHTOOL_A_CABLE_AMPLITUDE_mV = 2, __ETHTOOL_A_CABLE_AMPLITUDE_CNT = 3, ETHTOOL_A_CABLE_AMPLITUDE_MAX = 2, }; enum { ETHTOOL_A_CABLE_PULSE_UNSPEC = 0, 
ETHTOOL_A_CABLE_PULSE_mV = 1, __ETHTOOL_A_CABLE_PULSE_CNT = 2, ETHTOOL_A_CABLE_PULSE_MAX = 1, }; enum { ETHTOOL_A_CABLE_STEP_UNSPEC = 0, ETHTOOL_A_CABLE_STEP_FIRST_DISTANCE = 1, ETHTOOL_A_CABLE_STEP_LAST_DISTANCE = 2, ETHTOOL_A_CABLE_STEP_STEP_DISTANCE = 3, __ETHTOOL_A_CABLE_STEP_CNT = 4, ETHTOOL_A_CABLE_STEP_MAX = 3, }; enum { ETHTOOL_A_CABLE_TEST_TDR_CFG_UNSPEC = 0, ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST = 1, ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST = 2, ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP = 3, ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR = 4, __ETHTOOL_A_CABLE_TEST_TDR_CFG_CNT = 5, ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX = 4, }; enum { ETHTOOL_A_CABLE_PAIR_A = 0, ETHTOOL_A_CABLE_PAIR_B = 1, ETHTOOL_A_CABLE_PAIR_C = 2, ETHTOOL_A_CABLE_PAIR_D = 3, }; enum { ETHTOOL_A_TUNNEL_INFO_UNSPEC = 0, ETHTOOL_A_TUNNEL_INFO_HEADER = 1, ETHTOOL_A_TUNNEL_INFO_UDP_PORTS = 2, __ETHTOOL_A_TUNNEL_INFO_CNT = 3, ETHTOOL_A_TUNNEL_INFO_MAX = 2, }; enum udp_tunnel_nic_info_flags { UDP_TUNNEL_NIC_INFO_MAY_SLEEP = 1, UDP_TUNNEL_NIC_INFO_OPEN_ONLY = 2, UDP_TUNNEL_NIC_INFO_IPV4_ONLY = 4, UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = 8, }; enum { ETHTOOL_UDP_TUNNEL_TYPE_VXLAN = 0, ETHTOOL_UDP_TUNNEL_TYPE_GENEVE = 1, ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE = 2, __ETHTOOL_UDP_TUNNEL_TYPE_CNT = 3, }; enum { ETHTOOL_A_TUNNEL_UDP_UNSPEC = 0, ETHTOOL_A_TUNNEL_UDP_TABLE = 1, __ETHTOOL_A_TUNNEL_UDP_CNT = 2, ETHTOOL_A_TUNNEL_UDP_MAX = 1, }; enum { ETHTOOL_A_TUNNEL_UDP_TABLE_UNSPEC = 0, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE = 1, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES = 2, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY = 3, __ETHTOOL_A_TUNNEL_UDP_TABLE_CNT = 4, ETHTOOL_A_TUNNEL_UDP_TABLE_MAX = 3, }; enum { ETHTOOL_A_TUNNEL_UDP_ENTRY_UNSPEC = 0, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT = 1, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE = 2, __ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT = 3, ETHTOOL_A_TUNNEL_UDP_ENTRY_MAX = 2, }; struct ethnl_tunnel_info_dump_ctx { struct ethnl_req_info req_info; unsigned long ifindex; }; enum { ETHTOOL_A_FEC_UNSPEC = 0, ETHTOOL_A_FEC_HEADER = 1, ETHTOOL_A_FEC_MODES = 2, ETHTOOL_A_FEC_AUTO = 3, ETHTOOL_A_FEC_ACTIVE = 4, ETHTOOL_A_FEC_STATS = 5, __ETHTOOL_A_FEC_CNT = 6, ETHTOOL_A_FEC_MAX = 5, }; enum { ETHTOOL_A_FEC_STAT_UNSPEC = 0, ETHTOOL_A_FEC_STAT_PAD = 1, ETHTOOL_A_FEC_STAT_CORRECTED = 2, ETHTOOL_A_FEC_STAT_UNCORR = 3, ETHTOOL_A_FEC_STAT_CORR_BITS = 4, __ETHTOOL_A_FEC_STAT_CNT = 5, ETHTOOL_A_FEC_STAT_MAX = 4, }; struct fec_stat_grp { u64 stats[9]; u8 cnt; }; struct fec_reply_data { struct ethnl_reply_data base; unsigned long fec_link_modes[2]; u32 active_fec; u8 fec_auto; struct fec_stat_grp corr; struct fec_stat_grp uncorr; struct fec_stat_grp corr_bits; }; enum { ETHTOOL_A_MODULE_EEPROM_UNSPEC = 0, ETHTOOL_A_MODULE_EEPROM_HEADER = 1, ETHTOOL_A_MODULE_EEPROM_OFFSET = 2, ETHTOOL_A_MODULE_EEPROM_LENGTH = 3, ETHTOOL_A_MODULE_EEPROM_PAGE = 4, ETHTOOL_A_MODULE_EEPROM_BANK = 5, ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS = 6, ETHTOOL_A_MODULE_EEPROM_DATA = 7, __ETHTOOL_A_MODULE_EEPROM_CNT = 8, ETHTOOL_A_MODULE_EEPROM_MAX = 7, }; struct eeprom_req_info { struct ethnl_req_info base; u32 offset; u32 length; u8 page; u8 bank; u8 i2c_address; }; struct eeprom_reply_data { struct ethnl_reply_data base; u32 length; u8 *data; }; enum { ETHTOOL_STATS_ETH_PHY = 0, ETHTOOL_STATS_ETH_MAC = 1, ETHTOOL_STATS_ETH_CTRL = 2, ETHTOOL_STATS_RMON = 3, __ETHTOOL_STATS_CNT = 4, }; enum { ETHTOOL_A_STATS_UNSPEC = 0, ETHTOOL_A_STATS_PAD = 1, ETHTOOL_A_STATS_HEADER = 2, ETHTOOL_A_STATS_GROUPS = 3, ETHTOOL_A_STATS_GRP = 4, ETHTOOL_A_STATS_SRC = 5, __ETHTOOL_A_STATS_CNT = 6, ETHTOOL_A_STATS_MAX = 5, }; enum { 
ETHTOOL_A_STATS_GRP_UNSPEC = 0, ETHTOOL_A_STATS_GRP_PAD = 1, ETHTOOL_A_STATS_GRP_ID = 2, ETHTOOL_A_STATS_GRP_SS_ID = 3, ETHTOOL_A_STATS_GRP_STAT = 4, ETHTOOL_A_STATS_GRP_HIST_RX = 5, ETHTOOL_A_STATS_GRP_HIST_TX = 6, ETHTOOL_A_STATS_GRP_HIST_BKT_LOW = 7, ETHTOOL_A_STATS_GRP_HIST_BKT_HI = 8, ETHTOOL_A_STATS_GRP_HIST_VAL = 9, __ETHTOOL_A_STATS_GRP_CNT = 10, ETHTOOL_A_STATS_GRP_MAX = 9, }; enum { ETHTOOL_A_STATS_ETH_PHY_5_SYM_ERR = 0, __ETHTOOL_A_STATS_ETH_PHY_CNT = 1, ETHTOOL_A_STATS_ETH_PHY_MAX = 0, }; enum { ETHTOOL_A_STATS_ETH_MAC_2_TX_PKT = 0, ETHTOOL_A_STATS_ETH_MAC_3_SINGLE_COL = 1, ETHTOOL_A_STATS_ETH_MAC_4_MULTI_COL = 2, ETHTOOL_A_STATS_ETH_MAC_5_RX_PKT = 3, ETHTOOL_A_STATS_ETH_MAC_6_FCS_ERR = 4, ETHTOOL_A_STATS_ETH_MAC_7_ALIGN_ERR = 5, ETHTOOL_A_STATS_ETH_MAC_8_TX_BYTES = 6, ETHTOOL_A_STATS_ETH_MAC_9_TX_DEFER = 7, ETHTOOL_A_STATS_ETH_MAC_10_LATE_COL = 8, ETHTOOL_A_STATS_ETH_MAC_11_XS_COL = 9, ETHTOOL_A_STATS_ETH_MAC_12_TX_INT_ERR = 10, ETHTOOL_A_STATS_ETH_MAC_13_CS_ERR = 11, ETHTOOL_A_STATS_ETH_MAC_14_RX_BYTES = 12, ETHTOOL_A_STATS_ETH_MAC_15_RX_INT_ERR = 13, ETHTOOL_A_STATS_ETH_MAC_18_TX_MCAST = 14, ETHTOOL_A_STATS_ETH_MAC_19_TX_BCAST = 15, ETHTOOL_A_STATS_ETH_MAC_20_XS_DEFER = 16, ETHTOOL_A_STATS_ETH_MAC_21_RX_MCAST = 17, ETHTOOL_A_STATS_ETH_MAC_22_RX_BCAST = 18, ETHTOOL_A_STATS_ETH_MAC_23_IR_LEN_ERR = 19, ETHTOOL_A_STATS_ETH_MAC_24_OOR_LEN = 20, ETHTOOL_A_STATS_ETH_MAC_25_TOO_LONG_ERR = 21, __ETHTOOL_A_STATS_ETH_MAC_CNT = 22, ETHTOOL_A_STATS_ETH_MAC_MAX = 21, }; enum { ETHTOOL_A_STATS_ETH_CTRL_3_TX = 0, ETHTOOL_A_STATS_ETH_CTRL_4_RX = 1, ETHTOOL_A_STATS_ETH_CTRL_5_RX_UNSUP = 2, __ETHTOOL_A_STATS_ETH_CTRL_CNT = 3, ETHTOOL_A_STATS_ETH_CTRL_MAX = 2, }; enum { ETHTOOL_A_STATS_RMON_UNDERSIZE = 0, ETHTOOL_A_STATS_RMON_OVERSIZE = 1, ETHTOOL_A_STATS_RMON_FRAG = 2, ETHTOOL_A_STATS_RMON_JABBER = 3, __ETHTOOL_A_STATS_RMON_CNT = 4, ETHTOOL_A_STATS_RMON_MAX = 3, }; struct stats_req_info { struct ethnl_req_info base; unsigned long stat_mask[1]; enum ethtool_mac_stats_src src; }; struct stats_reply_data { struct ethnl_reply_data base; union { struct { struct ethtool_eth_phy_stats phy_stats; struct ethtool_eth_mac_stats mac_stats; struct ethtool_eth_ctrl_stats ctrl_stats; struct ethtool_rmon_stats rmon_stats; }; struct { struct ethtool_eth_phy_stats phy_stats; struct ethtool_eth_mac_stats mac_stats; struct ethtool_eth_ctrl_stats ctrl_stats; struct ethtool_rmon_stats rmon_stats; } stats; }; const struct ethtool_rmon_hist_range *rmon_ranges; }; enum { ETHTOOL_A_PHC_VCLOCKS_UNSPEC = 0, ETHTOOL_A_PHC_VCLOCKS_HEADER = 1, ETHTOOL_A_PHC_VCLOCKS_NUM = 2, ETHTOOL_A_PHC_VCLOCKS_INDEX = 3, __ETHTOOL_A_PHC_VCLOCKS_CNT = 4, ETHTOOL_A_PHC_VCLOCKS_MAX = 3, }; struct phc_vclocks_reply_data { struct ethnl_reply_data base; int num; int *index; }; enum { ETHTOOL_A_MM_STAT_UNSPEC = 0, ETHTOOL_A_MM_STAT_PAD = 1, ETHTOOL_A_MM_STAT_REASSEMBLY_ERRORS = 2, ETHTOOL_A_MM_STAT_SMD_ERRORS = 3, ETHTOOL_A_MM_STAT_REASSEMBLY_OK = 4, ETHTOOL_A_MM_STAT_RX_FRAG_COUNT = 5, ETHTOOL_A_MM_STAT_TX_FRAG_COUNT = 6, ETHTOOL_A_MM_STAT_HOLD_COUNT = 7, __ETHTOOL_A_MM_STAT_CNT = 8, ETHTOOL_A_MM_STAT_MAX = 7, }; enum { ETHTOOL_A_MM_UNSPEC = 0, ETHTOOL_A_MM_HEADER = 1, ETHTOOL_A_MM_PMAC_ENABLED = 2, ETHTOOL_A_MM_TX_ENABLED = 3, ETHTOOL_A_MM_TX_ACTIVE = 4, ETHTOOL_A_MM_TX_MIN_FRAG_SIZE = 5, ETHTOOL_A_MM_RX_MIN_FRAG_SIZE = 6, ETHTOOL_A_MM_VERIFY_ENABLED = 7, ETHTOOL_A_MM_VERIFY_STATUS = 8, ETHTOOL_A_MM_VERIFY_TIME = 9, ETHTOOL_A_MM_MAX_VERIFY_TIME = 10, ETHTOOL_A_MM_STATS = 11, __ETHTOOL_A_MM_CNT = 12, ETHTOOL_A_MM_MAX = 11, }; struct 
mm_reply_data { struct ethnl_reply_data base; struct ethtool_mm_state state; struct ethtool_mm_stats stats; }; enum { ETHTOOL_A_MODULE_UNSPEC = 0, ETHTOOL_A_MODULE_HEADER = 1, ETHTOOL_A_MODULE_POWER_MODE_POLICY = 2, ETHTOOL_A_MODULE_POWER_MODE = 3, __ETHTOOL_A_MODULE_CNT = 4, ETHTOOL_A_MODULE_MAX = 3, }; struct module_reply_data { struct ethnl_reply_data base; struct ethtool_module_power_mode_params power; }; enum ethtool_podl_pse_admin_state { ETHTOOL_PODL_PSE_ADMIN_STATE_UNKNOWN = 1, ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED = 2, ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED = 3, }; enum ethtool_podl_pse_pw_d_status { ETHTOOL_PODL_PSE_PW_D_STATUS_UNKNOWN = 1, ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED = 2, ETHTOOL_PODL_PSE_PW_D_STATUS_SEARCHING = 3, ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING = 4, ETHTOOL_PODL_PSE_PW_D_STATUS_SLEEP = 5, ETHTOOL_PODL_PSE_PW_D_STATUS_IDLE = 6, ETHTOOL_PODL_PSE_PW_D_STATUS_ERROR = 7, }; enum { ETHTOOL_A_PSE_UNSPEC = 0, ETHTOOL_A_PSE_HEADER = 1, ETHTOOL_A_PODL_PSE_ADMIN_STATE = 2, ETHTOOL_A_PODL_PSE_ADMIN_CONTROL = 3, ETHTOOL_A_PODL_PSE_PW_D_STATUS = 4, __ETHTOOL_A_PSE_CNT = 5, ETHTOOL_A_PSE_MAX = 4, }; struct pse_control_status { enum ethtool_podl_pse_admin_state podl_admin_state; enum ethtool_podl_pse_pw_d_status podl_pw_status; }; struct pse_reply_data { struct ethnl_reply_data base; struct pse_control_status status; }; struct pse_control_config { enum ethtool_podl_pse_admin_state admin_cotrol; }; enum { ETHTOOL_A_PLCA_UNSPEC = 0, ETHTOOL_A_PLCA_HEADER = 1, ETHTOOL_A_PLCA_VERSION = 2, ETHTOOL_A_PLCA_ENABLED = 3, ETHTOOL_A_PLCA_STATUS = 4, ETHTOOL_A_PLCA_NODE_CNT = 5, ETHTOOL_A_PLCA_NODE_ID = 6, ETHTOOL_A_PLCA_TO_TMR = 7, ETHTOOL_A_PLCA_BURST_CNT = 8, ETHTOOL_A_PLCA_BURST_TMR = 9, __ETHTOOL_A_PLCA_CNT = 10, ETHTOOL_A_PLCA_MAX = 9, }; struct plca_reply_data { struct ethnl_reply_data base; struct phy_plca_cfg plca_cfg; struct phy_plca_status plca_st; }; struct nf_conntrack_zone { u16 id; u8 flags; u8 dir; }; struct nf_queue_entry; struct nf_ipv6_ops { void (*route_input)(struct sk_buff *); int (*fragment)(struct net *, struct sock *, struct sk_buff *, int (*)(struct net *, struct sock *, struct sk_buff *)); int (*reroute)(struct sk_buff *, const struct nf_queue_entry *); u64 android_kabi_reserved1; }; struct nf_queue_entry { struct list_head list; struct sk_buff *skb; unsigned int id; unsigned int hook_index; struct nf_hook_state state; u16 size; }; struct nfnl_ct_hook { size_t (*build_size)(const struct nf_conn *); int (*build)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, u_int16_t, u_int16_t); int (*parse)(const struct nlattr *, struct nf_conn *); int (*attach_expect)(const struct nlattr *, struct nf_conn *, u32, u32); void (*seq_adjust)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, s32); u64 android_kabi_reserved1; }; struct nf_ct_hook { int (*update)(struct net *, struct sk_buff *); void (*destroy)(struct nf_conntrack *); bool (*get_tuple_skb)(struct nf_conntrack_tuple *, const struct sk_buff *); void (*attach)(struct sk_buff *, const struct sk_buff *); void (*set_closing)(struct nf_conntrack *); int (*confirm)(struct sk_buff *); }; struct nf_defrag_hook { struct module *owner; int (*enable)(struct net *); void (*disable)(struct net *); }; enum nf_nat_manip_type { NF_NAT_MANIP_SRC = 0, NF_NAT_MANIP_DST = 1, }; struct nf_nat_hook { int (*parse_nat_setup)(struct nf_conn *, enum nf_nat_manip_type, const struct nlattr *); void (*decode_session)(struct sk_buff *, struct flowi *); unsigned int (*manip_pkt)(struct sk_buff *, struct nf_conn *, enum 
nf_nat_manip_type, enum ip_conntrack_dir); void (*remove_nat_bysrc)(struct nf_conn *); u64 android_kabi_reserved1; }; struct nf_hook_entries_rcu_head { struct callback_head head; void *allocation; }; struct nf_loginfo { u_int8_t type; union { struct { u_int32_t copy_len; u_int16_t group; u_int16_t qthreshold; u_int16_t flags; } ulog; struct { u_int8_t level; u_int8_t logflags; } log; } u; }; struct nf_log_buf { unsigned int count; char buf[1020]; }; struct nf_queue_handler { int (*outfn)(struct nf_queue_entry *, unsigned int); void (*nf_hook_drop)(struct net *); }; struct ip_rt_info { __be32 daddr; __be32 saddr; u_int8_t tos; u_int32_t mark; }; struct ip6_rt_info { struct in6_addr daddr; struct in6_addr saddr; u_int32_t mark; }; struct nf_sockopt_ops { struct list_head list; u_int8_t pf; int set_optmin; int set_optmax; int (*set)(struct sock *, int, sockptr_t, unsigned int); int get_optmin; int get_optmax; int (*get)(struct sock *, int, void __attribute__((btf_type_tag("user"))) *, int *); struct module *owner; u64 android_kabi_reserved1; }; enum nf_ip_hook_priorities { NF_IP_PRI_FIRST = -2147483648, NF_IP_PRI_RAW_BEFORE_DEFRAG = -450, NF_IP_PRI_CONNTRACK_DEFRAG = -400, NF_IP_PRI_RAW = -300, NF_IP_PRI_SELINUX_FIRST = -225, NF_IP_PRI_CONNTRACK = -200, NF_IP_PRI_MANGLE = -150, NF_IP_PRI_NAT_DST = -100, NF_IP_PRI_FILTER = 0, NF_IP_PRI_SECURITY = 50, NF_IP_PRI_NAT_SRC = 100, NF_IP_PRI_SELINUX_LAST = 225, NF_IP_PRI_CONNTRACK_HELPER = 300, NF_IP_PRI_CONNTRACK_CONFIRM = 2147483647, NF_IP_PRI_LAST = 2147483647, }; struct bpf_nf_link { struct bpf_link link; struct nf_hook_ops hook_ops; struct net *net; u32 dead; const struct nf_defrag_hook *defrag_hook; }; enum nfnl_abort_action { NFNL_ABORT_NONE = 0, NFNL_ABORT_AUTOLOAD = 1, NFNL_ABORT_VALIDATE = 2, }; struct nfnl_callback; struct nfnetlink_subsystem { const char *name; __u8 subsys_id; __u8 cb_count; const struct nfnl_callback *cb; struct module *owner; int (*commit)(struct net *, struct sk_buff *); int (*abort)(struct net *, struct sk_buff *, enum nfnl_abort_action); bool (*valid_genid)(struct net *, u32); u64 android_kabi_reserved1; }; enum nfnl_callback_type { NFNL_CB_UNSPEC = 0, NFNL_CB_MUTEX = 1, NFNL_CB_RCU = 2, NFNL_CB_BATCH = 3, }; struct nfnl_info; struct nfnl_callback { int (*call)(struct sk_buff *, const struct nfnl_info *, const struct nlattr * const *); const struct nla_policy *policy; enum nfnl_callback_type type; __u16 attr_count; u64 android_kabi_reserved1; }; struct nfgenmsg; struct nfnl_info { struct net *net; struct sock *sk; const struct nlmsghdr *nlh; const struct nfgenmsg *nfmsg; struct netlink_ext_ack *extack; }; struct nfgenmsg { __u8 nfgen_family; __u8 version; __be16 res_id; }; enum nfnl_batch_attributes { NFNL_BATCH_UNSPEC = 0, NFNL_BATCH_GENID = 1, __NFNL_BATCH_MAX = 2, }; enum { NFNL_BATCH_FAILURE = 1, NFNL_BATCH_DONE = 2, NFNL_BATCH_REPLAY = 4, }; enum nfnetlink_groups { NFNLGRP_NONE = 0, NFNLGRP_CONNTRACK_NEW = 1, NFNLGRP_CONNTRACK_UPDATE = 2, NFNLGRP_CONNTRACK_DESTROY = 3, NFNLGRP_CONNTRACK_EXP_NEW = 4, NFNLGRP_CONNTRACK_EXP_UPDATE = 5, NFNLGRP_CONNTRACK_EXP_DESTROY = 6, NFNLGRP_NFTABLES = 7, NFNLGRP_ACCT_QUOTA = 8, NFNLGRP_NFTRACE = 9, __NFNLGRP_MAX = 10, }; struct nfnl_err { struct list_head head; struct nlmsghdr *nlh; int err; struct netlink_ext_ack extack; }; struct nfnl_net { struct sock *nfnl; }; enum nfqnl_attr_type { NFQA_UNSPEC = 0, NFQA_PACKET_HDR = 1, NFQA_VERDICT_HDR = 2, NFQA_MARK = 3, NFQA_TIMESTAMP = 4, NFQA_IFINDEX_INDEV = 5, NFQA_IFINDEX_OUTDEV = 6, NFQA_IFINDEX_PHYSINDEV = 7, 
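/*
 * Illustrative sketch (editorial comment, not generated content): struct
 * bpf_nf_link above is the kernel-side object behind a BPF netfilter link
 * (BPF_PROG_TYPE_NETFILTER). The program context type, struct bpf_nf_ctx with
 * a pointer to the nf_hook_state and the skb, is assumed to be defined
 * elsewhere in this header; the verdict values follow the usual netfilter
 * convention (NF_DROP = 0, NF_ACCEPT = 1). SEC() comes from libbpf's
 * bpf_helpers.h. A minimal program might look like:
 *
 * SEC("netfilter")
 * int sample_nf_prog(struct bpf_nf_ctx *ctx)
 * {
 *         struct sk_buff *skb = ctx->skb;
 *
 *         if (skb->len == 0)
 *                 return 0;   // NF_DROP
 *         return 1;           // NF_ACCEPT
 * }
 *
 * The protocol family, hook number (e.g. NF_INET_PRE_ROUTING from enum
 * nf_inet_hooks above) and priority are supplied from user space when the
 * link is created.
 */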
NFQA_IFINDEX_PHYSOUTDEV = 8, NFQA_HWADDR = 9, NFQA_PAYLOAD = 10, NFQA_CT = 11, NFQA_CT_INFO = 12, NFQA_CAP_LEN = 13, NFQA_SKB_INFO = 14, NFQA_EXP = 15, NFQA_UID = 16, NFQA_GID = 17, NFQA_SECCTX = 18, NFQA_VLAN = 19, NFQA_L2HDR = 20, NFQA_PRIORITY = 21, NFQA_CGROUP_CLASSID = 22, __NFQA_MAX = 23, }; enum nfqnl_vlan_attr { NFQA_VLAN_UNSPEC = 0, NFQA_VLAN_PROTO = 1, NFQA_VLAN_TCI = 2, __NFQA_VLAN_MAX = 3, }; enum nfqnl_attr_config { NFQA_CFG_UNSPEC = 0, NFQA_CFG_CMD = 1, NFQA_CFG_PARAMS = 2, NFQA_CFG_QUEUE_MAXLEN = 3, NFQA_CFG_MASK = 4, NFQA_CFG_FLAGS = 5, __NFQA_CFG_MAX = 6, }; enum nfqnl_msg_config_cmds { NFQNL_CFG_CMD_NONE = 0, NFQNL_CFG_CMD_BIND = 1, NFQNL_CFG_CMD_UNBIND = 2, NFQNL_CFG_CMD_PF_BIND = 3, NFQNL_CFG_CMD_PF_UNBIND = 4, }; enum nfqnl_config_mode { NFQNL_COPY_NONE = 0, NFQNL_COPY_META = 1, NFQNL_COPY_PACKET = 2, }; enum nfqnl_msg_types { NFQNL_MSG_PACKET = 0, NFQNL_MSG_VERDICT = 1, NFQNL_MSG_CONFIG = 2, NFQNL_MSG_VERDICT_BATCH = 3, NFQNL_MSG_MAX = 4, }; enum ip_conntrack_status { IPS_EXPECTED_BIT = 0, IPS_EXPECTED = 1, IPS_SEEN_REPLY_BIT = 1, IPS_SEEN_REPLY = 2, IPS_ASSURED_BIT = 2, IPS_ASSURED = 4, IPS_CONFIRMED_BIT = 3, IPS_CONFIRMED = 8, IPS_SRC_NAT_BIT = 4, IPS_SRC_NAT = 16, IPS_DST_NAT_BIT = 5, IPS_DST_NAT = 32, IPS_NAT_MASK = 48, IPS_SEQ_ADJUST_BIT = 6, IPS_SEQ_ADJUST = 64, IPS_SRC_NAT_DONE_BIT = 7, IPS_SRC_NAT_DONE = 128, IPS_DST_NAT_DONE_BIT = 8, IPS_DST_NAT_DONE = 256, IPS_NAT_DONE_MASK = 384, IPS_DYING_BIT = 9, IPS_DYING = 512, IPS_FIXED_TIMEOUT_BIT = 10, IPS_FIXED_TIMEOUT = 1024, IPS_TEMPLATE_BIT = 11, IPS_TEMPLATE = 2048, IPS_UNTRACKED_BIT = 12, IPS_UNTRACKED = 4096, IPS_NAT_CLASH_BIT = 12, IPS_NAT_CLASH = 4096, IPS_HELPER_BIT = 13, IPS_HELPER = 8192, IPS_OFFLOAD_BIT = 14, IPS_OFFLOAD = 16384, IPS_HW_OFFLOAD_BIT = 15, IPS_HW_OFFLOAD = 32768, IPS_UNCHANGEABLE_MASK = 56313, __IPS_MAX_BIT = 16, }; struct nfqnl_instance { struct hlist_node hlist; struct callback_head rcu; u32 peer_portid; unsigned int queue_maxlen; unsigned int copy_range; unsigned int queue_dropped; unsigned int queue_user_dropped; u_int16_t queue_num; u_int8_t copy_mode; u_int32_t flags; long: 0; spinlock_t lock; unsigned int queue_total; unsigned int id_sequence; struct list_head queue_list; long: 64; long: 64; long: 64; long: 64; }; struct nfnl_queue_net { spinlock_t instances_lock; struct hlist_head instance_table[16]; }; typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long); struct nfqnl_msg_verdict_hdr { __be32 verdict; __be32 id; }; struct iter_state { struct seq_net_private p; unsigned int bucket; }; struct nfqnl_msg_packet_hdr { __be32 packet_id; __be16 hw_protocol; __u8 hook; } __attribute__((packed)); struct nfqnl_msg_packet_hw { __be16 hw_addrlen; __u16 _pad; __u8 hw_addr[8]; }; struct nfqnl_msg_packet_timestamp { __be64 sec; __be64 usec; }; struct nfqnl_msg_config_cmd { __u8 command; __u8 _pad; __be16 pf; }; struct nfqnl_msg_config_params { __be32 copy_range; __u8 copy_mode; } __attribute__((packed)); enum nfulnl_attr_config { NFULA_CFG_UNSPEC = 0, NFULA_CFG_CMD = 1, NFULA_CFG_MODE = 2, NFULA_CFG_NLBUFSIZ = 3, NFULA_CFG_TIMEOUT = 4, NFULA_CFG_QTHRESH = 5, NFULA_CFG_FLAGS = 6, __NFULA_CFG_MAX = 7, }; enum nfulnl_msg_config_cmds { NFULNL_CFG_CMD_NONE = 0, NFULNL_CFG_CMD_BIND = 1, NFULNL_CFG_CMD_UNBIND = 2, NFULNL_CFG_CMD_PF_BIND = 3, NFULNL_CFG_CMD_PF_UNBIND = 4, }; enum nfulnl_msg_types { NFULNL_MSG_PACKET = 0, NFULNL_MSG_CONFIG = 1, NFULNL_MSG_MAX = 2, }; enum nfulnl_attr_type { NFULA_UNSPEC = 0, NFULA_PACKET_HDR = 1, NFULA_MARK = 2, NFULA_TIMESTAMP = 3, NFULA_IFINDEX_INDEV = 4, 
NFULA_IFINDEX_OUTDEV = 5, NFULA_IFINDEX_PHYSINDEV = 6, NFULA_IFINDEX_PHYSOUTDEV = 7, NFULA_HWADDR = 8, NFULA_PAYLOAD = 9, NFULA_PREFIX = 10, NFULA_UID = 11, NFULA_SEQ = 12, NFULA_SEQ_GLOBAL = 13, NFULA_GID = 14, NFULA_HWTYPE = 15, NFULA_HWHEADER = 16, NFULA_HWLEN = 17, NFULA_CT = 18, NFULA_CT_INFO = 19, NFULA_VLAN = 20, NFULA_L2HDR = 21, __NFULA_MAX = 22, }; enum nfulnl_vlan_attr { NFULA_VLAN_UNSPEC = 0, NFULA_VLAN_PROTO = 1, NFULA_VLAN_TCI = 2, __NFULA_VLAN_MAX = 3, }; struct nfulnl_instance { struct hlist_node hlist; spinlock_t lock; refcount_t use; unsigned int qlen; struct sk_buff *skb; struct timer_list timer; struct net *net; netns_tracker ns_tracker; struct user_namespace *peer_user_ns; u32 peer_portid; unsigned int flushtimeout; unsigned int nlbufsiz; unsigned int qthreshold; u_int32_t copy_range; u_int32_t seq; u_int16_t group_num; u_int16_t flags; u_int8_t copy_mode; struct callback_head rcu; }; struct nfnl_log_net { spinlock_t instances_lock; struct hlist_head instance_table[16]; atomic_t global_seq; }; struct nfulnl_msg_packet_hdr { __be16 hw_protocol; __u8 hook; __u8 _pad; }; struct nfulnl_msg_packet_hw { __be16 hw_addrlen; __u16 _pad; __u8 hw_addr[8]; }; struct nfulnl_msg_packet_timestamp { __be64 sec; __be64 usec; }; struct nfulnl_msg_config_cmd { __u8 command; }; struct nfulnl_msg_config_mode { __be32 copy_range; __u8 copy_mode; __u8 _pad; } __attribute__((packed)); struct conntrack_gc_work { struct delayed_work dwork; u32 next_bucket; u32 avg_timeout; u32 count; u32 start_time; bool exiting; bool early_drop; }; struct nf_conntrack_expect_policy; struct nf_conntrack_helper { struct hlist_node hnode; char name[16]; refcount_t refcnt; struct module *me; const struct nf_conntrack_expect_policy *expect_policy; struct nf_conntrack_tuple tuple; int (*help)(struct sk_buff *, unsigned int, struct nf_conn *, enum ip_conntrack_info); void (*destroy)(struct nf_conn *); int (*from_nlattr)(struct nlattr *, struct nf_conn *); int (*to_nlattr)(struct sk_buff *, const struct nf_conn *); unsigned int expect_class_max; unsigned int flags; unsigned int queue_num; u16 data_len; char nat_mod_name[16]; }; struct nf_conntrack_expect_policy { unsigned int max_expected; unsigned int timeout; char name[16]; }; enum ip_conntrack_events { IPCT_NEW = 0, IPCT_RELATED = 1, IPCT_DESTROY = 2, IPCT_REPLY = 3, IPCT_ASSURED = 4, IPCT_PROTOINFO = 5, IPCT_HELPER = 6, IPCT_MARK = 7, IPCT_SEQADJ = 8, IPCT_NATSEQADJ = 8, IPCT_SECMARK = 9, IPCT_LABEL = 10, IPCT_SYNPROXY = 11, __IPCT_MAX = 12, }; enum nf_ct_ecache_state { NFCT_ECACHE_DESTROY_FAIL = 0, NFCT_ECACHE_DESTROY_SENT = 1, }; enum ctattr_l4proto { CTA_PROTO_UNSPEC = 0, CTA_PROTO_NUM = 1, CTA_PROTO_SRC_PORT = 2, CTA_PROTO_DST_PORT = 3, CTA_PROTO_ICMP_ID = 4, CTA_PROTO_ICMP_TYPE = 5, CTA_PROTO_ICMP_CODE = 6, CTA_PROTO_ICMPV6_ID = 7, CTA_PROTO_ICMPV6_TYPE = 8, CTA_PROTO_ICMPV6_CODE = 9, __CTA_PROTO_MAX = 10, }; enum nf_ct_ext_id { NF_CT_EXT_HELPER = 0, NF_CT_EXT_NAT = 1, NF_CT_EXT_SEQADJ = 2, NF_CT_EXT_ACCT = 3, NF_CT_EXT_ECACHE = 4, NF_CT_EXT_NUM = 5, }; enum nf_ct_helper_flags { NF_CT_HELPER_F_USERSPACE = 1, NF_CT_HELPER_F_CONFIGURED = 2, }; struct nf_conntrack_l4proto; struct nf_ct_timeout { __u16 l3num; const struct nf_conntrack_l4proto *l4proto; char data[0]; }; struct nf_conntrack_l4proto { u_int8_t l4proto; bool allow_clash; u16 nlattr_size; bool (*can_early_drop)(const struct nf_conn *); int (*to_nlattr)(struct sk_buff *, struct nlattr *, struct nf_conn *, bool); int (*from_nlattr)(struct nlattr **, struct nf_conn *); int (*tuple_to_nlattr)(struct 
sk_buff *, const struct nf_conntrack_tuple *); unsigned int (*nlattr_tuple_size)(); int (*nlattr_to_tuple)(struct nlattr **, struct nf_conntrack_tuple *, u_int32_t); const struct nla_policy *nla_policy; struct { int (*nlattr_to_obj)(struct nlattr **, struct net *, void *); int (*obj_to_nlattr)(struct sk_buff *, const void *); u16 obj_size; u16 nlattr_max; const struct nla_policy *nla_policy; } ctnl_timeout; void (*print_conntrack)(struct seq_file *, struct nf_conn *); }; struct nf_conntrack_net_ecache { struct delayed_work dwork; spinlock_t dying_lock; struct hlist_nulls_head dying_list; }; struct nf_conntrack_net { atomic_t count; unsigned int expect_count; unsigned int users4; unsigned int users6; unsigned int users_bridge; struct ctl_table_header *sysctl_header; struct nf_conntrack_net_ecache ecache; }; struct nf_conn_counter { atomic64_t packets; atomic64_t bytes; }; struct nf_conn_acct { struct nf_conn_counter counter[2]; }; struct nf_conn_tstamp { u_int64_t start; u_int64_t stop; }; struct nf_conn_help { struct nf_conntrack_helper __attribute__((btf_type_tag("rcu"))) *helper; struct hlist_head expectations; u8 expecting[4]; long: 0; char data[32]; }; struct nf_conntrack_ecache { unsigned long cache; u16 ctmask; u16 expmask; u32 missed; u32 portid; }; struct nf_ct_iter_data { struct net *net; void *data; u32 portid; int report; }; struct nf_conn_timeout { struct nf_ct_timeout __attribute__((btf_type_tag("rcu"))) *timeout; }; enum nf_ct_sysctl_index { NF_SYSCTL_CT_MAX = 0, NF_SYSCTL_CT_COUNT = 1, NF_SYSCTL_CT_BUCKETS = 2, NF_SYSCTL_CT_CHECKSUM = 3, NF_SYSCTL_CT_LOG_INVALID = 4, NF_SYSCTL_CT_EXPECT_MAX = 5, NF_SYSCTL_CT_ACCT = 6, NF_SYSCTL_CT_EVENTS = 7, NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC = 8, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_SENT = 9, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_RECV = 10, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ESTABLISHED = 11, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_FIN_WAIT = 12, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE_WAIT = 13, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_LAST_ACK = 14, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_TIME_WAIT = 15, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE = 16, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_RETRANS = 17, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK = 18, NF_SYSCTL_CT_PROTO_TCP_LOOSE = 19, NF_SYSCTL_CT_PROTO_TCP_LIBERAL = 20, NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST = 21, NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS = 22, NF_SYSCTL_CT_PROTO_TIMEOUT_UDP = 23, NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM = 24, NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP = 25, NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6 = 26, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_CLOSED = 27, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_WAIT = 28, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_ECHOED = 29, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_ESTABLISHED = 30, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_SENT = 31, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD = 32, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT = 33, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT = 34, NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST = 35, NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_RESPOND = 36, NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_PARTOPEN = 37, NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_OPEN = 38, NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSEREQ = 39, NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSING = 40, NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_TIMEWAIT = 41, NF_SYSCTL_CT_PROTO_DCCP_LOOSE = 42, NF_SYSCTL_CT_PROTO_TIMEOUT_GRE = 43, NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM = 44, __NF_SYSCTL_CT_LAST_SYSCTL = 45, }; enum udp_conntrack { UDP_CT_UNREPLIED = 0, UDP_CT_REPLIED = 1, UDP_CT_MAX = 2, }; enum tcp_conntrack { TCP_CONNTRACK_NONE = 0, TCP_CONNTRACK_SYN_SENT = 1, 
TCP_CONNTRACK_SYN_RECV = 2, TCP_CONNTRACK_ESTABLISHED = 3, TCP_CONNTRACK_FIN_WAIT = 4, TCP_CONNTRACK_CLOSE_WAIT = 5, TCP_CONNTRACK_LAST_ACK = 6, TCP_CONNTRACK_TIME_WAIT = 7, TCP_CONNTRACK_CLOSE = 8, TCP_CONNTRACK_LISTEN = 9, TCP_CONNTRACK_MAX = 10, TCP_CONNTRACK_IGNORE = 11, TCP_CONNTRACK_RETRANS = 12, TCP_CONNTRACK_UNACK = 13, TCP_CONNTRACK_TIMEOUT_MAX = 14, }; enum ct_dccp_states { CT_DCCP_NONE = 0, CT_DCCP_REQUEST = 1, CT_DCCP_RESPOND = 2, CT_DCCP_PARTOPEN = 3, CT_DCCP_OPEN = 4, CT_DCCP_CLOSEREQ = 5, CT_DCCP_CLOSING = 6, CT_DCCP_TIMEWAIT = 7, CT_DCCP_IGNORE = 8, CT_DCCP_INVALID = 9, __CT_DCCP_MAX = 10, }; enum gre_conntrack { GRE_CT_UNREPLIED = 0, GRE_CT_REPLIED = 1, GRE_CT_MAX = 2, }; struct ct_iter_state { struct seq_net_private p; struct hlist_nulls_head *hash; unsigned int htable_size; unsigned int bucket; u_int64_t time_now; }; enum ip_conntrack_expect_events { IPEXP_NEW = 0, IPEXP_DESTROY = 1, }; struct ct_expect_iter_state { struct seq_net_private p; unsigned int bucket; }; struct nf_ct_helper_expectfn { struct list_head head; const char *name; void (*expectfn)(struct nf_conn *, struct nf_conntrack_expect *); }; struct nf_conntrack_nat_helper { struct list_head list; char mod_name[16]; struct module *module; }; struct nf_ct_bridge_info { struct nf_hook_ops *ops; unsigned int ops_size; struct module *me; }; enum nf_ct_tcp_action { NFCT_TCP_IGNORE = 0, NFCT_TCP_INVALID = 1, NFCT_TCP_ACCEPT = 2, }; enum tcp_bit_set { TCP_SYN_SET = 0, TCP_SYNACK_SET = 1, TCP_FIN_SET = 2, TCP_ACK_SET = 3, TCP_RST_SET = 4, TCP_NONE_SET = 5, }; enum { TCP_FLAG_CWR = 32768, TCP_FLAG_ECE = 16384, TCP_FLAG_URG = 8192, TCP_FLAG_ACK = 4096, TCP_FLAG_PSH = 2048, TCP_FLAG_RST = 1024, TCP_FLAG_SYN = 512, TCP_FLAG_FIN = 256, TCP_RESERVED_BITS = 15, TCP_DATA_OFFSET = 240, }; enum ctattr_protoinfo { CTA_PROTOINFO_UNSPEC = 0, CTA_PROTOINFO_TCP = 1, CTA_PROTOINFO_DCCP = 2, CTA_PROTOINFO_SCTP = 3, __CTA_PROTOINFO_MAX = 4, }; enum ctattr_protoinfo_tcp { CTA_PROTOINFO_TCP_UNSPEC = 0, CTA_PROTOINFO_TCP_STATE = 1, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL = 2, CTA_PROTOINFO_TCP_WSCALE_REPLY = 3, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL = 4, CTA_PROTOINFO_TCP_FLAGS_REPLY = 5, __CTA_PROTOINFO_TCP_MAX = 6, }; struct nf_ct_tcp_flags { __u8 flags; __u8 mask; }; struct nf_ct_seqadj { u32 correction_pos; s32 offset_before; s32 offset_after; }; struct nf_conn_seqadj { struct nf_ct_seqadj seq[2]; }; struct tcp_sack_block_wire { __be32 start_seq; __be32 end_seq; }; struct icmpv6_echo { __be16 identifier; __be16 sequence; }; struct icmpv6_nd_advt { __u32 reserved: 5; __u32 override: 1; __u32 solicited: 1; __u32 router: 1; __u32 reserved2: 24; }; struct icmpv6_nd_ra { __u8 hop_limit; __u8 reserved: 3; __u8 router_pref: 2; __u8 home_agent: 1; __u8 other: 1; __u8 managed: 1; __be16 rt_lifetime; }; struct icmp6hdr { __u8 icmp6_type; __u8 icmp6_code; __sum16 icmp6_cksum; union { __be32 un_data32[1]; __be16 un_data16[2]; __u8 un_data8[4]; struct icmpv6_echo u_echo; struct icmpv6_nd_advt u_nd_advt; struct icmpv6_nd_ra u_nd_ra; } icmp6_dataun; }; struct rd_msg { struct icmp6hdr icmph; struct in6_addr target; struct in6_addr dest; __u8 opt[0]; }; enum { __ND_OPT_PREFIX_INFO_END = 0, ND_OPT_SOURCE_LL_ADDR = 1, ND_OPT_TARGET_LL_ADDR = 2, ND_OPT_PREFIX_INFO = 3, ND_OPT_REDIRECT_HDR = 4, ND_OPT_MTU = 5, ND_OPT_NONCE = 14, __ND_OPT_ARRAY_MAX = 15, ND_OPT_ROUTE_INFO = 24, ND_OPT_RDNSS = 25, ND_OPT_DNSSL = 31, ND_OPT_6CO = 34, ND_OPT_CAPTIVE_PORTAL = 37, ND_OPT_PREF64 = 38, __ND_OPT_MAX = 39, }; enum retry_state { STATE_CONGESTED = 0, STATE_RESTART = 1, 
STATE_DONE = 2, }; enum ct_dccp_roles { CT_DCCP_ROLE_CLIENT = 0, CT_DCCP_ROLE_SERVER = 1, __CT_DCCP_ROLE_MAX = 2, }; enum dccp_pkt_type { DCCP_PKT_REQUEST = 0, DCCP_PKT_RESPONSE = 1, DCCP_PKT_DATA = 2, DCCP_PKT_ACK = 3, DCCP_PKT_DATAACK = 4, DCCP_PKT_CLOSEREQ = 5, DCCP_PKT_CLOSE = 6, DCCP_PKT_RESET = 7, DCCP_PKT_SYNC = 8, DCCP_PKT_SYNCACK = 9, DCCP_PKT_INVALID = 10, }; enum ctattr_protoinfo_dccp { CTA_PROTOINFO_DCCP_UNSPEC = 0, CTA_PROTOINFO_DCCP_STATE = 1, CTA_PROTOINFO_DCCP_ROLE = 2, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ = 3, CTA_PROTOINFO_DCCP_PAD = 4, __CTA_PROTOINFO_DCCP_MAX = 5, }; struct dccp_hdr_ext { __be32 dccph_seq_low; }; struct dccp_hdr_ack_bits { __be16 dccph_reserved1; __be16 dccph_ack_nr_high; __be32 dccph_ack_nr_low; }; struct dccp_hdr_request { __be32 dccph_req_service; }; struct dccp_hdr_response { struct dccp_hdr_ack_bits dccph_resp_ack; __be32 dccph_resp_service; }; struct dccp_hdr_reset { struct dccp_hdr_ack_bits dccph_reset_ack; __u8 dccph_reset_code; __u8 dccph_reset_data[3]; }; struct nf_conntrack_dccp_buf { struct dccp_hdr dh; struct dccp_hdr_ext ext; union { struct dccp_hdr_ack_bits ack; struct dccp_hdr_request req; struct dccp_hdr_response response; struct dccp_hdr_reset rst; } u; }; enum sctp_cid { SCTP_CID_DATA = 0, SCTP_CID_INIT = 1, SCTP_CID_INIT_ACK = 2, SCTP_CID_SACK = 3, SCTP_CID_HEARTBEAT = 4, SCTP_CID_HEARTBEAT_ACK = 5, SCTP_CID_ABORT = 6, SCTP_CID_SHUTDOWN = 7, SCTP_CID_SHUTDOWN_ACK = 8, SCTP_CID_ERROR = 9, SCTP_CID_COOKIE_ECHO = 10, SCTP_CID_COOKIE_ACK = 11, SCTP_CID_ECN_ECNE = 12, SCTP_CID_ECN_CWR = 13, SCTP_CID_SHUTDOWN_COMPLETE = 14, SCTP_CID_AUTH = 15, SCTP_CID_I_DATA = 64, SCTP_CID_FWD_TSN = 192, SCTP_CID_ASCONF = 193, SCTP_CID_I_FWD_TSN = 194, SCTP_CID_ASCONF_ACK = 128, SCTP_CID_RECONF = 130, SCTP_CID_PAD = 132, }; enum { SCTP_CHUNK_FLAG_T = 1, }; enum ctattr_protoinfo_sctp { CTA_PROTOINFO_SCTP_UNSPEC = 0, CTA_PROTOINFO_SCTP_STATE = 1, CTA_PROTOINFO_SCTP_VTAG_ORIGINAL = 2, CTA_PROTOINFO_SCTP_VTAG_REPLY = 3, __CTA_PROTOINFO_SCTP_MAX = 4, }; enum pptp_ctrlsess_state { PPTP_SESSION_NONE = 0, PPTP_SESSION_ERROR = 1, PPTP_SESSION_STOPREQ = 2, PPTP_SESSION_REQUESTED = 3, PPTP_SESSION_CONFIRMED = 4, }; enum pptp_ctrlcall_state { PPTP_CALL_NONE = 0, PPTP_CALL_ERROR = 1, PPTP_CALL_OUT_REQ = 2, PPTP_CALL_OUT_CONF = 3, PPTP_CALL_IN_REQ = 4, PPTP_CALL_IN_REP = 5, PPTP_CALL_IN_CONF = 6, PPTP_CALL_CLEAR_REQ = 7, }; struct nf_ct_gre_keymap { struct list_head list; struct nf_conntrack_tuple tuple; struct callback_head rcu; }; struct nf_ct_pptp_master { enum pptp_ctrlsess_state sstate; enum pptp_ctrlcall_state cstate; __be16 pac_call_id; __be16 pns_call_id; struct nf_ct_gre_keymap *keymap[2]; }; struct pptp_gre_header { struct gre_base_hdr gre_hd; __be16 payload_len; __be16 call_id; __be32 seq; __be32 ack; }; enum { NF_BPF_CT_OPTS_SZ = 12, }; enum { BPF_F_CURRENT_NETNS = -1, }; struct nf_conn___init { struct nf_conn ct; }; struct bpf_ct_opts { s32 netns_id; s32 error; u8 l4proto; u8 dir; u8 reserved[2]; }; enum cntl_msg_types { IPCTNL_MSG_CT_NEW = 0, IPCTNL_MSG_CT_GET = 1, IPCTNL_MSG_CT_DELETE = 2, IPCTNL_MSG_CT_GET_CTRZERO = 3, IPCTNL_MSG_CT_GET_STATS_CPU = 4, IPCTNL_MSG_CT_GET_STATS = 5, IPCTNL_MSG_CT_GET_DYING = 6, IPCTNL_MSG_CT_GET_UNCONFIRMED = 7, IPCTNL_MSG_MAX = 8, }; enum ctattr_type { CTA_UNSPEC = 0, CTA_TUPLE_ORIG = 1, CTA_TUPLE_REPLY = 2, CTA_STATUS = 3, CTA_PROTOINFO = 4, CTA_HELP = 5, CTA_NAT_SRC = 6, CTA_TIMEOUT = 7, CTA_MARK = 8, CTA_COUNTERS_ORIG = 9, CTA_COUNTERS_REPLY = 10, CTA_USE = 11, CTA_ID = 12, CTA_NAT_DST = 13, CTA_TUPLE_MASTER = 14, 
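/*
 * Illustrative sketch (editorial comment, not generated content): struct
 * bpf_ct_opts and struct nf_conn___init above are the option and return types
 * used by the conntrack kfuncs (bpf_xdp_ct_lookup(), bpf_skb_ct_lookup(),
 * bpf_ct_release(), ...). The extern declarations below are assumptions based
 * on recent kernels and should be checked against the running kernel's BTF;
 * SEC() and __ksym come from libbpf's bpf_helpers.h. BPF_F_CURRENT_NETNS
 * (-1, defined above) selects the netns of the program context, and
 * NF_BPF_CT_OPTS_SZ (12) matches sizeof(struct bpf_ct_opts).
 *
 * extern struct nf_conn *bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx,
 *                 struct bpf_sock_tuple *bpf_tuple, u32 tuple__sz,
 *                 struct bpf_ct_opts *opts, u32 opts__sz) __ksym;
 * extern void bpf_ct_release(struct nf_conn *ct) __ksym;
 *
 * SEC("xdp")
 * int ct_lookup_example(struct xdp_md *xdp)
 * {
 *         struct bpf_sock_tuple tup = {};     // fill saddr/daddr/sport/dport from the parsed packet
 *         struct bpf_ct_opts opts = {
 *                 .netns_id = BPF_F_CURRENT_NETNS,
 *                 .l4proto  = 6,              // IPPROTO_TCP; the IPPROTO_* enum is assumed available
 *         };
 *         struct nf_conn *ct;
 *
 *         ct = bpf_xdp_ct_lookup(xdp, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
 *         if (ct) {
 *                 if (ct->status & IPS_CONFIRMED)   // bit from enum ip_conntrack_status above
 *                         ;                         // e.g. count confirmed flows here
 *                 bpf_ct_release(ct);
 *         }
 *         return XDP_PASS;
 * }
 */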
CTA_SEQ_ADJ_ORIG = 15, CTA_NAT_SEQ_ADJ_ORIG = 15, CTA_SEQ_ADJ_REPLY = 16, CTA_NAT_SEQ_ADJ_REPLY = 16, CTA_SECMARK = 17, CTA_ZONE = 18, CTA_SECCTX = 19, CTA_TIMESTAMP = 20, CTA_MARK_MASK = 21, CTA_LABELS = 22, CTA_LABELS_MASK = 23, CTA_SYNPROXY = 24, CTA_FILTER = 25, CTA_STATUS_MASK = 26, __CTA_MAX = 27, }; enum ctattr_tuple { CTA_TUPLE_UNSPEC = 0, CTA_TUPLE_IP = 1, CTA_TUPLE_PROTO = 2, CTA_TUPLE_ZONE = 3, __CTA_TUPLE_MAX = 4, }; enum ctattr_ip { CTA_IP_UNSPEC = 0, CTA_IP_V4_SRC = 1, CTA_IP_V4_DST = 2, CTA_IP_V6_SRC = 3, CTA_IP_V6_DST = 4, __CTA_IP_MAX = 5, }; enum ctattr_counters { CTA_COUNTERS_UNSPEC = 0, CTA_COUNTERS_PACKETS = 1, CTA_COUNTERS_BYTES = 2, CTA_COUNTERS32_PACKETS = 3, CTA_COUNTERS32_BYTES = 4, CTA_COUNTERS_PAD = 5, __CTA_COUNTERS_MAX = 6, }; enum ctattr_tstamp { CTA_TIMESTAMP_UNSPEC = 0, CTA_TIMESTAMP_START = 1, CTA_TIMESTAMP_STOP = 2, CTA_TIMESTAMP_PAD = 3, __CTA_TIMESTAMP_MAX = 4, }; enum ctattr_help { CTA_HELP_UNSPEC = 0, CTA_HELP_NAME = 1, CTA_HELP_INFO = 2, __CTA_HELP_MAX = 3, }; enum ctattr_secctx { CTA_SECCTX_UNSPEC = 0, CTA_SECCTX_NAME = 1, __CTA_SECCTX_MAX = 2, }; enum ctattr_seqadj { CTA_SEQADJ_UNSPEC = 0, CTA_SEQADJ_CORRECTION_POS = 1, CTA_SEQADJ_OFFSET_BEFORE = 2, CTA_SEQADJ_OFFSET_AFTER = 3, __CTA_SEQADJ_MAX = 4, }; enum ctattr_synproxy { CTA_SYNPROXY_UNSPEC = 0, CTA_SYNPROXY_ISN = 1, CTA_SYNPROXY_ITS = 2, CTA_SYNPROXY_TSOFF = 3, __CTA_SYNPROXY_MAX = 4, }; enum ctnl_exp_msg_types { IPCTNL_MSG_EXP_NEW = 0, IPCTNL_MSG_EXP_GET = 1, IPCTNL_MSG_EXP_DELETE = 2, IPCTNL_MSG_EXP_GET_STATS_CPU = 3, IPCTNL_MSG_EXP_MAX = 4, }; enum ctattr_expect { CTA_EXPECT_UNSPEC = 0, CTA_EXPECT_MASTER = 1, CTA_EXPECT_TUPLE = 2, CTA_EXPECT_MASK = 3, CTA_EXPECT_TIMEOUT = 4, CTA_EXPECT_ID = 5, CTA_EXPECT_HELP_NAME = 6, CTA_EXPECT_ZONE = 7, CTA_EXPECT_FLAGS = 8, CTA_EXPECT_CLASS = 9, CTA_EXPECT_NAT = 10, CTA_EXPECT_FN = 11, __CTA_EXPECT_MAX = 12, }; enum ctattr_expect_nat { CTA_EXPECT_NAT_UNSPEC = 0, CTA_EXPECT_NAT_DIR = 1, CTA_EXPECT_NAT_TUPLE = 2, __CTA_EXPECT_NAT_MAX = 3, }; enum ctattr_expect_stats { CTA_STATS_EXP_UNSPEC = 0, CTA_STATS_EXP_NEW = 1, CTA_STATS_EXP_CREATE = 2, CTA_STATS_EXP_DELETE = 3, __CTA_STATS_EXP_MAX = 4, }; enum ctattr_filter { CTA_FILTER_UNSPEC = 0, CTA_FILTER_ORIG_FLAGS = 1, CTA_FILTER_REPLY_FLAGS = 2, __CTA_FILTER_MAX = 3, }; enum ctattr_stats_cpu { CTA_STATS_UNSPEC = 0, CTA_STATS_SEARCHED = 1, CTA_STATS_FOUND = 2, CTA_STATS_NEW = 3, CTA_STATS_INVALID = 4, CTA_STATS_IGNORE = 5, CTA_STATS_DELETE = 6, CTA_STATS_DELETE_LIST = 7, CTA_STATS_INSERT = 8, CTA_STATS_INSERT_FAILED = 9, CTA_STATS_DROP = 10, CTA_STATS_EARLY_DROP = 11, CTA_STATS_ERROR = 12, CTA_STATS_SEARCH_RESTART = 13, CTA_STATS_CLASH_RESOLVE = 14, CTA_STATS_CHAIN_TOOLONG = 15, __CTA_STATS_MAX = 16, }; enum ctattr_stats_global { CTA_STATS_GLOBAL_UNSPEC = 0, CTA_STATS_GLOBAL_ENTRIES = 1, CTA_STATS_GLOBAL_MAX_ENTRIES = 2, __CTA_STATS_GLOBAL_MAX = 3, }; struct ctnetlink_filter_u32 { u32 val; u32 mask; }; struct ctnetlink_filter { u8 family; u_int32_t orig_flags; u_int32_t reply_flags; struct nf_conntrack_tuple orig; struct nf_conntrack_tuple reply; struct nf_conntrack_zone zone; struct ctnetlink_filter_u32 mark; struct ctnetlink_filter_u32 status; }; struct ctnetlink_list_dump_ctx { struct nf_conn *last; unsigned int cpu; bool done; }; enum amanda_strings { SEARCH_CONNECT = 0, SEARCH_NEWLINE = 1, SEARCH_DATA = 2, SEARCH_MESG = 3, SEARCH_INDEX = 4, SEARCH_STATE = 5, }; enum nf_ct_ftp_type { NF_CT_FTP_PORT = 0, NF_CT_FTP_PASV = 1, NF_CT_FTP_EPRT = 2, NF_CT_FTP_EPSV = 3, }; struct ftp_search { const char *pattern; 
size_t plen; char skip; char term; enum nf_ct_ftp_type ftptype; int (*getnum)(const char *, size_t, struct nf_conntrack_man *, char, unsigned int *); }; struct nf_ct_ftp_master { u_int32_t seq_aft_nl[4]; u_int16_t seq_aft_nl_num[2]; u_int16_t flags[2]; }; struct H245_TransportAddress; typedef struct H245_TransportAddress H245_TransportAddress; struct TransportAddress; typedef struct TransportAddress TransportAddress; struct nfct_h323_nat_hooks { int (*set_h245_addr)(struct sk_buff *, unsigned int, unsigned char **, int, H245_TransportAddress *, union nf_inet_addr *, __be16); int (*set_h225_addr)(struct sk_buff *, unsigned int, unsigned char **, int, TransportAddress *, union nf_inet_addr *, __be16); int (*set_sig_addr)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, unsigned int, unsigned char **, TransportAddress *, int); int (*set_ras_addr)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, unsigned int, unsigned char **, TransportAddress *, int); int (*nat_rtp_rtcp)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, unsigned int, unsigned char **, int, H245_TransportAddress *, __be16, __be16, struct nf_conntrack_expect *, struct nf_conntrack_expect *); int (*nat_t120)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, unsigned int, unsigned char **, int, H245_TransportAddress *, __be16, struct nf_conntrack_expect *); int (*nat_h245)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, unsigned int, unsigned char **, int, TransportAddress *, __be16, struct nf_conntrack_expect *); int (*nat_callforwarding)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, unsigned int, unsigned char **, int, TransportAddress *, __be16, struct nf_conntrack_expect *); int (*nat_q931)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, unsigned int, unsigned char **, TransportAddress *, int, __be16, struct nf_conntrack_expect *); }; struct UnicastAddress_iPAddress { int options; unsigned int network; }; typedef struct UnicastAddress_iPAddress UnicastAddress_iPAddress; struct UnicastAddress_iP6Address { int options; unsigned int network; }; typedef struct UnicastAddress_iP6Address UnicastAddress_iP6Address; struct UnicastAddress { enum { eUnicastAddress_iPAddress = 0, eUnicastAddress_iPXAddress = 1, eUnicastAddress_iP6Address = 2, eUnicastAddress_netBios = 3, eUnicastAddress_iPSourceRouteAddress = 4, eUnicastAddress_nsap = 5, eUnicastAddress_nonStandardAddress = 6, } choice; union { UnicastAddress_iPAddress iPAddress; UnicastAddress_iP6Address iP6Address; }; }; typedef struct UnicastAddress UnicastAddress; struct H245_TransportAddress { enum { eH245_TransportAddress_unicastAddress = 0, eH245_TransportAddress_multicastAddress = 1, } choice; union { UnicastAddress unicastAddress; }; }; struct TransportAddress_ipAddress { int options; unsigned int ip; }; typedef struct TransportAddress_ipAddress TransportAddress_ipAddress; struct TransportAddress_ip6Address { int options; unsigned int ip; }; typedef struct TransportAddress_ip6Address TransportAddress_ip6Address; struct TransportAddress { enum { eTransportAddress_ipAddress = 0, eTransportAddress_ipSourceRoute = 1, eTransportAddress_ipxAddress = 2, eTransportAddress_ip6Address = 3, eTransportAddress_netBios = 4, eTransportAddress_nsap = 5, eTransportAddress_nonStandardAddress = 6, } choice; union { TransportAddress_ipAddress ipAddress; TransportAddress_ip6Address ip6Address; }; }; struct GatekeeperRequest { enum { eGatekeeperRequest_nonStandardData = -2147483648, 
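/*
 * Everything from struct nfct_h323_nat_hooks above down to the Q931 and
 * H323_UserInformation definitions further below is the decoded-message model
 * used by the nf_conntrack_h323 helper (H.225 RAS, Q.931 call signalling and
 * H.245). The generated types follow one convention: in a SEQUENCE, "options" is
 * a presence bitmask whose e<Type>_<field> enumerators mark which OPTIONAL fields
 * were present in the packet, while in a CHOICE, "choice" holds one e<Type>_<alt>
 * value and the union carries only the alternatives the conntrack/NAT helpers
 * actually inspect (transport addresses and fastStart channel lists). A decoded
 * Setup, for instance, is only walked after checking
 * (setup->options & eSetup_UUIE_fastStart).
 */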
eGatekeeperRequest_gatekeeperIdentifier = 1073741824, eGatekeeperRequest_callServices = 536870912, eGatekeeperRequest_endpointAlias = 268435456, eGatekeeperRequest_alternateEndpoints = 134217728, eGatekeeperRequest_tokens = 67108864, eGatekeeperRequest_cryptoTokens = 33554432, eGatekeeperRequest_authenticationCapability = 16777216, eGatekeeperRequest_algorithmOIDs = 8388608, eGatekeeperRequest_integrity = 4194304, eGatekeeperRequest_integrityCheckValue = 2097152, eGatekeeperRequest_supportsAltGK = 1048576, eGatekeeperRequest_featureSet = 524288, eGatekeeperRequest_genericData = 262144, } options; TransportAddress rasAddress; }; typedef struct GatekeeperRequest GatekeeperRequest; struct GatekeeperConfirm { enum { eGatekeeperConfirm_nonStandardData = -2147483648, eGatekeeperConfirm_gatekeeperIdentifier = 1073741824, eGatekeeperConfirm_alternateGatekeeper = 536870912, eGatekeeperConfirm_authenticationMode = 268435456, eGatekeeperConfirm_tokens = 134217728, eGatekeeperConfirm_cryptoTokens = 67108864, eGatekeeperConfirm_algorithmOID = 33554432, eGatekeeperConfirm_integrity = 16777216, eGatekeeperConfirm_integrityCheckValue = 8388608, eGatekeeperConfirm_featureSet = 4194304, eGatekeeperConfirm_genericData = 2097152, } options; TransportAddress rasAddress; }; typedef struct GatekeeperConfirm GatekeeperConfirm; struct RegistrationRequest_callSignalAddress { int count; TransportAddress item[10]; }; typedef struct RegistrationRequest_callSignalAddress RegistrationRequest_callSignalAddress; struct RegistrationRequest_rasAddress { int count; TransportAddress item[10]; }; typedef struct RegistrationRequest_rasAddress RegistrationRequest_rasAddress; struct RegistrationRequest { enum { eRegistrationRequest_nonStandardData = -2147483648, eRegistrationRequest_terminalAlias = 1073741824, eRegistrationRequest_gatekeeperIdentifier = 536870912, eRegistrationRequest_alternateEndpoints = 268435456, eRegistrationRequest_timeToLive = 134217728, eRegistrationRequest_tokens = 67108864, eRegistrationRequest_cryptoTokens = 33554432, eRegistrationRequest_integrityCheckValue = 16777216, eRegistrationRequest_keepAlive = 8388608, eRegistrationRequest_endpointIdentifier = 4194304, eRegistrationRequest_willSupplyUUIEs = 2097152, eRegistrationRequest_maintainConnection = 1048576, eRegistrationRequest_alternateTransportAddresses = 524288, eRegistrationRequest_additiveRegistration = 262144, eRegistrationRequest_terminalAliasPattern = 131072, eRegistrationRequest_supportsAltGK = 65536, eRegistrationRequest_usageReportingCapability = 32768, eRegistrationRequest_multipleCalls = 16384, eRegistrationRequest_supportedH248Packages = 8192, eRegistrationRequest_callCreditCapability = 4096, eRegistrationRequest_capacityReportingCapability = 2048, eRegistrationRequest_capacity = 1024, eRegistrationRequest_featureSet = 512, eRegistrationRequest_genericData = 256, } options; RegistrationRequest_callSignalAddress callSignalAddress; RegistrationRequest_rasAddress rasAddress; unsigned int timeToLive; }; typedef struct RegistrationRequest RegistrationRequest; struct RegistrationConfirm_callSignalAddress { int count; TransportAddress item[10]; }; typedef struct RegistrationConfirm_callSignalAddress RegistrationConfirm_callSignalAddress; struct RegistrationConfirm { enum { eRegistrationConfirm_nonStandardData = -2147483648, eRegistrationConfirm_terminalAlias = 1073741824, eRegistrationConfirm_gatekeeperIdentifier = 536870912, eRegistrationConfirm_alternateGatekeeper = 268435456, eRegistrationConfirm_timeToLive = 134217728, 
eRegistrationConfirm_tokens = 67108864, eRegistrationConfirm_cryptoTokens = 33554432, eRegistrationConfirm_integrityCheckValue = 16777216, eRegistrationConfirm_willRespondToIRR = 8388608, eRegistrationConfirm_preGrantedARQ = 4194304, eRegistrationConfirm_maintainConnection = 2097152, eRegistrationConfirm_serviceControl = 1048576, eRegistrationConfirm_supportsAdditiveRegistration = 524288, eRegistrationConfirm_terminalAliasPattern = 262144, eRegistrationConfirm_supportedPrefixes = 131072, eRegistrationConfirm_usageSpec = 65536, eRegistrationConfirm_featureServerAlias = 32768, eRegistrationConfirm_capacityReportingSpec = 16384, eRegistrationConfirm_featureSet = 8192, eRegistrationConfirm_genericData = 4096, } options; RegistrationConfirm_callSignalAddress callSignalAddress; unsigned int timeToLive; }; typedef struct RegistrationConfirm RegistrationConfirm; struct UnregistrationRequest_callSignalAddress { int count; TransportAddress item[10]; }; typedef struct UnregistrationRequest_callSignalAddress UnregistrationRequest_callSignalAddress; struct UnregistrationRequest { enum { eUnregistrationRequest_endpointAlias = -2147483648, eUnregistrationRequest_nonStandardData = 1073741824, eUnregistrationRequest_endpointIdentifier = 536870912, eUnregistrationRequest_alternateEndpoints = 268435456, eUnregistrationRequest_gatekeeperIdentifier = 134217728, eUnregistrationRequest_tokens = 67108864, eUnregistrationRequest_cryptoTokens = 33554432, eUnregistrationRequest_integrityCheckValue = 16777216, eUnregistrationRequest_reason = 8388608, eUnregistrationRequest_endpointAliasPattern = 4194304, eUnregistrationRequest_supportedPrefixes = 2097152, eUnregistrationRequest_alternateGatekeeper = 1048576, eUnregistrationRequest_genericData = 524288, } options; UnregistrationRequest_callSignalAddress callSignalAddress; }; typedef struct UnregistrationRequest UnregistrationRequest; struct AdmissionRequest { enum { eAdmissionRequest_callModel = -2147483648, eAdmissionRequest_destinationInfo = 1073741824, eAdmissionRequest_destCallSignalAddress = 536870912, eAdmissionRequest_destExtraCallInfo = 268435456, eAdmissionRequest_srcCallSignalAddress = 134217728, eAdmissionRequest_nonStandardData = 67108864, eAdmissionRequest_callServices = 33554432, eAdmissionRequest_canMapAlias = 16777216, eAdmissionRequest_callIdentifier = 8388608, eAdmissionRequest_srcAlternatives = 4194304, eAdmissionRequest_destAlternatives = 2097152, eAdmissionRequest_gatekeeperIdentifier = 1048576, eAdmissionRequest_tokens = 524288, eAdmissionRequest_cryptoTokens = 262144, eAdmissionRequest_integrityCheckValue = 131072, eAdmissionRequest_transportQOS = 65536, eAdmissionRequest_willSupplyUUIEs = 32768, eAdmissionRequest_callLinkage = 16384, eAdmissionRequest_gatewayDataRate = 8192, eAdmissionRequest_capacity = 4096, eAdmissionRequest_circuitInfo = 2048, eAdmissionRequest_desiredProtocols = 1024, eAdmissionRequest_desiredTunnelledProtocol = 512, eAdmissionRequest_featureSet = 256, eAdmissionRequest_genericData = 128, } options; TransportAddress destCallSignalAddress; TransportAddress srcCallSignalAddress; }; typedef struct AdmissionRequest AdmissionRequest; struct AdmissionConfirm { enum { eAdmissionConfirm_irrFrequency = -2147483648, eAdmissionConfirm_nonStandardData = 1073741824, eAdmissionConfirm_destinationInfo = 536870912, eAdmissionConfirm_destExtraCallInfo = 268435456, eAdmissionConfirm_destinationType = 134217728, eAdmissionConfirm_remoteExtensionAddress = 67108864, eAdmissionConfirm_alternateEndpoints = 33554432, eAdmissionConfirm_tokens = 
16777216, eAdmissionConfirm_cryptoTokens = 8388608, eAdmissionConfirm_integrityCheckValue = 4194304, eAdmissionConfirm_transportQOS = 2097152, eAdmissionConfirm_willRespondToIRR = 1048576, eAdmissionConfirm_uuiesRequested = 524288, eAdmissionConfirm_language = 262144, eAdmissionConfirm_alternateTransportAddresses = 131072, eAdmissionConfirm_useSpecifiedTransport = 65536, eAdmissionConfirm_circuitInfo = 32768, eAdmissionConfirm_usageSpec = 16384, eAdmissionConfirm_supportedProtocols = 8192, eAdmissionConfirm_serviceControl = 4096, eAdmissionConfirm_multipleCalls = 2048, eAdmissionConfirm_featureSet = 1024, eAdmissionConfirm_genericData = 512, } options; TransportAddress destCallSignalAddress; }; typedef struct AdmissionConfirm AdmissionConfirm; struct LocationRequest { enum { eLocationRequest_endpointIdentifier = -2147483648, eLocationRequest_nonStandardData = 1073741824, eLocationRequest_sourceInfo = 536870912, eLocationRequest_canMapAlias = 268435456, eLocationRequest_gatekeeperIdentifier = 134217728, eLocationRequest_tokens = 67108864, eLocationRequest_cryptoTokens = 33554432, eLocationRequest_integrityCheckValue = 16777216, eLocationRequest_desiredProtocols = 8388608, eLocationRequest_desiredTunnelledProtocol = 4194304, eLocationRequest_featureSet = 2097152, eLocationRequest_genericData = 1048576, eLocationRequest_hopCount = 524288, eLocationRequest_circuitInfo = 262144, } options; TransportAddress replyAddress; }; typedef struct LocationRequest LocationRequest; struct LocationConfirm { enum { eLocationConfirm_nonStandardData = -2147483648, eLocationConfirm_destinationInfo = 1073741824, eLocationConfirm_destExtraCallInfo = 536870912, eLocationConfirm_destinationType = 268435456, eLocationConfirm_remoteExtensionAddress = 134217728, eLocationConfirm_alternateEndpoints = 67108864, eLocationConfirm_tokens = 33554432, eLocationConfirm_cryptoTokens = 16777216, eLocationConfirm_integrityCheckValue = 8388608, eLocationConfirm_alternateTransportAddresses = 4194304, eLocationConfirm_supportedProtocols = 2097152, eLocationConfirm_multipleCalls = 1048576, eLocationConfirm_featureSet = 524288, eLocationConfirm_genericData = 262144, eLocationConfirm_circuitInfo = 131072, eLocationConfirm_serviceControl = 65536, } options; TransportAddress callSignalAddress; TransportAddress rasAddress; }; typedef struct LocationConfirm LocationConfirm; struct InfoRequestResponse_callSignalAddress { int count; TransportAddress item[10]; }; typedef struct InfoRequestResponse_callSignalAddress InfoRequestResponse_callSignalAddress; struct InfoRequestResponse { enum { eInfoRequestResponse_nonStandardData = -2147483648, eInfoRequestResponse_endpointAlias = 1073741824, eInfoRequestResponse_perCallInfo = 536870912, eInfoRequestResponse_tokens = 268435456, eInfoRequestResponse_cryptoTokens = 134217728, eInfoRequestResponse_integrityCheckValue = 67108864, eInfoRequestResponse_needResponse = 33554432, eInfoRequestResponse_capacity = 16777216, eInfoRequestResponse_irrStatus = 8388608, eInfoRequestResponse_unsolicited = 4194304, eInfoRequestResponse_genericData = 2097152, } options; TransportAddress rasAddress; InfoRequestResponse_callSignalAddress callSignalAddress; }; typedef struct InfoRequestResponse InfoRequestResponse; struct RasMessage { enum { eRasMessage_gatekeeperRequest = 0, eRasMessage_gatekeeperConfirm = 1, eRasMessage_gatekeeperReject = 2, eRasMessage_registrationRequest = 3, eRasMessage_registrationConfirm = 4, eRasMessage_registrationReject = 5, eRasMessage_unregistrationRequest = 6, 
eRasMessage_unregistrationConfirm = 7, eRasMessage_unregistrationReject = 8, eRasMessage_admissionRequest = 9, eRasMessage_admissionConfirm = 10, eRasMessage_admissionReject = 11, eRasMessage_bandwidthRequest = 12, eRasMessage_bandwidthConfirm = 13, eRasMessage_bandwidthReject = 14, eRasMessage_disengageRequest = 15, eRasMessage_disengageConfirm = 16, eRasMessage_disengageReject = 17, eRasMessage_locationRequest = 18, eRasMessage_locationConfirm = 19, eRasMessage_locationReject = 20, eRasMessage_infoRequest = 21, eRasMessage_infoRequestResponse = 22, eRasMessage_nonStandardMessage = 23, eRasMessage_unknownMessageResponse = 24, eRasMessage_requestInProgress = 25, eRasMessage_resourcesAvailableIndicate = 26, eRasMessage_resourcesAvailableConfirm = 27, eRasMessage_infoRequestAck = 28, eRasMessage_infoRequestNak = 29, eRasMessage_serviceControlIndication = 30, eRasMessage_serviceControlResponse = 31, } choice; union { GatekeeperRequest gatekeeperRequest; GatekeeperConfirm gatekeeperConfirm; RegistrationRequest registrationRequest; RegistrationConfirm registrationConfirm; UnregistrationRequest unregistrationRequest; AdmissionRequest admissionRequest; AdmissionConfirm admissionConfirm; LocationRequest locationRequest; LocationConfirm locationConfirm; InfoRequestResponse infoRequestResponse; }; }; typedef struct RasMessage RasMessage; struct DataProtocolCapability { enum { eDataProtocolCapability_nonStandard = 0, eDataProtocolCapability_v14buffered = 1, eDataProtocolCapability_v42lapm = 2, eDataProtocolCapability_hdlcFrameTunnelling = 3, eDataProtocolCapability_h310SeparateVCStack = 4, eDataProtocolCapability_h310SingleVCStack = 5, eDataProtocolCapability_transparent = 6, eDataProtocolCapability_segmentationAndReassembly = 7, eDataProtocolCapability_hdlcFrameTunnelingwSAR = 8, eDataProtocolCapability_v120 = 9, eDataProtocolCapability_separateLANStack = 10, eDataProtocolCapability_v76wCompression = 11, eDataProtocolCapability_tcp = 12, eDataProtocolCapability_udp = 13, } choice; }; typedef struct DataProtocolCapability DataProtocolCapability; struct DataApplicationCapability_application { enum { eDataApplicationCapability_application_nonStandard = 0, eDataApplicationCapability_application_t120 = 1, eDataApplicationCapability_application_dsm_cc = 2, eDataApplicationCapability_application_userData = 3, eDataApplicationCapability_application_t84 = 4, eDataApplicationCapability_application_t434 = 5, eDataApplicationCapability_application_h224 = 6, eDataApplicationCapability_application_nlpid = 7, eDataApplicationCapability_application_dsvdControl = 8, eDataApplicationCapability_application_h222DataPartitioning = 9, eDataApplicationCapability_application_t30fax = 10, eDataApplicationCapability_application_t140 = 11, eDataApplicationCapability_application_t38fax = 12, eDataApplicationCapability_application_genericDataCapability = 13, } choice; union { DataProtocolCapability t120; }; }; typedef struct DataApplicationCapability_application DataApplicationCapability_application; struct DataApplicationCapability { int options; DataApplicationCapability_application application; }; typedef struct DataApplicationCapability DataApplicationCapability; struct DataType { enum { eDataType_nonStandard = 0, eDataType_nullData = 1, eDataType_videoData = 2, eDataType_audioData = 3, eDataType_data = 4, eDataType_encryptionData = 5, eDataType_h235Control = 6, eDataType_h235Media = 7, eDataType_multiplexedStream = 8, } choice; union { DataApplicationCapability data; }; }; typedef struct DataType DataType; struct 
H2250LogicalChannelParameters { enum { eH2250LogicalChannelParameters_nonStandard = -2147483648, eH2250LogicalChannelParameters_associatedSessionID = 1073741824, eH2250LogicalChannelParameters_mediaChannel = 536870912, eH2250LogicalChannelParameters_mediaGuaranteedDelivery = 268435456, eH2250LogicalChannelParameters_mediaControlChannel = 134217728, eH2250LogicalChannelParameters_mediaControlGuaranteedDelivery = 67108864, eH2250LogicalChannelParameters_silenceSuppression = 33554432, eH2250LogicalChannelParameters_destination = 16777216, eH2250LogicalChannelParameters_dynamicRTPPayloadType = 8388608, eH2250LogicalChannelParameters_mediaPacketization = 4194304, eH2250LogicalChannelParameters_transportCapability = 2097152, eH2250LogicalChannelParameters_redundancyEncoding = 1048576, eH2250LogicalChannelParameters_source = 524288, } options; H245_TransportAddress mediaChannel; H245_TransportAddress mediaControlChannel; }; typedef struct H2250LogicalChannelParameters H2250LogicalChannelParameters; struct OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters { enum { eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h222LogicalChannelParameters = 0, eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h223LogicalChannelParameters = 1, eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_v76LogicalChannelParameters = 2, eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters = 3, eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_none = 4, } choice; union { H2250LogicalChannelParameters h2250LogicalChannelParameters; }; }; typedef struct OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters; struct OpenLogicalChannel_forwardLogicalChannelParameters { enum { eOpenLogicalChannel_forwardLogicalChannelParameters_portNumber = -2147483648, eOpenLogicalChannel_forwardLogicalChannelParameters_forwardLogicalChannelDependency = 1073741824, eOpenLogicalChannel_forwardLogicalChannelParameters_replacementFor = 536870912, } options; DataType dataType; OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters multiplexParameters; }; typedef struct OpenLogicalChannel_forwardLogicalChannelParameters OpenLogicalChannel_forwardLogicalChannelParameters; struct OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters { enum { eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h223LogicalChannelParameters = 0, eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_v76LogicalChannelParameters = 1, eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters = 2, } choice; union { H2250LogicalChannelParameters h2250LogicalChannelParameters; }; }; typedef struct OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters; struct OpenLogicalChannel_reverseLogicalChannelParameters { enum { eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters = -2147483648, eOpenLogicalChannel_reverseLogicalChannelParameters_reverseLogicalChannelDependency = 1073741824, eOpenLogicalChannel_reverseLogicalChannelParameters_replacementFor = 536870912, } options; OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters multiplexParameters; }; typedef struct 
OpenLogicalChannel_reverseLogicalChannelParameters OpenLogicalChannel_reverseLogicalChannelParameters; struct NetworkAccessParameters_networkAddress { enum { eNetworkAccessParameters_networkAddress_q2931Address = 0, eNetworkAccessParameters_networkAddress_e164Address = 1, eNetworkAccessParameters_networkAddress_localAreaAddress = 2, } choice; union { H245_TransportAddress localAreaAddress; }; }; typedef struct NetworkAccessParameters_networkAddress NetworkAccessParameters_networkAddress; struct NetworkAccessParameters { enum { eNetworkAccessParameters_distribution = -2147483648, eNetworkAccessParameters_externalReference = 1073741824, eNetworkAccessParameters_t120SetupProcedure = 536870912, } options; NetworkAccessParameters_networkAddress networkAddress; }; typedef struct NetworkAccessParameters NetworkAccessParameters; struct OpenLogicalChannel { enum { eOpenLogicalChannel_reverseLogicalChannelParameters = -2147483648, eOpenLogicalChannel_separateStack = 1073741824, eOpenLogicalChannel_encryptionSync = 536870912, } options; OpenLogicalChannel_forwardLogicalChannelParameters forwardLogicalChannelParameters; OpenLogicalChannel_reverseLogicalChannelParameters reverseLogicalChannelParameters; NetworkAccessParameters separateStack; }; typedef struct OpenLogicalChannel OpenLogicalChannel; struct Setup_UUIE_fastStart { int count; OpenLogicalChannel item[30]; }; typedef struct Setup_UUIE_fastStart Setup_UUIE_fastStart; struct Setup_UUIE { enum { eSetup_UUIE_h245Address = -2147483648, eSetup_UUIE_sourceAddress = 1073741824, eSetup_UUIE_destinationAddress = 536870912, eSetup_UUIE_destCallSignalAddress = 268435456, eSetup_UUIE_destExtraCallInfo = 134217728, eSetup_UUIE_destExtraCRV = 67108864, eSetup_UUIE_callServices = 33554432, eSetup_UUIE_sourceCallSignalAddress = 16777216, eSetup_UUIE_remoteExtensionAddress = 8388608, eSetup_UUIE_callIdentifier = 4194304, eSetup_UUIE_h245SecurityCapability = 2097152, eSetup_UUIE_tokens = 1048576, eSetup_UUIE_cryptoTokens = 524288, eSetup_UUIE_fastStart = 262144, eSetup_UUIE_mediaWaitForConnect = 131072, eSetup_UUIE_canOverlapSend = 65536, eSetup_UUIE_endpointIdentifier = 32768, eSetup_UUIE_multipleCalls = 16384, eSetup_UUIE_maintainConnection = 8192, eSetup_UUIE_connectionParameters = 4096, eSetup_UUIE_language = 2048, eSetup_UUIE_presentationIndicator = 1024, eSetup_UUIE_screeningIndicator = 512, eSetup_UUIE_serviceControl = 256, eSetup_UUIE_symmetricOperationRequired = 128, eSetup_UUIE_capacity = 64, eSetup_UUIE_circuitInfo = 32, eSetup_UUIE_desiredProtocols = 16, eSetup_UUIE_neededFeatures = 8, eSetup_UUIE_desiredFeatures = 4, eSetup_UUIE_supportedFeatures = 2, eSetup_UUIE_parallelH245Control = 1, } options; TransportAddress h245Address; TransportAddress destCallSignalAddress; TransportAddress sourceCallSignalAddress; Setup_UUIE_fastStart fastStart; }; typedef struct Setup_UUIE Setup_UUIE; struct CallProceeding_UUIE_fastStart { int count; OpenLogicalChannel item[30]; }; typedef struct CallProceeding_UUIE_fastStart CallProceeding_UUIE_fastStart; struct CallProceeding_UUIE { enum { eCallProceeding_UUIE_h245Address = -2147483648, eCallProceeding_UUIE_callIdentifier = 1073741824, eCallProceeding_UUIE_h245SecurityMode = 536870912, eCallProceeding_UUIE_tokens = 268435456, eCallProceeding_UUIE_cryptoTokens = 134217728, eCallProceeding_UUIE_fastStart = 67108864, eCallProceeding_UUIE_multipleCalls = 33554432, eCallProceeding_UUIE_maintainConnection = 16777216, eCallProceeding_UUIE_fastConnectRefused = 8388608, eCallProceeding_UUIE_featureSet = 4194304, } options; 
TransportAddress h245Address; CallProceeding_UUIE_fastStart fastStart; }; typedef struct CallProceeding_UUIE CallProceeding_UUIE; struct Connect_UUIE_fastStart { int count; OpenLogicalChannel item[30]; }; typedef struct Connect_UUIE_fastStart Connect_UUIE_fastStart; struct Connect_UUIE { enum { eConnect_UUIE_h245Address = -2147483648, eConnect_UUIE_callIdentifier = 1073741824, eConnect_UUIE_h245SecurityMode = 536870912, eConnect_UUIE_tokens = 268435456, eConnect_UUIE_cryptoTokens = 134217728, eConnect_UUIE_fastStart = 67108864, eConnect_UUIE_multipleCalls = 33554432, eConnect_UUIE_maintainConnection = 16777216, eConnect_UUIE_language = 8388608, eConnect_UUIE_connectedAddress = 4194304, eConnect_UUIE_presentationIndicator = 2097152, eConnect_UUIE_screeningIndicator = 1048576, eConnect_UUIE_fastConnectRefused = 524288, eConnect_UUIE_serviceControl = 262144, eConnect_UUIE_capacity = 131072, eConnect_UUIE_featureSet = 65536, } options; TransportAddress h245Address; Connect_UUIE_fastStart fastStart; }; typedef struct Connect_UUIE Connect_UUIE; struct Alerting_UUIE_fastStart { int count; OpenLogicalChannel item[30]; }; typedef struct Alerting_UUIE_fastStart Alerting_UUIE_fastStart; struct Alerting_UUIE { enum { eAlerting_UUIE_h245Address = -2147483648, eAlerting_UUIE_callIdentifier = 1073741824, eAlerting_UUIE_h245SecurityMode = 536870912, eAlerting_UUIE_tokens = 268435456, eAlerting_UUIE_cryptoTokens = 134217728, eAlerting_UUIE_fastStart = 67108864, eAlerting_UUIE_multipleCalls = 33554432, eAlerting_UUIE_maintainConnection = 16777216, eAlerting_UUIE_alertingAddress = 8388608, eAlerting_UUIE_presentationIndicator = 4194304, eAlerting_UUIE_screeningIndicator = 2097152, eAlerting_UUIE_fastConnectRefused = 1048576, eAlerting_UUIE_serviceControl = 524288, eAlerting_UUIE_capacity = 262144, eAlerting_UUIE_featureSet = 131072, } options; TransportAddress h245Address; Alerting_UUIE_fastStart fastStart; }; typedef struct Alerting_UUIE Alerting_UUIE; struct FacilityReason { enum { eFacilityReason_routeCallToGatekeeper = 0, eFacilityReason_callForwarded = 1, eFacilityReason_routeCallToMC = 2, eFacilityReason_undefinedReason = 3, eFacilityReason_conferenceListChoice = 4, eFacilityReason_startH245 = 5, eFacilityReason_noH245 = 6, eFacilityReason_newTokens = 7, eFacilityReason_featureSetUpdate = 8, eFacilityReason_forwardedElements = 9, eFacilityReason_transportedInformation = 10, } choice; }; typedef struct FacilityReason FacilityReason; struct Facility_UUIE_fastStart { int count; OpenLogicalChannel item[30]; }; typedef struct Facility_UUIE_fastStart Facility_UUIE_fastStart; struct Facility_UUIE { enum { eFacility_UUIE_alternativeAddress = -2147483648, eFacility_UUIE_alternativeAliasAddress = 1073741824, eFacility_UUIE_conferenceID = 536870912, eFacility_UUIE_callIdentifier = 268435456, eFacility_UUIE_destExtraCallInfo = 134217728, eFacility_UUIE_remoteExtensionAddress = 67108864, eFacility_UUIE_tokens = 33554432, eFacility_UUIE_cryptoTokens = 16777216, eFacility_UUIE_conferences = 8388608, eFacility_UUIE_h245Address = 4194304, eFacility_UUIE_fastStart = 2097152, eFacility_UUIE_multipleCalls = 1048576, eFacility_UUIE_maintainConnection = 524288, eFacility_UUIE_fastConnectRefused = 262144, eFacility_UUIE_serviceControl = 131072, eFacility_UUIE_circuitInfo = 65536, eFacility_UUIE_featureSet = 32768, eFacility_UUIE_destinationInfo = 16384, eFacility_UUIE_h245SecurityMode = 8192, } options; TransportAddress alternativeAddress; FacilityReason reason; TransportAddress h245Address; Facility_UUIE_fastStart 
fastStart; }; typedef struct Facility_UUIE Facility_UUIE; struct Progress_UUIE_fastStart { int count; OpenLogicalChannel item[30]; }; typedef struct Progress_UUIE_fastStart Progress_UUIE_fastStart; struct Progress_UUIE { enum { eProgress_UUIE_h245Address = -2147483648, eProgress_UUIE_h245SecurityMode = 1073741824, eProgress_UUIE_tokens = 536870912, eProgress_UUIE_cryptoTokens = 268435456, eProgress_UUIE_fastStart = 134217728, eProgress_UUIE_multipleCalls = 67108864, eProgress_UUIE_maintainConnection = 33554432, eProgress_UUIE_fastConnectRefused = 16777216, } options; TransportAddress h245Address; Progress_UUIE_fastStart fastStart; }; typedef struct Progress_UUIE Progress_UUIE; struct H323_UU_PDU_h323_message_body { enum { eH323_UU_PDU_h323_message_body_setup = 0, eH323_UU_PDU_h323_message_body_callProceeding = 1, eH323_UU_PDU_h323_message_body_connect = 2, eH323_UU_PDU_h323_message_body_alerting = 3, eH323_UU_PDU_h323_message_body_information = 4, eH323_UU_PDU_h323_message_body_releaseComplete = 5, eH323_UU_PDU_h323_message_body_facility = 6, eH323_UU_PDU_h323_message_body_progress = 7, eH323_UU_PDU_h323_message_body_empty = 8, eH323_UU_PDU_h323_message_body_status = 9, eH323_UU_PDU_h323_message_body_statusInquiry = 10, eH323_UU_PDU_h323_message_body_setupAcknowledge = 11, eH323_UU_PDU_h323_message_body_notify = 12, } choice; union { Setup_UUIE setup; CallProceeding_UUIE callProceeding; Connect_UUIE connect; Alerting_UUIE alerting; Facility_UUIE facility; Progress_UUIE progress; }; }; typedef struct H323_UU_PDU_h323_message_body H323_UU_PDU_h323_message_body; struct RequestMessage { enum { eRequestMessage_nonStandard = 0, eRequestMessage_masterSlaveDetermination = 1, eRequestMessage_terminalCapabilitySet = 2, eRequestMessage_openLogicalChannel = 3, eRequestMessage_closeLogicalChannel = 4, eRequestMessage_requestChannelClose = 5, eRequestMessage_multiplexEntrySend = 6, eRequestMessage_requestMultiplexEntry = 7, eRequestMessage_requestMode = 8, eRequestMessage_roundTripDelayRequest = 9, eRequestMessage_maintenanceLoopRequest = 10, eRequestMessage_communicationModeRequest = 11, eRequestMessage_conferenceRequest = 12, eRequestMessage_multilinkRequest = 13, eRequestMessage_logicalChannelRateRequest = 14, } choice; union { OpenLogicalChannel openLogicalChannel; }; }; typedef struct RequestMessage RequestMessage; struct OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters { enum { eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h222LogicalChannelParameters = 0, eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters = 1, } choice; union { H2250LogicalChannelParameters h2250LogicalChannelParameters; }; }; typedef struct OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters; struct OpenLogicalChannelAck_reverseLogicalChannelParameters { enum { eOpenLogicalChannelAck_reverseLogicalChannelParameters_portNumber = -2147483648, eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters = 1073741824, eOpenLogicalChannelAck_reverseLogicalChannelParameters_replacementFor = 536870912, } options; OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters multiplexParameters; }; typedef struct OpenLogicalChannelAck_reverseLogicalChannelParameters OpenLogicalChannelAck_reverseLogicalChannelParameters; struct H2250LogicalChannelAckParameters { enum { 
eH2250LogicalChannelAckParameters_nonStandard = -2147483648, eH2250LogicalChannelAckParameters_sessionID = 1073741824, eH2250LogicalChannelAckParameters_mediaChannel = 536870912, eH2250LogicalChannelAckParameters_mediaControlChannel = 268435456, eH2250LogicalChannelAckParameters_dynamicRTPPayloadType = 134217728, eH2250LogicalChannelAckParameters_flowControlToZero = 67108864, eH2250LogicalChannelAckParameters_portNumber = 33554432, } options; H245_TransportAddress mediaChannel; H245_TransportAddress mediaControlChannel; }; typedef struct H2250LogicalChannelAckParameters H2250LogicalChannelAckParameters; struct OpenLogicalChannelAck_forwardMultiplexAckParameters { enum { eOpenLogicalChannelAck_forwardMultiplexAckParameters_h2250LogicalChannelAckParameters = 0, } choice; union { H2250LogicalChannelAckParameters h2250LogicalChannelAckParameters; }; }; typedef struct OpenLogicalChannelAck_forwardMultiplexAckParameters OpenLogicalChannelAck_forwardMultiplexAckParameters; struct OpenLogicalChannelAck { enum { eOpenLogicalChannelAck_reverseLogicalChannelParameters = -2147483648, eOpenLogicalChannelAck_separateStack = 1073741824, eOpenLogicalChannelAck_forwardMultiplexAckParameters = 536870912, eOpenLogicalChannelAck_encryptionSync = 268435456, } options; OpenLogicalChannelAck_reverseLogicalChannelParameters reverseLogicalChannelParameters; NetworkAccessParameters separateStack; OpenLogicalChannelAck_forwardMultiplexAckParameters forwardMultiplexAckParameters; }; typedef struct OpenLogicalChannelAck OpenLogicalChannelAck; struct ResponseMessage { enum { eResponseMessage_nonStandard = 0, eResponseMessage_masterSlaveDeterminationAck = 1, eResponseMessage_masterSlaveDeterminationReject = 2, eResponseMessage_terminalCapabilitySetAck = 3, eResponseMessage_terminalCapabilitySetReject = 4, eResponseMessage_openLogicalChannelAck = 5, eResponseMessage_openLogicalChannelReject = 6, eResponseMessage_closeLogicalChannelAck = 7, eResponseMessage_requestChannelCloseAck = 8, eResponseMessage_requestChannelCloseReject = 9, eResponseMessage_multiplexEntrySendAck = 10, eResponseMessage_multiplexEntrySendReject = 11, eResponseMessage_requestMultiplexEntryAck = 12, eResponseMessage_requestMultiplexEntryReject = 13, eResponseMessage_requestModeAck = 14, eResponseMessage_requestModeReject = 15, eResponseMessage_roundTripDelayResponse = 16, eResponseMessage_maintenanceLoopAck = 17, eResponseMessage_maintenanceLoopReject = 18, eResponseMessage_communicationModeResponse = 19, eResponseMessage_conferenceResponse = 20, eResponseMessage_multilinkResponse = 21, eResponseMessage_logicalChannelRateAcknowledge = 22, eResponseMessage_logicalChannelRateReject = 23, } choice; union { OpenLogicalChannelAck openLogicalChannelAck; }; }; typedef struct ResponseMessage ResponseMessage; struct MultimediaSystemControlMessage { enum { eMultimediaSystemControlMessage_request = 0, eMultimediaSystemControlMessage_response = 1, eMultimediaSystemControlMessage_command = 2, eMultimediaSystemControlMessage_indication = 3, } choice; union { RequestMessage request; ResponseMessage response; }; }; typedef struct MultimediaSystemControlMessage MultimediaSystemControlMessage; struct H323_UU_PDU_h245Control { int count; MultimediaSystemControlMessage item[4]; }; typedef struct H323_UU_PDU_h245Control H323_UU_PDU_h245Control; struct H323_UU_PDU { enum { eH323_UU_PDU_nonStandardData = -2147483648, eH323_UU_PDU_h4501SupplementaryService = 1073741824, eH323_UU_PDU_h245Tunneling = 536870912, eH323_UU_PDU_h245Control = 268435456, 
eH323_UU_PDU_nonStandardControl = 134217728, eH323_UU_PDU_callLinkage = 67108864, eH323_UU_PDU_tunnelledSignallingMessage = 33554432, eH323_UU_PDU_provisionalRespToH245Tunneling = 16777216, eH323_UU_PDU_stimulusControl = 8388608, eH323_UU_PDU_genericData = 4194304, } options; H323_UU_PDU_h323_message_body h323_message_body; H323_UU_PDU_h245Control h245Control; }; typedef struct H323_UU_PDU H323_UU_PDU; struct H323_UserInformation { enum { eH323_UserInformation_user_data = -2147483648, } options; H323_UU_PDU h323_uu_pdu; }; typedef struct H323_UserInformation H323_UserInformation; typedef struct { enum { Q931_NationalEscape = 0, Q931_Alerting = 1, Q931_CallProceeding = 2, Q931_Connect = 7, Q931_ConnectAck = 15, Q931_Progress = 3, Q931_Setup = 5, Q931_SetupAck = 13, Q931_Resume = 38, Q931_ResumeAck = 46, Q931_ResumeReject = 34, Q931_Suspend = 37, Q931_SuspendAck = 45, Q931_SuspendReject = 33, Q931_UserInformation = 32, Q931_Disconnect = 69, Q931_Release = 77, Q931_ReleaseComplete = 90, Q931_Restart = 70, Q931_RestartAck = 78, Q931_Segment = 96, Q931_CongestionCtrl = 121, Q931_Information = 123, Q931_Notify = 110, Q931_Status = 125, Q931_StatusEnquiry = 117, Q931_Facility = 98, } MessageType; H323_UserInformation UUIE; } Q931; struct nf_ct_h323_master { __be16 sig_port[2]; __be16 rtp_port[8]; union { u_int32_t timeout; u_int16_t tpkt_len[2]; }; }; struct field_t { unsigned char type; unsigned char sz; unsigned char lb; unsigned char ub; unsigned short attr; unsigned short offset; const struct field_t *fields; }; struct bitstr; typedef int (*decoder_t)(struct bitstr *, const struct field_t *, char *, int); struct bitstr { unsigned char *buf; unsigned char *beg; unsigned char *end; unsigned char *cur; unsigned int bit; }; struct PptpControlHeader; union pptp_ctrl_union; struct nf_nat_pptp_hook { int (*outbound)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, unsigned int, struct PptpControlHeader *, union pptp_ctrl_union *); int (*inbound)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, unsigned int, struct PptpControlHeader *, union pptp_ctrl_union *); void (*exp_gre)(struct nf_conntrack_expect *, struct nf_conntrack_expect *); void (*expectfn)(struct nf_conn *, struct nf_conntrack_expect *); }; struct PptpControlHeader { __be16 messageType; __u16 reserved; }; struct PptpStartSessionRequest { __be16 protocolVersion; __u16 reserved1; __be32 framingCapability; __be32 bearerCapability; __be16 maxChannels; __be16 firmwareRevision; __u8 hostName[64]; __u8 vendorString[64]; }; struct PptpStartSessionReply { __be16 protocolVersion; __u8 resultCode; __u8 generalErrorCode; __be32 framingCapability; __be32 bearerCapability; __be16 maxChannels; __be16 firmwareRevision; __u8 hostName[64]; __u8 vendorString[64]; }; struct PptpStopSessionRequest { __u8 reason; __u8 reserved1; __u16 reserved2; }; struct PptpStopSessionReply { __u8 resultCode; __u8 generalErrorCode; __u16 reserved1; }; struct PptpOutCallRequest { __be16 callID; __be16 callSerialNumber; __be32 minBPS; __be32 maxBPS; __be32 bearerType; __be32 framingType; __be16 packetWindow; __be16 packetProcDelay; __be16 phoneNumberLength; __u16 reserved1; __u8 phoneNumber[64]; __u8 subAddress[64]; }; struct PptpOutCallReply { __be16 callID; __be16 peersCallID; __u8 resultCode; __u8 generalErrorCode; __be16 causeCode; __be32 connectSpeed; __be16 packetWindow; __be16 packetProcDelay; __be32 physChannelID; }; struct PptpInCallRequest { __be16 callID; __be16 callSerialNumber; __be32 callBearerType; __be32 physChannelID; __be16 
dialedNumberLength; __be16 dialingNumberLength; __u8 dialedNumber[64]; __u8 dialingNumber[64]; __u8 subAddress[64]; }; struct PptpInCallReply { __be16 callID; __be16 peersCallID; __u8 resultCode; __u8 generalErrorCode; __be16 packetWindow; __be16 packetProcDelay; __u16 reserved; }; struct PptpInCallConnected { __be16 peersCallID; __u16 reserved; __be32 connectSpeed; __be16 packetWindow; __be16 packetProcDelay; __be32 callFramingType; }; struct PptpClearCallRequest { __be16 callID; __u16 reserved; }; struct PptpCallDisconnectNotify { __be16 callID; __u8 resultCode; __u8 generalErrorCode; __be16 causeCode; __u16 reserved; __u8 callStatistics[128]; }; struct PptpWanErrorNotify { __be16 peersCallID; __u16 reserved; __be32 crcErrors; __be32 framingErrors; __be32 hardwareOverRuns; __be32 bufferOverRuns; __be32 timeoutErrors; __be32 alignmentErrors; }; struct PptpSetLinkInfo { __be16 peersCallID; __u16 reserved; __be32 sendAccm; __be32 recvAccm; }; union pptp_ctrl_union { struct PptpStartSessionRequest sreq; struct PptpStartSessionReply srep; struct PptpStopSessionRequest streq; struct PptpStopSessionReply strep; struct PptpOutCallRequest ocreq; struct PptpOutCallReply ocack; struct PptpInCallRequest icreq; struct PptpInCallReply icack; struct PptpInCallConnected iccon; struct PptpClearCallRequest clrreq; struct PptpCallDisconnectNotify disc; struct PptpWanErrorNotify wanerr; struct PptpSetLinkInfo setlink; }; struct pptp_pkt_hdr { __u16 packetLength; __be16 packetType; __be32 magicCookie; }; enum sane_state { SANE_STATE_NORMAL = 0, SANE_STATE_START_REQUESTED = 1, }; struct sane_request { __be32 RPC_code; __be32 handle; }; struct sane_reply_net_start { __be32 status; __be16 zero; __be16 port; }; struct nf_ct_sane_master { enum sane_state state; }; struct tftphdr { __be16 opcode; }; enum ctattr_nat { CTA_NAT_UNSPEC = 0, CTA_NAT_V4_MINIP = 1, CTA_NAT_V4_MAXIP = 2, CTA_NAT_PROTO = 3, CTA_NAT_V6_MINIP = 4, CTA_NAT_V6_MAXIP = 5, __CTA_NAT_MAX = 6, }; enum ctattr_protonat { CTA_PROTONAT_UNSPEC = 0, CTA_PROTONAT_PORT_MIN = 1, CTA_PROTONAT_PORT_MAX = 2, __CTA_PROTONAT_MAX = 3, }; struct nf_nat_pptp { __be16 pns_call_id; __be16 pac_call_id; }; union nf_conntrack_nat_help { struct nf_nat_pptp nat_pptp_info; }; struct nf_conn_nat { union nf_conntrack_nat_help help; int masq_index; }; struct nf_nat_range2 { unsigned int flags; union nf_inet_addr min_addr; union nf_inet_addr max_addr; union nf_conntrack_man_proto min_proto; union nf_conntrack_man_proto max_proto; union nf_conntrack_man_proto base_proto; }; struct nf_nat_proto_clean { u8 l3proto; u8 l4proto; }; struct nf_nat_lookup_hook_priv { struct nf_hook_entries __attribute__((btf_type_tag("rcu"))) *entries; struct callback_head callback_head; }; struct nf_nat_hooks_net { struct nf_hook_ops *nat_hook_ops; unsigned int users; }; struct nat_net { struct nf_nat_hooks_net nat_proto_net[11]; }; struct inet6_ifaddr { struct in6_addr addr; __u32 prefix_len; __u32 rt_priority; __u32 valid_lft; __u32 prefered_lft; refcount_t refcnt; spinlock_t lock; int state; __u32 flags; __u8 dad_probes; __u8 stable_privacy_retry; __u16 scope; __u64 dad_nonce; unsigned long cstamp; unsigned long tstamp; struct delayed_work dad_work; struct inet6_dev *idev; struct fib6_info *rt; struct hlist_node addr_lst; struct list_head if_list; struct list_head if_list_aux; struct list_head tmp_list; struct inet6_ifaddr *ifpub; int regen_count; bool tokenized; u8 ifa_proto; struct callback_head rcu; struct in6_addr peer_addr; }; struct masq_dev_work { struct work_struct work; struct net *net; 
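/*
 * struct nf_nat_range2 above is the current in-kernel NAT mapping descriptor
 * (address and protocol min/max plus base_proto for port-offset mappings); the
 * older nf_nat_range and nf_nat_ipv4_range/_multi_range_compat layouts further
 * below survive only for the legacy xtables ABI. inet6_ifaddr and this
 * masq_dev_work structure are what the masquerade core uses to drop conntrack
 * entries whose source mapping became stale after an interface address change.
 */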
netns_tracker ns_tracker; union nf_inet_addr addr; int ifindex; int (*iter)(struct nf_conn *, void *); }; struct nf_conncount_tuple { struct list_head node; struct nf_conntrack_tuple tuple; struct nf_conntrack_zone zone; int cpu; u32 jiffies32; }; struct nf_conncount_list { spinlock_t list_lock; u32 last_gc; struct list_head head; unsigned int count; }; struct nf_conncount_rb { struct rb_node node; struct nf_conncount_list list; u32 key[5]; struct callback_head callback_head; }; struct nf_conncount_data { unsigned int keylen; struct rb_root root[256]; struct net *net; struct work_struct gc_work; unsigned long pending_trees[4]; unsigned int gc_tree; }; struct xt_af { struct mutex mutex; struct list_head match; struct list_head target; }; enum { MTTG_TRAV_INIT = 0, MTTG_TRAV_NFP_UNSPEC = 1, MTTG_TRAV_NFP_SPEC = 2, MTTG_TRAV_DONE = 3, }; struct xt_action_param; struct xt_mtchk_param; struct xt_mtdtor_param; struct xt_match { struct list_head list; const char name[29]; u_int8_t revision; bool (*match)(const struct sk_buff *, struct xt_action_param *); int (*checkentry)(const struct xt_mtchk_param *); void (*destroy)(const struct xt_mtdtor_param *); struct module *me; const char *table; unsigned int matchsize; unsigned int usersize; unsigned int hooks; unsigned short proto; unsigned short family; }; struct xt_target; struct xt_action_param { union { const struct xt_match *match; const struct xt_target *target; }; union { const void *matchinfo; const void *targinfo; }; const struct nf_hook_state *state; unsigned int thoff; u16 fragoff; bool hotdrop; }; struct xt_tgchk_param; struct xt_tgdtor_param; struct xt_target { struct list_head list; const char name[29]; u_int8_t revision; unsigned int (*target)(struct sk_buff *, const struct xt_action_param *); int (*checkentry)(const struct xt_tgchk_param *); void (*destroy)(const struct xt_tgdtor_param *); struct module *me; const char *table; unsigned int targetsize; unsigned int usersize; unsigned int hooks; unsigned short proto; unsigned short family; }; struct xt_tgchk_param { struct net *net; const char *table; const void *entryinfo; const struct xt_target *target; void *targinfo; unsigned int hook_mask; u_int8_t family; bool nft_compat; }; struct xt_tgdtor_param { struct net *net; const struct xt_target *target; void *targinfo; u_int8_t family; }; struct xt_mtchk_param { struct net *net; const char *table; const void *entryinfo; const struct xt_match *match; void *matchinfo; unsigned int hook_mask; u_int8_t family; bool nft_compat; }; struct xt_mtdtor_param { struct net *net; const struct xt_match *match; void *matchinfo; u_int8_t family; }; struct xt_table_info; struct xt_table { struct list_head list; unsigned int valid_hooks; struct xt_table_info *private; struct nf_hook_ops *ops; struct module *me; u_int8_t af; int priority; const char name[32]; }; struct xt_table_info { unsigned int size; unsigned int number; unsigned int initial_entries; unsigned int hook_entry[5]; unsigned int underflow[5]; unsigned int stacksize; void ***jumpstack; unsigned char entries[0]; }; struct xt_template { struct list_head list; int (*table_init)(struct net *); struct module *me; char name[32]; }; struct xt_entry_match { union { struct { __u16 match_size; char name[29]; __u8 revision; } user; struct { __u16 match_size; struct xt_match *match; } kernel; __u16 match_size; } u; unsigned char data[0]; }; struct nf_mttg_trav { struct list_head *head; struct list_head *curr; uint8_t class; }; struct xt_counters { __u64 pcnt; __u64 bcnt; }; struct xt_entry_target { union 
{ struct { __u16 target_size; char name[29]; __u8 revision; } user; struct { __u16 target_size; struct xt_target *target; } kernel; __u16 target_size; } u; unsigned char data[0]; }; struct xt_error_target { struct xt_entry_target target; char errorname[30]; }; struct xt_standard_target { struct xt_entry_target target; int verdict; }; struct xt_counters_info { char name[32]; unsigned int num_counters; struct xt_counters counters[0]; }; struct xt_pernet { struct list_head tables[11]; }; struct xt_percpu_counter_alloc_state { unsigned int off; const char __attribute__((btf_type_tag("percpu"))) *mem; }; struct xt_tcp { __u16 spts[2]; __u16 dpts[2]; __u8 option; __u8 flg_mask; __u8 flg_cmp; __u8 invflags; }; struct xt_udp { __u16 spts[2]; __u16 dpts[2]; __u8 invflags; }; struct ipt_icmp { __u8 type; __u8 code[2]; __u8 invflags; }; struct ip6t_icmp { __u8 type; __u8 code[2]; __u8 invflags; }; struct xt_mark_mtinfo1 { __u32 mark; __u32 mask; __u8 invert; }; struct xt_mark_tginfo2 { __u32 mark; __u32 mask; }; enum { XT_CONNMARK_SET = 0, XT_CONNMARK_SAVE = 1, XT_CONNMARK_RESTORE = 2, }; enum { D_SHIFT_LEFT = 0, D_SHIFT_RIGHT = 1, }; struct xt_connmark_mtinfo1 { __u32 mark; __u32 mask; __u8 invert; }; struct xt_connmark_tginfo1 { __u32 ctmark; __u32 ctmask; __u32 nfmask; __u8 mode; }; struct xt_connmark_tginfo2 { __u32 ctmark; __u32 ctmask; __u32 nfmask; __u8 shift_dir; __u8 shift_bits; __u8 mode; }; struct nf_nat_ipv4_range { unsigned int flags; __be32 min_ip; __be32 max_ip; union nf_conntrack_man_proto min; union nf_conntrack_man_proto max; }; struct nf_nat_ipv4_multi_range_compat { unsigned int rangesize; struct nf_nat_ipv4_range range[1]; }; struct nf_nat_range { unsigned int flags; union nf_inet_addr min_addr; union nf_inet_addr max_addr; union nf_conntrack_man_proto min_proto; union nf_conntrack_man_proto max_proto; }; struct xt_classify_target_info { __u32 priority; }; enum { CONNSECMARK_SAVE = 1, CONNSECMARK_RESTORE = 2, }; struct xt_connsecmark_target_info { __u8 mode; }; enum { XT_CT_NOTRACK = 1, XT_CT_NOTRACK_ALIAS = 2, XT_CT_ZONE_DIR_ORIG = 4, XT_CT_ZONE_DIR_REPL = 8, XT_CT_ZONE_MARK = 16, XT_CT_MASK = 31, }; struct xt_ct_target_info_v1 { __u16 flags; __u16 zone; __u32 ct_events; __u32 exp_events; char helper[16]; char timeout[32]; struct nf_conn *ct; }; struct ipt_ip { struct in_addr src; struct in_addr dst; struct in_addr smsk; struct in_addr dmsk; char iniface[16]; char outiface[16]; unsigned char iniface_mask[16]; unsigned char outiface_mask[16]; __u16 proto; __u8 flags; __u8 invflags; }; struct ipt_entry { struct ipt_ip ip; unsigned int nfcache; __u16 target_offset; __u16 next_offset; unsigned int comefrom; struct xt_counters counters; unsigned char elems[0]; }; struct ip6t_ip6 { struct in6_addr src; struct in6_addr dst; struct in6_addr smsk; struct in6_addr dmsk; char iniface[16]; char outiface[16]; unsigned char iniface_mask[16]; unsigned char outiface_mask[16]; __u16 proto; __u8 tos; __u8 flags; __u8 invflags; }; struct ip6t_entry { struct ip6t_ip6 ipv6; unsigned int nfcache; __u16 target_offset; __u16 next_offset; unsigned int comefrom; struct xt_counters counters; unsigned char elems[0]; }; struct xt_ct_target_info { __u16 flags; __u16 zone; __u32 ct_events; __u32 exp_events; char helper[16]; struct nf_conn *ct; }; struct xt_DSCP_info { __u8 dscp; }; struct xt_tos_target_info { __u8 tos_value; __u8 tos_mask; }; struct xt_nflog_info { __u32 len; __u16 group; __u16 threshold; __u16 flags; __u16 pad; char prefix[64]; }; struct xt_NFQ_info_v1 { __u16 queuenum; __u16 queues_total; 
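/*
 * Layout reminder for the xtables blobs above: each struct ipt_entry/ip6t_entry
 * is immediately followed in memory by its xt_entry_match records and then by a
 * single xt_entry_target, with target_offset and next_offset expressed as byte
 * offsets from the start of the entry, so rules are walked roughly as:
 *
 *   struct xt_entry_target *t =
 *           (struct xt_entry_target *)((char *)e + e->target_offset);
 *   struct ipt_entry *next =
 *           (struct ipt_entry *)((char *)e + e->next_offset);
 *
 * The name[29]/revision pairs embedded in those records are resolved against the
 * registered xt_match/xt_target descriptors defined above.
 */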
}; struct xt_NFQ_info { __u16 queuenum; }; struct xt_NFQ_info_v3 { __u16 queuenum; __u16 queues_total; __u16 flags; }; struct xt_NFQ_info_v2 { __u16 queuenum; __u16 queues_total; __u16 bypass; }; struct xt_secmark_target_info_v1 { __u8 mode; char secctx[256]; __u32 secid; }; struct xt_secmark_target_info { __u8 mode; __u32 secid; char secctx[256]; }; enum nf_tproxy_lookup_t { NF_TPROXY_LOOKUP_LISTENER = 0, NF_TPROXY_LOOKUP_ESTABLISHED = 1, }; struct xt_tproxy_target_info { __u32 mark_mask; __u32 mark_value; __be32 laddr; __be16 lport; }; struct xt_tproxy_target_info_v1 { __u32 mark_mask; __u32 mark_value; union nf_inet_addr laddr; __be16 lport; }; struct xt_tcpmss_info { __u16 mss; }; struct xt_tee_tginfo; struct xt_tee_priv { struct list_head list; struct xt_tee_tginfo *tginfo; int oif; }; struct xt_tee_tginfo { union nf_inet_addr gw; char oif[16]; struct xt_tee_priv *priv; }; struct tee_net { struct list_head priv_list; struct mutex lock; }; struct idletimer_tg { struct list_head entry; struct alarm alarm; struct timer_list timer; struct work_struct work; struct kobject *kobj; struct device_attribute attr; struct timespec64 delayed_timer_trigger; struct timespec64 last_modified_timer; struct timespec64 last_suspend_time; struct notifier_block pm_nb; int timeout; unsigned int refcnt; u8 timer_type; bool work_pending; bool send_nl_msg; bool active; uid_t uid; bool suspend_time_valid; }; struct idletimer_tg_info { __u32 timeout; char label[28]; struct idletimer_tg *timer; }; struct idletimer_tg_info_v1 { __u32 timeout; char label[28]; __u8 send_nl_msg; __u8 timer_type; struct idletimer_tg *timer; }; enum xt_bpf_modes { XT_BPF_MODE_BYTECODE = 0, XT_BPF_MODE_FD_PINNED = 1, XT_BPF_MODE_FD_ELF = 2, }; struct xt_bpf_info { __u16 bpf_program_num_elem; struct sock_filter bpf_program[64]; struct bpf_prog *filter; }; struct xt_bpf_info_v1 { __u16 mode; __u16 bpf_program_num_elem; __s32 fd; union { struct sock_filter bpf_program[64]; char path[512]; }; struct bpf_prog *filter; }; enum xt_connbytes_what { XT_CONNBYTES_PKTS = 0, XT_CONNBYTES_BYTES = 1, XT_CONNBYTES_AVGPKT = 2, }; enum xt_connbytes_direction { XT_CONNBYTES_DIR_ORIGINAL = 0, XT_CONNBYTES_DIR_REPLY = 1, XT_CONNBYTES_DIR_BOTH = 2, }; struct xt_connbytes_info { struct { __u64 from; __u64 to; } count; __u8 what; __u8 direction; }; enum { XT_CONNLIMIT_INVERT = 1, XT_CONNLIMIT_DADDR = 2, }; struct xt_connlimit_info { union { union nf_inet_addr mask; }; unsigned int limit; __u32 flags; struct nf_conncount_data *data; }; enum { XT_CONNTRACK_STATE = 1, XT_CONNTRACK_PROTO = 2, XT_CONNTRACK_ORIGSRC = 4, XT_CONNTRACK_ORIGDST = 8, XT_CONNTRACK_REPLSRC = 16, XT_CONNTRACK_REPLDST = 32, XT_CONNTRACK_STATUS = 64, XT_CONNTRACK_EXPIRES = 128, XT_CONNTRACK_ORIGSRC_PORT = 256, XT_CONNTRACK_ORIGDST_PORT = 512, XT_CONNTRACK_REPLSRC_PORT = 1024, XT_CONNTRACK_REPLDST_PORT = 2048, XT_CONNTRACK_DIRECTION = 4096, XT_CONNTRACK_STATE_ALIAS = 8192, }; struct xt_conntrack_mtinfo2 { union nf_inet_addr origsrc_addr; union nf_inet_addr origsrc_mask; union nf_inet_addr origdst_addr; union nf_inet_addr origdst_mask; union nf_inet_addr replsrc_addr; union nf_inet_addr replsrc_mask; union nf_inet_addr repldst_addr; union nf_inet_addr repldst_mask; __u32 expires_min; __u32 expires_max; __u16 l4proto; __be16 origsrc_port; __be16 origdst_port; __be16 replsrc_port; __be16 repldst_port; __u16 match_flags; __u16 invert_flags; __u16 state_mask; __u16 status_mask; }; struct xt_conntrack_mtinfo3 { union nf_inet_addr origsrc_addr; union nf_inet_addr origsrc_mask; union nf_inet_addr 
origdst_addr; union nf_inet_addr origdst_mask; union nf_inet_addr replsrc_addr; union nf_inet_addr replsrc_mask; union nf_inet_addr repldst_addr; union nf_inet_addr repldst_mask; __u32 expires_min; __u32 expires_max; __u16 l4proto; __u16 origsrc_port; __u16 origdst_port; __u16 replsrc_port; __u16 repldst_port; __u16 match_flags; __u16 invert_flags; __u16 state_mask; __u16 status_mask; __u16 origsrc_port_high; __u16 origdst_port_high; __u16 replsrc_port_high; __u16 repldst_port_high; }; struct xt_conntrack_mtinfo1 { union nf_inet_addr origsrc_addr; union nf_inet_addr origsrc_mask; union nf_inet_addr origdst_addr; union nf_inet_addr origdst_mask; union nf_inet_addr replsrc_addr; union nf_inet_addr replsrc_mask; union nf_inet_addr repldst_addr; union nf_inet_addr repldst_mask; __u32 expires_min; __u32 expires_max; __u16 l4proto; __be16 origsrc_port; __be16 origdst_port; __be16 replsrc_port; __be16 repldst_port; __u16 match_flags; __u16 invert_flags; __u8 state_mask; __u8 status_mask; }; struct xt_dscp_info { __u8 dscp; __u8 invert; }; struct xt_tos_match_info { __u8 tos_mask; __u8 tos_value; __u8 invert; }; struct xt_ecn_info { __u8 operation; __u8 invert; __u8 ip_ect; union { struct { __u8 ect; } tcp; } proto; }; struct xt_esp { __u32 spis[2]; __u8 invflags; }; enum { XT_HASHLIMIT_HASH_DIP = 1, XT_HASHLIMIT_HASH_DPT = 2, XT_HASHLIMIT_HASH_SIP = 4, XT_HASHLIMIT_HASH_SPT = 8, XT_HASHLIMIT_INVERT = 16, XT_HASHLIMIT_BYTES = 32, XT_HASHLIMIT_RATE_MATCH = 64, }; struct hashlimit_cfg1 { __u32 mode; __u32 avg; __u32 burst; __u32 size; __u32 max; __u32 gc_interval; __u32 expire; __u8 srcmask; __u8 dstmask; }; struct hashlimit_cfg2 { __u64 avg; __u64 burst; __u32 mode; __u32 size; __u32 max; __u32 gc_interval; __u32 expire; __u8 srcmask; __u8 dstmask; }; struct dsthash_dst { union { struct { __be32 src; __be32 dst; } ip; struct { __be32 src[4]; __be32 dst[4]; } ip6; }; __be16 src_port; __be16 dst_port; }; struct dsthash_ent { struct hlist_node node; struct dsthash_dst dst; spinlock_t lock; unsigned long expires; struct { unsigned long prev; union { struct { u_int64_t credit; u_int64_t credit_cap; u_int64_t cost; }; struct { u_int32_t interval; u_int32_t prev_window; u_int64_t current_rate; u_int64_t rate; int64_t burst; }; }; } rateinfo; struct callback_head rcu; }; struct hashlimit_cfg3 { __u64 avg; __u64 burst; __u32 mode; __u32 size; __u32 max; __u32 gc_interval; __u32 expire; __u32 interval; __u8 srcmask; __u8 dstmask; }; struct xt_hashlimit_htable { struct hlist_node node; refcount_t use; u_int8_t family; bool rnd_initialized; struct hashlimit_cfg3 cfg; spinlock_t lock; u_int32_t rnd; unsigned int count; struct delayed_work gc_work; struct proc_dir_entry *pde; const char *name; struct net *net; struct hlist_head hash[0]; }; struct hashlimit_net { struct hlist_head htables; struct proc_dir_entry *ipt_hashlimit; struct proc_dir_entry *ip6t_hashlimit; }; struct xt_hashlimit_mtinfo1 { char name[16]; struct hashlimit_cfg1 cfg; struct xt_hashlimit_htable *hinfo; }; struct xt_hashlimit_mtinfo2 { char name[255]; struct hashlimit_cfg2 cfg; struct xt_hashlimit_htable *hinfo; }; struct xt_hashlimit_mtinfo3 { char name[255]; struct hashlimit_cfg3 cfg; struct xt_hashlimit_htable *hinfo; }; struct xt_helper_info { int invert; char name[30]; }; enum { IPT_TTL_EQ = 0, IPT_TTL_NE = 1, IPT_TTL_LT = 2, IPT_TTL_GT = 3, }; enum { IP6T_HL_EQ = 0, IP6T_HL_NE = 1, IP6T_HL_LT = 2, IP6T_HL_GT = 3, }; struct ipt_ttl_info { __u8 mode; __u8 ttl; }; struct ip6t_hl_info { __u8 mode; __u8 hop_limit; }; enum { IPRANGE_SRC = 1, 
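/*
 * The three hashlimit_cfg revisions above track the xt_hashlimit match ABI:
 * cfg1 keeps 32-bit avg/burst, cfg2 widens them to 64 bits (needed for
 * XT_HASHLIMIT_BYTES), and cfg3 adds the interval field used by
 * XT_HASHLIMIT_RATE_MATCH. dsthash_ent.rateinfo holds either the classic
 * token-bucket state (credit/credit_cap/cost) or the rate-estimator state,
 * depending on which mode the rule selected.
 */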
IPRANGE_DST = 2, IPRANGE_SRC_INV = 16, IPRANGE_DST_INV = 32, }; struct xt_iprange_mtinfo { union nf_inet_addr src_min; union nf_inet_addr src_max; union nf_inet_addr dst_min; union nf_inet_addr dst_max; __u8 flags; }; enum xt_l2tp_type { XT_L2TP_TYPE_CONTROL = 0, XT_L2TP_TYPE_DATA = 1, }; enum { XT_L2TP_TID = 1, XT_L2TP_SID = 2, XT_L2TP_VERSION = 4, XT_L2TP_TYPE = 8, }; struct xt_l2tp_info { __u32 tid; __u32 sid; __u8 version; __u8 type; __u8 flags; }; struct l2tp_data { u32 tid; u32 sid; u8 type; u8 version; }; union l2tp_val { __be16 val16[2]; __be32 val32; }; struct xt_length_info { __u16 min; __u16 max; __u8 invert; }; struct xt_limit_priv; struct xt_rateinfo { __u32 avg; __u32 burst; unsigned long prev; __u32 credit; __u32 credit_cap; __u32 cost; struct xt_limit_priv *master; }; struct xt_limit_priv { unsigned long prev; u32 credit; }; struct xt_mac_info { unsigned char srcaddr[6]; int invert; }; enum xt_multiport_flags { XT_MULTIPORT_SOURCE = 0, XT_MULTIPORT_DESTINATION = 1, XT_MULTIPORT_EITHER = 2, }; struct xt_multiport_v1 { __u8 flags; __u8 count; __u16 ports[15]; __u8 pflags[15]; __u8 invert; }; enum { XT_OWNER_UID = 1, XT_OWNER_GID = 2, XT_OWNER_SOCKET = 4, XT_OWNER_SUPPL_GROUPS = 8, }; struct xt_owner_match_info { __u32 uid_min; __u32 uid_max; __u32 gid_min; __u32 gid_max; __u8 match; __u8 invert; }; struct xt_pkttype_info { int pkttype; int invert; }; enum xt_policy_flags { XT_POLICY_MATCH_IN = 1, XT_POLICY_MATCH_OUT = 2, XT_POLICY_MATCH_NONE = 4, XT_POLICY_MATCH_STRICT = 8, }; struct xt_policy_spec { __u8 saddr: 1; __u8 daddr: 1; __u8 proto: 1; __u8 mode: 1; __u8 spi: 1; __u8 reqid: 1; }; struct xt_policy_elem { union { struct { union nf_inet_addr saddr; union nf_inet_addr smask; union nf_inet_addr daddr; union nf_inet_addr dmask; }; }; __be32 spi; __u32 reqid; __u8 proto; __u8 mode; struct xt_policy_spec match; struct xt_policy_spec invert; }; struct xt_policy_info { struct xt_policy_elem pol[4]; __u16 flags; __u16 len; }; enum xt_quota_flags { XT_QUOTA_INVERT = 1, }; struct xt_quota_priv { spinlock_t lock; uint64_t quota; }; struct xt_quota_info { __u32 flags; __u32 pad; __u64 quota; struct xt_quota_priv *master; }; enum xt_quota_flags___2 { XT_QUOTA_INVERT___2 = 1, XT_QUOTA_GROW = 2, XT_QUOTA_PACKET = 4, XT_QUOTA_NO_CHANGE = 8, XT_QUOTA_MASK = 15, }; struct xt_quota_counter { u_int64_t quota; spinlock_t lock; struct list_head list; atomic_t ref; char name[15]; struct proc_dir_entry *procfs_entry; }; struct ulog_packet_msg { unsigned long mark; long timestamp_sec; long timestamp_usec; unsigned int hook; char indev_name[16]; char outdev_name[16]; size_t data_len; char prefix[32]; unsigned char mac_len; unsigned char mac[80]; unsigned char payload[0]; }; typedef struct ulog_packet_msg ulog_packet_msg_t; struct xt_quota_mtinfo2 { char name[15]; u_int8_t flags; __u64 quota; struct xt_quota_counter *master; }; struct xt_socket_mtinfo1 { __u8 flags; }; enum { XT_SOCKET_TRANSPARENT = 1, XT_SOCKET_NOWILDCARD = 2, XT_SOCKET_RESTORESKMARK = 4, }; struct xt_socket_mtinfo2 { __u8 flags; }; struct xt_socket_mtinfo3 { __u8 flags; }; struct xt_state_info { unsigned int statemask; }; enum xt_statistic_flags { XT_STATISTIC_INVERT = 1, }; enum xt_statistic_mode { XT_STATISTIC_MODE_RANDOM = 0, XT_STATISTIC_MODE_NTH = 1, __XT_STATISTIC_MODE_MAX = 2, }; struct xt_statistic_priv; struct xt_statistic_info { __u16 mode; __u16 flags; union { struct { __u32 probability; } random; struct { __u32 every; __u32 packet; __u32 count; } nth; } u; struct xt_statistic_priv *master; }; struct 
xt_statistic_priv { atomic_t count; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; enum { XT_STRING_FLAG_INVERT = 1, XT_STRING_FLAG_IGNORECASE = 2, }; struct xt_string_info { __u16 from_offset; __u16 to_offset; char algo[16]; char pattern[128]; __u8 patlen; union { struct { __u8 invert; } v0; struct { __u8 flags; } v1; } u; struct ts_config *config; }; enum { XT_TIME_LOCAL_TZ = 1, XT_TIME_CONTIGUOUS = 2, XT_TIME_ALL_MONTHDAYS = 4294967294, XT_TIME_ALL_WEEKDAYS = 254, XT_TIME_MIN_DAYTIME = 0, XT_TIME_MAX_DAYTIME = 86399, }; enum { DSE_FIRST = 2039, SECONDS_PER_DAY = 86400, }; struct xtm { u_int8_t month; u_int8_t monthday; u_int8_t weekday; u_int8_t hour; u_int8_t minute; u_int8_t second; unsigned int dse; }; struct xt_time_info { __u32 date_start; __u32 date_stop; __u32 daytime_start; __u32 daytime_stop; __u32 monthdays_match; __u8 weekdays_match; __u8 flags; }; enum xt_u32_ops { XT_U32_AND = 0, XT_U32_LEFTSH = 1, XT_U32_RIGHTSH = 2, XT_U32_AT = 3, }; struct xt_u32_location_element { __u32 number; __u8 nextop; }; struct xt_u32_value_element { __u32 min; __u32 max; }; struct xt_u32_test { struct xt_u32_location_element location[11]; struct xt_u32_value_element value[11]; __u8 nnums; __u8 nvalues; }; struct xt_u32 { struct xt_u32_test tests[11]; __u8 ntests; __u8 invert; }; struct uncached_list { spinlock_t lock; struct list_head head; struct list_head quarantine; }; struct ip_sf_list { struct ip_sf_list *sf_next; unsigned long sf_count[2]; __be32 sf_inaddr; unsigned char sf_gsresp; unsigned char sf_oldin; unsigned char sf_crcount; }; struct rt_cache_stat { unsigned int in_slow_tot; unsigned int in_slow_mc; unsigned int in_no_route; unsigned int in_brd; unsigned int in_martian_dst; unsigned int in_martian_src; unsigned int out_slow_tot; unsigned int out_slow_mc; }; struct ip_sf_socklist { unsigned int sl_max; unsigned int sl_count; struct callback_head rcu; __be32 sl_addr[0]; }; typedef u8 dscp_t; struct fib_alias { struct hlist_node fa_list; struct fib_info *fa_info; dscp_t fa_dscp; u8 fa_type; u8 fa_state; u8 fa_slen; u32 tb_id; s16 fa_default; u8 offload; u8 trap; u8 offload_failed; struct callback_head rcu; }; struct ipv4_addr_key { __be32 addr; int vif; }; struct inetpeer_addr { union { struct ipv4_addr_key a4; struct in6_addr a6; u32 key[4]; }; __u16 family; }; struct inet_peer { struct rb_node rb_node; struct inetpeer_addr daddr; u32 metrics[17]; u32 rate_tokens; u32 n_redirects; unsigned long rate_last; union { struct { atomic_t rid; }; struct callback_head rcu; }; __u32 dtime; refcount_t refcnt; }; struct rtmsg { unsigned char rtm_family; unsigned char rtm_dst_len; unsigned char rtm_src_len; unsigned char rtm_tos; unsigned char rtm_table; unsigned char rtm_protocol; unsigned char rtm_scope; unsigned char rtm_type; unsigned int rtm_flags; }; struct fib_rt_info { struct fib_info *fi; u32 tb_id; __be32 dst; int dst_len; dscp_t dscp; u8 type; u8 offload: 1; u8 trap: 1; u8 offload_failed: 1; u8 unused: 5; }; struct rtvia { __kernel_sa_family_t rtvia_family; __u8 rtvia_addr[0]; }; struct net_offload { struct offload_callbacks callbacks; unsigned int flags; }; struct raw_hashinfo { spinlock_t lock; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct hlist_head ht[256]; }; enum ip_defrag_users { IP_DEFRAG_LOCAL_DELIVER = 0, IP_DEFRAG_CALL_RA_CHAIN = 1, IP_DEFRAG_CONNTRACK_IN = 2, __IP_DEFRAG_CONNTRACK_IN_END = 65537, IP_DEFRAG_CONNTRACK_OUT = 65538, __IP_DEFRAG_CONNTRACK_OUT_END = 131073, IP_DEFRAG_CONNTRACK_BRIDGE_IN = 131074, 
__IP_DEFRAG_CONNTRACK_BRIDGE_IN = 196609, IP_DEFRAG_VS_IN = 196610, IP_DEFRAG_VS_OUT = 196611, IP_DEFRAG_VS_FWD = 196612, IP_DEFRAG_AF_PACKET = 196613, IP_DEFRAG_MACVLAN = 196614, }; enum { XFRM_DEV_OFFLOAD_UNSPECIFIED = 0, XFRM_DEV_OFFLOAD_CRYPTO = 1, XFRM_DEV_OFFLOAD_PACKET = 2, }; enum { INET_FRAG_FIRST_IN = 1, INET_FRAG_LAST_IN = 2, INET_FRAG_COMPLETE = 4, INET_FRAG_HASH_DEAD = 8, INET_FRAG_DROP = 16, }; struct ipq { struct inet_frag_queue q; u8 ecn; u16 max_df_size; int iif; unsigned int rid; struct inet_peer *peer; }; enum pkt_hash_types { PKT_HASH_TYPE_NONE = 0, PKT_HASH_TYPE_L2 = 1, PKT_HASH_TYPE_L3 = 2, PKT_HASH_TYPE_L4 = 3, }; enum { LWTUNNEL_XMIT_DONE = 0, LWTUNNEL_XMIT_CONTINUE = 256, }; struct ip_frag_state { bool DF; unsigned int hlen; unsigned int ll_rs; unsigned int mtu; unsigned int left; int offset; int ptr; __be16 not_last_frag; }; struct ip_fraglist_iter { struct sk_buff *frag; struct iphdr *iph; int offset; unsigned int hlen; }; struct ipcm_cookie { struct sockcm_cookie sockc; __be32 addr; int oif; struct ip_options_rcu *opt; __u8 protocol; __u8 ttl; __s16 tos; char priority; __u16 gso_size; u64 android_kabi_reserved1; }; struct ip_reply_arg { struct kvec iov[1]; int flags; __wsum csum; int csumoffset; int bound_dev_if; u8 tos; kuid_t uid; }; struct ip_options_data { struct ip_options_rcu opt; char data[40]; }; struct in_pktinfo { int ipi_ifindex; struct in_addr ipi_spec_dst; struct in_addr ipi_addr; }; struct ip_mreq_source { __be32 imr_multiaddr; __be32 imr_interface; __be32 imr_sourceaddr; }; struct ip_msfilter { __be32 imsf_multiaddr; __be32 imsf_interface; __u32 imsf_fmode; __u32 imsf_numsrc; union { __be32 imsf_slist[1]; struct { struct {} __empty_imsf_slist_flex; __be32 imsf_slist_flex[0]; }; }; }; struct group_source_req { __u32 gsr_interface; struct __kernel_sockaddr_storage gsr_group; struct __kernel_sockaddr_storage gsr_source; }; struct compat_group_source_req { __u32 gsr_interface; struct __kernel_sockaddr_storage gsr_group; struct __kernel_sockaddr_storage gsr_source; } __attribute__((packed)); struct group_filter { union { struct { __u32 gf_interface_aux; struct __kernel_sockaddr_storage gf_group_aux; __u32 gf_fmode_aux; __u32 gf_numsrc_aux; struct __kernel_sockaddr_storage gf_slist[1]; }; struct { __u32 gf_interface; struct __kernel_sockaddr_storage gf_group; __u32 gf_fmode; __u32 gf_numsrc; struct __kernel_sockaddr_storage gf_slist_flex[0]; }; }; }; struct compat_group_req { __u32 gr_interface; struct __kernel_sockaddr_storage gr_group; } __attribute__((packed)); struct group_req { __u32 gr_interface; struct __kernel_sockaddr_storage gr_group; }; struct compat_group_filter { union { struct { __u32 gf_interface_aux; struct __kernel_sockaddr_storage gf_group_aux; __u32 gf_fmode_aux; __u32 gf_numsrc_aux; struct __kernel_sockaddr_storage gf_slist[1]; } __attribute__((packed)); struct { __u32 gf_interface; struct __kernel_sockaddr_storage gf_group; __u32 gf_fmode; __u32 gf_numsrc; struct __kernel_sockaddr_storage gf_slist_flex[0]; } __attribute__((packed)); }; }; typedef u32 inet_ehashfn_t(const struct net *, const __be32, const __u16, const __be32, const __be16); struct tcpvegas_info { __u32 tcpv_enabled; __u32 tcpv_rttcnt; __u32 tcpv_rtt; __u32 tcpv_minrtt; }; struct tcp_dctcp_info { __u16 dctcp_enabled; __u16 dctcp_ce_state; __u32 dctcp_alpha; __u32 dctcp_ab_ecn; __u32 dctcp_ab_tot; }; struct tcp_bbr_info { __u32 bbr_bw_lo; __u32 bbr_bw_hi; __u32 bbr_min_rtt; __u32 bbr_pacing_gain; __u32 bbr_cwnd_gain; }; union tcp_cc_info { struct tcpvegas_info vegas; 
struct tcp_dctcp_info dctcp; struct tcp_bbr_info bbr; }; enum tsq_enum { TSQ_THROTTLED = 0, TSQ_QUEUED = 1, TCP_TSQ_DEFERRED = 2, TCP_WRITE_TIMER_DEFERRED = 3, TCP_DELACK_TIMER_DEFERRED = 4, TCP_MTU_REDUCED_DEFERRED = 5, }; enum tcp_chrono { TCP_CHRONO_UNSPEC = 0, TCP_CHRONO_BUSY = 1, TCP_CHRONO_RWND_LIMITED = 2, TCP_CHRONO_SNDBUF_LIMITED = 3, __TCP_CHRONO_MAX = 4, }; enum { TCP_NO_QUEUE = 0, TCP_RECV_QUEUE = 1, TCP_SEND_QUEUE = 2, TCP_QUEUES_NR = 3, }; enum inet_csk_ack_state_t { ICSK_ACK_SCHED = 1, ICSK_ACK_TIMER = 2, ICSK_ACK_PUSHED = 4, ICSK_ACK_PUSHED2 = 8, ICSK_ACK_NOW = 16, ICSK_ACK_NOMEM = 32, }; enum { TCP_CMSG_INQ = 1, TCP_CMSG_TS = 2, }; enum { BPF_TCP_ESTABLISHED = 1, BPF_TCP_SYN_SENT = 2, BPF_TCP_SYN_RECV = 3, BPF_TCP_FIN_WAIT1 = 4, BPF_TCP_FIN_WAIT2 = 5, BPF_TCP_TIME_WAIT = 6, BPF_TCP_CLOSE = 7, BPF_TCP_CLOSE_WAIT = 8, BPF_TCP_LAST_ACK = 9, BPF_TCP_LISTEN = 10, BPF_TCP_CLOSING = 11, BPF_TCP_NEW_SYN_RECV = 12, BPF_TCP_MAX_STATES = 13, }; enum { TCP_MIB_NUM = 0, TCP_MIB_RTOALGORITHM = 1, TCP_MIB_RTOMIN = 2, TCP_MIB_RTOMAX = 3, TCP_MIB_MAXCONN = 4, TCP_MIB_ACTIVEOPENS = 5, TCP_MIB_PASSIVEOPENS = 6, TCP_MIB_ATTEMPTFAILS = 7, TCP_MIB_ESTABRESETS = 8, TCP_MIB_CURRESTAB = 9, TCP_MIB_INSEGS = 10, TCP_MIB_OUTSEGS = 11, TCP_MIB_RETRANSSEGS = 12, TCP_MIB_INERRS = 13, TCP_MIB_OUTRSTS = 14, TCP_MIB_CSUMERRORS = 15, __TCP_MIB_MAX = 16, }; enum { TCP_NLA_PAD = 0, TCP_NLA_BUSY = 1, TCP_NLA_RWND_LIMITED = 2, TCP_NLA_SNDBUF_LIMITED = 3, TCP_NLA_DATA_SEGS_OUT = 4, TCP_NLA_TOTAL_RETRANS = 5, TCP_NLA_PACING_RATE = 6, TCP_NLA_DELIVERY_RATE = 7, TCP_NLA_SND_CWND = 8, TCP_NLA_REORDERING = 9, TCP_NLA_MIN_RTT = 10, TCP_NLA_RECUR_RETRANS = 11, TCP_NLA_DELIVERY_RATE_APP_LMT = 12, TCP_NLA_SNDQ_SIZE = 13, TCP_NLA_CA_STATE = 14, TCP_NLA_SND_SSTHRESH = 15, TCP_NLA_DELIVERED = 16, TCP_NLA_DELIVERED_CE = 17, TCP_NLA_BYTES_SENT = 18, TCP_NLA_BYTES_RETRANS = 19, TCP_NLA_DSACK_DUPS = 20, TCP_NLA_REORD_SEEN = 21, TCP_NLA_SRTT = 22, TCP_NLA_TIMEOUT_REHASH = 23, TCP_NLA_BYTES_NOTSENT = 24, TCP_NLA_EDT = 25, TCP_NLA_TTL = 26, TCP_NLA_REHASH = 27, }; struct tcp_skb_cb { __u32 seq; __u32 end_seq; union { __u32 tcp_tw_isn; struct { u16 tcp_gso_segs; u16 tcp_gso_size; }; }; __u8 tcp_flags; __u8 sacked; __u8 ip_dsfield; __u8 txstamp_ack: 1; __u8 eor: 1; __u8 has_rxtstamp: 1; __u8 unused: 5; __u32 ack_seq; union { struct { __u32 is_app_limited: 1; __u32 delivered_ce: 20; __u32 unused: 11; __u32 delivered; u64 first_tx_mstamp; u64 delivered_mstamp; } tx; union { struct inet_skb_parm h4; struct inet6_skb_parm h6; } header; }; }; struct tcp_splice_state { struct pipe_inode_info *pipe; size_t len; unsigned int flags; }; struct tcp_info { __u8 tcpi_state; __u8 tcpi_ca_state; __u8 tcpi_retransmits; __u8 tcpi_probes; __u8 tcpi_backoff; __u8 tcpi_options; __u8 tcpi_snd_wscale: 4; __u8 tcpi_rcv_wscale: 4; __u8 tcpi_delivery_rate_app_limited: 1; __u8 tcpi_fastopen_client_fail: 2; __u32 tcpi_rto; __u32 tcpi_ato; __u32 tcpi_snd_mss; __u32 tcpi_rcv_mss; __u32 tcpi_unacked; __u32 tcpi_sacked; __u32 tcpi_lost; __u32 tcpi_retrans; __u32 tcpi_fackets; __u32 tcpi_last_data_sent; __u32 tcpi_last_ack_sent; __u32 tcpi_last_data_recv; __u32 tcpi_last_ack_recv; __u32 tcpi_pmtu; __u32 tcpi_rcv_ssthresh; __u32 tcpi_rtt; __u32 tcpi_rttvar; __u32 tcpi_snd_ssthresh; __u32 tcpi_snd_cwnd; __u32 tcpi_advmss; __u32 tcpi_reordering; __u32 tcpi_rcv_rtt; __u32 tcpi_rcv_space; __u32 tcpi_total_retrans; __u64 tcpi_pacing_rate; __u64 tcpi_max_pacing_rate; __u64 tcpi_bytes_acked; __u64 tcpi_bytes_received; __u32 tcpi_segs_out; __u32 tcpi_segs_in; __u32 
tcpi_notsent_bytes; __u32 tcpi_min_rtt; __u32 tcpi_data_segs_in; __u32 tcpi_data_segs_out; __u64 tcpi_delivery_rate; __u64 tcpi_busy_time; __u64 tcpi_rwnd_limited; __u64 tcpi_sndbuf_limited; __u32 tcpi_delivered; __u32 tcpi_delivered_ce; __u64 tcpi_bytes_sent; __u64 tcpi_bytes_retrans; __u32 tcpi_dsack_dups; __u32 tcpi_reord_seen; __u32 tcpi_rcv_ooopack; __u32 tcpi_snd_wnd; __u32 tcpi_rcv_wnd; __u32 tcpi_rehash; }; struct tcp_zerocopy_receive { __u64 address; __u32 length; __u32 recv_skip_hint; __u32 inq; __s32 err; __u64 copybuf_address; __s32 copybuf_len; __u32 flags; __u64 msg_control; __u64 msg_controllen; __u32 msg_flags; __u32 reserved; }; struct tcp_repair_opt { __u32 opt_code; __u32 opt_val; }; struct tcp_repair_window { __u32 snd_wl1; __u32 snd_wnd; __u32 max_window; __u32 rcv_wnd; __u32 rcv_wup; }; enum tcp_ca_ack_event_flags { CA_ACK_SLOWPATH = 1, CA_ACK_WIN_UPDATE = 2, CA_ACK_ECE = 4, }; enum tcp_queue { TCP_FRAG_IN_WRITE_QUEUE = 0, TCP_FRAG_IN_RTX_QUEUE = 1, }; enum tcp_fastopen_client_fail { TFO_STATUS_UNSPEC = 0, TFO_COOKIE_UNAVAILABLE = 1, TFO_DATA_NOT_ACKED = 2, TFO_SYN_RETRANSMITTED = 3, }; struct tcp_sacktag_state { u64 first_sackt; u64 last_sackt; u32 reord; u32 sack_delivered; int flag; unsigned int mss_now; struct rate_sample *rate; }; struct tsq_tasklet { struct tasklet_struct tasklet; struct list_head head; }; enum tsq_flags { TSQF_THROTTLED = 1, TSQF_QUEUED = 2, TCPF_TSQ_DEFERRED = 4, TCPF_WRITE_TIMER_DEFERRED = 8, TCPF_DELACK_TIMER_DEFERRED = 16, TCPF_MTU_REDUCED_DEFERRED = 32, }; enum { BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, }; struct mptcp_out_options {}; struct tcp_out_options { u16 options; u16 mss; u8 ws; u8 num_sack_blocks; u8 hash_size; u8 bpf_opt_len; __u8 *hash_location; __u32 tsval; __u32 tsecr; struct tcp_fastopen_cookie *fastopen_cookie; struct mptcp_out_options mptcp; }; union tcp_md5_addr { struct in_addr a4; struct in6_addr a6; }; struct tcp_md5sig_key { struct hlist_node node; u8 keylen; u8 family; u8 prefixlen; u8 flags; union tcp_md5_addr addr; int l3index; u8 key[80]; struct callback_head rcu; }; struct tcp_seq_afinfo { sa_family_t family; }; enum { ICMP_MIB_NUM = 0, ICMP_MIB_INMSGS = 1, ICMP_MIB_INERRORS = 2, ICMP_MIB_INDESTUNREACHS = 3, ICMP_MIB_INTIMEEXCDS = 4, ICMP_MIB_INPARMPROBS = 5, ICMP_MIB_INSRCQUENCHS = 6, ICMP_MIB_INREDIRECTS = 7, ICMP_MIB_INECHOS = 8, ICMP_MIB_INECHOREPS = 9, ICMP_MIB_INTIMESTAMPS = 10, ICMP_MIB_INTIMESTAMPREPS = 11, ICMP_MIB_INADDRMASKS = 12, ICMP_MIB_INADDRMASKREPS = 13, ICMP_MIB_OUTMSGS = 14, ICMP_MIB_OUTERRORS = 15, ICMP_MIB_OUTDESTUNREACHS = 16, ICMP_MIB_OUTTIMEEXCDS = 17, ICMP_MIB_OUTPARMPROBS = 18, ICMP_MIB_OUTSRCQUENCHS = 19, ICMP_MIB_OUTREDIRECTS = 20, ICMP_MIB_OUTECHOS = 21, ICMP_MIB_OUTECHOREPS = 22, ICMP_MIB_OUTTIMESTAMPS = 23, ICMP_MIB_OUTTIMESTAMPREPS = 24, ICMP_MIB_OUTADDRMASKS = 25, ICMP_MIB_OUTADDRMASKREPS = 26, ICMP_MIB_CSUMERRORS = 27, ICMP_MIB_RATELIMITGLOBAL = 28, ICMP_MIB_RATELIMITHOST = 29, __ICMP_MIB_MAX = 30, }; enum tcp_tw_status { TCP_TW_SUCCESS = 0, TCP_TW_RST = 1, TCP_TW_ACK = 2, TCP_TW_SYN = 3, }; enum tcp_seq_states { TCP_SEQ_STATE_LISTENING = 0, TCP_SEQ_STATE_ESTABLISHED = 1, }; struct tcp_iter_state { struct seq_net_private p; enum tcp_seq_states state; struct sock *syn_wait_sk; int bucket; int offset; int sbucket; int num; loff_t last_pos; }; struct bpf_iter__tcp { union { struct bpf_iter_meta *meta; }; union { struct sock_common *sk_common; }; uid_t uid; }; struct bpf_tcp_iter_state { struct tcp_iter_state state; unsigned int cur_sk; unsigned int 
end_sk; unsigned int max_sk; struct sock **batch; bool st_bucket_done; }; struct tcp_metrics_block; struct tcpm_hash_bucket { struct tcp_metrics_block __attribute__((btf_type_tag("rcu"))) *chain; }; struct tcp_fastopen_metrics { u16 mss; u16 syn_loss: 10; u16 try_exp: 2; unsigned long last_syn_loss; struct tcp_fastopen_cookie cookie; }; struct tcp_metrics_block { struct tcp_metrics_block __attribute__((btf_type_tag("rcu"))) *tcpm_next; struct net *tcpm_net; struct inetpeer_addr tcpm_saddr; struct inetpeer_addr tcpm_daddr; unsigned long tcpm_stamp; u32 tcpm_lock; u32 tcpm_vals[5]; struct tcp_fastopen_metrics tcpm_fastopen; struct callback_head callback_head; }; enum tcp_metric_index { TCP_METRIC_RTT = 0, TCP_METRIC_RTTVAR = 1, TCP_METRIC_SSTHRESH = 2, TCP_METRIC_CWND = 3, TCP_METRIC_REORDERING = 4, TCP_METRIC_RTT_US = 5, TCP_METRIC_RTTVAR_US = 6, __TCP_METRIC_MAX = 7, }; enum { TCP_METRICS_ATTR_UNSPEC = 0, TCP_METRICS_ATTR_ADDR_IPV4 = 1, TCP_METRICS_ATTR_ADDR_IPV6 = 2, TCP_METRICS_ATTR_AGE = 3, TCP_METRICS_ATTR_TW_TSVAL = 4, TCP_METRICS_ATTR_TW_TS_STAMP = 5, TCP_METRICS_ATTR_VALS = 6, TCP_METRICS_ATTR_FOPEN_MSS = 7, TCP_METRICS_ATTR_FOPEN_SYN_DROPS = 8, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS = 9, TCP_METRICS_ATTR_FOPEN_COOKIE = 10, TCP_METRICS_ATTR_SADDR_IPV4 = 11, TCP_METRICS_ATTR_SADDR_IPV6 = 12, TCP_METRICS_ATTR_PAD = 13, __TCP_METRICS_ATTR_MAX = 14, }; enum { TCP_METRICS_CMD_UNSPEC = 0, TCP_METRICS_CMD_GET = 1, TCP_METRICS_CMD_DEL = 2, __TCP_METRICS_CMD_MAX = 3, }; struct tcp_plb_state { u8 consec_cong_rounds: 5; u8 unused: 3; u32 pause_until; }; struct icmp_filter { __u32 data; }; struct raw_sock { struct inet_sock inet; struct icmp_filter filter; u32 ipmr_table; }; struct raw_frag_vec { struct msghdr *msg; union { struct icmphdr icmph; char c[1]; } hdr; int hlen; }; struct raw_iter_state { struct seq_net_private p; int bucket; }; struct udp_seq_afinfo { sa_family_t family; struct udp_table *udp_table; }; enum { UDP_FLAGS_CORK = 0, UDP_FLAGS_NO_CHECK6_TX = 1, UDP_FLAGS_NO_CHECK6_RX = 2, UDP_FLAGS_GRO_ENABLED = 3, UDP_FLAGS_ACCEPT_FRAGLIST = 4, UDP_FLAGS_ACCEPT_L4 = 5, UDP_FLAGS_ENCAP_ENABLED = 6, UDP_FLAGS_UDPLITE_SEND_CC = 7, UDP_FLAGS_UDPLITE_RECV_CC = 8, }; enum { UDP_MIB_NUM = 0, UDP_MIB_INDATAGRAMS = 1, UDP_MIB_NOPORTS = 2, UDP_MIB_INERRORS = 3, UDP_MIB_OUTDATAGRAMS = 4, UDP_MIB_RCVBUFERRORS = 5, UDP_MIB_SNDBUFERRORS = 6, UDP_MIB_CSUMERRORS = 7, UDP_MIB_IGNOREDMULTI = 8, UDP_MIB_MEMERRORS = 9, __UDP_MIB_MAX = 10, }; struct udp_skb_cb { union { struct inet_skb_parm h4; struct inet6_skb_parm h6; } header; __u16 cscov; __u8 partial_cov; }; struct ip_tunnel_encap_ops { size_t (*encap_hlen)(struct ip_tunnel_encap *); int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi4 *); int (*err_handler)(struct sk_buff *, u32); }; struct udp_dev_scratch { u32 _tsize_state; u16 len; bool is_linear; bool csum_unnecessary; }; struct bpf_iter__udp { union { struct bpf_iter_meta *meta; }; union { struct udp_sock *udp_sk; }; uid_t uid; long: 0; int bucket; }; struct udp_iter_state { struct seq_net_private p; int bucket; }; struct bpf_udp_iter_state { struct udp_iter_state state; unsigned int cur_sk; unsigned int end_sk; unsigned int max_sk; int offset; struct sock **batch; bool st_bucket_done; }; struct inet_protosw { struct list_head list; unsigned short type; unsigned short protocol; struct proto *prot; const struct proto_ops *ops; unsigned char flags; }; typedef struct sk_buff * (*gro_receive_sk_t)(struct sock *, struct list_head *, struct sk_buff *); typedef struct sk_buff 
* (*gro_receive_t)(struct list_head *, struct sk_buff *); typedef struct sock * (*udp_lookup_t)(const struct sk_buff *, __be16, __be16); struct arpreq { struct sockaddr arp_pa; struct sockaddr arp_ha; int arp_flags; struct sockaddr arp_netmask; char arp_dev[16]; }; struct icmp_err { int errno; unsigned int fatal: 1; }; struct icmp_control { enum skb_drop_reason (*handler)(struct sk_buff *); short error; }; enum { XFRM_LOOKUP_ICMP = 1, XFRM_LOOKUP_QUEUE = 2, XFRM_LOOKUP_KEEP_DST_REF = 4, }; struct icmp_bxm { struct sk_buff *skb; int offset; int data_len; struct { struct icmphdr icmph; __be32 times[3]; } data; int head_len; struct ip_options_data replyopts; }; struct icmp_extobj_hdr { __be16 length; __u8 class_num; __u8 class_type; }; struct icmp_ext_hdr { __u8 reserved1: 4; __u8 version: 4; __u8 reserved2; __sum16 checksum; }; struct icmp_ext_echo_ctype3_hdr { __be16 afi; __u8 addrlen; __u8 reserved; }; struct icmp_ext_echo_iio { struct icmp_extobj_hdr extobj_hdr; union { char name[16]; __be32 ifindex; struct { struct icmp_ext_echo_ctype3_hdr ctype3_hdr; union { __be32 ipv4_addr; struct in6_addr ipv6_addr; } ip_addr; } addr; } ident; }; struct devinet_sysctl_table { struct ctl_table_header *sysctl_header; struct ctl_table devinet_vars[34]; }; enum { IFA_UNSPEC = 0, IFA_ADDRESS = 1, IFA_LOCAL = 2, IFA_LABEL = 3, IFA_BROADCAST = 4, IFA_ANYCAST = 5, IFA_CACHEINFO = 6, IFA_MULTICAST = 7, IFA_FLAGS = 8, IFA_RT_PRIORITY = 9, IFA_TARGET_NETNSID = 10, IFA_PROTO = 11, __IFA_MAX = 12, }; enum { NETCONFA_UNSPEC = 0, NETCONFA_IFINDEX = 1, NETCONFA_FORWARDING = 2, NETCONFA_RP_FILTER = 3, NETCONFA_MC_FORWARDING = 4, NETCONFA_PROXY_NEIGH = 5, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN = 6, NETCONFA_INPUT = 7, NETCONFA_BC_FORWARDING = 8, __NETCONFA_MAX = 9, }; enum { IFLA_INET_UNSPEC = 0, IFLA_INET_CONF = 1, __IFLA_INET_MAX = 2, }; struct ifaddrmsg { __u8 ifa_family; __u8 ifa_prefixlen; __u8 ifa_flags; __u8 ifa_scope; __u32 ifa_index; }; struct ifa_cacheinfo { __u32 ifa_prefered; __u32 ifa_valid; __u32 cstamp; __u32 tstamp; }; struct inet_fill_args { u32 portid; u32 seq; int event; unsigned int flags; int netnsid; int ifindex; }; struct netconfmsg { __u8 ncm_family; }; struct in_validator_info { __be32 ivi_addr; struct in_device *ivi_dev; struct netlink_ext_ack *extack; }; struct rtentry { unsigned long rt_pad1; struct sockaddr rt_dst; struct sockaddr rt_gateway; struct sockaddr rt_genmask; unsigned short rt_flags; short rt_pad2; unsigned long rt_pad3; void *rt_pad4; short rt_metric; char __attribute__((btf_type_tag("user"))) *rt_dev; unsigned long rt_mtu; unsigned long rt_window; unsigned short rt_irtt; }; struct compat_rtentry { u32 rt_pad1; struct sockaddr rt_dst; struct sockaddr rt_gateway; struct sockaddr rt_genmask; unsigned short rt_flags; short rt_pad2; u32 rt_pad3; unsigned char rt_tos; unsigned char rt_class; short rt_pad4; short rt_metric; compat_uptr_t rt_dev; u32 rt_mtu; u32 rt_window; unsigned short rt_irtt; }; struct igmphdr { __u8 type; __u8 code; __sum16 csum; __be32 group; }; struct igmpv3_query { __u8 type; __u8 code; __sum16 csum; __be32 group; __u8 qrv: 3; __u8 suppress: 1; __u8 resv: 4; __u8 qqic; __be16 nsrcs; __be32 srcs[0]; }; struct igmpv3_grec { __u8 grec_type; __u8 grec_auxwords; __be16 grec_nsrcs; __be32 grec_mca; __be32 grec_src[0]; }; struct igmpv3_report { __u8 type; __u8 resv1; __sum16 csum; __be16 resv2; __be16 ngrec; struct igmpv3_grec grec[0]; }; struct igmp_mc_iter_state { struct seq_net_private p; struct net_device *dev; struct in_device *in_dev; }; struct 
igmp_mcf_iter_state { struct seq_net_private p; struct net_device *dev; struct in_device *idev; struct ip_mc_list *im; }; struct nl_info { struct nlmsghdr *nlh; struct net *nl_net; u32 portid; u8 skip_notify: 1; u8 skip_notify_kernel: 1; }; struct rtnexthop; struct fib_config { u8 fc_dst_len; dscp_t fc_dscp; u8 fc_protocol; u8 fc_scope; u8 fc_type; u8 fc_gw_family; u32 fc_table; __be32 fc_dst; union { __be32 fc_gw4; struct in6_addr fc_gw6; }; int fc_oif; u32 fc_flags; u32 fc_priority; __be32 fc_prefsrc; u32 fc_nh_id; struct nlattr *fc_mx; struct rtnexthop *fc_mp; int fc_mx_len; int fc_mp_len; u32 fc_flow; u32 fc_nlflags; struct nl_info fc_nlinfo; struct nlattr *fc_encap; u16 fc_encap_type; }; struct rtnexthop { unsigned short rtnh_len; unsigned char rtnh_flags; unsigned char rtnh_hops; int rtnh_ifindex; }; struct fib_dump_filter { u32 table_id; bool filter_set; bool dump_routes; bool dump_exceptions; unsigned char protocol; unsigned char rt_type; unsigned int flags; struct net_device *dev; }; struct fib_result_nl { __be32 fl_addr; u32 fl_mark; unsigned char fl_tos; unsigned char fl_scope; unsigned char tb_id_in; unsigned char tb_id; unsigned char prefixlen; unsigned char nh_sel; unsigned char type; unsigned char scope; int err; }; struct fib_prop { int error; u8 scope; }; struct fib6_config { u32 fc_table; u32 fc_metric; int fc_dst_len; int fc_src_len; int fc_ifindex; u32 fc_flags; u32 fc_protocol; u16 fc_type; u16 fc_delete_all_nh: 1; u16 fc_ignore_dev_down: 1; u16 __unused: 14; u32 fc_nh_id; struct in6_addr fc_dst; struct in6_addr fc_src; struct in6_addr fc_prefsrc; struct in6_addr fc_gateway; unsigned long fc_expires; struct nlattr *fc_mx; int fc_mx_len; int fc_mp_len; struct nlattr *fc_mp; struct nl_info fc_nlinfo; struct nlattr *fc_encap; u16 fc_encap_type; bool fc_is_fdb; u64 android_kabi_reserved1; }; struct fib_nh_notifier_info { struct fib_notifier_info info; struct fib_nh *fib_nh; }; typedef unsigned int t_key; struct key_vector { t_key key; unsigned char pos; unsigned char bits; unsigned char slen; union { struct hlist_head leaf; struct { struct {} __empty_tnode; struct key_vector __attribute__((btf_type_tag("rcu"))) *tnode[0]; }; }; }; struct trie { struct key_vector kv[1]; }; struct tnode { struct callback_head rcu; t_key empty_children; t_key full_children; struct key_vector __attribute__((btf_type_tag("rcu"))) *parent; struct key_vector kv[1]; }; struct fib_entry_notifier_info { struct fib_notifier_info info; u32 dst; int dst_len; struct fib_info *fi; dscp_t dscp; u8 type; u32 tb_id; }; struct trie_stat { unsigned int totdepth; unsigned int maxdepth; unsigned int tnodes; unsigned int leaves; unsigned int nullpointers; unsigned int prefixes; unsigned int nodesizes[32]; }; struct fib_trie_iter { struct seq_net_private p; struct fib_table *tb; struct key_vector *tnode; unsigned int index; unsigned int depth; }; struct fib_route_iter { struct seq_net_private p; struct fib_table *main_tb; struct key_vector *tnode; loff_t pos; t_key key; }; struct ipfrag_skb_cb { union { struct inet_skb_parm h4; struct inet6_skb_parm h6; }; struct sk_buff *next_frag; int frag_run_len; int ip_defrag_offset; }; struct ping_table { struct hlist_head hash[64]; spinlock_t lock; }; struct pingv6_ops { int (*ipv6_recv_error)(struct sock *, struct msghdr *, int, int *); void (*ip6_datagram_recv_common_ctl)(struct sock *, struct msghdr *, struct sk_buff *); void (*ip6_datagram_recv_specific_ctl)(struct sock *, struct msghdr *, struct sk_buff *); int (*icmpv6_err_convert)(u8, u8, int *); void 
(*ipv6_icmp_error)(struct sock *, struct sk_buff *, int, __be16, u32, u8 *); int (*ipv6_chk_addr)(struct net *, const struct in6_addr *, const struct net_device *, int); }; struct ping_iter_state { struct seq_net_private p; int bucket; sa_family_t family; }; struct pingfakehdr { struct icmphdr icmph; struct msghdr *msg; sa_family_t family; __wsum wcheck; }; struct ip6_tnl_encap_ops { size_t (*encap_hlen)(struct ip_tunnel_encap *); int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi6 *); int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); }; struct lwtunnel_encap_ops { int (*build_state)(struct net *, struct nlattr *, unsigned int, const void *, struct lwtunnel_state **, struct netlink_ext_ack *); void (*destroy_state)(struct lwtunnel_state *); int (*output)(struct net *, struct sock *, struct sk_buff *); int (*input)(struct sk_buff *); int (*fill_encap)(struct sk_buff *, struct lwtunnel_state *); int (*get_encap_size)(struct lwtunnel_state *); int (*cmp_encap)(struct lwtunnel_state *, struct lwtunnel_state *); int (*xmit)(struct sk_buff *); struct module *owner; }; enum { IFLA_IPTUN_UNSPEC = 0, IFLA_IPTUN_LINK = 1, IFLA_IPTUN_LOCAL = 2, IFLA_IPTUN_REMOTE = 3, IFLA_IPTUN_TTL = 4, IFLA_IPTUN_TOS = 5, IFLA_IPTUN_ENCAP_LIMIT = 6, IFLA_IPTUN_FLOWINFO = 7, IFLA_IPTUN_FLAGS = 8, IFLA_IPTUN_PROTO = 9, IFLA_IPTUN_PMTUDISC = 10, IFLA_IPTUN_6RD_PREFIX = 11, IFLA_IPTUN_6RD_RELAY_PREFIX = 12, IFLA_IPTUN_6RD_PREFIXLEN = 13, IFLA_IPTUN_6RD_RELAY_PREFIXLEN = 14, IFLA_IPTUN_ENCAP_TYPE = 15, IFLA_IPTUN_ENCAP_FLAGS = 16, IFLA_IPTUN_ENCAP_SPORT = 17, IFLA_IPTUN_ENCAP_DPORT = 18, IFLA_IPTUN_COLLECT_METADATA = 19, IFLA_IPTUN_FWMARK = 20, __IFLA_IPTUN_MAX = 21, }; enum lwtunnel_ip_t { LWTUNNEL_IP_UNSPEC = 0, LWTUNNEL_IP_ID = 1, LWTUNNEL_IP_DST = 2, LWTUNNEL_IP_SRC = 3, LWTUNNEL_IP_TTL = 4, LWTUNNEL_IP_TOS = 5, LWTUNNEL_IP_FLAGS = 6, LWTUNNEL_IP_PAD = 7, LWTUNNEL_IP_OPTS = 8, __LWTUNNEL_IP_MAX = 9, }; enum { LWTUNNEL_IP_OPTS_UNSPEC = 0, LWTUNNEL_IP_OPTS_GENEVE = 1, LWTUNNEL_IP_OPTS_VXLAN = 2, LWTUNNEL_IP_OPTS_ERSPAN = 3, __LWTUNNEL_IP_OPTS_MAX = 4, }; enum { LWTUNNEL_IP_OPT_GENEVE_UNSPEC = 0, LWTUNNEL_IP_OPT_GENEVE_CLASS = 1, LWTUNNEL_IP_OPT_GENEVE_TYPE = 2, LWTUNNEL_IP_OPT_GENEVE_DATA = 3, __LWTUNNEL_IP_OPT_GENEVE_MAX = 4, }; enum { LWTUNNEL_IP_OPT_VXLAN_UNSPEC = 0, LWTUNNEL_IP_OPT_VXLAN_GBP = 1, __LWTUNNEL_IP_OPT_VXLAN_MAX = 2, }; enum { LWTUNNEL_IP_OPT_ERSPAN_UNSPEC = 0, LWTUNNEL_IP_OPT_ERSPAN_VER = 1, LWTUNNEL_IP_OPT_ERSPAN_INDEX = 2, LWTUNNEL_IP_OPT_ERSPAN_DIR = 3, LWTUNNEL_IP_OPT_ERSPAN_HWID = 4, __LWTUNNEL_IP_OPT_ERSPAN_MAX = 5, }; enum lwtunnel_ip6_t { LWTUNNEL_IP6_UNSPEC = 0, LWTUNNEL_IP6_ID = 1, LWTUNNEL_IP6_DST = 2, LWTUNNEL_IP6_SRC = 3, LWTUNNEL_IP6_HOPLIMIT = 4, LWTUNNEL_IP6_TC = 5, LWTUNNEL_IP6_FLAGS = 6, LWTUNNEL_IP6_PAD = 7, LWTUNNEL_IP6_OPTS = 8, __LWTUNNEL_IP6_MAX = 9, }; enum nexthop_event_type { NEXTHOP_EVENT_DEL = 0, NEXTHOP_EVENT_REPLACE = 1, NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE = 2, NEXTHOP_EVENT_BUCKET_REPLACE = 3, }; enum nh_notifier_info_type { NH_NOTIFIER_INFO_TYPE_SINGLE = 0, NH_NOTIFIER_INFO_TYPE_GRP = 1, NH_NOTIFIER_INFO_TYPE_RES_TABLE = 2, NH_NOTIFIER_INFO_TYPE_RES_BUCKET = 3, }; enum { NHA_UNSPEC = 0, NHA_ID = 1, NHA_GROUP = 2, NHA_GROUP_TYPE = 3, NHA_BLACKHOLE = 4, NHA_OIF = 5, NHA_GATEWAY = 6, NHA_ENCAP_TYPE = 7, NHA_ENCAP = 8, NHA_GROUPS = 9, NHA_MASTER = 10, NHA_FDB = 11, NHA_RES_GROUP = 12, NHA_RES_BUCKET = 13, __NHA_MAX = 14, }; enum { NEXTHOP_GRP_TYPE_MPATH = 0, NEXTHOP_GRP_TYPE_RES = 1, __NEXTHOP_GRP_TYPE_MAX = 
2, }; enum { NHA_RES_GROUP_UNSPEC = 0, NHA_RES_GROUP_PAD = 0, NHA_RES_GROUP_BUCKETS = 1, NHA_RES_GROUP_IDLE_TIMER = 2, NHA_RES_GROUP_UNBALANCED_TIMER = 3, NHA_RES_GROUP_UNBALANCED_TIME = 4, __NHA_RES_GROUP_MAX = 5, }; enum { NHA_RES_BUCKET_UNSPEC = 0, NHA_RES_BUCKET_PAD = 0, NHA_RES_BUCKET_INDEX = 1, NHA_RES_BUCKET_IDLE_TIME = 2, NHA_RES_BUCKET_NH_ID = 3, __NHA_RES_BUCKET_MAX = 4, }; struct nh_notifier_single_info; struct nh_notifier_grp_info; struct nh_notifier_res_table_info; struct nh_notifier_res_bucket_info; struct nh_notifier_info { struct net *net; struct netlink_ext_ack *extack; u32 id; enum nh_notifier_info_type type; union { struct nh_notifier_single_info *nh; struct nh_notifier_grp_info *nh_grp; struct nh_notifier_res_table_info *nh_res_table; struct nh_notifier_res_bucket_info *nh_res_bucket; }; }; struct nh_notifier_single_info { struct net_device *dev; u8 gw_family; union { __be32 ipv4; struct in6_addr ipv6; }; u8 is_reject: 1; u8 is_fdb: 1; u8 has_encap: 1; }; struct nh_notifier_grp_entry_info { u8 weight; u32 id; struct nh_notifier_single_info nh; }; struct nh_notifier_grp_info { u16 num_nh; bool is_fdb; struct nh_notifier_grp_entry_info nh_entries[0]; }; struct nh_notifier_res_table_info { u16 num_nh_buckets; struct nh_notifier_single_info nhs[0]; }; struct nh_notifier_res_bucket_info { u16 bucket_index; unsigned int idle_timer_ms; bool force; struct nh_notifier_single_info old_nh; struct nh_notifier_single_info new_nh; }; struct nh_config { u32 nh_id; u8 nh_family; u8 nh_protocol; u8 nh_blackhole; u8 nh_fdb; u32 nh_flags; int nh_ifindex; struct net_device *dev; union { __be32 ipv4; struct in6_addr ipv6; } gw; struct nlattr *nh_grp; u16 nh_grp_type; u16 nh_grp_res_num_buckets; unsigned long nh_grp_res_idle_timer; unsigned long nh_grp_res_unbalanced_timer; bool nh_grp_res_has_num_buckets; bool nh_grp_res_has_idle_timer; bool nh_grp_res_has_unbalanced_timer; struct nlattr *nh_encap; u16 nh_encap_type; u32 nlflags; struct nl_info nlinfo; }; struct nhmsg { unsigned char nh_family; unsigned char nh_scope; unsigned char nh_protocol; unsigned char resvd; unsigned int nh_flags; }; struct nexthop_grp { __u32 id; __u8 weight; __u8 resvd1; __u16 resvd2; }; struct nh_dump_filter { u32 nh_id; int dev_idx; int master_idx; bool group_filter; bool fdb_filter; u32 res_bucket_nh_id; }; struct rtm_dump_nh_ctx { u32 idx; }; struct rtm_dump_res_bucket_ctx { struct rtm_dump_nh_ctx nh; u16 bucket_index; }; struct rtm_dump_nexthop_bucket_data { struct rtm_dump_res_bucket_ctx *ctx; struct nh_dump_filter filter; }; struct udp_tunnel_nic_ops { void (*get_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); void (*set_port_priv)(struct net_device *, unsigned int, unsigned int, u8); void (*add_port)(struct net_device *, struct udp_tunnel_info *); void (*del_port)(struct net_device *, struct udp_tunnel_info *); void (*reset_ntf)(struct net_device *); size_t (*dump_size)(struct net_device *, unsigned int); int (*dump_write)(struct net_device *, unsigned int, struct sk_buff *); }; enum tunnel_encap_types { TUNNEL_ENCAP_NONE = 0, TUNNEL_ENCAP_FOU = 1, TUNNEL_ENCAP_GUE = 2, TUNNEL_ENCAP_MPLS = 3, }; struct ip_tunnel_prl_entry; struct ip_tunnel { struct ip_tunnel __attribute__((btf_type_tag("rcu"))) *next; struct hlist_node hash_node; struct net_device *dev; netdevice_tracker dev_tracker; struct net *net; unsigned long err_time; int err_count; u32 i_seqno; atomic_t o_seqno; int tun_hlen; u32 index; u8 erspan_ver; u8 dir; u16 hwid; struct dst_cache dst_cache; struct 
ip_tunnel_parm parms; int mlink; int encap_hlen; int hlen; struct ip_tunnel_encap encap; struct ip_tunnel_prl_entry __attribute__((btf_type_tag("rcu"))) *prl; unsigned int prl_count; unsigned int ip_tnl_net_id; struct gro_cells gro_cells; __u32 fwmark; bool collect_md; bool ignore_df; }; struct ip_tunnel_prl_entry { struct ip_tunnel_prl_entry __attribute__((btf_type_tag("rcu"))) *next; __be32 addr; u16 flags; struct callback_head callback_head; }; struct ip_tunnel_net { struct net_device *fb_tunnel_dev; struct rtnl_link_ops *rtnl_link_ops; struct hlist_head tunnels[128]; struct ip_tunnel __attribute__((btf_type_tag("rcu"))) *collect_md_tun; int type; }; struct tnl_ptk_info { __be16 flags; __be16 proto; __be32 key; __be32 seq; int hdr_len; }; struct snmp_mib { const char *name; int entry; }; struct fib4_rule { struct fib_rule common; u8 dst_len; u8 src_len; dscp_t dscp; __be32 src; __be32 srcmask; __be32 dst; __be32 dstmask; }; enum { MFC_STATIC = 1, MFC_OFFLOAD = 2, }; struct mr_mfc { struct rhlist_head mnode; unsigned short mfc_parent; int mfc_flags; union { struct { unsigned long expires; struct sk_buff_head unresolved; } unres; struct { unsigned long last_assert; int minvif; int maxvif; unsigned long bytes; unsigned long pkt; unsigned long wrong_if; unsigned long lastuse; unsigned char ttls[32]; refcount_t refcount; } res; } mfc_un; struct list_head list; struct callback_head rcu; void (*free)(struct callback_head *); }; struct mr_table_ops { const struct rhashtable_params *rht_params; void *cmparg_any; }; struct vif_device { struct net_device __attribute__((btf_type_tag("rcu"))) *dev; netdevice_tracker dev_tracker; unsigned long bytes_in; unsigned long bytes_out; unsigned long pkt_in; unsigned long pkt_out; unsigned long rate_limit; unsigned char threshold; unsigned short flags; int link; struct netdev_phys_item_id dev_parent_id; __be32 local; __be32 remote; }; struct mr_table { struct list_head list; possible_net_t net; struct mr_table_ops ops; u32 id; struct sock __attribute__((btf_type_tag("rcu"))) *mroute_sk; struct timer_list ipmr_expire_timer; struct list_head mfc_unres_queue; struct vif_device vif_table[32]; struct rhltable mfc_hash; struct list_head mfc_cache_list; int maxvif; atomic_t cache_resolve_queue_len; bool mroute_do_assert; bool mroute_do_pim; bool mroute_do_wrvifwhole; int mroute_reg_vif_num; }; struct mr_vif_iter { struct seq_net_private p; struct mr_table *mrt; int ct; }; struct mr_mfc_iter { struct seq_net_private p; struct mr_table *mrt; struct list_head *cache; spinlock_t *lock; }; struct vif_entry_notifier_info { struct fib_notifier_info info; struct net_device *dev; unsigned short vif_index; unsigned short vif_flags; u32 tb_id; }; struct mfc_entry_notifier_info { struct fib_notifier_info info; struct mr_mfc *mfc; u32 tb_id; }; struct rta_mfc_stats { __u64 mfcs_packets; __u64 mfcs_bytes; __u64 mfcs_wrong_if; }; struct xfrm_tunnel { int (*handler)(struct sk_buff *); int (*cb_handler)(struct sk_buff *, int); int (*err_handler)(struct sk_buff *, u32); struct xfrm_tunnel __attribute__((btf_type_tag("rcu"))) *next; int priority; }; struct gre_protocol { int (*handler)(struct sk_buff *); void (*err_handler)(struct sk_buff *, u32); }; struct erspan_base_hdr { __u8 vlan_upper: 4; __u8 ver: 4; __u8 vlan: 8; __u8 session_id_upper: 2; __u8 t: 1; __u8 en: 2; __u8 cos: 3; __u8 session_id: 8; }; enum { IFLA_GRE_UNSPEC = 0, IFLA_GRE_LINK = 1, IFLA_GRE_IFLAGS = 2, IFLA_GRE_OFLAGS = 3, IFLA_GRE_IKEY = 4, IFLA_GRE_OKEY = 5, IFLA_GRE_LOCAL = 6, IFLA_GRE_REMOTE = 7, IFLA_GRE_TTL = 
8, IFLA_GRE_TOS = 9, IFLA_GRE_PMTUDISC = 10, IFLA_GRE_ENCAP_LIMIT = 11, IFLA_GRE_FLOWINFO = 12, IFLA_GRE_FLAGS = 13, IFLA_GRE_ENCAP_TYPE = 14, IFLA_GRE_ENCAP_FLAGS = 15, IFLA_GRE_ENCAP_SPORT = 16, IFLA_GRE_ENCAP_DPORT = 17, IFLA_GRE_COLLECT_METADATA = 18, IFLA_GRE_IGNORE_DF = 19, IFLA_GRE_FWMARK = 20, IFLA_GRE_ERSPAN_INDEX = 21, IFLA_GRE_ERSPAN_VER = 22, IFLA_GRE_ERSPAN_DIR = 23, IFLA_GRE_ERSPAN_HWID = 24, __IFLA_GRE_MAX = 25, }; enum erspan_encap_type { ERSPAN_ENCAP_NOVLAN = 0, ERSPAN_ENCAP_ISL = 1, ERSPAN_ENCAP_8021Q = 2, ERSPAN_ENCAP_INFRAME = 3, }; enum erspan_bso { BSO_NOERROR = 0, BSO_SHORT = 1, BSO_OVERSIZED = 2, BSO_BAD = 3, }; struct qtag_prefix { __be16 eth_type; __be16 tci; }; struct erspan_md2 { __be32 timestamp; __be16 sgt; __u8 hwid_upper: 2; __u8 ft: 5; __u8 p: 1; __u8 o: 1; __u8 gra: 2; __u8 dir: 1; __u8 hwid: 4; }; struct erspan_metadata { int version; union { __be32 index; struct erspan_md2 md2; } u; }; struct udp_tunnel_nic_table_entry; struct udp_tunnel_nic { struct work_struct work; struct net_device *dev; u8 need_sync: 1; u8 need_replay: 1; u8 work_pending: 1; unsigned int n_tables; unsigned long missed; struct udp_tunnel_nic_table_entry **entries; }; struct udp_tunnel_nic_table_entry { __be16 port; u8 type; u8 flags; u16 use_cnt; u8 hw_priv; }; enum udp_parsable_tunnel_type { UDP_TUNNEL_TYPE_VXLAN = 1, UDP_TUNNEL_TYPE_GENEVE = 2, UDP_TUNNEL_TYPE_VXLAN_GPE = 4, }; enum udp_tunnel_nic_table_entry_flags { UDP_TUNNEL_NIC_ENTRY_ADD = 1, UDP_TUNNEL_NIC_ENTRY_DEL = 2, UDP_TUNNEL_NIC_ENTRY_OP_FAIL = 4, UDP_TUNNEL_NIC_ENTRY_FROZEN = 8, }; struct udp_tunnel_nic_shared_node { struct net_device *dev; struct list_head list; }; struct xfrm4_protocol { int (*handler)(struct sk_buff *); int (*input_handler)(struct sk_buff *, int, __be32, int); int (*cb_handler)(struct sk_buff *, int); int (*err_handler)(struct sk_buff *, u32); struct xfrm4_protocol __attribute__((btf_type_tag("rcu"))) *next; int priority; }; enum { IFLA_VTI_UNSPEC = 0, IFLA_VTI_LINK = 1, IFLA_VTI_IKEY = 2, IFLA_VTI_OKEY = 3, IFLA_VTI_LOCAL = 4, IFLA_VTI_REMOTE = 5, IFLA_VTI_FWMARK = 6, __IFLA_VTI_MAX = 7, }; enum { LINUX_MIB_XFRMNUM = 0, LINUX_MIB_XFRMINERROR = 1, LINUX_MIB_XFRMINBUFFERERROR = 2, LINUX_MIB_XFRMINHDRERROR = 3, LINUX_MIB_XFRMINNOSTATES = 4, LINUX_MIB_XFRMINSTATEPROTOERROR = 5, LINUX_MIB_XFRMINSTATEMODEERROR = 6, LINUX_MIB_XFRMINSTATESEQERROR = 7, LINUX_MIB_XFRMINSTATEEXPIRED = 8, LINUX_MIB_XFRMINSTATEMISMATCH = 9, LINUX_MIB_XFRMINSTATEINVALID = 10, LINUX_MIB_XFRMINTMPLMISMATCH = 11, LINUX_MIB_XFRMINNOPOLS = 12, LINUX_MIB_XFRMINPOLBLOCK = 13, LINUX_MIB_XFRMINPOLERROR = 14, LINUX_MIB_XFRMOUTERROR = 15, LINUX_MIB_XFRMOUTBUNDLEGENERROR = 16, LINUX_MIB_XFRMOUTBUNDLECHECKERROR = 17, LINUX_MIB_XFRMOUTNOSTATES = 18, LINUX_MIB_XFRMOUTSTATEPROTOERROR = 19, LINUX_MIB_XFRMOUTSTATEMODEERROR = 20, LINUX_MIB_XFRMOUTSTATESEQERROR = 21, LINUX_MIB_XFRMOUTSTATEEXPIRED = 22, LINUX_MIB_XFRMOUTPOLBLOCK = 23, LINUX_MIB_XFRMOUTPOLDEAD = 24, LINUX_MIB_XFRMOUTPOLERROR = 25, LINUX_MIB_XFRMFWDHDRERROR = 26, LINUX_MIB_XFRMOUTSTATEINVALID = 27, LINUX_MIB_XFRMACQUIREERROR = 28, __LINUX_MIB_XFRMMAX = 29, }; struct ip6_tnl; struct xfrm_tunnel_skb_cb { union { struct inet_skb_parm h4; struct inet6_skb_parm h6; } header; union { struct ip_tunnel *ip4; struct ip6_tnl *ip6; } tunnel; }; struct xfrm_spi_skb_cb { struct xfrm_tunnel_skb_cb header; unsigned int daddroff; unsigned int family; __be32 seq; }; struct xfrm_mode_skb_cb { struct xfrm_tunnel_skb_cb header; __be16 id; __be16 frag_off; u8 ihl; u8 tos; u8 ttl; u8 protocol; u8 optlen; 
u8 flow_lbl[3]; }; struct ip_comp_hdr { __u8 nexthdr; __u8 flags; __be16 cpi; }; struct xfrm_skb_cb { struct xfrm_tunnel_skb_cb header; union { struct { __u32 low; __u32 hi; } output; struct { __be32 low; __be32 hi; } input; } seq; }; struct esp_skb_cb { struct xfrm_skb_cb xfrm; void *tmp; }; struct esp_info { struct ip_esp_hdr *esph; __be64 seqno; int tfclen; int tailen; int plen; int clen; int len; int nfrags; __u8 proto; bool inplace; }; struct esp_output_extra { __be32 seqhi; u32 esphoff; }; struct xfrm_algo_aead_info { char *geniv; u16 icv_truncbits; }; struct xfrm_algo_auth_info { u16 icv_truncbits; u16 icv_fullbits; }; struct xfrm_algo_encr_info { char *geniv; u16 blockbits; u16 defkeybits; }; struct xfrm_algo_comp_info { u16 threshold; }; struct sadb_alg { __u8 sadb_alg_id; __u8 sadb_alg_ivlen; __u16 sadb_alg_minbits; __u16 sadb_alg_maxbits; __u16 sadb_alg_reserved; }; struct xfrm_algo_desc { char *name; char *compat; u8 available: 1; u8 pfkey_supported: 1; union { struct xfrm_algo_aead_info aead; struct xfrm_algo_auth_info auth; struct xfrm_algo_encr_info encr; struct xfrm_algo_comp_info comp; } uinfo; struct sadb_alg desc; }; enum nf_ip_trace_comments { NF_IP_TRACE_COMMENT_RULE = 0, NF_IP_TRACE_COMMENT_RETURN = 1, NF_IP_TRACE_COMMENT_POLICY = 2, }; struct ipt_error { struct ipt_entry entry; struct xt_error_target target; }; struct ipt_replace { char name[32]; unsigned int valid_hooks; unsigned int num_entries; unsigned int size; unsigned int hook_entry[5]; unsigned int underflow[5]; unsigned int num_counters; struct xt_counters __attribute__((btf_type_tag("user"))) *counters; struct ipt_entry entries[0]; }; struct ipt_standard { struct ipt_entry entry; struct xt_standard_target target; }; struct ipt_getinfo { char name[32]; unsigned int valid_hooks; unsigned int hook_entry[5]; unsigned int underflow[5]; unsigned int num_entries; unsigned int size; }; struct ipt_get_entries { char name[32]; unsigned int size; struct ipt_entry entrytable[0]; }; struct xt_get_revision { char name[29]; __u8 revision; }; struct iptable_nat_pernet { struct nf_hook_ops *nf_nat_ops; }; enum ipt_reject_with { IPT_ICMP_NET_UNREACHABLE = 0, IPT_ICMP_HOST_UNREACHABLE = 1, IPT_ICMP_PROT_UNREACHABLE = 2, IPT_ICMP_PORT_UNREACHABLE = 3, IPT_ICMP_ECHOREPLY = 4, IPT_ICMP_NET_PROHIBITED = 5, IPT_ICMP_HOST_PROHIBITED = 6, IPT_TCP_RESET = 7, IPT_ICMP_ADMIN_PROHIBITED = 8, }; struct ipt_reject_info { enum ipt_reject_with with; }; struct arpt_devaddr_info { char addr[16]; char mask[16]; }; struct arpt_arp { struct in_addr src; struct in_addr tgt; struct in_addr smsk; struct in_addr tmsk; __u8 arhln; __u8 arhln_mask; struct arpt_devaddr_info src_devaddr; struct arpt_devaddr_info tgt_devaddr; __be16 arpop; __be16 arpop_mask; __be16 arhrd; __be16 arhrd_mask; __be16 arpro; __be16 arpro_mask; char iniface[16]; char outiface[16]; unsigned char iniface_mask[16]; unsigned char outiface_mask[16]; __u8 flags; __u16 invflags; }; struct arpt_entry { struct arpt_arp arp; __u16 target_offset; __u16 next_offset; unsigned int comefrom; struct xt_counters counters; unsigned char elems[0]; }; struct arpt_error { struct arpt_entry entry; struct xt_error_target target; }; struct arpt_replace { char name[32]; unsigned int valid_hooks; unsigned int num_entries; unsigned int size; unsigned int hook_entry[3]; unsigned int underflow[3]; unsigned int num_counters; struct xt_counters __attribute__((btf_type_tag("user"))) *counters; struct arpt_entry entries[0]; }; struct arpt_standard { struct arpt_entry entry; struct xt_standard_target target; 
}; struct arpt_getinfo { char name[32]; unsigned int valid_hooks; unsigned int hook_entry[3]; unsigned int underflow[3]; unsigned int num_entries; unsigned int size; }; struct arpt_get_entries { char name[32]; unsigned int size; struct arpt_entry entrytable[0]; }; struct arpt_mangle { char src_devaddr[16]; char tgt_devaddr[16]; union { struct in_addr src_ip; } u_s; union { struct in_addr tgt_ip; } u_t; __u8 flags; int target; }; struct inet_diag_req_v2; struct inet_diag_msg; struct inet_diag_handler { void (*dump)(struct sk_buff *, struct netlink_callback *, const struct inet_diag_req_v2 *); int (*dump_one)(struct netlink_callback *, const struct inet_diag_req_v2 *); void (*idiag_get_info)(struct sock *, struct inet_diag_msg *, void *); int (*idiag_get_aux)(struct sock *, bool, struct sk_buff *); size_t (*idiag_get_aux_size)(struct sock *, bool); int (*destroy)(struct sk_buff *, const struct inet_diag_req_v2 *); __u16 idiag_type; __u16 idiag_info_size; }; struct inet_diag_sockid { __be16 idiag_sport; __be16 idiag_dport; __be32 idiag_src[4]; __be32 idiag_dst[4]; __u32 idiag_if; __u32 idiag_cookie[2]; }; struct inet_diag_req_v2 { __u8 sdiag_family; __u8 sdiag_protocol; __u8 idiag_ext; __u8 pad; __u32 idiag_states; struct inet_diag_sockid id; }; struct inet_diag_msg { __u8 idiag_family; __u8 idiag_state; __u8 idiag_timer; __u8 idiag_retrans; struct inet_diag_sockid id; __u32 idiag_expires; __u32 idiag_rqueue; __u32 idiag_wqueue; __u32 idiag_uid; __u32 idiag_inode; }; enum { INET_DIAG_NONE = 0, INET_DIAG_MEMINFO = 1, INET_DIAG_INFO = 2, INET_DIAG_VEGASINFO = 3, INET_DIAG_CONG = 4, INET_DIAG_TOS = 5, INET_DIAG_TCLASS = 6, INET_DIAG_SKMEMINFO = 7, INET_DIAG_SHUTDOWN = 8, INET_DIAG_DCTCPINFO = 9, INET_DIAG_PROTOCOL = 10, INET_DIAG_SKV6ONLY = 11, INET_DIAG_LOCALS = 12, INET_DIAG_PEERS = 13, INET_DIAG_PAD = 14, INET_DIAG_MARK = 15, INET_DIAG_BBRINFO = 16, INET_DIAG_CLASS_ID = 17, INET_DIAG_MD5SIG = 18, INET_DIAG_ULP_INFO = 19, INET_DIAG_SK_BPF_STORAGES = 20, INET_DIAG_CGROUP_ID = 21, INET_DIAG_SOCKOPT = 22, __INET_DIAG_MAX = 23, }; enum { INET_DIAG_REQ_NONE = 0, INET_DIAG_REQ_BYTECODE = 1, INET_DIAG_REQ_SK_BPF_STORAGES = 2, INET_DIAG_REQ_PROTOCOL = 3, __INET_DIAG_REQ_MAX = 4, }; enum { INET_DIAG_BC_NOP = 0, INET_DIAG_BC_JMP = 1, INET_DIAG_BC_S_GE = 2, INET_DIAG_BC_S_LE = 3, INET_DIAG_BC_D_GE = 4, INET_DIAG_BC_D_LE = 5, INET_DIAG_BC_AUTO = 6, INET_DIAG_BC_S_COND = 7, INET_DIAG_BC_D_COND = 8, INET_DIAG_BC_DEV_COND = 9, INET_DIAG_BC_MARK_COND = 10, INET_DIAG_BC_S_EQ = 11, INET_DIAG_BC_D_EQ = 12, INET_DIAG_BC_CGROUP_COND = 13, }; struct inet_diag_hostcond { __u8 family; __u8 prefix_len; int port; __be32 addr[0]; }; struct inet_diag_markcond { __u32 mark; __u32 mask; }; struct inet_diag_dump_data { struct nlattr *req_nlas[4]; struct bpf_sk_storage_diag *bpf_stg_diag; }; struct inet_diag_entry { const __be32 *saddr; const __be32 *daddr; u16 sport; u16 dport; u16 family; u16 userlocks; u32 ifindex; u32 mark; u64 cgroup_id; }; struct inet_diag_bc_op { unsigned char code; unsigned char yes; unsigned short no; }; struct inet_diag_req { __u8 idiag_family; __u8 idiag_src_len; __u8 idiag_dst_len; __u8 idiag_ext; struct inet_diag_sockid id; __u32 idiag_states; __u32 idiag_dbs; }; struct inet_diag_sockopt { __u8 recverr: 1; __u8 is_icsk: 1; __u8 freebind: 1; __u8 hdrincl: 1; __u8 mc_loop: 1; __u8 transparent: 1; __u8 mc_all: 1; __u8 nodefrag: 1; __u8 bind_address_no_port: 1; __u8 recverr_rfc4884: 1; __u8 defer_connect: 1; __u8 unused: 5; }; struct inet_diag_meminfo { __u32 idiag_rmem; __u32 idiag_wmem; __u32 
idiag_fmem; __u32 idiag_tmem; }; enum { INET_ULP_INFO_UNSPEC = 0, INET_ULP_INFO_NAME = 1, INET_ULP_INFO_TLS = 2, INET_ULP_INFO_MPTCP = 3, __INET_ULP_INFO_MAX = 4, }; struct bictcp { u32 cnt; u32 last_max_cwnd; u32 last_cwnd; u32 last_time; u32 bic_origin_point; u32 bic_K; u32 delay_min; u32 epoch_start; u32 ack_cnt; u32 tcp_cwnd; u16 unused; u8 sample_cnt; u8 found; u32 round_start; u32 end_seq; u32 last_ack; u32 curr_rtt; }; enum { TCP_BPF_IPV4 = 0, TCP_BPF_IPV6 = 1, TCP_BPF_NUM_PROTS = 2, }; enum { TCP_BPF_BASE = 0, TCP_BPF_TX = 1, TCP_BPF_RX = 2, TCP_BPF_TXRX = 3, TCP_BPF_NUM_CFGS = 4, }; struct tx_work { struct delayed_work work; struct sock *sk; }; struct tls_rec; struct tls_sw_context_tx { struct crypto_aead *aead_send; struct crypto_wait async_wait; struct tx_work tx_work; struct tls_rec *open_rec; struct list_head tx_list; atomic_t encrypt_pending; u8 async_capable: 1; unsigned long tx_bitmask; u64 android_kabi_reserved1; }; enum { UDP_BPF_IPV4 = 0, UDP_BPF_IPV6 = 1, UDP_BPF_NUM_PROTS = 2, }; struct xfrm_policy_afinfo { struct dst_ops *dst_ops; struct dst_entry * (*dst_lookup)(struct net *, int, int, const xfrm_address_t *, const xfrm_address_t *, u32); int (*get_saddr)(struct net *, int, xfrm_address_t *, xfrm_address_t *, u32); int (*fill_dst)(struct xfrm_dst *, struct net_device *, const struct flowi *); struct dst_entry * (*blackhole_route)(struct net *, struct dst_entry *); }; struct xfrm_state_afinfo { u8 family; u8 proto; const struct xfrm_type_offload *type_offload_esp; const struct xfrm_type *type_esp; const struct xfrm_type *type_ipip; const struct xfrm_type *type_ipip6; const struct xfrm_type *type_comp; const struct xfrm_type *type_ah; const struct xfrm_type *type_routing; const struct xfrm_type *type_dstopts; int (*output)(struct net *, struct sock *, struct sk_buff *); int (*transport_finish)(struct sk_buff *, int); void (*local_error)(struct sk_buff *, u32); }; struct xfrm_input_afinfo { u8 family; bool is_ipip; int (*callback)(struct sk_buff *, u8, int); }; typedef u64 (*btf_bpf_tcp_send_ack)(struct tcp_sock *, u32); struct xfrm_if_decode_session_result; struct xfrm_if_cb { bool (*decode_session)(struct sk_buff *, unsigned short, struct xfrm_if_decode_session_result *); }; struct xfrm_if_decode_session_result { struct net *net; u32 if_id; }; enum { XFRM_POLICY_TYPE_MAIN = 0, XFRM_POLICY_TYPE_SUB = 1, XFRM_POLICY_TYPE_MAX = 2, XFRM_POLICY_TYPE_ANY = 255, }; enum xfrm_pol_inexact_candidate_type { XFRM_POL_CAND_BOTH = 0, XFRM_POL_CAND_SADDR = 1, XFRM_POL_CAND_DADDR = 2, XFRM_POL_CAND_ANY = 3, XFRM_POL_CAND_MAX = 4, }; enum { XFRM_STATE_VOID = 0, XFRM_STATE_ACQ = 1, XFRM_STATE_VALID = 2, XFRM_STATE_ERROR = 3, XFRM_STATE_EXPIRED = 4, XFRM_STATE_DEAD = 5, }; struct xfrm_pol_inexact_node { struct rb_node node; union { xfrm_address_t addr; struct callback_head rcu; }; u8 prefixlen; struct rb_root root; struct hlist_head hhead; }; struct xfrm_pol_inexact_key { possible_net_t net; u32 if_id; u16 family; u8 dir; u8 type; }; struct xfrm_pol_inexact_bin { struct xfrm_pol_inexact_key k; struct rhash_head head; struct hlist_head hhead; seqcount_spinlock_t count; struct rb_root root_d; struct rb_root root_s; struct list_head inexact_bins; struct callback_head rcu; }; struct ip6_mh { __u8 ip6mh_proto; __u8 ip6mh_hdrlen; __u8 ip6mh_type; __u8 ip6mh_reserved; __u16 ip6mh_cksum; __u8 data[0]; }; struct xfrm_flo { struct dst_entry *dst_orig; u8 flags; }; struct xfrm_pol_inexact_candidates { struct hlist_head *res[4]; }; struct xfrm_migrate { xfrm_address_t old_daddr; xfrm_address_t 
old_saddr; xfrm_address_t new_daddr; xfrm_address_t new_saddr; u8 proto; u8 mode; u16 reserved; u32 reqid; u16 old_family; u16 new_family; }; struct xfrm_kmaddress { xfrm_address_t local; xfrm_address_t remote; u32 reserved; u16 family; }; struct xfrmk_spdinfo { u32 incnt; u32 outcnt; u32 fwdcnt; u32 inscnt; u32 outscnt; u32 fwdscnt; u32 spdhcnt; u32 spdhmcnt; }; struct xfrm_policy_walk { struct xfrm_policy_walk_entry walk; u8 type; u32 seq; }; enum { XFRM_MSG_BASE = 16, XFRM_MSG_NEWSA = 16, XFRM_MSG_DELSA = 17, XFRM_MSG_GETSA = 18, XFRM_MSG_NEWPOLICY = 19, XFRM_MSG_DELPOLICY = 20, XFRM_MSG_GETPOLICY = 21, XFRM_MSG_ALLOCSPI = 22, XFRM_MSG_ACQUIRE = 23, XFRM_MSG_EXPIRE = 24, XFRM_MSG_UPDPOLICY = 25, XFRM_MSG_UPDSA = 26, XFRM_MSG_POLEXPIRE = 27, XFRM_MSG_FLUSHSA = 28, XFRM_MSG_FLUSHPOLICY = 29, XFRM_MSG_NEWAE = 30, XFRM_MSG_GETAE = 31, XFRM_MSG_REPORT = 32, XFRM_MSG_MIGRATE = 33, XFRM_MSG_NEWSADINFO = 34, XFRM_MSG_GETSADINFO = 35, XFRM_MSG_NEWSPDINFO = 36, XFRM_MSG_GETSPDINFO = 37, XFRM_MSG_MAPPING = 38, XFRM_MSG_SETDEFAULT = 39, XFRM_MSG_GETDEFAULT = 40, __XFRM_MSG_MAX = 41, }; enum { XFRM_MODE_FLAG_TUNNEL = 1, }; enum xfrm_attr_type_t { XFRMA_UNSPEC = 0, XFRMA_ALG_AUTH = 1, XFRMA_ALG_CRYPT = 2, XFRMA_ALG_COMP = 3, XFRMA_ENCAP = 4, XFRMA_TMPL = 5, XFRMA_SA = 6, XFRMA_POLICY = 7, XFRMA_SEC_CTX = 8, XFRMA_LTIME_VAL = 9, XFRMA_REPLAY_VAL = 10, XFRMA_REPLAY_THRESH = 11, XFRMA_ETIMER_THRESH = 12, XFRMA_SRCADDR = 13, XFRMA_COADDR = 14, XFRMA_LASTUSED = 15, XFRMA_POLICY_TYPE = 16, XFRMA_MIGRATE = 17, XFRMA_ALG_AEAD = 18, XFRMA_KMADDRESS = 19, XFRMA_ALG_AUTH_TRUNC = 20, XFRMA_MARK = 21, XFRMA_TFCPAD = 22, XFRMA_REPLAY_ESN_VAL = 23, XFRMA_SA_EXTRA_FLAGS = 24, XFRMA_PROTO = 25, XFRMA_ADDRESS_FILTER = 26, XFRMA_PAD = 27, XFRMA_OFFLOAD_DEV = 28, XFRMA_SET_MARK = 29, XFRMA_SET_MARK_MASK = 30, XFRMA_IF_ID = 31, XFRMA_MTIMER_THRESH = 32, __XFRMA_MAX = 33, }; enum xfrm_ae_ftype_t { XFRM_AE_UNSPEC = 0, XFRM_AE_RTHR = 1, XFRM_AE_RVAL = 2, XFRM_AE_LVAL = 4, XFRM_AE_ETHR = 8, XFRM_AE_CR = 16, XFRM_AE_CE = 32, XFRM_AE_CU = 64, __XFRM_AE_MAX = 65, }; enum xfrm_nlgroups { XFRMNLGRP_NONE = 0, XFRMNLGRP_ACQUIRE = 1, XFRMNLGRP_EXPIRE = 2, XFRMNLGRP_SA = 3, XFRMNLGRP_POLICY = 4, XFRMNLGRP_AEVENTS = 5, XFRMNLGRP_REPORT = 6, XFRMNLGRP_MIGRATE = 7, XFRMNLGRP_MAPPING = 8, __XFRMNLGRP_MAX = 9, }; struct km_event; struct xfrm_mgr { struct list_head list; int (*notify)(struct xfrm_state *, const struct km_event *); int (*acquire)(struct xfrm_state *, struct xfrm_tmpl *, struct xfrm_policy *); struct xfrm_policy * (*compile_policy)(struct sock *, int, u8 *, int, int *); int (*new_mapping)(struct xfrm_state *, xfrm_address_t *, __be16); int (*notify_policy)(struct xfrm_policy *, int, const struct km_event *); int (*report)(struct net *, u8, struct xfrm_selector *, xfrm_address_t *); int (*migrate)(const struct xfrm_selector *, u8, u8, const struct xfrm_migrate *, int, const struct xfrm_kmaddress *, const struct xfrm_encap_tmpl *); bool (*is_alive)(const struct km_event *); }; struct km_event { union { u32 hard; u32 proto; u32 byid; u32 aevent; u32 type; } data; u32 seq; u32 portid; u32 event; struct net *net; }; struct xfrm_user_sec_ctx { __u16 len; __u16 exttype; __u8 ctx_alg; __u8 ctx_doi; __u16 ctx_len; }; struct xfrmk_sadinfo { u32 sadhcnt; u32 sadhmcnt; u32 sadcnt; }; struct xfrm_trans_tasklet { struct work_struct work; spinlock_t queue_lock; struct sk_buff_head queue; }; struct __ip6_tnl_parm { char name[16]; int link; __u8 proto; __u8 encap_limit; __u8 hop_limit; bool collect_md; __be32 flowinfo; __u32 flags; struct 
in6_addr laddr; struct in6_addr raddr; __be16 i_flags; __be16 o_flags; __be32 i_key; __be32 o_key; __u32 fwmark; __u32 index; __u8 erspan_ver; __u8 dir; __u16 hwid; }; struct ip6_tnl { struct ip6_tnl __attribute__((btf_type_tag("rcu"))) *next; struct net_device *dev; netdevice_tracker dev_tracker; struct net *net; struct __ip6_tnl_parm parms; struct flowi fl; struct dst_cache dst_cache; struct gro_cells gro_cells; int err_count; unsigned long err_time; __u32 i_seqno; atomic_t o_seqno; int hlen; int tun_hlen; int encap_hlen; struct ip_tunnel_encap encap; int mlink; }; struct xfrm_trans_cb { union { struct inet_skb_parm h4; struct inet6_skb_parm h6; } header; int (*finish)(struct net *, struct sock *, struct sk_buff *); struct net *net; }; struct ip_beet_phdr { __u8 nexthdr; __u8 hdrlen; __u8 padlen; __u8 reserved; }; struct xfrm_algo_list { struct xfrm_algo_desc *algs; int entries; u32 type; u32 mask; }; struct xfrm_aead_name { const char *name; int icvbits; }; struct xfrm_link { int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **, struct netlink_ext_ack *); int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); const struct nla_policy *nla_pol; int nla_max; }; enum { XFRM_DEV_OFFLOAD_IN = 1, XFRM_DEV_OFFLOAD_OUT = 2, XFRM_DEV_OFFLOAD_FWD = 3, }; enum { XFRM_SHARE_ANY = 0, XFRM_SHARE_SESSION = 1, XFRM_SHARE_USER = 2, XFRM_SHARE_UNIQUE = 3, }; enum xfrm_sadattr_type_t { XFRMA_SAD_UNSPEC = 0, XFRMA_SAD_CNT = 1, XFRMA_SAD_HINFO = 2, __XFRMA_SAD_MAX = 3, }; enum xfrm_spdattr_type_t { XFRMA_SPD_UNSPEC = 0, XFRMA_SPD_INFO = 1, XFRMA_SPD_HINFO = 2, XFRMA_SPD_IPV4_HTHRESH = 3, XFRMA_SPD_IPV6_HTHRESH = 4, __XFRMA_SPD_MAX = 5, }; struct xfrm_userpolicy_info { struct xfrm_selector sel; struct xfrm_lifetime_cfg lft; struct xfrm_lifetime_cur curlft; __u32 priority; __u32 index; __u8 dir; __u8 action; __u8 flags; __u8 share; }; struct xfrm_user_tmpl { struct xfrm_id id; __u16 family; xfrm_address_t saddr; __u32 reqid; __u8 mode; __u8 share; __u8 optional; __u32 aalgos; __u32 ealgos; __u32 calgos; }; struct xfrm_usersa_info { struct xfrm_selector sel; struct xfrm_id id; xfrm_address_t saddr; struct xfrm_lifetime_cfg lft; struct xfrm_lifetime_cur curlft; struct xfrm_stats stats; __u32 seq; __u32 reqid; __u16 family; __u8 mode; __u8 replay_window; __u8 flags; }; struct xfrm_usersa_id { xfrm_address_t daddr; __be32 spi; __u16 family; __u8 proto; }; struct xfrm_usersa_flush { __u8 proto; }; struct xfrm_user_expire { struct xfrm_usersa_info state; __u8 hard; }; struct xfrm_translator { int (*alloc_compat)(struct sk_buff *, const struct nlmsghdr *); struct nlmsghdr * (*rcv_msg_compat)(const struct nlmsghdr *, int, const struct nla_policy *, struct netlink_ext_ack *); int (*xlate_user_policy_sockptr)(u8 **, int); struct module *owner; }; struct xfrm_user_acquire { struct xfrm_id id; xfrm_address_t saddr; struct xfrm_selector sel; struct xfrm_userpolicy_info policy; __u32 aalgos; __u32 ealgos; __u32 calgos; __u32 seq; }; struct xfrm_user_offload { int ifindex; __u8 flags; }; struct xfrm_user_mapping { struct xfrm_usersa_id id; __u32 reqid; xfrm_address_t old_saddr; xfrm_address_t new_saddr; __be16 old_sport; __be16 new_sport; }; struct xfrm_userpolicy_id { struct xfrm_selector sel; __u32 index; __u8 dir; }; struct xfrm_user_polexpire { struct xfrm_userpolicy_info pol; __u8 hard; }; struct xfrm_user_report { __u8 proto; struct xfrm_selector sel; }; struct xfrm_user_kmaddress { xfrm_address_t local; xfrm_address_t 
remote; __u32 reserved; __u16 family; }; struct xfrm_user_migrate { xfrm_address_t old_daddr; xfrm_address_t old_saddr; xfrm_address_t new_daddr; xfrm_address_t new_saddr; __u8 proto; __u8 mode; __u16 reserved; __u32 reqid; __u16 old_family; __u16 new_family; }; struct xfrm_userpolicy_type { __u8 type; __u16 reserved1; __u8 reserved2; }; struct xfrm_dump_info { struct sk_buff *in_skb; struct sk_buff *out_skb; u32 nlmsg_seq; u16 nlmsg_flags; }; struct xfrmu_sadhinfo { __u32 sadhcnt; __u32 sadhmcnt; }; struct xfrmu_spdinfo { __u32 incnt; __u32 outcnt; __u32 fwdcnt; __u32 inscnt; __u32 outscnt; __u32 fwdscnt; }; struct xfrmu_spdhinfo { __u32 spdhcnt; __u32 spdhmcnt; }; struct xfrmu_spdhthresh { __u8 lbits; __u8 rbits; }; struct xfrm_userpolicy_default { __u8 in; __u8 fwd; __u8 out; }; struct xfrm_aevent_id { struct xfrm_usersa_id sa_id; xfrm_address_t saddr; __u32 flags; __u32 reqid; }; struct xfrm_userspi_info { struct xfrm_usersa_info info; __u32 min; __u32 max; }; struct ipcomp_tfms { struct list_head list; struct crypto_comp * __attribute__((btf_type_tag("percpu"))) *tfms; int users; }; struct ipcomp_data { u16 threshold; struct crypto_comp * __attribute__((btf_type_tag("percpu"))) *tfms; }; struct xfrm6_tunnel { int (*handler)(struct sk_buff *); int (*cb_handler)(struct sk_buff *, int); int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); struct xfrm6_tunnel __attribute__((btf_type_tag("rcu"))) *next; int priority; }; struct xfrm6_protocol { int (*handler)(struct sk_buff *); int (*input_handler)(struct sk_buff *, int, __be32, int); int (*cb_handler)(struct sk_buff *, int); int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); struct xfrm6_protocol __attribute__((btf_type_tag("rcu"))) *next; int priority; }; enum { LWT_XFRM_UNSPEC = 0, LWT_XFRM_IF_ID = 1, LWT_XFRM_LINK = 2, __LWT_XFRM_MAX = 3, }; enum { IFLA_XFRM_UNSPEC = 0, IFLA_XFRM_LINK = 1, IFLA_XFRM_IF_ID = 2, IFLA_XFRM_COLLECT_METADATA = 3, __IFLA_XFRM_MAX = 4, }; struct xfrm_if_parms { int link; u32 if_id; bool collect_md; }; struct xfrm_if { struct xfrm_if __attribute__((btf_type_tag("rcu"))) *next; struct net_device *dev; struct net *net; struct xfrm_if_parms p; struct gro_cells gro_cells; }; struct xfrmi_net { struct xfrm_if __attribute__((btf_type_tag("rcu"))) *xfrmi[256]; struct xfrm_if __attribute__((btf_type_tag("rcu"))) *collect_md_xfrmi; }; struct bpf_xfrm_info { u32 if_id; int link; }; enum unix_socket_lock_class { U_LOCK_NORMAL = 0, U_LOCK_SECOND = 1, U_LOCK_DIAG = 2, U_LOCK_GC_LISTENER = 3, }; struct unix_skb_parms { struct pid *pid; kuid_t uid; kgid_t gid; struct scm_fp_list *fp; u32 secid; u32 consumed; }; struct bpf_iter__unix { union { struct bpf_iter_meta *meta; }; union { struct unix_sock *unix_sk; }; uid_t uid; }; struct bpf_unix_iter_state { struct seq_net_private p; unsigned int cur_sk; unsigned int end_sk; unsigned int max_sk; struct sock **batch; bool st_bucket_done; }; struct unix_stream_read_state { int (*recv_actor)(struct sk_buff *, int, int, struct unix_stream_read_state *); struct socket *socket; struct msghdr *msg; struct pipe_inode_info *pipe; size_t size; int flags; unsigned int splice_flags; }; struct ipv6_params { __s32 disable_ipv6; __s32 autoconf; }; struct ioam6_pernet_data { struct mutex lock; struct rhashtable namespaces; struct rhashtable schemas; }; struct ipv6_stub { int (*ipv6_sock_mc_join)(struct sock *, int, const struct in6_addr *); int (*ipv6_sock_mc_drop)(struct sock *, int, const struct in6_addr *); struct dst_entry * 
(*ipv6_dst_lookup_flow)(struct net *, const struct sock *, struct flowi6 *, const struct in6_addr *); int (*ipv6_route_input)(struct sk_buff *); struct fib6_table * (*fib6_get_table)(struct net *, u32); int (*fib6_lookup)(struct net *, int, struct flowi6 *, struct fib6_result *, int); int (*fib6_table_lookup)(struct net *, struct fib6_table *, int, struct flowi6 *, struct fib6_result *, int); void (*fib6_select_path)(const struct net *, struct fib6_result *, struct flowi6 *, int, bool, const struct sk_buff *, int); u32 (*ip6_mtu_from_fib6)(const struct fib6_result *, const struct in6_addr *, const struct in6_addr *); int (*fib6_nh_init)(struct net *, struct fib6_nh *, struct fib6_config *, gfp_t, struct netlink_ext_ack *); void (*fib6_nh_release)(struct fib6_nh *); void (*fib6_nh_release_dsts)(struct fib6_nh *); void (*fib6_update_sernum)(struct net *, struct fib6_info *); int (*ip6_del_rt)(struct net *, struct fib6_info *, bool); void (*fib6_rt_update)(struct net *, struct fib6_info *, struct nl_info *); void (*udpv6_encap_enable)(); void (*ndisc_send_na)(struct net_device *, const struct in6_addr *, const struct in6_addr *, bool, bool, bool, bool); void (*xfrm6_local_rxpmtu)(struct sk_buff *, u32); int (*xfrm6_udp_encap_rcv)(struct sock *, struct sk_buff *); int (*xfrm6_rcv_encap)(struct sk_buff *, int, __be32, int); struct neigh_table *nd_tbl; int (*ipv6_fragment)(struct net *, struct sock *, struct sk_buff *, int (*)(struct net *, struct sock *, struct sk_buff *)); struct net_device * (*ipv6_dev_find)(struct net *, const struct in6_addr *, struct net_device *); }; enum flowlabel_reflect { FLOWLABEL_REFLECT_ESTABLISHED = 1, FLOWLABEL_REFLECT_TCP_RESET = 2, FLOWLABEL_REFLECT_ICMPV6_ECHO_REPLIES = 4, }; struct in6_rtmsg { struct in6_addr rtmsg_dst; struct in6_addr rtmsg_src; struct in6_addr rtmsg_gateway; __u32 rtmsg_type; __u16 rtmsg_dst_len; __u16 rtmsg_src_len; __u32 rtmsg_metric; unsigned long rtmsg_info; __u32 rtmsg_flags; int rtmsg_ifindex; }; struct compat_in6_rtmsg { struct in6_addr rtmsg_dst; struct in6_addr rtmsg_src; struct in6_addr rtmsg_gateway; u32 rtmsg_type; u16 rtmsg_dst_len; u16 rtmsg_src_len; u32 rtmsg_metric; u32 rtmsg_info; u32 rtmsg_flags; s32 rtmsg_ifindex; }; struct ac6_iter_state { struct seq_net_private p; struct net_device *dev; struct inet6_dev *idev; }; enum { ICMP6_MIB_NUM = 0, ICMP6_MIB_INMSGS = 1, ICMP6_MIB_INERRORS = 2, ICMP6_MIB_OUTMSGS = 3, ICMP6_MIB_OUTERRORS = 4, ICMP6_MIB_CSUMERRORS = 5, ICMP6_MIB_RATELIMITHOST = 6, __ICMP6_MIB_MAX = 7, }; struct ip6_frag_state { u8 *prevhdr; unsigned int hlen; unsigned int mtu; unsigned int left; int offset; int ptr; int hroom; int troom; __be32 frag_id; u8 nexthdr; }; struct ip6_fraglist_iter { struct ipv6hdr *tmp_hdr; struct sk_buff *frag; int offset; unsigned int hlen; __be32 frag_id; u8 nexthdr; }; struct hop_jumbo_hdr { u8 nexthdr; u8 hdrlen; u8 tlv_type; u8 tlv_len; __be32 jumbo_payload_len; }; struct ip6_ra_chain { struct ip6_ra_chain *next; struct sock *sk; int sel; void (*destructor)(struct sock *); }; struct ipcm6_cookie { struct sockcm_cookie sockc; __s16 hlimit; __s16 tclass; __u16 gso_size; __s8 dontfrag; struct ipv6_txoptions *opt; }; struct inet6_protocol { int (*handler)(struct sk_buff *); int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); unsigned int flags; }; enum { INET6_IFADDR_STATE_PREDAD = 0, INET6_IFADDR_STATE_DAD = 1, INET6_IFADDR_STATE_POSTDAD = 2, INET6_IFADDR_STATE_ERRDAD = 3, INET6_IFADDR_STATE_DEAD = 4, }; enum { IPV6_SADDR_RULE_INIT = 0, 
IPV6_SADDR_RULE_LOCAL = 1, IPV6_SADDR_RULE_SCOPE = 2, IPV6_SADDR_RULE_PREFERRED = 3, IPV6_SADDR_RULE_HOA = 4, IPV6_SADDR_RULE_OIF = 5, IPV6_SADDR_RULE_LABEL = 6, IPV6_SADDR_RULE_PRIVACY = 7, IPV6_SADDR_RULE_ORCHID = 8, IPV6_SADDR_RULE_PREFIX = 9, IPV6_SADDR_RULE_NOT_OPTIMISTIC = 10, IPV6_SADDR_RULE_MAX = 11, }; enum { DAD_PROCESS = 0, DAD_BEGIN = 1, DAD_ABORT = 2, }; enum cleanup_prefix_rt_t { CLEANUP_PREFIX_RT_NOP = 0, CLEANUP_PREFIX_RT_DEL = 1, CLEANUP_PREFIX_RT_EXPIRE = 2, }; enum { DEVCONF_FORWARDING = 0, DEVCONF_HOPLIMIT = 1, DEVCONF_MTU6 = 2, DEVCONF_ACCEPT_RA = 3, DEVCONF_ACCEPT_REDIRECTS = 4, DEVCONF_AUTOCONF = 5, DEVCONF_DAD_TRANSMITS = 6, DEVCONF_RTR_SOLICITS = 7, DEVCONF_RTR_SOLICIT_INTERVAL = 8, DEVCONF_RTR_SOLICIT_DELAY = 9, DEVCONF_USE_TEMPADDR = 10, DEVCONF_TEMP_VALID_LFT = 11, DEVCONF_TEMP_PREFERED_LFT = 12, DEVCONF_REGEN_MAX_RETRY = 13, DEVCONF_MAX_DESYNC_FACTOR = 14, DEVCONF_MAX_ADDRESSES = 15, DEVCONF_FORCE_MLD_VERSION = 16, DEVCONF_ACCEPT_RA_DEFRTR = 17, DEVCONF_ACCEPT_RA_PINFO = 18, DEVCONF_ACCEPT_RA_RTR_PREF = 19, DEVCONF_RTR_PROBE_INTERVAL = 20, DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN = 21, DEVCONF_PROXY_NDP = 22, DEVCONF_OPTIMISTIC_DAD = 23, DEVCONF_ACCEPT_SOURCE_ROUTE = 24, DEVCONF_MC_FORWARDING = 25, DEVCONF_DISABLE_IPV6 = 26, DEVCONF_ACCEPT_DAD = 27, DEVCONF_FORCE_TLLAO = 28, DEVCONF_NDISC_NOTIFY = 29, DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL = 30, DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL = 31, DEVCONF_SUPPRESS_FRAG_NDISC = 32, DEVCONF_ACCEPT_RA_FROM_LOCAL = 33, DEVCONF_USE_OPTIMISTIC = 34, DEVCONF_ACCEPT_RA_MTU = 35, DEVCONF_STABLE_SECRET = 36, DEVCONF_USE_OIF_ADDRS_ONLY = 37, DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT = 38, DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 39, DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 40, DEVCONF_DROP_UNSOLICITED_NA = 41, DEVCONF_KEEP_ADDR_ON_DOWN = 42, DEVCONF_RTR_SOLICIT_MAX_INTERVAL = 43, DEVCONF_SEG6_ENABLED = 44, DEVCONF_SEG6_REQUIRE_HMAC = 45, DEVCONF_ENHANCED_DAD = 46, DEVCONF_ADDR_GEN_MODE = 47, DEVCONF_DISABLE_POLICY = 48, DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN = 49, DEVCONF_NDISC_TCLASS = 50, DEVCONF_RPL_SEG_ENABLED = 51, DEVCONF_RA_DEFRTR_METRIC = 52, DEVCONF_IOAM6_ENABLED = 53, DEVCONF_IOAM6_ID = 54, DEVCONF_IOAM6_ID_WIDE = 55, DEVCONF_NDISC_EVICT_NOCARRIER = 56, DEVCONF_ACCEPT_UNTRACKED_NA = 57, DEVCONF_ACCEPT_RA_MIN_LFT = 58, DEVCONF_MAX = 59, }; enum { IFLA_INET6_UNSPEC = 0, IFLA_INET6_FLAGS = 1, IFLA_INET6_CONF = 2, IFLA_INET6_STATS = 3, IFLA_INET6_MCAST = 4, IFLA_INET6_CACHEINFO = 5, IFLA_INET6_ICMP6STATS = 6, IFLA_INET6_TOKEN = 7, IFLA_INET6_ADDR_GEN_MODE = 8, IFLA_INET6_RA_MTU = 9, __IFLA_INET6_MAX = 10, }; enum { PREFIX_UNSPEC = 0, PREFIX_ADDRESS = 1, PREFIX_CACHEINFO = 2, __PREFIX_MAX = 3, }; enum addr_type_t { UNICAST_ADDR = 0, MULTICAST_ADDR = 1, ANYCAST_ADDR = 2, }; union fwnet_hwaddr { u8 u[16]; struct { __be64 uniq_id; u8 max_rec; u8 sspd; u8 fifo[6]; } uc; }; struct prefix_cacheinfo { __u32 preferred_time; __u32 valid_time; }; struct prefixmsg { unsigned char prefix_family; unsigned char prefix_pad1; unsigned short prefix_pad2; int prefix_ifindex; unsigned char prefix_type; unsigned char prefix_len; unsigned char prefix_flags; unsigned char prefix_pad3; }; struct in6_ifreq { struct in6_addr ifr6_addr; __u32 ifr6_prefixlen; int ifr6_ifindex; }; struct if6_iter_state { struct seq_net_private p; int bucket; int offset; }; struct inet6_fill_args { u32 portid; u32 seq; int event; unsigned int flags; int netnsid; int ifindex; enum addr_type_t type; }; struct ipv6_saddr_score { int rule; int addr_type; struct inet6_ifaddr *ifa; unsigned 
long scorebits[1]; int scopedist; int matchlen; }; struct ipv6_saddr_dst { const struct in6_addr *addr; int ifindex; int scope; int label; unsigned int prefs; }; struct ifa6_config { const struct in6_addr *pfx; unsigned int plen; u8 ifa_proto; const struct in6_addr *peer_pfx; u32 rt_priority; u32 ifa_flags; u32 preferred_lft; u32 valid_lft; u16 scope; }; struct in6_validator_info { struct in6_addr i6vi_addr; struct inet6_dev *i6vi_dev; struct netlink_ext_ack *extack; }; struct ifla_cacheinfo { __u32 max_reasm_len; __u32 tstamp; __u32 reachable_time; __u32 retrans_time; }; struct ip6addrlbl_init_table { const struct in6_addr *prefix; int prefixlen; u32 label; }; enum { IFAL_ADDRESS = 1, IFAL_LABEL = 2, __IFAL_MAX = 3, }; struct ip6addrlbl_entry { struct in6_addr prefix; int prefixlen; int ifindex; int addrtype; u32 label; struct hlist_node list; struct callback_head rcu; }; struct ifaddrlblmsg { __u8 ifal_family; __u8 __ifal_reserved; __u8 ifal_prefixlen; __u8 ifal_flags; __u32 ifal_index; __u32 ifal_seq; }; typedef void (*btf_trace_fib6_table_lookup)(void *, const struct net *, const struct fib6_result *, struct fib6_table *, const struct flowi6 *); enum rt6_nud_state { RT6_NUD_FAIL_HARD = -3, RT6_NUD_FAIL_PROBE = -2, RT6_NUD_FAIL_DO_RR = -1, RT6_NUD_SUCCEED = 1, }; struct route_info { __u8 type; __u8 length; __u8 prefix_len; __u8 reserved_l: 3; __u8 route_pref: 2; __u8 reserved_h: 3; __be32 lifetime; __u8 prefix[0]; }; struct rt6_rtnl_dump_arg { struct sk_buff *skb; struct netlink_callback *cb; struct net *net; struct fib_dump_filter filter; }; struct trace_event_raw_fib6_table_lookup { struct trace_entry ent; u32 tb_id; int err; int oif; int iif; __u8 tos; __u8 scope; __u8 flags; __u8 src[16]; __u8 dst[16]; u16 sport; u16 dport; u8 proto; u8 rt_type; char name[16]; __u8 gw[16]; char __data[0]; }; struct rt6_exception { struct hlist_node hlist; struct rt6_info *rt6i; unsigned long stamp; struct callback_head rcu; }; struct __rt6_probe_work { struct work_struct work; struct in6_addr target; struct net_device *dev; netdevice_tracker dev_tracker; }; struct ip6rd_flowi { struct flowi6 fl6; struct in6_addr gateway; }; struct arg_dev_net_ip { struct net *net; struct in6_addr *addr; }; struct rt6_mtu_change_arg { struct net_device *dev; unsigned int mtu; struct fib6_info *f6i; }; struct rt6_nh { struct fib6_info *fib6_info; struct fib6_config r_cfg; struct list_head next; }; typedef struct rt6_info * (*pol_lookup_t)(struct net *, struct fib6_table *, struct flowi6 *, const struct sk_buff *, int); struct fib6_nh_dm_arg { struct net *net; const struct in6_addr *saddr; int oif; int flags; struct fib6_nh *nh; }; struct fib6_gc_args { int timeout; int more; }; struct fib6_nh_match_arg { const struct net_device *dev; const struct in6_addr *gw; struct fib6_nh *match; }; struct fib6_nh_del_cached_rt_arg { struct fib6_config *cfg; struct fib6_info *f6i; }; struct arg_netdev_event { const struct net_device *dev; union { unsigned char nh_flags; unsigned long event; }; }; struct trace_event_data_offsets_fib6_table_lookup {}; struct fib6_nh_age_excptn_arg { struct fib6_gc_args *gc_args; unsigned long now; }; struct netevent_redirect { struct dst_entry *old; struct dst_entry *new; struct neighbour *neigh; const void *daddr; }; struct fib6_nh_exception_dump_walker { struct rt6_rtnl_dump_arg *dump; struct fib6_info *rt; unsigned int flags; unsigned int skip; unsigned int count; }; struct fib6_nh_frl_arg { u32 flags; int oif; int strict; int *mpri; bool *do_rr; struct fib6_nh *nh; }; struct fib6_nh_rd_arg { 
struct fib6_result *res; struct flowi6 *fl6; const struct in6_addr *gw; struct rt6_info **ret; }; struct fib6_nh_excptn_arg { struct rt6_info *rt; int plen; }; enum fib6_walk_state { FWS_L = 0, FWS_R = 1, FWS_C = 2, FWS_U = 3, }; enum { FIB6_NO_SERNUM_CHANGE = 0, }; struct fib6_walker { struct list_head lh; struct fib6_node *root; struct fib6_node *node; struct fib6_info *leaf; enum fib6_walk_state state; unsigned int skip; unsigned int count; unsigned int skip_in_node; int (*func)(struct fib6_walker *); void *args; }; struct fib6_cleaner { struct fib6_walker w; struct net *net; int (*func)(struct fib6_info *, void *); int sernum; void *arg; bool skip_notify; }; struct fib6_dump_arg { struct net *net; struct notifier_block *nb; struct netlink_ext_ack *extack; }; struct fib6_entry_notifier_info { struct fib_notifier_info info; struct fib6_info *rt; unsigned int nsiblings; }; struct lookup_args { int offset; const struct in6_addr *addr; }; struct bpf_iter__ipv6_route { union { struct bpf_iter_meta *meta; }; union { struct fib6_info *rt; }; }; struct ipv6_route_iter { struct seq_net_private p; struct fib6_walker w; loff_t skip; struct fib6_table *tbl; int sernum; }; struct fib6_nh_pcpu_arg { struct fib6_info *from; const struct fib6_table *table; }; struct ipv6_sr_hdr { __u8 nexthdr; __u8 hdrlen; __u8 type; __u8 segments_left; __u8 first_segment; __u8 flags; __u16 tag; struct in6_addr segments[0]; }; struct in6_flowlabel_req { struct in6_addr flr_dst; __be32 flr_label; __u8 flr_action; __u8 flr_share; __u16 flr_flags; __u16 flr_expires; __u16 flr_linger; __u32 __flr_pad; }; struct ipv6_mreq { struct in6_addr ipv6mr_multiaddr; int ipv6mr_ifindex; }; struct ip6_mtuinfo { struct sockaddr_in6 ip6m_addr; __u32 ip6m_mtu; }; enum { NDUSEROPT_UNSPEC = 0, NDUSEROPT_SRCADDR = 1, __NDUSEROPT_MAX = 2, }; struct nd_msg { struct icmp6hdr icmph; struct in6_addr target; __u8 opt[0]; }; struct rs_msg { struct icmp6hdr icmph; __u8 opt[0]; }; struct ra_msg { struct icmp6hdr icmph; __be32 reachable_time; __be32 retrans_timer; }; struct nduseroptmsg { unsigned char nduseropt_family; unsigned char nduseropt_pad1; unsigned short nduseropt_opts_len; int nduseropt_ifindex; __u8 nduseropt_icmp_type; __u8 nduseropt_icmp_code; unsigned short nduseropt_pad2; unsigned int nduseropt_pad3; }; typedef u32 inet6_ehashfn_t(const struct net *, const struct in6_addr *, const u16, const struct in6_addr *, const __be16); typedef int mh_filter_t(struct sock *, struct sk_buff *); struct icmp6_filter { __u32 data[8]; }; struct raw6_sock { struct inet_sock inet; __u32 checksum; __u32 offset; struct icmp6_filter filter; __u32 ip6mr_table; struct ipv6_pinfo inet6; }; struct raw6_frag_vec { struct msghdr *msg; int hlen; char c[4]; }; struct icmp6_err { int err; int fatal; }; struct ipv6_destopt_hao { __u8 type; __u8 length; struct in6_addr addr; } __attribute__((packed)); struct icmpv6_msg { struct sk_buff *skb; int offset; uint8_t type; }; struct mld2_grec { __u8 grec_type; __u8 grec_auxwords; __be16 grec_nsrcs; struct in6_addr grec_mca; struct in6_addr grec_src[0]; }; struct mld2_report { struct icmp6hdr mld2r_hdr; struct mld2_grec mld2r_grec[0]; }; struct mld_msg { struct icmp6hdr mld_hdr; struct in6_addr mld_mca; }; struct mld2_query { struct icmp6hdr mld2q_hdr; struct in6_addr mld2q_mca; __u8 mld2q_qrv: 3; __u8 mld2q_suppress: 1; __u8 mld2q_resv2: 4; __u8 mld2q_qqic; __be16 mld2q_nsrcs; struct in6_addr mld2q_srcs[0]; }; struct igmp6_mc_iter_state { struct seq_net_private p; struct net_device *dev; struct inet6_dev *idev; }; struct 
igmp6_mcf_iter_state { struct seq_net_private p; struct net_device *dev; struct inet6_dev *idev; struct ifmcaddr6 *im; }; enum ip6_defrag_users { IP6_DEFRAG_LOCAL_DELIVER = 0, IP6_DEFRAG_CONNTRACK_IN = 1, __IP6_DEFRAG_CONNTRACK_IN = 65536, IP6_DEFRAG_CONNTRACK_OUT = 65537, __IP6_DEFRAG_CONNTRACK_OUT = 131072, IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 131073, __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 196608, }; struct frag_queue { struct inet_frag_queue q; int iif; __u16 nhoffset; u8 ecn; }; struct rt0_hdr { struct ipv6_rt_hdr rt_hdr; __u32 reserved; struct in6_addr addr[0]; }; struct ipv6_rpl_sr_hdr { __u8 nexthdr; __u8 hdrlen; __u8 type; __u8 segments_left; __u32 cmpre: 4; __u32 cmpri: 4; __u32 reserved: 4; __u32 pad: 4; __u32 reserved1: 16; union { struct { struct {} __empty_addr; struct in6_addr addr[0]; }; struct { struct {} __empty_data; __u8 data[0]; }; } segments; }; struct ioam6_hdr { __u8 opt_type; __u8 opt_len; char: 8; __u8 type; }; struct ioam6_trace_hdr { __be16 namespace_id; char: 2; __u8 overflow: 1; __u8 nodelen: 5; __u8 remlen: 7; union { __be32 type_be32; struct { __u32 bit7: 1; __u32 bit6: 1; __u32 bit5: 1; __u32 bit4: 1; __u32 bit3: 1; __u32 bit2: 1; __u32 bit1: 1; __u32 bit0: 1; __u32 bit15: 1; __u32 bit14: 1; __u32 bit13: 1; __u32 bit12: 1; __u32 bit11: 1; __u32 bit10: 1; __u32 bit9: 1; __u32 bit8: 1; __u32 bit23: 1; __u32 bit22: 1; __u32 bit21: 1; __u32 bit20: 1; __u32 bit19: 1; __u32 bit18: 1; __u32 bit17: 1; __u32 bit16: 1; } type; }; __u8 data[0]; }; struct ioam6_schema; struct ioam6_namespace { struct rhash_head head; struct callback_head rcu; struct ioam6_schema __attribute__((btf_type_tag("rcu"))) *schema; __be16 id; __be32 data; __be64 data_wide; }; struct ioam6_schema { struct rhash_head head; struct callback_head rcu; struct ioam6_namespace __attribute__((btf_type_tag("rcu"))) *ns; u32 id; int len; __be32 hdr; u8 data[0]; }; struct static_key_false_deferred { struct static_key_false key; unsigned long timeout; struct delayed_work work; }; struct ip6fl_iter_state { struct seq_net_private p; struct pid_namespace *pid_ns; int bucket; }; enum { SEG6_ATTR_UNSPEC = 0, SEG6_ATTR_DST = 1, SEG6_ATTR_DSTLEN = 2, SEG6_ATTR_HMACKEYID = 3, SEG6_ATTR_SECRET = 4, SEG6_ATTR_SECRETLEN = 5, SEG6_ATTR_ALGID = 6, SEG6_ATTR_HMACINFO = 7, __SEG6_ATTR_MAX = 8, }; enum { SEG6_CMD_UNSPEC = 0, SEG6_CMD_SETHMAC = 1, SEG6_CMD_DUMPHMAC = 2, SEG6_CMD_SET_TUNSRC = 3, SEG6_CMD_GET_TUNSRC = 4, __SEG6_CMD_MAX = 5, }; struct sr6_tlv { __u8 type; __u8 len; __u8 data[0]; }; enum { IOAM6_ATTR_UNSPEC = 0, IOAM6_ATTR_NS_ID = 1, IOAM6_ATTR_NS_DATA = 2, IOAM6_ATTR_NS_DATA_WIDE = 3, IOAM6_ATTR_SC_ID = 4, IOAM6_ATTR_SC_DATA = 5, IOAM6_ATTR_SC_NONE = 6, IOAM6_ATTR_PAD = 7, __IOAM6_ATTR_MAX = 8, }; enum { IOAM6_CMD_UNSPEC = 0, IOAM6_CMD_ADD_NAMESPACE = 1, IOAM6_CMD_DEL_NAMESPACE = 2, IOAM6_CMD_DUMP_NAMESPACES = 3, IOAM6_CMD_ADD_SCHEMA = 4, IOAM6_CMD_DEL_SCHEMA = 5, IOAM6_CMD_DUMP_SCHEMAS = 6, IOAM6_CMD_NS_SET_SCHEMA = 7, __IOAM6_CMD_MAX = 8, }; struct mfc6_cache_cmp_arg { struct in6_addr mf6c_mcastgrp; struct in6_addr mf6c_origin; }; enum { IP6MRA_CREPORT_UNSPEC = 0, IP6MRA_CREPORT_MSGTYPE = 1, IP6MRA_CREPORT_MIF_ID = 2, IP6MRA_CREPORT_SRC_ADDR = 3, IP6MRA_CREPORT_DST_ADDR = 4, IP6MRA_CREPORT_PKT = 5, __IP6MRA_CREPORT_MAX = 6, }; struct mfc6_cache { struct mr_mfc _c; union { struct { struct in6_addr mf6c_mcastgrp; struct in6_addr mf6c_origin; }; struct mfc6_cache_cmp_arg cmparg; }; }; struct mrt6msg { __u8 im6_mbz; __u8 im6_msgtype; __u16 im6_mif; __u32 im6_pad; struct in6_addr im6_src; struct in6_addr im6_dst; }; 
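/*
 * Illustrative usage sketch (not part of the generated type dump): a minimal
 * libbpf CO-RE tracepoint program that consumes the
 * struct trace_event_raw_fib6_table_lookup record defined a few declarations
 * above. The <bpf/bpf_helpers.h> include, the SEC("tp/fib6/fib6_table_lookup")
 * section name, and the handler name are assumptions about a consuming BPF
 * program, not definitions provided by this header; the block is guarded out
 * with #if 0 so the header itself remains a pure type dump.
 */
#if 0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

/* The raw tracepoint record mirrors the struct generated above: after the
 * common struct trace_entry header, tb_id, err, oif/iif and the src/dst
 * byte arrays are plain members readable directly from the context. */
SEC("tp/fib6/fib6_table_lookup")
int handle_fib6_table_lookup(struct trace_event_raw_fib6_table_lookup *ctx)
{
	bpf_printk("fib6 lookup: table=%u err=%d oif=%d",
		   ctx->tb_id, ctx->err, ctx->oif);
	return 0;
}
#endif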
struct ip6mr_result { struct mr_table *mrt; }; typedef __u32 if_mask; struct if_set { if_mask ifs_bits[8]; }; struct mf6cctl { struct sockaddr_in6 mf6cc_origin; struct sockaddr_in6 mf6cc_mcastgrp; mifi_t mf6cc_parent; struct if_set mf6cc_ifset; }; struct mif6ctl { mifi_t mif6c_mifi; unsigned char mif6c_flags; unsigned char vifc_threshold; __u16 mif6c_pifi; unsigned int vifc_rate_limit; }; struct compat_sioc_sg_req6 { struct sockaddr_in6 src; struct sockaddr_in6 grp; compat_ulong_t pktcnt; compat_ulong_t bytecnt; compat_ulong_t wrong_if; }; struct compat_sioc_mif_req6 { mifi_t mifi; compat_ulong_t icount; compat_ulong_t ocount; compat_ulong_t ibytes; compat_ulong_t obytes; }; struct br_input_skb_cb { struct net_device *brdev; u16 frag_max_size; u8 igmp; u8 mrouters_only: 1; u8 proxyarp_replied: 1; u8 src_port_isolated: 1; u8 promisc: 1; u32 backup_nhid; }; struct nf_bridge_frag_data; struct fib6_rule { struct fib_rule common; struct rt6key src; struct rt6key dst; dscp_t dscp; }; struct xfrm6_tunnel_spi { struct hlist_node list_byaddr; struct hlist_node list_byspi; xfrm_address_t addr; u32 spi; refcount_t refcnt; struct callback_head callback_head; }; struct xfrm6_tunnel_net { struct hlist_head spi_byaddr[256]; struct hlist_head spi_byspi[256]; u32 spi; }; struct mip6_report_rate_limiter { spinlock_t lock; ktime_t stamp; int iif; struct in6_addr src; struct in6_addr dst; }; struct rt2_hdr { struct ipv6_rt_hdr rt_hdr; __u32 reserved; struct in6_addr addr; }; enum nf_ip_trace_comments___2 { NF_IP6_TRACE_COMMENT_RULE = 0, NF_IP6_TRACE_COMMENT_RETURN = 1, NF_IP6_TRACE_COMMENT_POLICY = 2, }; struct ip6t_error { struct ip6t_entry entry; struct xt_error_target target; }; struct ip6t_replace { char name[32]; unsigned int valid_hooks; unsigned int num_entries; unsigned int size; unsigned int hook_entry[5]; unsigned int underflow[5]; unsigned int num_counters; struct xt_counters __attribute__((btf_type_tag("user"))) *counters; struct ip6t_entry entries[0]; }; struct ip6t_standard { struct ip6t_entry entry; struct xt_standard_target target; }; struct ip6t_getinfo { char name[32]; unsigned int valid_hooks; unsigned int hook_entry[5]; unsigned int underflow[5]; unsigned int num_entries; unsigned int size; }; struct ip6t_get_entries { char name[32]; unsigned int size; struct ip6t_entry entrytable[0]; }; struct nft_ct_frag6_pernet { struct ctl_table_header *nf_frag_frags_hdr; struct fqdir *fqdir; }; enum { XT_RPFILTER_LOOSE = 1, XT_RPFILTER_VALID_MARK = 2, XT_RPFILTER_ACCEPT_LOCAL = 4, XT_RPFILTER_INVERT = 8, XT_RPFILTER_OPTION_MASK = 15, }; struct xt_rpfilter_info { __u8 flags; }; enum ip6t_reject_with { IP6T_ICMP6_NO_ROUTE = 0, IP6T_ICMP6_ADM_PROHIBITED = 1, IP6T_ICMP6_NOT_NEIGHBOUR = 2, IP6T_ICMP6_ADDR_UNREACH = 3, IP6T_ICMP6_PORT_UNREACH = 4, IP6T_ICMP6_ECHOREPLY = 5, IP6T_TCP_RESET = 6, IP6T_ICMP6_POLICY_FAIL = 7, IP6T_ICMP6_REJECT_ROUTE = 8, }; struct ip6t_reject_info { __u32 with; }; struct vti6_net { struct net_device *fb_tnl_dev; struct ip6_tnl __attribute__((btf_type_tag("rcu"))) *tnls_r_l[32]; struct ip6_tnl __attribute__((btf_type_tag("rcu"))) *tnls_wc[1]; struct ip6_tnl __attribute__((btf_type_tag("rcu"))) **tnls[2]; }; struct ip6_tnl_parm2 { char name[16]; int link; __u8 proto; __u8 encap_limit; __u8 hop_limit; __be32 flowinfo; __u32 flags; struct in6_addr laddr; struct in6_addr raddr; __be16 i_flags; __be16 o_flags; __be32 i_key; __be32 o_key; }; struct sit_net { struct ip_tunnel __attribute__((btf_type_tag("rcu"))) *tunnels_r_l[16]; struct ip_tunnel __attribute__((btf_type_tag("rcu"))) 
*tunnels_r[16]; struct ip_tunnel __attribute__((btf_type_tag("rcu"))) *tunnels_l[16]; struct ip_tunnel __attribute__((btf_type_tag("rcu"))) *tunnels_wc[1]; struct ip_tunnel __attribute__((btf_type_tag("rcu"))) **tunnels[4]; struct net_device *fb_tunnel_dev; }; struct ip_tunnel_prl { __be32 addr; __u16 flags; __u16 __reserved; __u32 datalen; __u32 __reserved2; }; struct ipv6_tlv_tnl_enc_lim { __u8 type; __u8 length; __u8 encap_limit; }; struct ipv6_tel_txoption { struct ipv6_txoptions ops; __u8 dst_opt[8]; }; struct ip6_tnl_net { struct net_device *fb_tnl_dev; struct ip6_tnl __attribute__((btf_type_tag("rcu"))) *tnls_r_l[32]; struct ip6_tnl __attribute__((btf_type_tag("rcu"))) *tnls_wc[1]; struct ip6_tnl __attribute__((btf_type_tag("rcu"))) **tnls[2]; struct ip6_tnl __attribute__((btf_type_tag("rcu"))) *collect_md_tun; }; struct ip6_tnl_parm { char name[16]; int link; __u8 proto; __u8 encap_limit; __u8 hop_limit; __be32 flowinfo; __u32 flags; struct in6_addr laddr; struct in6_addr raddr; }; struct ip6gre_net { struct ip6_tnl __attribute__((btf_type_tag("rcu"))) *tunnels[128]; struct ip6_tnl __attribute__((btf_type_tag("rcu"))) *collect_md_tun; struct ip6_tnl __attribute__((btf_type_tag("rcu"))) *collect_md_tun_erspan; struct net_device *fb_tunnel_dev; }; enum { IP6_FH_F_FRAG = 1, IP6_FH_F_AUTH = 2, IP6_FH_F_SKIP_RH = 4, }; enum tpacket_versions { TPACKET_V1 = 0, TPACKET_V2 = 1, TPACKET_V3 = 2, }; enum packet_sock_flags { PACKET_SOCK_ORIGDEV = 0, PACKET_SOCK_AUXDATA = 1, PACKET_SOCK_TX_HAS_OFF = 2, PACKET_SOCK_TP_LOSS = 3, PACKET_SOCK_RUNNING = 4, PACKET_SOCK_PRESSURE = 5, PACKET_SOCK_QDISC_BYPASS = 6, }; struct tpacket_stats { unsigned int tp_packets; unsigned int tp_drops; }; struct tpacket_stats_v3 { unsigned int tp_packets; unsigned int tp_drops; unsigned int tp_freeze_q_cnt; }; union tpacket_stats_u { struct tpacket_stats stats1; struct tpacket_stats_v3 stats3; }; struct pgv; struct tpacket_kbdq_core { struct pgv *pkbdq; unsigned int feature_req_word; unsigned int hdrlen; unsigned char reset_pending_on_curr_blk; unsigned char delete_blk_timer; unsigned short kactive_blk_num; unsigned short blk_sizeof_priv; unsigned short last_kactive_blk_num; char *pkblk_start; char *pkblk_end; int kblk_size; unsigned int max_frame_len; unsigned int knum_blocks; uint64_t knxt_seq_num; char *prev; char *nxt_offset; struct sk_buff *skb; rwlock_t blk_fill_in_prog_lock; unsigned short retire_blk_tov; unsigned short version; unsigned long tov_in_jiffies; struct timer_list retire_blk_timer; }; struct packet_ring_buffer { struct pgv *pg_vec; unsigned int head; unsigned int frames_per_block; unsigned int frame_size; unsigned int frame_max; unsigned int pg_vec_order; unsigned int pg_vec_pages; unsigned int pg_vec_len; unsigned int __attribute__((btf_type_tag("percpu"))) *pending_refcnt; union { unsigned long *rx_owner_map; struct tpacket_kbdq_core prb_bdqc; }; }; struct packet_fanout; struct packet_rollover; struct packet_mclist; struct packet_sock { struct sock sk; struct packet_fanout *fanout; union tpacket_stats_u stats; struct packet_ring_buffer rx_ring; struct packet_ring_buffer tx_ring; int copy_thresh; spinlock_t bind_lock; struct mutex pg_vec_lock; unsigned long flags; int ifindex; u8 vnet_hdr_sz; __be16 num; struct packet_rollover *rollover; struct packet_mclist *mclist; atomic_long_t mapped; enum tpacket_versions tp_version; unsigned int tp_hdrlen; unsigned int tp_reserve; unsigned int tp_tstamp; struct completion skb_completion; struct net_device __attribute__((btf_type_tag("rcu"))) *cached_dev; long: 
64; struct packet_type prot_hook; long: 64; long: 64; long: 64; atomic_t tp_drops; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct packet_fanout { possible_net_t net; unsigned int num_members; u32 max_num_members; u16 id; u8 type; u8 flags; union { atomic_t rr_cur; struct bpf_prog __attribute__((btf_type_tag("rcu"))) *bpf_prog; }; struct list_head list; spinlock_t lock; refcount_t sk_ref; long: 64; struct packet_type prot_hook; struct sock __attribute__((btf_type_tag("rcu"))) *arr[0]; long: 64; long: 64; long: 64; }; struct pgv { char *buffer; }; struct packet_rollover { int sock; atomic_long_t num; atomic_long_t num_huge; atomic_long_t num_failed; long: 64; long: 64; long: 64; long: 64; u32 history[16]; }; struct packet_mclist { struct packet_mclist *next; int ifindex; int count; unsigned short type; unsigned short alen; unsigned char addr[32]; }; struct tpacket_bd_ts { unsigned int ts_sec; union { unsigned int ts_usec; unsigned int ts_nsec; }; }; struct tpacket_hdr_v1 { __u32 block_status; __u32 num_pkts; __u32 offset_to_first_pkt; __u32 blk_len; __u64 seq_num; struct tpacket_bd_ts ts_first_pkt; struct tpacket_bd_ts ts_last_pkt; }; union tpacket_bd_header_u { struct tpacket_hdr_v1 bh1; }; struct tpacket_block_desc { __u32 version; __u32 offset_to_priv; union tpacket_bd_header_u hdr; }; struct tpacket_hdr_variant1 { __u32 tp_rxhash; __u32 tp_vlan_tci; __u16 tp_vlan_tpid; __u16 tp_padding; }; struct tpacket3_hdr { __u32 tp_next_offset; __u32 tp_sec; __u32 tp_nsec; __u32 tp_snaplen; __u32 tp_len; __u32 tp_status; __u16 tp_mac; __u16 tp_net; union { struct tpacket_hdr_variant1 hv1; }; __u8 tp_padding[8]; }; struct sockaddr_ll { unsigned short sll_family; __be16 sll_protocol; int sll_ifindex; unsigned short sll_hatype; unsigned char sll_pkttype; unsigned char sll_halen; unsigned char sll_addr[8]; }; struct sockaddr_pkt { unsigned short spkt_family; unsigned char spkt_device[14]; __be16 spkt_protocol; }; struct packet_skb_cb { union { struct sockaddr_pkt pkt; union { unsigned int origlen; struct sockaddr_ll ll; }; } sa; }; struct tpacket_hdr; struct tpacket2_hdr; union tpacket_uhdr { struct tpacket_hdr *h1; struct tpacket2_hdr *h2; struct tpacket3_hdr *h3; void *raw; }; struct tpacket_hdr { unsigned long tp_status; unsigned int tp_len; unsigned int tp_snaplen; unsigned short tp_mac; unsigned short tp_net; unsigned int tp_sec; unsigned int tp_usec; }; struct tpacket2_hdr { __u32 tp_status; __u32 tp_len; __u32 tp_snaplen; __u16 tp_mac; __u16 tp_net; __u32 tp_sec; __u32 tp_nsec; __u16 tp_vlan_tci; __u16 tp_vlan_tpid; __u8 tp_padding[4]; }; struct virtio_net_hdr_mrg_rxbuf { struct virtio_net_hdr hdr; __virtio16 num_buffers; }; struct tpacket_req { unsigned int tp_block_size; unsigned int tp_block_nr; unsigned int tp_frame_size; unsigned int tp_frame_nr; }; struct tpacket_req3 { unsigned int tp_block_size; unsigned int tp_block_nr; unsigned int tp_frame_size; unsigned int tp_frame_nr; unsigned int tp_retire_blk_tov; unsigned int tp_sizeof_priv; unsigned int tp_feature_req_word; }; union tpacket_req_u { struct tpacket_req req; struct tpacket_req3 req3; }; struct fanout_args { __u16 id; __u16 type_flags; __u32 max_num_members; }; struct packet_mreq_max { int mr_ifindex; unsigned short mr_type; unsigned short mr_alen; unsigned char mr_address[32]; }; struct tpacket_rollover_stats { __u64 tp_all; __u64 tp_huge; __u64 tp_failed; }; struct tpacket_auxdata { __u32 tp_status; __u32 tp_len; __u32 tp_snaplen; __u16 tp_mac; __u16 tp_net; __u16 tp_vlan_tci; __u16 tp_vlan_tpid; 
}; struct sadb_msg; typedef int (*pfkey_handler)(struct sock *, struct sk_buff *, const struct sadb_msg *, void * const *); struct sadb_msg { __u8 sadb_msg_version; __u8 sadb_msg_type; __u8 sadb_msg_errno; __u8 sadb_msg_satype; __u16 sadb_msg_len; __u16 sadb_msg_reserved; __u32 sadb_msg_seq; __u32 sadb_msg_pid; }; enum { IPSEC_MODE_ANY = 0, IPSEC_MODE_TRANSPORT = 1, IPSEC_MODE_TUNNEL = 2, IPSEC_MODE_BEET = 3, }; enum { IPSEC_POLICY_DISCARD = 0, IPSEC_POLICY_NONE = 1, IPSEC_POLICY_IPSEC = 2, IPSEC_POLICY_ENTRUST = 3, IPSEC_POLICY_BYPASS = 4, }; enum { IPSEC_DIR_ANY = 0, IPSEC_DIR_INBOUND = 1, IPSEC_DIR_OUTBOUND = 2, IPSEC_DIR_FWD = 3, IPSEC_DIR_MAX = 4, IPSEC_DIR_INVALID = 5, }; enum { IPSEC_LEVEL_DEFAULT = 0, IPSEC_LEVEL_USE = 1, IPSEC_LEVEL_REQUIRE = 2, IPSEC_LEVEL_UNIQUE = 3, }; struct pfkey_sock { struct sock sk; int registered; int promisc; struct { uint8_t msg_version; uint32_t msg_portid; int (*dump)(struct pfkey_sock *); void (*done)(struct pfkey_sock *); union { struct xfrm_policy_walk policy; struct xfrm_state_walk state; } u; struct sk_buff *skb; } dump; struct mutex dump_lock; }; struct sadb_x_policy { __u16 sadb_x_policy_len; __u16 sadb_x_policy_exttype; __u16 sadb_x_policy_type; __u8 sadb_x_policy_dir; __u8 sadb_x_policy_reserved; __u32 sadb_x_policy_id; __u32 sadb_x_policy_priority; }; struct sadb_x_sec_ctx { __u16 sadb_x_sec_len; __u16 sadb_x_sec_exttype; __u8 sadb_x_ctx_alg; __u8 sadb_x_ctx_doi; __u16 sadb_x_ctx_len; }; struct sadb_ext { __u16 sadb_ext_len; __u16 sadb_ext_type; }; struct sadb_key { __u16 sadb_key_len; __u16 sadb_key_exttype; __u16 sadb_key_bits; __u16 sadb_key_reserved; }; struct sadb_address { __u16 sadb_address_len; __u16 sadb_address_exttype; __u8 sadb_address_proto; __u8 sadb_address_prefixlen; __u16 sadb_address_reserved; }; struct sadb_prop { __u16 sadb_prop_len; __u16 sadb_prop_exttype; __u8 sadb_prop_replay; __u8 sadb_prop_reserved[3]; }; struct sadb_comb { __u8 sadb_comb_auth; __u8 sadb_comb_encrypt; __u16 sadb_comb_flags; __u16 sadb_comb_auth_minbits; __u16 sadb_comb_auth_maxbits; __u16 sadb_comb_encrypt_minbits; __u16 sadb_comb_encrypt_maxbits; __u32 sadb_comb_reserved; __u32 sadb_comb_soft_allocations; __u32 sadb_comb_hard_allocations; __u64 sadb_comb_soft_bytes; __u64 sadb_comb_hard_bytes; __u64 sadb_comb_soft_addtime; __u64 sadb_comb_hard_addtime; __u64 sadb_comb_soft_usetime; __u64 sadb_comb_hard_usetime; }; struct sadb_x_ipsecrequest { __u16 sadb_x_ipsecrequest_len; __u16 sadb_x_ipsecrequest_proto; __u8 sadb_x_ipsecrequest_mode; __u8 sadb_x_ipsecrequest_level; __u16 sadb_x_ipsecrequest_reserved1; __u32 sadb_x_ipsecrequest_reqid; __u32 sadb_x_ipsecrequest_reserved2; }; struct netns_pfkey { struct hlist_head table; atomic_t socks_nr; }; struct sadb_sa { __u16 sadb_sa_len; __u16 sadb_sa_exttype; __be32 sadb_sa_spi; __u8 sadb_sa_replay; __u8 sadb_sa_state; __u8 sadb_sa_auth; __u8 sadb_sa_encrypt; __u32 sadb_sa_flags; }; struct sadb_lifetime { __u16 sadb_lifetime_len; __u16 sadb_lifetime_exttype; __u32 sadb_lifetime_allocations; __u64 sadb_lifetime_bytes; __u64 sadb_lifetime_addtime; __u64 sadb_lifetime_usetime; }; struct sadb_x_sa2 { __u16 sadb_x_sa2_len; __u16 sadb_x_sa2_exttype; __u8 sadb_x_sa2_mode; __u8 sadb_x_sa2_reserved1; __u16 sadb_x_sa2_reserved2; __u32 sadb_x_sa2_sequence; __u32 sadb_x_sa2_reqid; }; struct sadb_x_nat_t_type { __u16 sadb_x_nat_t_type_len; __u16 sadb_x_nat_t_type_exttype; __u8 sadb_x_nat_t_type_type; __u8 sadb_x_nat_t_type_reserved[3]; }; struct sadb_x_nat_t_port { __u16 sadb_x_nat_t_port_len; __u16 
sadb_x_nat_t_port_exttype; __be16 sadb_x_nat_t_port_port; __u16 sadb_x_nat_t_port_reserved; }; struct sadb_supported { __u16 sadb_supported_len; __u16 sadb_supported_exttype; __u32 sadb_supported_reserved; }; struct sadb_spirange { __u16 sadb_spirange_len; __u16 sadb_spirange_exttype; __u32 sadb_spirange_min; __u32 sadb_spirange_max; __u32 sadb_spirange_reserved; }; struct sadb_x_filter { __u16 sadb_x_filter_len; __u16 sadb_x_filter_exttype; __u32 sadb_x_filter_saddr[4]; __u32 sadb_x_filter_daddr[4]; __u16 sadb_x_filter_family; __u8 sadb_x_filter_splen; __u8 sadb_x_filter_dplen; }; enum br_boolopt_id { BR_BOOLOPT_NO_LL_LEARN = 0, BR_BOOLOPT_MCAST_VLAN_SNOOPING = 1, BR_BOOLOPT_MST_ENABLE = 2, BR_BOOLOPT_MAX = 3, }; enum net_bridge_opts { BROPT_VLAN_ENABLED = 0, BROPT_VLAN_STATS_ENABLED = 1, BROPT_NF_CALL_IPTABLES = 2, BROPT_NF_CALL_IP6TABLES = 3, BROPT_NF_CALL_ARPTABLES = 4, BROPT_GROUP_ADDR_SET = 5, BROPT_MULTICAST_ENABLED = 6, BROPT_MULTICAST_QUERY_USE_IFADDR = 7, BROPT_MULTICAST_STATS_ENABLED = 8, BROPT_HAS_IPV6_ADDR = 9, BROPT_NEIGH_SUPPRESS_ENABLED = 10, BROPT_MTU_SET_BY_USER = 11, BROPT_VLAN_STATS_PER_PORT = 12, BROPT_NO_LL_LEARN = 13, BROPT_VLAN_BRIDGE_BINDING = 14, BROPT_MCAST_VLAN_SNOOPING_ENABLED = 15, BROPT_MST_ENABLED = 16, }; enum switchdev_notifier_type { SWITCHDEV_FDB_ADD_TO_BRIDGE = 1, SWITCHDEV_FDB_DEL_TO_BRIDGE = 2, SWITCHDEV_FDB_ADD_TO_DEVICE = 3, SWITCHDEV_FDB_DEL_TO_DEVICE = 4, SWITCHDEV_FDB_OFFLOADED = 5, SWITCHDEV_FDB_FLUSH_TO_BRIDGE = 6, SWITCHDEV_PORT_OBJ_ADD = 7, SWITCHDEV_PORT_OBJ_DEL = 8, SWITCHDEV_PORT_ATTR_SET = 9, SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE = 10, SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE = 11, SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE = 12, SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE = 13, SWITCHDEV_VXLAN_FDB_OFFLOADED = 14, SWITCHDEV_BRPORT_OFFLOADED = 15, SWITCHDEV_BRPORT_UNOFFLOADED = 16, SWITCHDEV_BRPORT_REPLAY = 17, }; struct br_boolopt_multi { __u32 optval; __u32 optmask; }; struct nf_br_ops { int (*br_dev_xmit_hook)(struct sk_buff *); }; enum br_pkt_type { BR_PKT_UNICAST = 0, BR_PKT_MULTICAST = 1, BR_PKT_BROADCAST = 2, }; struct net_bridge_mcast_gc { struct hlist_node gc_node; void (*destroy)(struct net_bridge_mcast_gc *); }; struct net_bridge_port_group; struct net_bridge_mdb_entry { struct rhash_head rhnode; struct net_bridge *br; struct net_bridge_port_group __attribute__((btf_type_tag("rcu"))) *ports; struct br_ip addr; bool host_joined; struct timer_list timer; struct hlist_node mdb_node; struct net_bridge_mcast_gc mcast_gc; struct callback_head rcu; }; struct net_bridge_port_group_sg_key { struct net_bridge_port *port; struct br_ip addr; }; struct net_bridge_port_group { struct net_bridge_port_group __attribute__((btf_type_tag("rcu"))) *next; struct net_bridge_port_group_sg_key key; unsigned char eth_addr[6]; unsigned char flags; unsigned char filter_mode; unsigned char grp_query_rexmit_cnt; unsigned char rt_protocol; struct hlist_head src_list; unsigned int src_ents; struct timer_list timer; struct timer_list rexmit_timer; struct hlist_node mglist; struct rb_root eht_set_tree; struct rb_root eht_host_tree; struct rhash_head rhnode; struct net_bridge_mcast_gc mcast_gc; struct callback_head rcu; }; enum { BR_FDB_LOCAL = 0, BR_FDB_STATIC = 1, BR_FDB_STICKY = 2, BR_FDB_ADDED_BY_USER = 3, BR_FDB_ADDED_BY_EXT_LEARN = 4, BR_FDB_OFFLOADED = 5, BR_FDB_NOTIFY = 6, BR_FDB_NOTIFY_INACTIVE = 7, BR_FDB_LOCKED = 8, }; enum { NFEA_UNSPEC = 0, NFEA_ACTIVITY_NOTIFY = 1, NFEA_DONT_REFRESH = 2, __NFEA_MAX = 3, }; enum { FDB_NOTIFY_BIT = 1, FDB_NOTIFY_INACTIVE_BIT = 2, }; struct 
net_bridge_vlan_group { struct rhashtable vlan_hash; struct rhashtable tunnel_hash; struct list_head vlan_list; u16 num_vlans; u16 pvid; u8 pvid_state; }; struct net_bridge_fdb_flush_desc { unsigned long flags; unsigned long flags_mask; int port_ifindex; u16 vlan_id; }; struct __fdb_entry { __u8 mac_addr[6]; __u8 port_no; __u8 is_local; __u32 ageing_timer_value; __u8 port_hi; __u8 pad0; __u16 unused; }; enum { BR_MCAST_DIR_RX = 0, BR_MCAST_DIR_TX = 1, BR_MCAST_DIR_SIZE = 2, }; enum { MDB_RTR_TYPE_DISABLED = 0, MDB_RTR_TYPE_TEMP_QUERY = 1, MDB_RTR_TYPE_PERM = 2, MDB_RTR_TYPE_TEMP = 3, }; struct br_frame_type { __be16 type; int (*frame_handler)(struct net_bridge_port *, struct sk_buff *); struct hlist_node list; }; struct __port_info { __u64 designated_root; __u64 designated_bridge; __u16 port_id; __u16 designated_port; __u32 path_cost; __u32 designated_cost; __u8 state; __u8 top_change_ack; __u8 config_pending; __u8 unused0; __u32 message_age_timer_value; __u32 forward_delay_timer_value; __u32 hold_timer_value; }; struct __bridge_info { __u64 designated_root; __u64 bridge_id; __u32 root_path_cost; __u32 max_age; __u32 hello_time; __u32 forward_delay; __u32 bridge_max_age; __u32 bridge_hello_time; __u32 bridge_forward_delay; __u8 topology_change; __u8 topology_change_detected; __u8 root_port; __u8 stp_enabled; __u32 ageing_time; __u32 gc_interval; __u32 hello_timer_value; __u32 tcn_timer_value; __u32 topology_change_timer_value; __u32 gc_timer_value; }; enum switchdev_attr_id { SWITCHDEV_ATTR_ID_UNDEFINED = 0, SWITCHDEV_ATTR_ID_PORT_STP_STATE = 1, SWITCHDEV_ATTR_ID_PORT_MST_STATE = 2, SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS = 3, SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS = 4, SWITCHDEV_ATTR_ID_PORT_MROUTER = 5, SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME = 6, SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING = 7, SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL = 8, SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED = 9, SWITCHDEV_ATTR_ID_BRIDGE_MROUTER = 10, SWITCHDEV_ATTR_ID_BRIDGE_MST = 11, SWITCHDEV_ATTR_ID_MRP_PORT_ROLE = 12, SWITCHDEV_ATTR_ID_VLAN_MSTI = 13, }; struct switchdev_mst_state { u16 msti; u8 state; }; struct switchdev_brport_flags { unsigned long val; unsigned long mask; }; struct switchdev_vlan_msti { u16 vid; u16 msti; }; struct switchdev_attr { struct net_device *orig_dev; enum switchdev_attr_id id; u32 flags; void *complete_priv; void (*complete)(struct net_device *, int, void *); union { u8 stp_state; struct switchdev_mst_state mst_state; struct switchdev_brport_flags brport_flags; bool mrouter; clock_t ageing_time; bool vlan_filtering; u16 vlan_protocol; bool mst; bool mc_disabled; u8 mrp_port_role; struct switchdev_vlan_msti vlan_msti; } u; }; struct br_config_bpdu { unsigned int topology_change: 1; unsigned int topology_change_ack: 1; bridge_id root; int root_path_cost; bridge_id bridge_id; port_id port_id; int message_age; int max_age; int hello_time; int forward_delay; }; enum { BR_GROUPFWD_STP = 1, BR_GROUPFWD_MACPAUSE = 2, BR_GROUPFWD_LACP = 4, }; enum { IFLA_BR_UNSPEC = 0, IFLA_BR_FORWARD_DELAY = 1, IFLA_BR_HELLO_TIME = 2, IFLA_BR_MAX_AGE = 3, IFLA_BR_AGEING_TIME = 4, IFLA_BR_STP_STATE = 5, IFLA_BR_PRIORITY = 6, IFLA_BR_VLAN_FILTERING = 7, IFLA_BR_VLAN_PROTOCOL = 8, IFLA_BR_GROUP_FWD_MASK = 9, IFLA_BR_ROOT_ID = 10, IFLA_BR_BRIDGE_ID = 11, IFLA_BR_ROOT_PORT = 12, IFLA_BR_ROOT_PATH_COST = 13, IFLA_BR_TOPOLOGY_CHANGE = 14, IFLA_BR_TOPOLOGY_CHANGE_DETECTED = 15, IFLA_BR_HELLO_TIMER = 16, IFLA_BR_TCN_TIMER = 17, IFLA_BR_TOPOLOGY_CHANGE_TIMER = 18, IFLA_BR_GC_TIMER = 19, IFLA_BR_GROUP_ADDR = 20, IFLA_BR_FDB_FLUSH = 
21, IFLA_BR_MCAST_ROUTER = 22, IFLA_BR_MCAST_SNOOPING = 23, IFLA_BR_MCAST_QUERY_USE_IFADDR = 24, IFLA_BR_MCAST_QUERIER = 25, IFLA_BR_MCAST_HASH_ELASTICITY = 26, IFLA_BR_MCAST_HASH_MAX = 27, IFLA_BR_MCAST_LAST_MEMBER_CNT = 28, IFLA_BR_MCAST_STARTUP_QUERY_CNT = 29, IFLA_BR_MCAST_LAST_MEMBER_INTVL = 30, IFLA_BR_MCAST_MEMBERSHIP_INTVL = 31, IFLA_BR_MCAST_QUERIER_INTVL = 32, IFLA_BR_MCAST_QUERY_INTVL = 33, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL = 34, IFLA_BR_MCAST_STARTUP_QUERY_INTVL = 35, IFLA_BR_NF_CALL_IPTABLES = 36, IFLA_BR_NF_CALL_IP6TABLES = 37, IFLA_BR_NF_CALL_ARPTABLES = 38, IFLA_BR_VLAN_DEFAULT_PVID = 39, IFLA_BR_PAD = 40, IFLA_BR_VLAN_STATS_ENABLED = 41, IFLA_BR_MCAST_STATS_ENABLED = 42, IFLA_BR_MCAST_IGMP_VERSION = 43, IFLA_BR_MCAST_MLD_VERSION = 44, IFLA_BR_VLAN_STATS_PER_PORT = 45, IFLA_BR_MULTI_BOOLOPT = 46, IFLA_BR_MCAST_QUERIER_STATE = 47, __IFLA_BR_MAX = 48, }; enum { LINK_XSTATS_TYPE_UNSPEC = 0, LINK_XSTATS_TYPE_BRIDGE = 1, LINK_XSTATS_TYPE_BOND = 2, __LINK_XSTATS_TYPE_MAX = 3, }; enum { BRIDGE_XSTATS_UNSPEC = 0, BRIDGE_XSTATS_VLAN = 1, BRIDGE_XSTATS_MCAST = 2, BRIDGE_XSTATS_PAD = 3, BRIDGE_XSTATS_STP = 4, __BRIDGE_XSTATS_MAX = 5, }; struct bridge_vlan_info { __u16 flags; __u16 vid; }; struct vtunnel_info { u32 tunid; u16 vid; u16 flags; }; enum { IFLA_BRIDGE_VLAN_TUNNEL_UNSPEC = 0, IFLA_BRIDGE_VLAN_TUNNEL_ID = 1, IFLA_BRIDGE_VLAN_TUNNEL_VID = 2, IFLA_BRIDGE_VLAN_TUNNEL_FLAGS = 3, __IFLA_BRIDGE_VLAN_TUNNEL_MAX = 4, }; enum { BR_VLFLAG_PER_PORT_STATS = 1, BR_VLFLAG_ADDED_BY_SWITCHDEV = 2, BR_VLFLAG_MCAST_ENABLED = 4, BR_VLFLAG_GLOBAL_MCAST_ENABLED = 8, BR_VLFLAG_NEIGH_SUPPRESS_ENABLED = 16, }; struct brport_attribute { struct attribute attr; ssize_t (*show)(struct net_bridge_port *, char *); int (*store)(struct net_bridge_port *, unsigned long); int (*store_raw)(struct net_bridge_port *, char *); }; enum { BRIDGE_QUERIER_UNSPEC = 0, BRIDGE_QUERIER_IP_ADDRESS = 1, BRIDGE_QUERIER_IP_PORT = 2, BRIDGE_QUERIER_IP_OTHER_TIMER = 3, BRIDGE_QUERIER_PAD = 4, BRIDGE_QUERIER_IPV6_ADDRESS = 5, BRIDGE_QUERIER_IPV6_PORT = 6, BRIDGE_QUERIER_IPV6_OTHER_TIMER = 7, __BRIDGE_QUERIER_MAX = 8, }; enum { PIM_TYPE_HELLO = 0, PIM_TYPE_REGISTER = 1, PIM_TYPE_REGISTER_STOP = 2, PIM_TYPE_JOIN_PRUNE = 3, PIM_TYPE_BOOTSTRAP = 4, PIM_TYPE_ASSERT = 5, PIM_TYPE_GRAFT = 6, PIM_TYPE_GRAFT_ACK = 7, PIM_TYPE_CANDIDATE_RP_ADV = 8, }; struct net_bridge_group_src { struct hlist_node node; struct br_ip addr; struct net_bridge_port_group *pg; u8 flags; u8 src_query_rexmit_cnt; struct timer_list timer; struct net_bridge *br; struct net_bridge_mcast_gc mcast_gc; struct callback_head rcu; }; struct pimhdr { __u8 type; __u8 reserved; __be16 csum; }; struct br_ip_list { struct list_head list; struct br_ip addr; }; enum { MDBA_UNSPEC = 0, MDBA_MDB = 1, MDBA_ROUTER = 2, __MDBA_MAX = 3, }; enum { MDBA_ROUTER_UNSPEC = 0, MDBA_ROUTER_PORT = 1, __MDBA_ROUTER_MAX = 2, }; enum { MDBA_ROUTER_PATTR_UNSPEC = 0, MDBA_ROUTER_PATTR_TIMER = 1, MDBA_ROUTER_PATTR_TYPE = 2, MDBA_ROUTER_PATTR_INET_TIMER = 3, MDBA_ROUTER_PATTR_INET6_TIMER = 4, MDBA_ROUTER_PATTR_VID = 5, __MDBA_ROUTER_PATTR_MAX = 6, }; enum { MDBA_MDB_UNSPEC = 0, MDBA_MDB_ENTRY = 1, __MDBA_MDB_MAX = 2, }; enum { MDBA_MDB_ENTRY_UNSPEC = 0, MDBA_MDB_ENTRY_INFO = 1, __MDBA_MDB_ENTRY_MAX = 2, }; enum { MDBA_MDB_EATTR_UNSPEC = 0, MDBA_MDB_EATTR_TIMER = 1, MDBA_MDB_EATTR_SRC_LIST = 2, MDBA_MDB_EATTR_GROUP_MODE = 3, MDBA_MDB_EATTR_SOURCE = 4, MDBA_MDB_EATTR_RTPROT = 5, MDBA_MDB_EATTR_DST = 6, MDBA_MDB_EATTR_DST_PORT = 7, MDBA_MDB_EATTR_VNI = 8, MDBA_MDB_EATTR_IFINDEX = 9, 
MDBA_MDB_EATTR_SRC_VNI = 10, __MDBA_MDB_EATTR_MAX = 11, }; enum { MDBA_MDB_SRCLIST_UNSPEC = 0, MDBA_MDB_SRCLIST_ENTRY = 1, __MDBA_MDB_SRCLIST_MAX = 2, }; enum { MDBA_MDB_SRCATTR_UNSPEC = 0, MDBA_MDB_SRCATTR_ADDRESS = 1, MDBA_MDB_SRCATTR_TIMER = 2, __MDBA_MDB_SRCATTR_MAX = 3, }; enum { MDBE_ATTR_UNSPEC = 0, MDBE_ATTR_SOURCE = 1, MDBE_ATTR_SRC_LIST = 2, MDBE_ATTR_GROUP_MODE = 3, MDBE_ATTR_RTPROT = 4, MDBE_ATTR_DST = 5, MDBE_ATTR_DST_PORT = 6, MDBE_ATTR_VNI = 7, MDBE_ATTR_IFINDEX = 8, MDBE_ATTR_SRC_VNI = 9, __MDBE_ATTR_MAX = 10, }; enum { MDBE_SRCATTR_UNSPEC = 0, MDBE_SRCATTR_ADDRESS = 1, __MDBE_SRCATTR_MAX = 2, }; struct br_mdb_src_entry; struct br_mdb_config { struct net_bridge *br; struct net_bridge_port *p; struct br_mdb_entry *entry; struct br_ip group; bool src_entry; u8 filter_mode; u16 nlflags; struct br_mdb_src_entry *src_entries; int num_src_entries; u8 rt_protocol; }; struct br_mdb_src_entry { struct br_ip addr; }; union net_bridge_eht_addr { __be32 ip4; struct in6_addr ip6; }; struct net_bridge_group_eht_set { struct rb_node rb_node; union net_bridge_eht_addr src_addr; struct rb_root entry_tree; struct timer_list timer; struct net_bridge_port_group *pg; struct net_bridge *br; struct net_bridge_mcast_gc mcast_gc; }; struct net_bridge_group_eht_host; struct net_bridge_group_eht_set_entry { struct rb_node rb_node; struct hlist_node host_list; union net_bridge_eht_addr h_addr; struct timer_list timer; struct net_bridge *br; struct net_bridge_group_eht_set *eht_set; struct net_bridge_group_eht_host *h_parent; struct net_bridge_mcast_gc mcast_gc; }; struct net_bridge_group_eht_host { struct rb_node rb_node; union net_bridge_eht_addr h_addr; struct hlist_head set_entries; unsigned int num_entries; unsigned char filter_mode; struct net_bridge_port_group *pg; }; struct devlink_dev_stats { u32 reload_stats[6]; u32 remote_reload_stats[6]; }; struct devlink_dpipe_headers; struct devlink_ops; struct devlink { u32 index; struct xarray ports; struct list_head rate_list; struct list_head sb_list; struct list_head dpipe_table_list; struct list_head resource_list; struct xarray params; struct list_head region_list; struct list_head reporter_list; struct devlink_dpipe_headers *dpipe_headers; struct list_head trap_list; struct list_head trap_group_list; struct list_head trap_policer_list; struct list_head linecard_list; const struct devlink_ops *ops; struct xarray snapshot_ids; struct devlink_dev_stats stats; struct device *dev; possible_net_t _net; struct mutex lock; struct lock_class_key lock_key; u8 reload_failed: 1; refcount_t refcount; struct rcu_work rwork; long: 64; long: 64; long: 64; char priv[0]; }; struct devlink_dpipe_header; struct devlink_dpipe_headers { struct devlink_dpipe_header **headers; unsigned int headers_count; }; struct devlink_dpipe_field; struct devlink_dpipe_header { const char *name; unsigned int id; struct devlink_dpipe_field *fields; unsigned int fields_count; bool global; }; enum devlink_dpipe_field_mapping_type { DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0, DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 1, }; struct devlink_dpipe_field { const char *name; unsigned int id; unsigned int bitwidth; enum devlink_dpipe_field_mapping_type mapping_type; }; enum devlink_reload_action { DEVLINK_RELOAD_ACTION_UNSPEC = 0, DEVLINK_RELOAD_ACTION_DRIVER_REINIT = 1, DEVLINK_RELOAD_ACTION_FW_ACTIVATE = 2, __DEVLINK_RELOAD_ACTION_MAX = 3, DEVLINK_RELOAD_ACTION_MAX = 2, }; enum devlink_reload_limit { DEVLINK_RELOAD_LIMIT_UNSPEC = 0, DEVLINK_RELOAD_LIMIT_NO_RESET = 1, __DEVLINK_RELOAD_LIMIT_MAX 
= 2, DEVLINK_RELOAD_LIMIT_MAX = 1, }; enum devlink_sb_threshold_type { DEVLINK_SB_THRESHOLD_TYPE_STATIC = 0, DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC = 1, }; enum devlink_sb_pool_type { DEVLINK_SB_POOL_TYPE_INGRESS = 0, DEVLINK_SB_POOL_TYPE_EGRESS = 1, }; enum devlink_eswitch_encap_mode { DEVLINK_ESWITCH_ENCAP_MODE_NONE = 0, DEVLINK_ESWITCH_ENCAP_MODE_BASIC = 1, }; enum devlink_trap_action { DEVLINK_TRAP_ACTION_DROP = 0, DEVLINK_TRAP_ACTION_TRAP = 1, DEVLINK_TRAP_ACTION_MIRROR = 2, }; enum devlink_selftest_status { DEVLINK_SELFTEST_STATUS_SKIP = 0, DEVLINK_SELFTEST_STATUS_PASS = 1, DEVLINK_SELFTEST_STATUS_FAIL = 2, }; struct devlink_sb_pool_info; struct devlink_info_req; struct devlink_flash_update_params; struct devlink_trap; struct devlink_trap_group; struct devlink_trap_policer; struct devlink_port_new_attrs; struct devlink_ops { u32 supported_flash_update_params; unsigned long reload_actions; unsigned long reload_limits; int (*reload_down)(struct devlink *, bool, enum devlink_reload_action, enum devlink_reload_limit, struct netlink_ext_ack *); int (*reload_up)(struct devlink *, enum devlink_reload_action, enum devlink_reload_limit, u32 *, struct netlink_ext_ack *); int (*sb_pool_get)(struct devlink *, unsigned int, u16, struct devlink_sb_pool_info *); int (*sb_pool_set)(struct devlink *, unsigned int, u16, u32, enum devlink_sb_threshold_type, struct netlink_ext_ack *); int (*sb_port_pool_get)(struct devlink_port *, unsigned int, u16, u32 *); int (*sb_port_pool_set)(struct devlink_port *, unsigned int, u16, u32, struct netlink_ext_ack *); int (*sb_tc_pool_bind_get)(struct devlink_port *, unsigned int, u16, enum devlink_sb_pool_type, u16 *, u32 *); int (*sb_tc_pool_bind_set)(struct devlink_port *, unsigned int, u16, enum devlink_sb_pool_type, u16, u32, struct netlink_ext_ack *); int (*sb_occ_snapshot)(struct devlink *, unsigned int); int (*sb_occ_max_clear)(struct devlink *, unsigned int); int (*sb_occ_port_pool_get)(struct devlink_port *, unsigned int, u16, u32 *, u32 *); int (*sb_occ_tc_port_bind_get)(struct devlink_port *, unsigned int, u16, enum devlink_sb_pool_type, u32 *, u32 *); int (*eswitch_mode_get)(struct devlink *, u16 *); int (*eswitch_mode_set)(struct devlink *, u16, struct netlink_ext_ack *); int (*eswitch_inline_mode_get)(struct devlink *, u8 *); int (*eswitch_inline_mode_set)(struct devlink *, u8, struct netlink_ext_ack *); int (*eswitch_encap_mode_get)(struct devlink *, enum devlink_eswitch_encap_mode *); int (*eswitch_encap_mode_set)(struct devlink *, enum devlink_eswitch_encap_mode, struct netlink_ext_ack *); int (*info_get)(struct devlink *, struct devlink_info_req *, struct netlink_ext_ack *); int (*flash_update)(struct devlink *, struct devlink_flash_update_params *, struct netlink_ext_ack *); int (*trap_init)(struct devlink *, const struct devlink_trap *, void *); void (*trap_fini)(struct devlink *, const struct devlink_trap *, void *); int (*trap_action_set)(struct devlink *, const struct devlink_trap *, enum devlink_trap_action, struct netlink_ext_ack *); int (*trap_group_init)(struct devlink *, const struct devlink_trap_group *); int (*trap_group_set)(struct devlink *, const struct devlink_trap_group *, const struct devlink_trap_policer *, struct netlink_ext_ack *); int (*trap_group_action_set)(struct devlink *, const struct devlink_trap_group *, enum devlink_trap_action, struct netlink_ext_ack *); int (*trap_drop_counter_get)(struct devlink *, const struct devlink_trap *, u64 *); int (*trap_policer_init)(struct devlink *, const struct devlink_trap_policer *); void 
(*trap_policer_fini)(struct devlink *, const struct devlink_trap_policer *); int (*trap_policer_set)(struct devlink *, const struct devlink_trap_policer *, u64, u64, struct netlink_ext_ack *); int (*trap_policer_counter_get)(struct devlink *, const struct devlink_trap_policer *, u64 *); int (*port_new)(struct devlink *, const struct devlink_port_new_attrs *, struct netlink_ext_ack *, struct devlink_port **); int (*rate_leaf_tx_share_set)(struct devlink_rate *, void *, u64, struct netlink_ext_ack *); int (*rate_leaf_tx_max_set)(struct devlink_rate *, void *, u64, struct netlink_ext_ack *); int (*rate_leaf_tx_priority_set)(struct devlink_rate *, void *, u32, struct netlink_ext_ack *); int (*rate_leaf_tx_weight_set)(struct devlink_rate *, void *, u32, struct netlink_ext_ack *); int (*rate_node_tx_share_set)(struct devlink_rate *, void *, u64, struct netlink_ext_ack *); int (*rate_node_tx_max_set)(struct devlink_rate *, void *, u64, struct netlink_ext_ack *); int (*rate_node_tx_priority_set)(struct devlink_rate *, void *, u32, struct netlink_ext_ack *); int (*rate_node_tx_weight_set)(struct devlink_rate *, void *, u32, struct netlink_ext_ack *); int (*rate_node_new)(struct devlink_rate *, void **, struct netlink_ext_ack *); int (*rate_node_del)(struct devlink_rate *, void *, struct netlink_ext_ack *); int (*rate_leaf_parent_set)(struct devlink_rate *, struct devlink_rate *, void *, void *, struct netlink_ext_ack *); int (*rate_node_parent_set)(struct devlink_rate *, struct devlink_rate *, void *, void *, struct netlink_ext_ack *); bool (*selftest_check)(struct devlink *, unsigned int, struct netlink_ext_ack *); enum devlink_selftest_status (*selftest_run)(struct devlink *, unsigned int, struct netlink_ext_ack *); }; struct devlink_sb_pool_info { enum devlink_sb_pool_type pool_type; u32 size; enum devlink_sb_threshold_type threshold_type; u32 cell_size; }; struct devlink_flash_update_params { const struct firmware *fw; const char *component; u32 overwrite_mask; }; enum devlink_trap_type { DEVLINK_TRAP_TYPE_DROP = 0, DEVLINK_TRAP_TYPE_EXCEPTION = 1, DEVLINK_TRAP_TYPE_CONTROL = 2, }; struct devlink_trap { enum devlink_trap_type type; enum devlink_trap_action init_action; bool generic; u16 id; const char *name; u16 init_group_id; u32 metadata_cap; }; struct devlink_trap_group { const char *name; u16 id; bool generic; u32 init_policer_id; }; struct devlink_trap_policer { u32 id; u64 init_rate; u64 init_burst; u64 max_rate; u64 min_rate; u64 max_burst; u64 min_burst; }; struct devlink_port_new_attrs { enum devlink_port_flavour flavour; unsigned int port_index; u32 controller; u32 sfnum; u16 pfnum; u8 port_index_valid: 1; u8 controller_valid: 1; u8 sfnum_valid: 1; }; struct ib_core_device { struct device dev; possible_net_t rdma_net; struct kobject *ports_kobj; struct list_head port_list; struct ib_device *owner; }; enum rdma_driver_id { RDMA_DRIVER_UNKNOWN = 0, RDMA_DRIVER_MLX5 = 1, RDMA_DRIVER_MLX4 = 2, RDMA_DRIVER_CXGB3 = 3, RDMA_DRIVER_CXGB4 = 4, RDMA_DRIVER_MTHCA = 5, RDMA_DRIVER_BNXT_RE = 6, RDMA_DRIVER_OCRDMA = 7, RDMA_DRIVER_NES = 8, RDMA_DRIVER_I40IW = 9, RDMA_DRIVER_IRDMA = 9, RDMA_DRIVER_VMW_PVRDMA = 10, RDMA_DRIVER_QEDR = 11, RDMA_DRIVER_HNS = 12, RDMA_DRIVER_USNIC = 13, RDMA_DRIVER_RXE = 14, RDMA_DRIVER_HFI1 = 15, RDMA_DRIVER_QIB = 16, RDMA_DRIVER_EFA = 17, RDMA_DRIVER_SIW = 18, RDMA_DRIVER_ERDMA = 19, RDMA_DRIVER_MANA = 20, }; enum ib_cq_notify_flags { IB_CQ_SOLICITED = 1, IB_CQ_NEXT_COMP = 2, IB_CQ_SOLICITED_MASK = 3, IB_CQ_REPORT_MISSED_EVENTS = 4, }; struct ib_mad; enum 
rdma_link_layer { IB_LINK_LAYER_UNSPECIFIED = 0, IB_LINK_LAYER_INFINIBAND = 1, IB_LINK_LAYER_ETHERNET = 2, }; enum rdma_netdev_t { RDMA_NETDEV_OPA_VNIC = 0, RDMA_NETDEV_IPOIB = 1, }; enum ib_srq_attr_mask { IB_SRQ_MAX_WR = 1, IB_SRQ_LIMIT = 2, }; enum ib_mr_type { IB_MR_TYPE_MEM_REG = 0, IB_MR_TYPE_SG_GAPS = 1, IB_MR_TYPE_DM = 2, IB_MR_TYPE_USER = 3, IB_MR_TYPE_DMA = 4, IB_MR_TYPE_INTEGRITY = 5, }; enum ib_uverbs_advise_mr_advice { IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH = 0, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE = 1, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT = 2, }; struct uverbs_attr_bundle; struct rdma_cm_id; struct iw_cm_id; struct iw_cm_conn_param; struct ib_qp; struct ib_send_wr; struct ib_recv_wr; struct ib_cq; struct ib_wc; struct ib_srq; struct ib_grh; struct ib_device_attr; struct ib_udata; struct ib_device_modify; struct ib_port_attr; struct ib_port_modify; struct ib_port_immutable; struct rdma_netdev_alloc_params; union ib_gid; struct ib_gid_attr; struct ib_ucontext; struct rdma_user_mmap_entry; struct ib_pd; struct ib_ah; struct rdma_ah_init_attr; struct rdma_ah_attr; struct ib_srq_init_attr; struct ib_srq_attr; struct ib_qp_init_attr; struct ib_qp_attr; struct ib_cq_init_attr; struct ib_mr; struct ib_sge; struct ib_mr_status; struct ib_mw; struct ib_xrcd; struct ib_flow; struct ib_flow_attr; struct ib_flow_action; struct ib_wq; struct ib_wq_init_attr; struct ib_wq_attr; struct ib_rwq_ind_table; struct ib_rwq_ind_table_init_attr; struct ib_dm; struct ib_dm_alloc_attr; struct ib_dm_mr_attr; struct ib_counters; struct ib_counters_read_attr; struct rdma_hw_stats; struct rdma_counter; struct ib_device_ops { struct module *owner; enum rdma_driver_id driver_id; u32 uverbs_abi_ver; unsigned int uverbs_no_driver_id_binding: 1; const struct attribute_group *device_group; const struct attribute_group **port_groups; int (*post_send)(struct ib_qp *, const struct ib_send_wr *, const struct ib_send_wr **); int (*post_recv)(struct ib_qp *, const struct ib_recv_wr *, const struct ib_recv_wr **); void (*drain_rq)(struct ib_qp *); void (*drain_sq)(struct ib_qp *); int (*poll_cq)(struct ib_cq *, int, struct ib_wc *); int (*peek_cq)(struct ib_cq *, int); int (*req_notify_cq)(struct ib_cq *, enum ib_cq_notify_flags); int (*post_srq_recv)(struct ib_srq *, const struct ib_recv_wr *, const struct ib_recv_wr **); int (*process_mad)(struct ib_device *, int, u32, const struct ib_wc *, const struct ib_grh *, const struct ib_mad *, struct ib_mad *, size_t *, u16 *); int (*query_device)(struct ib_device *, struct ib_device_attr *, struct ib_udata *); int (*modify_device)(struct ib_device *, int, struct ib_device_modify *); void (*get_dev_fw_str)(struct ib_device *, char *); const struct cpumask * (*get_vector_affinity)(struct ib_device *, int); int (*query_port)(struct ib_device *, u32, struct ib_port_attr *); int (*modify_port)(struct ib_device *, u32, int, struct ib_port_modify *); int (*get_port_immutable)(struct ib_device *, u32, struct ib_port_immutable *); enum rdma_link_layer (*get_link_layer)(struct ib_device *, u32); struct net_device * (*get_netdev)(struct ib_device *, u32); struct net_device * (*alloc_rdma_netdev)(struct ib_device *, u32, enum rdma_netdev_t, const char *, unsigned char, void (*)(struct net_device *)); int (*rdma_netdev_get_params)(struct ib_device *, u32, enum rdma_netdev_t, struct rdma_netdev_alloc_params *); int (*query_gid)(struct ib_device *, u32, int, union ib_gid *); int (*add_gid)(const struct ib_gid_attr *, void **); int (*del_gid)(const struct ib_gid_attr *, void 
**); int (*query_pkey)(struct ib_device *, u32, u16, u16 *); int (*alloc_ucontext)(struct ib_ucontext *, struct ib_udata *); void (*dealloc_ucontext)(struct ib_ucontext *); int (*mmap)(struct ib_ucontext *, struct vm_area_struct *); void (*mmap_free)(struct rdma_user_mmap_entry *); void (*disassociate_ucontext)(struct ib_ucontext *); int (*alloc_pd)(struct ib_pd *, struct ib_udata *); int (*dealloc_pd)(struct ib_pd *, struct ib_udata *); int (*create_ah)(struct ib_ah *, struct rdma_ah_init_attr *, struct ib_udata *); int (*create_user_ah)(struct ib_ah *, struct rdma_ah_init_attr *, struct ib_udata *); int (*modify_ah)(struct ib_ah *, struct rdma_ah_attr *); int (*query_ah)(struct ib_ah *, struct rdma_ah_attr *); int (*destroy_ah)(struct ib_ah *, u32); int (*create_srq)(struct ib_srq *, struct ib_srq_init_attr *, struct ib_udata *); int (*modify_srq)(struct ib_srq *, struct ib_srq_attr *, enum ib_srq_attr_mask, struct ib_udata *); int (*query_srq)(struct ib_srq *, struct ib_srq_attr *); int (*destroy_srq)(struct ib_srq *, struct ib_udata *); int (*create_qp)(struct ib_qp *, struct ib_qp_init_attr *, struct ib_udata *); int (*modify_qp)(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *); int (*query_qp)(struct ib_qp *, struct ib_qp_attr *, int, struct ib_qp_init_attr *); int (*destroy_qp)(struct ib_qp *, struct ib_udata *); int (*create_cq)(struct ib_cq *, const struct ib_cq_init_attr *, struct ib_udata *); int (*modify_cq)(struct ib_cq *, u16, u16); int (*destroy_cq)(struct ib_cq *, struct ib_udata *); int (*resize_cq)(struct ib_cq *, int, struct ib_udata *); struct ib_mr * (*get_dma_mr)(struct ib_pd *, int); struct ib_mr * (*reg_user_mr)(struct ib_pd *, u64, u64, u64, int, struct ib_udata *); struct ib_mr * (*reg_user_mr_dmabuf)(struct ib_pd *, u64, u64, u64, int, int, struct ib_udata *); struct ib_mr * (*rereg_user_mr)(struct ib_mr *, int, u64, u64, u64, int, struct ib_pd *, struct ib_udata *); int (*dereg_mr)(struct ib_mr *, struct ib_udata *); struct ib_mr * (*alloc_mr)(struct ib_pd *, enum ib_mr_type, u32); struct ib_mr * (*alloc_mr_integrity)(struct ib_pd *, u32, u32); int (*advise_mr)(struct ib_pd *, enum ib_uverbs_advise_mr_advice, u32, struct ib_sge *, u32, struct uverbs_attr_bundle *); int (*map_mr_sg)(struct ib_mr *, struct scatterlist *, int, unsigned int *); int (*check_mr_status)(struct ib_mr *, u32, struct ib_mr_status *); int (*alloc_mw)(struct ib_mw *, struct ib_udata *); int (*dealloc_mw)(struct ib_mw *); int (*attach_mcast)(struct ib_qp *, union ib_gid *, u16); int (*detach_mcast)(struct ib_qp *, union ib_gid *, u16); int (*alloc_xrcd)(struct ib_xrcd *, struct ib_udata *); int (*dealloc_xrcd)(struct ib_xrcd *, struct ib_udata *); struct ib_flow * (*create_flow)(struct ib_qp *, struct ib_flow_attr *, struct ib_udata *); int (*destroy_flow)(struct ib_flow *); int (*destroy_flow_action)(struct ib_flow_action *); int (*set_vf_link_state)(struct ib_device *, int, u32, int); int (*get_vf_config)(struct ib_device *, int, u32, struct ifla_vf_info *); int (*get_vf_stats)(struct ib_device *, int, u32, struct ifla_vf_stats *); int (*get_vf_guid)(struct ib_device *, int, u32, struct ifla_vf_guid *, struct ifla_vf_guid *); int (*set_vf_guid)(struct ib_device *, int, u32, u64, int); struct ib_wq * (*create_wq)(struct ib_pd *, struct ib_wq_init_attr *, struct ib_udata *); int (*destroy_wq)(struct ib_wq *, struct ib_udata *); int (*modify_wq)(struct ib_wq *, struct ib_wq_attr *, u32, struct ib_udata *); int (*create_rwq_ind_table)(struct ib_rwq_ind_table *, struct 
ib_rwq_ind_table_init_attr *, struct ib_udata *); int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *); struct ib_dm * (*alloc_dm)(struct ib_device *, struct ib_ucontext *, struct ib_dm_alloc_attr *, struct uverbs_attr_bundle *); int (*dealloc_dm)(struct ib_dm *, struct uverbs_attr_bundle *); struct ib_mr * (*reg_dm_mr)(struct ib_pd *, struct ib_dm *, struct ib_dm_mr_attr *, struct uverbs_attr_bundle *); int (*create_counters)(struct ib_counters *, struct uverbs_attr_bundle *); int (*destroy_counters)(struct ib_counters *); int (*read_counters)(struct ib_counters *, struct ib_counters_read_attr *, struct uverbs_attr_bundle *); int (*map_mr_sg_pi)(struct ib_mr *, struct scatterlist *, int, unsigned int *, struct scatterlist *, int, unsigned int *); struct rdma_hw_stats * (*alloc_hw_device_stats)(struct ib_device *); struct rdma_hw_stats * (*alloc_hw_port_stats)(struct ib_device *, u32); int (*get_hw_stats)(struct ib_device *, struct rdma_hw_stats *, u32, int); int (*modify_hw_stat)(struct ib_device *, u32, unsigned int, bool); int (*fill_res_mr_entry)(struct sk_buff *, struct ib_mr *); int (*fill_res_mr_entry_raw)(struct sk_buff *, struct ib_mr *); int (*fill_res_cq_entry)(struct sk_buff *, struct ib_cq *); int (*fill_res_cq_entry_raw)(struct sk_buff *, struct ib_cq *); int (*fill_res_qp_entry)(struct sk_buff *, struct ib_qp *); int (*fill_res_qp_entry_raw)(struct sk_buff *, struct ib_qp *); int (*fill_res_cm_id_entry)(struct sk_buff *, struct rdma_cm_id *); int (*enable_driver)(struct ib_device *); void (*dealloc_driver)(struct ib_device *); void (*iw_add_ref)(struct ib_qp *); void (*iw_rem_ref)(struct ib_qp *); struct ib_qp * (*iw_get_qp)(struct ib_device *, int); int (*iw_connect)(struct iw_cm_id *, struct iw_cm_conn_param *); int (*iw_accept)(struct iw_cm_id *, struct iw_cm_conn_param *); int (*iw_reject)(struct iw_cm_id *, const void *, u8); int (*iw_create_listen)(struct iw_cm_id *, int); int (*iw_destroy_listen)(struct iw_cm_id *); int (*counter_bind_qp)(struct rdma_counter *, struct ib_qp *); int (*counter_unbind_qp)(struct ib_qp *); int (*counter_dealloc)(struct rdma_counter *); struct rdma_hw_stats * (*counter_alloc_stats)(struct rdma_counter *); int (*counter_update_stats)(struct rdma_counter *); int (*fill_stat_mr_entry)(struct sk_buff *, struct ib_mr *); int (*query_ucontext)(struct ib_ucontext *, struct uverbs_attr_bundle *); int (*get_numa_node)(struct ib_device *); size_t size_ib_ah; size_t size_ib_counters; size_t size_ib_cq; size_t size_ib_mw; size_t size_ib_pd; size_t size_ib_qp; size_t size_ib_rwq_ind_table; size_t size_ib_srq; size_t size_ib_ucontext; size_t size_ib_xrcd; }; enum ib_atomic_cap { IB_ATOMIC_NONE = 0, IB_ATOMIC_HCA = 1, IB_ATOMIC_GLOB = 2, }; struct ib_odp_caps { uint64_t general_caps; struct { uint32_t rc_odp_caps; uint32_t uc_odp_caps; uint32_t ud_odp_caps; uint32_t xrc_odp_caps; } per_transport_caps; }; struct ib_rss_caps { u32 supported_qpts; u32 max_rwq_indirection_tables; u32 max_rwq_indirection_table_size; }; struct ib_tm_caps { u32 max_rndv_hdr_size; u32 max_num_tags; u32 flags; u32 max_ops; u32 max_sge; }; struct ib_cq_caps { u16 max_cq_moderation_count; u16 max_cq_moderation_period; }; struct ib_device_attr { u64 fw_ver; __be64 sys_image_guid; u64 max_mr_size; u64 page_size_cap; u32 vendor_id; u32 vendor_part_id; u32 hw_ver; int max_qp; int max_qp_wr; u64 device_cap_flags; u64 kernel_cap_flags; int max_send_sge; int max_recv_sge; int max_sge_rd; int max_cq; int max_cqe; int max_mr; int max_pd; int max_qp_rd_atom; int max_ee_rd_atom; int 
max_res_rd_atom; int max_qp_init_rd_atom; int max_ee_init_rd_atom; enum ib_atomic_cap atomic_cap; enum ib_atomic_cap masked_atomic_cap; int max_ee; int max_rdd; int max_mw; int max_raw_ipv6_qp; int max_raw_ethy_qp; int max_mcast_grp; int max_mcast_qp_attach; int max_total_mcast_qp_attach; int max_ah; int max_srq; int max_srq_wr; int max_srq_sge; unsigned int max_fast_reg_page_list_len; unsigned int max_pi_fast_reg_page_list_len; u16 max_pkeys; u8 local_ca_ack_delay; int sig_prot_cap; int sig_guard_cap; struct ib_odp_caps odp_caps; uint64_t timestamp_mask; uint64_t hca_core_clock; struct ib_rss_caps rss_caps; u32 max_wq_type_rq; u32 raw_packet_caps; struct ib_tm_caps tm_caps; struct ib_cq_caps cq_caps; u64 max_dm_size; u32 max_sgl_rd; }; struct hw_stats_device_data; struct rdma_restrack_root; struct uapi_definition; struct ib_port_data; struct rdma_link_ops; struct ib_device { struct device *dma_device; struct ib_device_ops ops; char name[64]; struct callback_head callback_head; struct list_head event_handler_list; struct rw_semaphore event_handler_rwsem; spinlock_t qp_open_list_lock; struct rw_semaphore client_data_rwsem; struct xarray client_data; struct mutex unregistration_lock; rwlock_t cache_lock; struct ib_port_data *port_data; int num_comp_vectors; union { struct device dev; struct ib_core_device coredev; }; const struct attribute_group *groups[4]; u64 uverbs_cmd_mask; char node_desc[64]; __be64 node_guid; u32 local_dma_lkey; u16 is_switch: 1; u16 kverbs_provider: 1; u16 use_cq_dim: 1; u8 node_type; u32 phys_port_cnt; struct ib_device_attr attrs; struct hw_stats_device_data *hw_stats_data; u32 index; spinlock_t cq_pools_lock; struct list_head cq_pools[3]; struct rdma_restrack_root *res; const struct uapi_definition *driver_def; refcount_t refcount; struct completion unreg_completion; struct work_struct unregistration_work; const struct rdma_link_ops *link_ops; struct mutex compat_devs_mutex; struct xarray compat_devs; char iw_ifname[16]; u32 iw_driver_flags; u32 lag_flags; }; struct ib_uqp_object; enum ib_qp_type { IB_QPT_SMI = 0, IB_QPT_GSI = 1, IB_QPT_RC = 2, IB_QPT_UC = 3, IB_QPT_UD = 4, IB_QPT_RAW_IPV6 = 5, IB_QPT_RAW_ETHERTYPE = 6, IB_QPT_RAW_PACKET = 8, IB_QPT_XRC_INI = 9, IB_QPT_XRC_TGT = 10, IB_QPT_MAX = 11, IB_QPT_DRIVER = 255, IB_QPT_RESERVED1 = 4096, IB_QPT_RESERVED2 = 4097, IB_QPT_RESERVED3 = 4098, IB_QPT_RESERVED4 = 4099, IB_QPT_RESERVED5 = 4100, IB_QPT_RESERVED6 = 4101, IB_QPT_RESERVED7 = 4102, IB_QPT_RESERVED8 = 4103, IB_QPT_RESERVED9 = 4104, IB_QPT_RESERVED10 = 4105, }; enum rdma_restrack_type { RDMA_RESTRACK_PD = 0, RDMA_RESTRACK_CQ = 1, RDMA_RESTRACK_QP = 2, RDMA_RESTRACK_CM_ID = 3, RDMA_RESTRACK_MR = 4, RDMA_RESTRACK_CTX = 5, RDMA_RESTRACK_COUNTER = 6, RDMA_RESTRACK_SRQ = 7, RDMA_RESTRACK_MAX = 8, }; struct rdma_restrack_entry { bool valid; u8 no_track: 1; struct kref kref; struct completion comp; struct task_struct *task; const char *kern_name; enum rdma_restrack_type type; bool user; u32 id; }; struct ib_event; struct ib_qp_security; struct ib_qp { struct ib_device *device; struct ib_pd *pd; struct ib_cq *send_cq; struct ib_cq *recv_cq; spinlock_t mr_lock; int mrs_used; struct list_head rdma_mrs; struct list_head sig_mrs; struct ib_srq *srq; struct ib_xrcd *xrcd; struct list_head xrcd_list; atomic_t usecnt; struct list_head open_list; struct ib_qp *real_qp; struct ib_uqp_object *uobject; void (*event_handler)(struct ib_event *, void *); void *qp_context; const struct ib_gid_attr *av_sgid_attr; const struct ib_gid_attr *alt_path_sgid_attr; u32 qp_num; u32 
max_write_sge; u32 max_read_sge; enum ib_qp_type qp_type; struct ib_rwq_ind_table *rwq_ind_tbl; struct ib_qp_security *qp_sec; u32 port; bool integrity_en; struct rdma_restrack_entry res; struct rdma_counter *counter; }; struct ib_uobject; struct ib_pd { u32 local_dma_lkey; u32 flags; struct ib_device *device; struct ib_uobject *uobject; atomic_t usecnt; u32 unsafe_global_rkey; struct ib_mr *__internal_mr; struct rdma_restrack_entry res; }; struct ib_uverbs_file; struct ib_rdmacg_object {}; struct uverbs_api_object; struct ib_uobject { u64 user_handle; struct ib_uverbs_file *ufile; struct ib_ucontext *context; void *object; struct list_head list; struct ib_rdmacg_object cg_obj; int id; struct kref ref; atomic_t usecnt; struct callback_head rcu; const struct uverbs_api_object *uapi_object; }; struct ib_ucontext { struct ib_device *device; struct ib_uverbs_file *ufile; struct ib_rdmacg_object cg_obj; struct rdma_restrack_entry res; struct xarray mmap_xa; }; struct ib_sig_attrs; struct ib_mr { struct ib_device *device; struct ib_pd *pd; u32 lkey; u32 rkey; u64 iova; u64 length; unsigned int page_size; enum ib_mr_type type; bool need_inval; union { struct ib_uobject *uobject; struct list_head qp_entry; }; struct ib_dm *dm; struct ib_sig_attrs *sig_attrs; struct rdma_restrack_entry res; }; struct ib_dm { struct ib_device *device; u32 length; u32 flags; struct ib_uobject *uobject; atomic_t usecnt; }; enum ib_signature_type { IB_SIG_TYPE_NONE = 0, IB_SIG_TYPE_T10_DIF = 1, }; enum ib_t10_dif_bg_type { IB_T10DIF_CRC = 0, IB_T10DIF_CSUM = 1, }; struct ib_t10_dif_domain { enum ib_t10_dif_bg_type bg_type; u16 pi_interval; u16 bg; u16 app_tag; u32 ref_tag; bool ref_remap; bool app_escape; bool ref_escape; u16 apptag_check_mask; }; struct ib_sig_domain { enum ib_signature_type sig_type; union { struct ib_t10_dif_domain dif; } sig; }; struct ib_sig_attrs { u8 check_mask; struct ib_sig_domain mem; struct ib_sig_domain wire; int meta_length; }; struct irq_poll; typedef int irq_poll_fn(struct irq_poll *, int); struct irq_poll { struct list_head list; unsigned long state; int weight; irq_poll_fn *poll; }; struct ib_ucq_object; typedef void (*ib_comp_handler)(struct ib_cq *, void *); enum ib_poll_context { IB_POLL_SOFTIRQ = 0, IB_POLL_WORKQUEUE = 1, IB_POLL_UNBOUND_WORKQUEUE = 2, IB_POLL_LAST_POOL_TYPE = 2, IB_POLL_DIRECT = 3, }; struct dim; struct ib_cq { struct ib_device *device; struct ib_ucq_object *uobject; ib_comp_handler comp_handler; void (*event_handler)(struct ib_event *, void *); void *cq_context; int cqe; unsigned int cqe_used; atomic_t usecnt; enum ib_poll_context poll_ctx; struct ib_wc *wc; struct list_head pool_entry; union { struct irq_poll iop; struct work_struct work; }; struct workqueue_struct *comp_wq; struct dim *dim; ktime_t timestamp; u8 interrupt: 1; u8 shared: 1; unsigned int comp_vector; struct rdma_restrack_entry res; }; enum ib_event_type { IB_EVENT_CQ_ERR = 0, IB_EVENT_QP_FATAL = 1, IB_EVENT_QP_REQ_ERR = 2, IB_EVENT_QP_ACCESS_ERR = 3, IB_EVENT_COMM_EST = 4, IB_EVENT_SQ_DRAINED = 5, IB_EVENT_PATH_MIG = 6, IB_EVENT_PATH_MIG_ERR = 7, IB_EVENT_DEVICE_FATAL = 8, IB_EVENT_PORT_ACTIVE = 9, IB_EVENT_PORT_ERR = 10, IB_EVENT_LID_CHANGE = 11, IB_EVENT_PKEY_CHANGE = 12, IB_EVENT_SM_CHANGE = 13, IB_EVENT_SRQ_ERR = 14, IB_EVENT_SRQ_LIMIT_REACHED = 15, IB_EVENT_QP_LAST_WQE_REACHED = 16, IB_EVENT_CLIENT_REREGISTER = 17, IB_EVENT_GID_CHANGE = 18, IB_EVENT_WQ_FATAL = 19, }; struct ib_event { struct ib_device *device; union { struct ib_cq *cq; struct ib_qp *qp; struct ib_srq *srq; struct ib_wq *wq; 
u32 port_num; } element; enum ib_event_type event; }; struct ib_usrq_object; enum ib_srq_type { IB_SRQT_BASIC = 0, IB_SRQT_XRC = 1, IB_SRQT_TM = 2, }; struct ib_srq { struct ib_device *device; struct ib_pd *pd; struct ib_usrq_object *uobject; void (*event_handler)(struct ib_event *, void *); void *srq_context; enum ib_srq_type srq_type; atomic_t usecnt; struct { struct ib_cq *cq; union { struct { struct ib_xrcd *xrcd; u32 srq_num; } xrc; }; } ext; struct rdma_restrack_entry res; }; struct ib_xrcd { struct ib_device *device; atomic_t usecnt; struct inode *inode; struct rw_semaphore tgt_qps_rwsem; struct xarray tgt_qps; }; struct ib_uwq_object; enum ib_wq_state { IB_WQS_RESET = 0, IB_WQS_RDY = 1, IB_WQS_ERR = 2, }; enum ib_wq_type { IB_WQT_RQ = 0, }; struct ib_wq { struct ib_device *device; struct ib_uwq_object *uobject; void *wq_context; void (*event_handler)(struct ib_event *, void *); struct ib_pd *pd; struct ib_cq *cq; u32 wq_num; enum ib_wq_state state; enum ib_wq_type wq_type; atomic_t usecnt; }; enum ib_wc_status { IB_WC_SUCCESS = 0, IB_WC_LOC_LEN_ERR = 1, IB_WC_LOC_QP_OP_ERR = 2, IB_WC_LOC_EEC_OP_ERR = 3, IB_WC_LOC_PROT_ERR = 4, IB_WC_WR_FLUSH_ERR = 5, IB_WC_MW_BIND_ERR = 6, IB_WC_BAD_RESP_ERR = 7, IB_WC_LOC_ACCESS_ERR = 8, IB_WC_REM_INV_REQ_ERR = 9, IB_WC_REM_ACCESS_ERR = 10, IB_WC_REM_OP_ERR = 11, IB_WC_RETRY_EXC_ERR = 12, IB_WC_RNR_RETRY_EXC_ERR = 13, IB_WC_LOC_RDD_VIOL_ERR = 14, IB_WC_REM_INV_RD_REQ_ERR = 15, IB_WC_REM_ABORT_ERR = 16, IB_WC_INV_EECN_ERR = 17, IB_WC_INV_EEC_STATE_ERR = 18, IB_WC_FATAL_ERR = 19, IB_WC_RESP_TIMEOUT_ERR = 20, IB_WC_GENERAL_ERR = 21, }; enum ib_wc_opcode { IB_WC_SEND = 0, IB_WC_RDMA_WRITE = 1, IB_WC_RDMA_READ = 2, IB_WC_COMP_SWAP = 3, IB_WC_FETCH_ADD = 4, IB_WC_BIND_MW = 5, IB_WC_LOCAL_INV = 6, IB_WC_LSO = 7, IB_WC_ATOMIC_WRITE = 9, IB_WC_REG_MR = 10, IB_WC_MASKED_COMP_SWAP = 11, IB_WC_MASKED_FETCH_ADD = 12, IB_WC_FLUSH = 8, IB_WC_RECV = 128, IB_WC_RECV_RDMA_WITH_IMM = 129, }; struct ib_cqe; struct ib_wc { union { u64 wr_id; struct ib_cqe *wr_cqe; }; enum ib_wc_status status; enum ib_wc_opcode opcode; u32 vendor_err; u32 byte_len; struct ib_qp *qp; union { __be32 imm_data; u32 invalidate_rkey; } ex; u32 src_qp; u32 slid; int wc_flags; u16 pkey_index; u8 sl; u8 dlid_path_bits; u32 port_num; u8 smac[6]; u16 vlan_id; u8 network_hdr_type; }; struct ib_cqe { void (*done)(struct ib_cq *, struct ib_wc *); }; struct dim_stats { int ppms; int bpms; int epms; int cpms; int cpe_ratio; }; struct dim_sample { ktime_t time; u32 pkt_ctr; u32 byte_ctr; u16 event_ctr; u32 comp_ctr; }; struct dim { u8 state; struct dim_stats prev_stats; struct dim_sample start_sample; struct dim_sample measuring_sample; struct work_struct work; void *priv; u8 profile_ix; u8 mode; u8 tune_state; u8 steps_right; u8 steps_left; u8 tired; }; union ib_gid { u8 raw[16]; struct { __be64 subnet_prefix; __be64 interface_id; } global; }; enum ib_gid_type { IB_GID_TYPE_IB = 0, IB_GID_TYPE_ROCE = 1, IB_GID_TYPE_ROCE_UDP_ENCAP = 2, IB_GID_TYPE_SIZE = 3, }; struct ib_gid_attr { struct net_device __attribute__((btf_type_tag("rcu"))) *ndev; struct ib_device *device; union ib_gid gid; enum ib_gid_type gid_type; u16 index; u32 port_num; }; struct ib_rwq_ind_table { struct ib_device *device; struct ib_uobject *uobject; atomic_t usecnt; u32 ind_tbl_num; u32 log_ind_tbl_size; struct ib_wq **ind_tbl; }; struct ib_ports_pkeys; struct ib_qp_security { struct ib_qp *qp; struct ib_device *dev; struct mutex mutex; struct ib_ports_pkeys *ports_pkeys; struct list_head shared_qp_list; void *security; bool 
destroying; atomic_t error_list_count; struct completion error_complete; int error_comps_pending; }; enum port_pkey_state { IB_PORT_PKEY_NOT_VALID = 0, IB_PORT_PKEY_VALID = 1, IB_PORT_PKEY_LISTED = 2, }; struct ib_port_pkey { enum port_pkey_state state; u16 pkey_index; u32 port_num; struct list_head qp_list; struct list_head to_error_list; struct ib_qp_security *sec; }; struct ib_ports_pkeys { struct ib_port_pkey main; struct ib_port_pkey alt; }; enum rdma_nl_counter_mode { RDMA_COUNTER_MODE_NONE = 0, RDMA_COUNTER_MODE_AUTO = 1, RDMA_COUNTER_MODE_MANUAL = 2, RDMA_COUNTER_MODE_MAX = 3, }; enum rdma_nl_counter_mask { RDMA_COUNTER_MASK_QP_TYPE = 1, RDMA_COUNTER_MASK_PID = 2, }; struct auto_mode_param { int qp_type; }; struct rdma_counter_mode { enum rdma_nl_counter_mode mode; enum rdma_nl_counter_mask mask; struct auto_mode_param param; }; struct rdma_counter { struct rdma_restrack_entry res; struct ib_device *device; uint32_t id; struct kref kref; struct rdma_counter_mode mode; struct mutex lock; struct rdma_hw_stats *stats; u32 port; }; struct rdma_stat_desc; struct rdma_hw_stats { struct mutex lock; unsigned long timestamp; unsigned long lifespan; const struct rdma_stat_desc *descs; unsigned long *is_disabled; int num_counters; u64 value[0]; }; struct rdma_stat_desc { const char *name; unsigned int flags; const void *priv; }; enum ib_wr_opcode { IB_WR_RDMA_WRITE = 0, IB_WR_RDMA_WRITE_WITH_IMM = 1, IB_WR_SEND = 2, IB_WR_SEND_WITH_IMM = 3, IB_WR_RDMA_READ = 4, IB_WR_ATOMIC_CMP_AND_SWP = 5, IB_WR_ATOMIC_FETCH_AND_ADD = 6, IB_WR_BIND_MW = 8, IB_WR_LSO = 10, IB_WR_SEND_WITH_INV = 9, IB_WR_RDMA_READ_WITH_INV = 11, IB_WR_LOCAL_INV = 7, IB_WR_MASKED_ATOMIC_CMP_AND_SWP = 12, IB_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13, IB_WR_FLUSH = 14, IB_WR_ATOMIC_WRITE = 15, IB_WR_REG_MR = 32, IB_WR_REG_MR_INTEGRITY = 33, IB_WR_RESERVED1 = 240, IB_WR_RESERVED2 = 241, IB_WR_RESERVED3 = 242, IB_WR_RESERVED4 = 243, IB_WR_RESERVED5 = 244, IB_WR_RESERVED6 = 245, IB_WR_RESERVED7 = 246, IB_WR_RESERVED8 = 247, IB_WR_RESERVED9 = 248, IB_WR_RESERVED10 = 249, }; struct ib_send_wr { struct ib_send_wr *next; union { u64 wr_id; struct ib_cqe *wr_cqe; }; struct ib_sge *sg_list; int num_sge; enum ib_wr_opcode opcode; int send_flags; union { __be32 imm_data; u32 invalidate_rkey; } ex; }; struct ib_sge { u64 addr; u32 length; u32 lkey; }; struct ib_recv_wr { struct ib_recv_wr *next; union { u64 wr_id; struct ib_cqe *wr_cqe; }; struct ib_sge *sg_list; int num_sge; }; struct ib_grh { __be32 version_tclass_flow; __be16 paylen; u8 next_hdr; u8 hop_limit; union ib_gid sgid; union ib_gid dgid; }; struct ib_udata { const void __attribute__((btf_type_tag("user"))) *inbuf; void __attribute__((btf_type_tag("user"))) *outbuf; size_t inlen; size_t outlen; }; struct ib_device_modify { u64 sys_image_guid; char node_desc[64]; }; enum ib_port_state { IB_PORT_NOP = 0, IB_PORT_DOWN = 1, IB_PORT_INIT = 2, IB_PORT_ARMED = 3, IB_PORT_ACTIVE = 4, IB_PORT_ACTIVE_DEFER = 5, }; enum ib_mtu { IB_MTU_256 = 1, IB_MTU_512 = 2, IB_MTU_1024 = 3, IB_MTU_2048 = 4, IB_MTU_4096 = 5, }; struct ib_port_attr { u64 subnet_prefix; enum ib_port_state state; enum ib_mtu max_mtu; enum ib_mtu active_mtu; u32 phys_mtu; int gid_tbl_len; unsigned int ip_gids: 1; u32 port_cap_flags; u32 max_msg_sz; u32 bad_pkey_cntr; u32 qkey_viol_cntr; u16 pkey_tbl_len; u32 sm_lid; u32 lid; u8 lmc; u8 max_vl_num; u8 sm_sl; u8 subnet_timeout; u8 init_type_reply; u8 active_width; u16 active_speed; u8 phys_state; u16 port_cap_flags2; }; struct ib_port_modify { u32 set_port_cap_mask; u32 
clr_port_cap_mask; u8 init_type; }; struct ib_port_immutable { int pkey_tbl_len; int gid_tbl_len; u32 core_cap_flags; u32 max_mad_size; }; struct rdma_netdev_alloc_params { size_t sizeof_priv; unsigned int txqs; unsigned int rxqs; void *param; int (*initialize_rdma_netdev)(struct ib_device *, u32, struct net_device *, void *); }; struct rdma_user_mmap_entry { struct kref ref; struct ib_ucontext *ucontext; unsigned long start_pgoff; size_t npages; bool driver_removed; }; enum rdma_ah_attr_type { RDMA_AH_ATTR_TYPE_UNDEFINED = 0, RDMA_AH_ATTR_TYPE_IB = 1, RDMA_AH_ATTR_TYPE_ROCE = 2, RDMA_AH_ATTR_TYPE_OPA = 3, }; struct ib_ah { struct ib_device *device; struct ib_pd *pd; struct ib_uobject *uobject; const struct ib_gid_attr *sgid_attr; enum rdma_ah_attr_type type; }; struct rdma_ah_init_attr { struct rdma_ah_attr *ah_attr; u32 flags; struct net_device *xmit_slave; }; struct ib_ah_attr { u16 dlid; u8 src_path_bits; }; struct roce_ah_attr { u8 dmac[6]; }; struct opa_ah_attr { u32 dlid; u8 src_path_bits; bool make_grd; }; struct ib_global_route { const struct ib_gid_attr *sgid_attr; union ib_gid dgid; u32 flow_label; u8 sgid_index; u8 hop_limit; u8 traffic_class; }; struct rdma_ah_attr { struct ib_global_route grh; u8 sl; u8 static_rate; u32 port_num; u8 ah_flags; enum rdma_ah_attr_type type; union { struct ib_ah_attr ib; struct roce_ah_attr roce; struct opa_ah_attr opa; }; }; struct ib_srq_attr { u32 max_wr; u32 max_sge; u32 srq_limit; }; struct ib_srq_init_attr { void (*event_handler)(struct ib_event *, void *); void *srq_context; struct ib_srq_attr attr; enum ib_srq_type srq_type; struct { struct ib_cq *cq; union { struct { struct ib_xrcd *xrcd; } xrc; struct { u32 max_num_tags; } tag_matching; }; } ext; }; struct ib_qp_cap { u32 max_send_wr; u32 max_recv_wr; u32 max_send_sge; u32 max_recv_sge; u32 max_inline_data; u32 max_rdma_ctxs; }; enum ib_sig_type { IB_SIGNAL_ALL_WR = 0, IB_SIGNAL_REQ_WR = 1, }; struct ib_qp_init_attr { void (*event_handler)(struct ib_event *, void *); void *qp_context; struct ib_cq *send_cq; struct ib_cq *recv_cq; struct ib_srq *srq; struct ib_xrcd *xrcd; struct ib_qp_cap cap; enum ib_sig_type sq_sig_type; enum ib_qp_type qp_type; u32 create_flags; u32 port_num; struct ib_rwq_ind_table *rwq_ind_tbl; u32 source_qpn; }; enum ib_qp_state { IB_QPS_RESET = 0, IB_QPS_INIT = 1, IB_QPS_RTR = 2, IB_QPS_RTS = 3, IB_QPS_SQD = 4, IB_QPS_SQE = 5, IB_QPS_ERR = 6, }; enum ib_mig_state { IB_MIG_MIGRATED = 0, IB_MIG_REARM = 1, IB_MIG_ARMED = 2, }; struct ib_qp_attr { enum ib_qp_state qp_state; enum ib_qp_state cur_qp_state; enum ib_mtu path_mtu; enum ib_mig_state path_mig_state; u32 qkey; u32 rq_psn; u32 sq_psn; u32 dest_qp_num; int qp_access_flags; struct ib_qp_cap cap; struct rdma_ah_attr ah_attr; struct rdma_ah_attr alt_ah_attr; u16 pkey_index; u16 alt_pkey_index; u8 en_sqd_async_notify; u8 sq_draining; u8 max_rd_atomic; u8 max_dest_rd_atomic; u8 min_rnr_timer; u32 port_num; u8 timeout; u8 retry_cnt; u8 rnr_retry; u32 alt_port_num; u8 alt_timeout; u32 rate_limit; struct net_device *xmit_slave; }; struct ib_cq_init_attr { unsigned int cqe; u32 comp_vector; u32 flags; }; enum ib_sig_err_type { IB_SIG_BAD_GUARD = 0, IB_SIG_BAD_REFTAG = 1, IB_SIG_BAD_APPTAG = 2, }; struct ib_sig_err { enum ib_sig_err_type err_type; u32 expected; u32 actual; u64 sig_err_offset; u32 key; }; struct ib_mr_status { u32 fail_status; struct ib_sig_err sig_err; }; enum ib_mw_type { IB_MW_TYPE_1 = 1, IB_MW_TYPE_2 = 2, }; struct ib_mw { struct ib_device *device; struct ib_pd *pd; struct ib_uobject *uobject; u32 
rkey; enum ib_mw_type type; }; struct ib_flow { struct ib_qp *qp; struct ib_device *device; struct ib_uobject *uobject; }; enum ib_flow_attr_type { IB_FLOW_ATTR_NORMAL = 0, IB_FLOW_ATTR_ALL_DEFAULT = 1, IB_FLOW_ATTR_MC_DEFAULT = 2, IB_FLOW_ATTR_SNIFFER = 3, }; struct ib_flow_eth_filter { u8 dst_mac[6]; u8 src_mac[6]; __be16 ether_type; __be16 vlan_tag; u8 real_sz[0]; }; struct ib_flow_spec_eth { u32 type; u16 size; struct ib_flow_eth_filter val; struct ib_flow_eth_filter mask; }; struct ib_flow_ib_filter { __be16 dlid; __u8 sl; u8 real_sz[0]; }; struct ib_flow_spec_ib { u32 type; u16 size; struct ib_flow_ib_filter val; struct ib_flow_ib_filter mask; }; struct ib_flow_ipv4_filter { __be32 src_ip; __be32 dst_ip; u8 proto; u8 tos; u8 ttl; u8 flags; u8 real_sz[0]; }; struct ib_flow_spec_ipv4 { u32 type; u16 size; struct ib_flow_ipv4_filter val; struct ib_flow_ipv4_filter mask; }; struct ib_flow_tcp_udp_filter { __be16 dst_port; __be16 src_port; u8 real_sz[0]; }; struct ib_flow_spec_tcp_udp { u32 type; u16 size; struct ib_flow_tcp_udp_filter val; struct ib_flow_tcp_udp_filter mask; }; struct ib_flow_ipv6_filter { u8 src_ip[16]; u8 dst_ip[16]; __be32 flow_label; u8 next_hdr; u8 traffic_class; u8 hop_limit; u8 real_sz[0]; }; struct ib_flow_spec_ipv6 { u32 type; u16 size; struct ib_flow_ipv6_filter val; struct ib_flow_ipv6_filter mask; }; struct ib_flow_tunnel_filter { __be32 tunnel_id; u8 real_sz[0]; }; struct ib_flow_spec_tunnel { u32 type; u16 size; struct ib_flow_tunnel_filter val; struct ib_flow_tunnel_filter mask; }; struct ib_flow_esp_filter { __be32 spi; __be32 seq; u8 real_sz[0]; }; struct ib_flow_spec_esp { u32 type; u16 size; struct ib_flow_esp_filter val; struct ib_flow_esp_filter mask; }; struct ib_flow_gre_filter { __be16 c_ks_res0_ver; __be16 protocol; __be32 key; u8 real_sz[0]; }; struct ib_flow_spec_gre { u32 type; u16 size; struct ib_flow_gre_filter val; struct ib_flow_gre_filter mask; }; struct ib_flow_mpls_filter { __be32 tag; u8 real_sz[0]; }; struct ib_flow_spec_mpls { u32 type; u16 size; struct ib_flow_mpls_filter val; struct ib_flow_mpls_filter mask; }; enum ib_flow_spec_type { IB_FLOW_SPEC_ETH = 32, IB_FLOW_SPEC_IB = 34, IB_FLOW_SPEC_IPV4 = 48, IB_FLOW_SPEC_IPV6 = 49, IB_FLOW_SPEC_ESP = 52, IB_FLOW_SPEC_TCP = 64, IB_FLOW_SPEC_UDP = 65, IB_FLOW_SPEC_VXLAN_TUNNEL = 80, IB_FLOW_SPEC_GRE = 81, IB_FLOW_SPEC_MPLS = 96, IB_FLOW_SPEC_INNER = 256, IB_FLOW_SPEC_ACTION_TAG = 4096, IB_FLOW_SPEC_ACTION_DROP = 4097, IB_FLOW_SPEC_ACTION_HANDLE = 4098, IB_FLOW_SPEC_ACTION_COUNT = 4099, }; struct ib_flow_spec_action_tag { enum ib_flow_spec_type type; u16 size; u32 tag_id; }; struct ib_flow_spec_action_drop { enum ib_flow_spec_type type; u16 size; }; struct ib_flow_spec_action_handle { enum ib_flow_spec_type type; u16 size; struct ib_flow_action *act; }; struct ib_flow_spec_action_count { enum ib_flow_spec_type type; u16 size; struct ib_counters *counters; }; union ib_flow_spec { struct { u32 type; u16 size; }; struct ib_flow_spec_eth eth; struct ib_flow_spec_ib ib; struct ib_flow_spec_ipv4 ipv4; struct ib_flow_spec_tcp_udp tcp_udp; struct ib_flow_spec_ipv6 ipv6; struct ib_flow_spec_tunnel tunnel; struct ib_flow_spec_esp esp; struct ib_flow_spec_gre gre; struct ib_flow_spec_mpls mpls; struct ib_flow_spec_action_tag flow_tag; struct ib_flow_spec_action_drop drop; struct ib_flow_spec_action_handle action; struct ib_flow_spec_action_count flow_count; }; struct ib_flow_attr { enum ib_flow_attr_type type; u16 size; u16 priority; u32 flags; u8 num_of_specs; u32 port; union ib_flow_spec flows[0]; 
}; enum ib_flow_action_type { IB_FLOW_ACTION_UNSPECIFIED = 0, IB_FLOW_ACTION_ESP = 1, }; struct ib_flow_action { struct ib_device *device; struct ib_uobject *uobject; enum ib_flow_action_type type; atomic_t usecnt; }; struct ib_counters { struct ib_device *device; struct ib_uobject *uobject; atomic_t usecnt; }; struct ib_wq_init_attr { void *wq_context; enum ib_wq_type wq_type; u32 max_wr; u32 max_sge; struct ib_cq *cq; void (*event_handler)(struct ib_event *, void *); u32 create_flags; }; struct ib_wq_attr { enum ib_wq_state wq_state; enum ib_wq_state curr_wq_state; u32 flags; u32 flags_mask; }; struct ib_rwq_ind_table_init_attr { u32 log_ind_tbl_size; struct ib_wq **ind_tbl; }; struct ib_dm_alloc_attr { u64 length; u32 alignment; u32 flags; }; struct ib_dm_mr_attr { u64 length; u64 offset; u32 access_flags; }; struct ib_counters_read_attr { u64 *counters_buff; u32 ncounters; u32 flags; }; struct ib_pkey_cache; struct ib_gid_table; struct ib_port_cache { u64 subnet_prefix; struct ib_pkey_cache *pkey; struct ib_gid_table *gid; u8 lmc; enum ib_port_state port_state; }; struct rdma_port_counter { struct rdma_counter_mode mode; struct rdma_hw_stats *hstats; unsigned int num_counters; struct mutex lock; }; struct ib_port; struct ib_port_data { struct ib_device *ib_dev; struct ib_port_immutable immutable; spinlock_t pkey_list_lock; spinlock_t netdev_lock; struct list_head pkey_list; struct ib_port_cache cache; struct net_device __attribute__((btf_type_tag("rcu"))) *netdev; netdevice_tracker netdev_tracker; struct hlist_node ndev_hash_link; struct rdma_port_counter port_counter; struct ib_port *sysfs; }; struct rdma_link_ops { struct list_head list; const char *type; int (*newlink)(const char *, struct net_device *); }; enum devlink_linecard_state { DEVLINK_LINECARD_STATE_UNSPEC = 0, DEVLINK_LINECARD_STATE_UNPROVISIONED = 1, DEVLINK_LINECARD_STATE_UNPROVISIONING = 2, DEVLINK_LINECARD_STATE_PROVISIONING = 3, DEVLINK_LINECARD_STATE_PROVISIONING_FAILED = 4, DEVLINK_LINECARD_STATE_PROVISIONED = 5, DEVLINK_LINECARD_STATE_ACTIVE = 6, __DEVLINK_LINECARD_STATE_MAX = 7, DEVLINK_LINECARD_STATE_MAX = 6, }; struct devlink_linecard_ops; struct devlink_linecard_type; struct devlink_linecard { struct list_head list; struct devlink *devlink; unsigned int index; const struct devlink_linecard_ops *ops; void *priv; enum devlink_linecard_state state; struct mutex state_lock; const char *type; struct devlink_linecard_type *types; unsigned int types_count; struct devlink *nested_devlink; }; struct devlink_linecard_ops { int (*provision)(struct devlink_linecard *, void *, const char *, const void *, struct netlink_ext_ack *); int (*unprovision)(struct devlink_linecard *, void *, struct netlink_ext_ack *); bool (*same_provision)(struct devlink_linecard *, void *, const char *, const void *); unsigned int (*types_count)(struct devlink_linecard *, void *); void (*types_get)(struct devlink_linecard *, void *, unsigned int, const char **, const void **); }; typedef void (*btf_trace_devlink_hwmsg)(void *, const struct devlink *, bool, unsigned long, const u8 *, size_t); typedef void (*btf_trace_devlink_hwerr)(void *, const struct devlink *, int, const char *); typedef void (*btf_trace_devlink_health_report)(void *, const struct devlink *, const char *, const char *); typedef void (*btf_trace_devlink_health_recover_aborted)(void *, const struct devlink *, const char *, bool, u64); typedef void (*btf_trace_devlink_health_reporter_state_update)(void *, const struct devlink *, const char *, bool); struct devlink_trap_metadata; 
typedef void (*btf_trace_devlink_trap_report)(void *, const struct devlink *, struct sk_buff *, const struct devlink_trap_metadata *); struct devlink_trap_metadata { const char *trap_name; const char *trap_group_name; struct net_device *input_dev; netdevice_tracker dev_tracker; const struct flow_action_cookie *fa_cookie; enum devlink_trap_type trap_type; }; struct trace_event_raw_devlink_hwmsg { struct trace_entry ent; u32 __data_loc_bus_name; u32 __data_loc_dev_name; u32 __data_loc_driver_name; bool incoming; unsigned long type; u32 __data_loc_buf; size_t len; char __data[0]; }; struct trace_event_raw_devlink_hwerr { struct trace_entry ent; u32 __data_loc_bus_name; u32 __data_loc_dev_name; u32 __data_loc_driver_name; int err; u32 __data_loc_msg; char __data[0]; }; struct trace_event_raw_devlink_health_report { struct trace_entry ent; u32 __data_loc_bus_name; u32 __data_loc_dev_name; u32 __data_loc_driver_name; u32 __data_loc_reporter_name; u32 __data_loc_msg; char __data[0]; }; struct trace_event_raw_devlink_health_recover_aborted { struct trace_entry ent; u32 __data_loc_bus_name; u32 __data_loc_dev_name; u32 __data_loc_driver_name; u32 __data_loc_reporter_name; bool health_state; u64 time_since_last_recover; char __data[0]; }; struct trace_event_raw_devlink_health_reporter_state_update { struct trace_entry ent; u32 __data_loc_bus_name; u32 __data_loc_dev_name; u32 __data_loc_driver_name; u32 __data_loc_reporter_name; u8 new_state; char __data[0]; }; struct trace_event_raw_devlink_trap_report { struct trace_entry ent; u32 __data_loc_bus_name; u32 __data_loc_dev_name; u32 __data_loc_driver_name; u32 __data_loc_trap_name; u32 __data_loc_trap_group_name; char input_dev_name[16]; char __data[0]; }; struct trace_event_data_offsets_devlink_hwmsg { u32 bus_name; u32 dev_name; u32 driver_name; u32 buf; }; struct trace_event_data_offsets_devlink_hwerr { u32 bus_name; u32 dev_name; u32 driver_name; u32 msg; }; struct trace_event_data_offsets_devlink_health_report { u32 bus_name; u32 dev_name; u32 driver_name; u32 reporter_name; u32 msg; }; struct trace_event_data_offsets_devlink_health_recover_aborted { u32 bus_name; u32 dev_name; u32 driver_name; u32 reporter_name; }; struct trace_event_data_offsets_devlink_health_reporter_state_update { u32 bus_name; u32 dev_name; u32 driver_name; u32 reporter_name; }; struct trace_event_data_offsets_devlink_trap_report { u32 bus_name; u32 dev_name; u32 driver_name; u32 trap_name; u32 trap_group_name; }; enum devlink_attr { DEVLINK_ATTR_UNSPEC = 0, DEVLINK_ATTR_BUS_NAME = 1, DEVLINK_ATTR_DEV_NAME = 2, DEVLINK_ATTR_PORT_INDEX = 3, DEVLINK_ATTR_PORT_TYPE = 4, DEVLINK_ATTR_PORT_DESIRED_TYPE = 5, DEVLINK_ATTR_PORT_NETDEV_IFINDEX = 6, DEVLINK_ATTR_PORT_NETDEV_NAME = 7, DEVLINK_ATTR_PORT_IBDEV_NAME = 8, DEVLINK_ATTR_PORT_SPLIT_COUNT = 9, DEVLINK_ATTR_PORT_SPLIT_GROUP = 10, DEVLINK_ATTR_SB_INDEX = 11, DEVLINK_ATTR_SB_SIZE = 12, DEVLINK_ATTR_SB_INGRESS_POOL_COUNT = 13, DEVLINK_ATTR_SB_EGRESS_POOL_COUNT = 14, DEVLINK_ATTR_SB_INGRESS_TC_COUNT = 15, DEVLINK_ATTR_SB_EGRESS_TC_COUNT = 16, DEVLINK_ATTR_SB_POOL_INDEX = 17, DEVLINK_ATTR_SB_POOL_TYPE = 18, DEVLINK_ATTR_SB_POOL_SIZE = 19, DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE = 20, DEVLINK_ATTR_SB_THRESHOLD = 21, DEVLINK_ATTR_SB_TC_INDEX = 22, DEVLINK_ATTR_SB_OCC_CUR = 23, DEVLINK_ATTR_SB_OCC_MAX = 24, DEVLINK_ATTR_ESWITCH_MODE = 25, DEVLINK_ATTR_ESWITCH_INLINE_MODE = 26, DEVLINK_ATTR_DPIPE_TABLES = 27, DEVLINK_ATTR_DPIPE_TABLE = 28, DEVLINK_ATTR_DPIPE_TABLE_NAME = 29, DEVLINK_ATTR_DPIPE_TABLE_SIZE = 30, 
DEVLINK_ATTR_DPIPE_TABLE_MATCHES = 31, DEVLINK_ATTR_DPIPE_TABLE_ACTIONS = 32, DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED = 33, DEVLINK_ATTR_DPIPE_ENTRIES = 34, DEVLINK_ATTR_DPIPE_ENTRY = 35, DEVLINK_ATTR_DPIPE_ENTRY_INDEX = 36, DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES = 37, DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES = 38, DEVLINK_ATTR_DPIPE_ENTRY_COUNTER = 39, DEVLINK_ATTR_DPIPE_MATCH = 40, DEVLINK_ATTR_DPIPE_MATCH_VALUE = 41, DEVLINK_ATTR_DPIPE_MATCH_TYPE = 42, DEVLINK_ATTR_DPIPE_ACTION = 43, DEVLINK_ATTR_DPIPE_ACTION_VALUE = 44, DEVLINK_ATTR_DPIPE_ACTION_TYPE = 45, DEVLINK_ATTR_DPIPE_VALUE = 46, DEVLINK_ATTR_DPIPE_VALUE_MASK = 47, DEVLINK_ATTR_DPIPE_VALUE_MAPPING = 48, DEVLINK_ATTR_DPIPE_HEADERS = 49, DEVLINK_ATTR_DPIPE_HEADER = 50, DEVLINK_ATTR_DPIPE_HEADER_NAME = 51, DEVLINK_ATTR_DPIPE_HEADER_ID = 52, DEVLINK_ATTR_DPIPE_HEADER_FIELDS = 53, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL = 54, DEVLINK_ATTR_DPIPE_HEADER_INDEX = 55, DEVLINK_ATTR_DPIPE_FIELD = 56, DEVLINK_ATTR_DPIPE_FIELD_NAME = 57, DEVLINK_ATTR_DPIPE_FIELD_ID = 58, DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH = 59, DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE = 60, DEVLINK_ATTR_PAD = 61, DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 62, DEVLINK_ATTR_RESOURCE_LIST = 63, DEVLINK_ATTR_RESOURCE = 64, DEVLINK_ATTR_RESOURCE_NAME = 65, DEVLINK_ATTR_RESOURCE_ID = 66, DEVLINK_ATTR_RESOURCE_SIZE = 67, DEVLINK_ATTR_RESOURCE_SIZE_NEW = 68, DEVLINK_ATTR_RESOURCE_SIZE_VALID = 69, DEVLINK_ATTR_RESOURCE_SIZE_MIN = 70, DEVLINK_ATTR_RESOURCE_SIZE_MAX = 71, DEVLINK_ATTR_RESOURCE_SIZE_GRAN = 72, DEVLINK_ATTR_RESOURCE_UNIT = 73, DEVLINK_ATTR_RESOURCE_OCC = 74, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID = 75, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS = 76, DEVLINK_ATTR_PORT_FLAVOUR = 77, DEVLINK_ATTR_PORT_NUMBER = 78, DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER = 79, DEVLINK_ATTR_PARAM = 80, DEVLINK_ATTR_PARAM_NAME = 81, DEVLINK_ATTR_PARAM_GENERIC = 82, DEVLINK_ATTR_PARAM_TYPE = 83, DEVLINK_ATTR_PARAM_VALUES_LIST = 84, DEVLINK_ATTR_PARAM_VALUE = 85, DEVLINK_ATTR_PARAM_VALUE_DATA = 86, DEVLINK_ATTR_PARAM_VALUE_CMODE = 87, DEVLINK_ATTR_REGION_NAME = 88, DEVLINK_ATTR_REGION_SIZE = 89, DEVLINK_ATTR_REGION_SNAPSHOTS = 90, DEVLINK_ATTR_REGION_SNAPSHOT = 91, DEVLINK_ATTR_REGION_SNAPSHOT_ID = 92, DEVLINK_ATTR_REGION_CHUNKS = 93, DEVLINK_ATTR_REGION_CHUNK = 94, DEVLINK_ATTR_REGION_CHUNK_DATA = 95, DEVLINK_ATTR_REGION_CHUNK_ADDR = 96, DEVLINK_ATTR_REGION_CHUNK_LEN = 97, DEVLINK_ATTR_INFO_DRIVER_NAME = 98, DEVLINK_ATTR_INFO_SERIAL_NUMBER = 99, DEVLINK_ATTR_INFO_VERSION_FIXED = 100, DEVLINK_ATTR_INFO_VERSION_RUNNING = 101, DEVLINK_ATTR_INFO_VERSION_STORED = 102, DEVLINK_ATTR_INFO_VERSION_NAME = 103, DEVLINK_ATTR_INFO_VERSION_VALUE = 104, DEVLINK_ATTR_SB_POOL_CELL_SIZE = 105, DEVLINK_ATTR_FMSG = 106, DEVLINK_ATTR_FMSG_OBJ_NEST_START = 107, DEVLINK_ATTR_FMSG_PAIR_NEST_START = 108, DEVLINK_ATTR_FMSG_ARR_NEST_START = 109, DEVLINK_ATTR_FMSG_NEST_END = 110, DEVLINK_ATTR_FMSG_OBJ_NAME = 111, DEVLINK_ATTR_FMSG_OBJ_VALUE_TYPE = 112, DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA = 113, DEVLINK_ATTR_HEALTH_REPORTER = 114, DEVLINK_ATTR_HEALTH_REPORTER_NAME = 115, DEVLINK_ATTR_HEALTH_REPORTER_STATE = 116, DEVLINK_ATTR_HEALTH_REPORTER_ERR_COUNT = 117, DEVLINK_ATTR_HEALTH_REPORTER_RECOVER_COUNT = 118, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS = 119, DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD = 120, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER = 121, DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME = 122, DEVLINK_ATTR_FLASH_UPDATE_COMPONENT = 123, DEVLINK_ATTR_FLASH_UPDATE_STATUS_MSG = 124, DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE = 125, 
DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL = 126, DEVLINK_ATTR_PORT_PCI_PF_NUMBER = 127, DEVLINK_ATTR_PORT_PCI_VF_NUMBER = 128, DEVLINK_ATTR_STATS = 129, DEVLINK_ATTR_TRAP_NAME = 130, DEVLINK_ATTR_TRAP_ACTION = 131, DEVLINK_ATTR_TRAP_TYPE = 132, DEVLINK_ATTR_TRAP_GENERIC = 133, DEVLINK_ATTR_TRAP_METADATA = 134, DEVLINK_ATTR_TRAP_GROUP_NAME = 135, DEVLINK_ATTR_RELOAD_FAILED = 136, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS = 137, DEVLINK_ATTR_NETNS_FD = 138, DEVLINK_ATTR_NETNS_PID = 139, DEVLINK_ATTR_NETNS_ID = 140, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP = 141, DEVLINK_ATTR_TRAP_POLICER_ID = 142, DEVLINK_ATTR_TRAP_POLICER_RATE = 143, DEVLINK_ATTR_TRAP_POLICER_BURST = 144, DEVLINK_ATTR_PORT_FUNCTION = 145, DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER = 146, DEVLINK_ATTR_PORT_LANES = 147, DEVLINK_ATTR_PORT_SPLITTABLE = 148, DEVLINK_ATTR_PORT_EXTERNAL = 149, DEVLINK_ATTR_PORT_CONTROLLER_NUMBER = 150, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TIMEOUT = 151, DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK = 152, DEVLINK_ATTR_RELOAD_ACTION = 153, DEVLINK_ATTR_RELOAD_ACTIONS_PERFORMED = 154, DEVLINK_ATTR_RELOAD_LIMITS = 155, DEVLINK_ATTR_DEV_STATS = 156, DEVLINK_ATTR_RELOAD_STATS = 157, DEVLINK_ATTR_RELOAD_STATS_ENTRY = 158, DEVLINK_ATTR_RELOAD_STATS_LIMIT = 159, DEVLINK_ATTR_RELOAD_STATS_VALUE = 160, DEVLINK_ATTR_REMOTE_RELOAD_STATS = 161, DEVLINK_ATTR_RELOAD_ACTION_INFO = 162, DEVLINK_ATTR_RELOAD_ACTION_STATS = 163, DEVLINK_ATTR_PORT_PCI_SF_NUMBER = 164, DEVLINK_ATTR_RATE_TYPE = 165, DEVLINK_ATTR_RATE_TX_SHARE = 166, DEVLINK_ATTR_RATE_TX_MAX = 167, DEVLINK_ATTR_RATE_NODE_NAME = 168, DEVLINK_ATTR_RATE_PARENT_NODE_NAME = 169, DEVLINK_ATTR_REGION_MAX_SNAPSHOTS = 170, DEVLINK_ATTR_LINECARD_INDEX = 171, DEVLINK_ATTR_LINECARD_STATE = 172, DEVLINK_ATTR_LINECARD_TYPE = 173, DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES = 174, DEVLINK_ATTR_NESTED_DEVLINK = 175, DEVLINK_ATTR_SELFTESTS = 176, DEVLINK_ATTR_RATE_TX_PRIORITY = 177, DEVLINK_ATTR_RATE_TX_WEIGHT = 178, DEVLINK_ATTR_REGION_DIRECT = 179, __DEVLINK_ATTR_MAX = 180, DEVLINK_ATTR_MAX = 179, }; struct devlink_nl_dump_state { unsigned long instance; int idx; union { struct { u64 start_offset; }; struct { u64 dump_ts; }; }; }; typedef int devlink_nl_dump_one_func_t(struct sk_buff *, struct devlink *, struct netlink_callback *, int); struct devlink_reload_combination { enum devlink_reload_action action; enum devlink_reload_limit limit; }; enum devlink_info_version_type { DEVLINK_INFO_VERSION_TYPE_NONE = 0, DEVLINK_INFO_VERSION_TYPE_COMPONENT = 1, }; struct devlink_info_req { struct sk_buff *msg; void (*version_cb)(const char *, enum devlink_info_version_type, void *); void *version_cb_priv; }; enum devlink_command { DEVLINK_CMD_UNSPEC = 0, DEVLINK_CMD_GET = 1, DEVLINK_CMD_SET = 2, DEVLINK_CMD_NEW = 3, DEVLINK_CMD_DEL = 4, DEVLINK_CMD_PORT_GET = 5, DEVLINK_CMD_PORT_SET = 6, DEVLINK_CMD_PORT_NEW = 7, DEVLINK_CMD_PORT_DEL = 8, DEVLINK_CMD_PORT_SPLIT = 9, DEVLINK_CMD_PORT_UNSPLIT = 10, DEVLINK_CMD_SB_GET = 11, DEVLINK_CMD_SB_SET = 12, DEVLINK_CMD_SB_NEW = 13, DEVLINK_CMD_SB_DEL = 14, DEVLINK_CMD_SB_POOL_GET = 15, DEVLINK_CMD_SB_POOL_SET = 16, DEVLINK_CMD_SB_POOL_NEW = 17, DEVLINK_CMD_SB_POOL_DEL = 18, DEVLINK_CMD_SB_PORT_POOL_GET = 19, DEVLINK_CMD_SB_PORT_POOL_SET = 20, DEVLINK_CMD_SB_PORT_POOL_NEW = 21, DEVLINK_CMD_SB_PORT_POOL_DEL = 22, DEVLINK_CMD_SB_TC_POOL_BIND_GET = 23, DEVLINK_CMD_SB_TC_POOL_BIND_SET = 24, DEVLINK_CMD_SB_TC_POOL_BIND_NEW = 25, DEVLINK_CMD_SB_TC_POOL_BIND_DEL = 26, DEVLINK_CMD_SB_OCC_SNAPSHOT = 27, DEVLINK_CMD_SB_OCC_MAX_CLEAR = 28, DEVLINK_CMD_ESWITCH_GET = 29, 
DEVLINK_CMD_ESWITCH_SET = 30, DEVLINK_CMD_DPIPE_TABLE_GET = 31, DEVLINK_CMD_DPIPE_ENTRIES_GET = 32, DEVLINK_CMD_DPIPE_HEADERS_GET = 33, DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET = 34, DEVLINK_CMD_RESOURCE_SET = 35, DEVLINK_CMD_RESOURCE_DUMP = 36, DEVLINK_CMD_RELOAD = 37, DEVLINK_CMD_PARAM_GET = 38, DEVLINK_CMD_PARAM_SET = 39, DEVLINK_CMD_PARAM_NEW = 40, DEVLINK_CMD_PARAM_DEL = 41, DEVLINK_CMD_REGION_GET = 42, DEVLINK_CMD_REGION_SET = 43, DEVLINK_CMD_REGION_NEW = 44, DEVLINK_CMD_REGION_DEL = 45, DEVLINK_CMD_REGION_READ = 46, DEVLINK_CMD_PORT_PARAM_GET = 47, DEVLINK_CMD_PORT_PARAM_SET = 48, DEVLINK_CMD_PORT_PARAM_NEW = 49, DEVLINK_CMD_PORT_PARAM_DEL = 50, DEVLINK_CMD_INFO_GET = 51, DEVLINK_CMD_HEALTH_REPORTER_GET = 52, DEVLINK_CMD_HEALTH_REPORTER_SET = 53, DEVLINK_CMD_HEALTH_REPORTER_RECOVER = 54, DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE = 55, DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET = 56, DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR = 57, DEVLINK_CMD_FLASH_UPDATE = 58, DEVLINK_CMD_FLASH_UPDATE_END = 59, DEVLINK_CMD_FLASH_UPDATE_STATUS = 60, DEVLINK_CMD_TRAP_GET = 61, DEVLINK_CMD_TRAP_SET = 62, DEVLINK_CMD_TRAP_NEW = 63, DEVLINK_CMD_TRAP_DEL = 64, DEVLINK_CMD_TRAP_GROUP_GET = 65, DEVLINK_CMD_TRAP_GROUP_SET = 66, DEVLINK_CMD_TRAP_GROUP_NEW = 67, DEVLINK_CMD_TRAP_GROUP_DEL = 68, DEVLINK_CMD_TRAP_POLICER_GET = 69, DEVLINK_CMD_TRAP_POLICER_SET = 70, DEVLINK_CMD_TRAP_POLICER_NEW = 71, DEVLINK_CMD_TRAP_POLICER_DEL = 72, DEVLINK_CMD_HEALTH_REPORTER_TEST = 73, DEVLINK_CMD_RATE_GET = 74, DEVLINK_CMD_RATE_SET = 75, DEVLINK_CMD_RATE_NEW = 76, DEVLINK_CMD_RATE_DEL = 77, DEVLINK_CMD_LINECARD_GET = 78, DEVLINK_CMD_LINECARD_SET = 79, DEVLINK_CMD_LINECARD_NEW = 80, DEVLINK_CMD_LINECARD_DEL = 81, DEVLINK_CMD_SELFTESTS_GET = 82, DEVLINK_CMD_SELFTESTS_RUN = 83, __DEVLINK_CMD_MAX = 84, DEVLINK_CMD_MAX = 83, }; enum devlink_attr_selftest_id { DEVLINK_ATTR_SELFTEST_ID_UNSPEC = 0, DEVLINK_ATTR_SELFTEST_ID_FLASH = 1, __DEVLINK_ATTR_SELFTEST_ID_MAX = 2, DEVLINK_ATTR_SELFTEST_ID_MAX = 1, }; enum devlink_multicast_groups { DEVLINK_MCGRP_CONFIG = 0, }; enum devlink_attr_selftest_result { DEVLINK_ATTR_SELFTEST_RESULT_UNSPEC = 0, DEVLINK_ATTR_SELFTEST_RESULT = 1, DEVLINK_ATTR_SELFTEST_RESULT_ID = 2, DEVLINK_ATTR_SELFTEST_RESULT_STATUS = 3, __DEVLINK_ATTR_SELFTEST_RESULT_MAX = 4, DEVLINK_ATTR_SELFTEST_RESULT_MAX = 3, }; struct devlink_flash_notify { const char *status_msg; const char *component; unsigned long done; unsigned long total; unsigned long timeout; }; struct devlink_flash_component_lookup_ctx { const char *lookup_name; bool lookup_name_found; }; enum devlink_port_function_attr { DEVLINK_PORT_FUNCTION_ATTR_UNSPEC = 0, DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR = 1, DEVLINK_PORT_FN_ATTR_STATE = 2, DEVLINK_PORT_FN_ATTR_OPSTATE = 3, DEVLINK_PORT_FN_ATTR_CAPS = 4, __DEVLINK_PORT_FUNCTION_ATTR_MAX = 5, DEVLINK_PORT_FUNCTION_ATTR_MAX = 4, }; enum devlink_port_fn_attr_cap { DEVLINK_PORT_FN_ATTR_CAP_ROCE_BIT = 0, DEVLINK_PORT_FN_ATTR_CAP_MIGRATABLE_BIT = 1, DEVLINK_PORT_FN_ATTR_CAP_IPSEC_CRYPTO_BIT = 2, DEVLINK_PORT_FN_ATTR_CAP_IPSEC_PACKET_BIT = 3, __DEVLINK_PORT_FN_ATTR_CAPS_MAX = 4, }; struct devlink_sb { struct list_head list; unsigned int index; u32 size; u16 ingress_pools_count; u16 egress_pools_count; u16 ingress_tc_count; u16 egress_tc_count; }; enum devlink_dpipe_match_type { DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0, }; enum devlink_dpipe_action_type { DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY = 0, }; struct devlink_dpipe_table_ops; struct devlink_dpipe_table { void *priv; struct list_head list; const char *name; bool counters_enabled; bool 
counter_control_extern; bool resource_valid; u64 resource_id; u64 resource_units; struct devlink_dpipe_table_ops *table_ops; struct callback_head rcu; }; struct devlink_dpipe_dump_ctx; struct devlink_dpipe_table_ops { int (*actions_dump)(void *, struct sk_buff *); int (*matches_dump)(void *, struct sk_buff *); int (*entries_dump)(void *, bool, struct devlink_dpipe_dump_ctx *); int (*counters_set_update)(void *, bool); u64 (*size_get)(void *); }; struct devlink_dpipe_dump_ctx { struct genl_info *info; enum devlink_command cmd; struct sk_buff *skb; struct nlattr *nest; void *hdr; }; struct devlink_dpipe_value; struct devlink_dpipe_entry { u64 index; struct devlink_dpipe_value *match_values; unsigned int match_values_count; struct devlink_dpipe_value *action_values; unsigned int action_values_count; u64 counter; bool counter_valid; }; struct devlink_dpipe_action; struct devlink_dpipe_match; struct devlink_dpipe_value { union { struct devlink_dpipe_action *action; struct devlink_dpipe_match *match; }; unsigned int mapping_value; bool mapping_valid; unsigned int value_size; void *value; void *mask; }; struct devlink_dpipe_action { enum devlink_dpipe_action_type type; unsigned int header_index; struct devlink_dpipe_header *header; unsigned int field_id; }; struct devlink_dpipe_match { enum devlink_dpipe_match_type type; unsigned int header_index; struct devlink_dpipe_header *header; unsigned int field_id; }; enum devlink_resource_unit { DEVLINK_RESOURCE_UNIT_ENTRY = 0, }; struct devlink_resource_size_params { u64 size_min; u64 size_max; u64 size_granularity; enum devlink_resource_unit unit; }; typedef u64 devlink_resource_occ_get_t(void *); struct devlink_resource { const char *name; u64 id; u64 size; u64 size_new; bool size_valid; struct devlink_resource *parent; struct devlink_resource_size_params size_params; struct list_head list; struct list_head resource_list; devlink_resource_occ_get_t *occ_get; void *occ_get_priv; }; enum devlink_param_type { DEVLINK_PARAM_TYPE_U8 = 0, DEVLINK_PARAM_TYPE_U16 = 1, DEVLINK_PARAM_TYPE_U32 = 2, DEVLINK_PARAM_TYPE_STRING = 3, DEVLINK_PARAM_TYPE_BOOL = 4, }; struct devlink_param_gset_ctx; union devlink_param_value; struct devlink_param { u32 id; const char *name; bool generic; enum devlink_param_type type; unsigned long supported_cmodes; int (*get)(struct devlink *, u32, struct devlink_param_gset_ctx *); int (*set)(struct devlink *, u32, struct devlink_param_gset_ctx *); int (*validate)(struct devlink *, u32, union devlink_param_value, struct netlink_ext_ack *); }; union devlink_param_value { u8 vu8; u16 vu16; u32 vu32; char vstr[32]; bool vbool; }; enum devlink_param_cmode { DEVLINK_PARAM_CMODE_RUNTIME = 0, DEVLINK_PARAM_CMODE_DRIVERINIT = 1, DEVLINK_PARAM_CMODE_PERMANENT = 2, __DEVLINK_PARAM_CMODE_MAX = 3, DEVLINK_PARAM_CMODE_MAX = 2, }; struct devlink_param_gset_ctx { union devlink_param_value val; enum devlink_param_cmode cmode; }; enum devlink_param_generic_id { DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET = 0, DEVLINK_PARAM_GENERIC_ID_MAX_MACS = 1, DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV = 2, DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT = 3, DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI = 4, DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX = 5, DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN = 6, DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY = 7, DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE = 8, DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE = 9, DEVLINK_PARAM_GENERIC_ID_ENABLE_REMOTE_DEV_RESET = 10, DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH = 11, DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA = 12, 
DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET = 13, DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP = 14, DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE = 15, DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE = 16, __DEVLINK_PARAM_GENERIC_ID_MAX = 17, DEVLINK_PARAM_GENERIC_ID_MAX = 16, }; struct devlink_param_item { struct list_head list; const struct devlink_param *param; union devlink_param_value driverinit_value; bool driverinit_value_valid; union devlink_param_value driverinit_value_new; bool driverinit_value_new_valid; }; struct devlink_region_ops; struct devlink_port_region_ops; struct devlink_region { struct devlink *devlink; struct devlink_port *port; struct list_head list; union { const struct devlink_region_ops *ops; const struct devlink_port_region_ops *port_ops; }; struct mutex snapshot_lock; struct list_head snapshot_list; u32 max_snapshots; u32 cur_snapshots; u64 size; }; struct devlink_region_ops { const char *name; void (*destructor)(const void *); int (*snapshot)(struct devlink *, const struct devlink_region_ops *, struct netlink_ext_ack *, u8 **); int (*read)(struct devlink *, const struct devlink_region_ops *, struct netlink_ext_ack *, u64, u32, u8 *); void *priv; }; struct devlink_port_region_ops { const char *name; void (*destructor)(const void *); int (*snapshot)(struct devlink_port *, const struct devlink_port_region_ops *, struct netlink_ext_ack *, u8 **); int (*read)(struct devlink_port *, const struct devlink_port_region_ops *, struct netlink_ext_ack *, u64, u32, u8 *); void *priv; }; struct devlink_snapshot { struct list_head list; struct devlink_region *region; u8 *data; u32 id; }; typedef int devlink_chunk_fill_t(void *, u8 *, u32, u64, struct netlink_ext_ack *); enum devlink_health_reporter_state { DEVLINK_HEALTH_REPORTER_STATE_HEALTHY = 0, DEVLINK_HEALTH_REPORTER_STATE_ERROR = 1, }; struct devlink_health_reporter_ops; struct devlink_fmsg; struct devlink_health_reporter { struct list_head list; void *priv; const struct devlink_health_reporter_ops *ops; struct devlink *devlink; struct devlink_port *devlink_port; struct devlink_fmsg *dump_fmsg; u64 graceful_period; bool auto_recover; bool auto_dump; u8 health_state; u64 dump_ts; u64 dump_real_ts; u64 error_count; u64 recovery_count; u64 last_recovery_ts; }; struct devlink_health_reporter_ops { char *name; int (*recover)(struct devlink_health_reporter *, void *, struct netlink_ext_ack *); int (*dump)(struct devlink_health_reporter *, struct devlink_fmsg *, void *, struct netlink_ext_ack *); int (*diagnose)(struct devlink_health_reporter *, struct devlink_fmsg *, struct netlink_ext_ack *); int (*test)(struct devlink_health_reporter *, struct netlink_ext_ack *); }; struct devlink_fmsg { struct list_head item_list; bool putting_binary; }; struct devlink_fmsg_item { struct list_head list; int attrtype; u8 nla_type; u16 len; int value[0]; }; enum { DEVLINK_ATTR_TRAP_METADATA_TYPE_IN_PORT = 0, DEVLINK_ATTR_TRAP_METADATA_TYPE_FA_COOKIE = 1, }; enum { DEVLINK_ATTR_STATS_RX_PACKETS = 0, DEVLINK_ATTR_STATS_RX_BYTES = 1, DEVLINK_ATTR_STATS_RX_DROPPED = 2, __DEVLINK_ATTR_STATS_MAX = 3, DEVLINK_ATTR_STATS_MAX = 2, }; enum devlink_trap_generic_id { DEVLINK_TRAP_GENERIC_ID_SMAC_MC = 0, DEVLINK_TRAP_GENERIC_ID_VLAN_TAG_MISMATCH = 1, DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER = 2, DEVLINK_TRAP_GENERIC_ID_INGRESS_STP_FILTER = 3, DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST = 4, DEVLINK_TRAP_GENERIC_ID_PORT_LOOPBACK_FILTER = 5, DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_ROUTE = 6, DEVLINK_TRAP_GENERIC_ID_TTL_ERROR = 7, DEVLINK_TRAP_GENERIC_ID_TAIL_DROP = 8, 
DEVLINK_TRAP_GENERIC_ID_NON_IP_PACKET = 9, DEVLINK_TRAP_GENERIC_ID_UC_DIP_MC_DMAC = 10, DEVLINK_TRAP_GENERIC_ID_DIP_LB = 11, DEVLINK_TRAP_GENERIC_ID_SIP_MC = 12, DEVLINK_TRAP_GENERIC_ID_SIP_LB = 13, DEVLINK_TRAP_GENERIC_ID_CORRUPTED_IP_HDR = 14, DEVLINK_TRAP_GENERIC_ID_IPV4_SIP_BC = 15, DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_RESERVED_SCOPE = 16, DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE = 17, DEVLINK_TRAP_GENERIC_ID_MTU_ERROR = 18, DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH = 19, DEVLINK_TRAP_GENERIC_ID_RPF = 20, DEVLINK_TRAP_GENERIC_ID_REJECT_ROUTE = 21, DEVLINK_TRAP_GENERIC_ID_IPV4_LPM_UNICAST_MISS = 22, DEVLINK_TRAP_GENERIC_ID_IPV6_LPM_UNICAST_MISS = 23, DEVLINK_TRAP_GENERIC_ID_NON_ROUTABLE = 24, DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR = 25, DEVLINK_TRAP_GENERIC_ID_OVERLAY_SMAC_MC = 26, DEVLINK_TRAP_GENERIC_ID_INGRESS_FLOW_ACTION_DROP = 27, DEVLINK_TRAP_GENERIC_ID_EGRESS_FLOW_ACTION_DROP = 28, DEVLINK_TRAP_GENERIC_ID_STP = 29, DEVLINK_TRAP_GENERIC_ID_LACP = 30, DEVLINK_TRAP_GENERIC_ID_LLDP = 31, DEVLINK_TRAP_GENERIC_ID_IGMP_QUERY = 32, DEVLINK_TRAP_GENERIC_ID_IGMP_V1_REPORT = 33, DEVLINK_TRAP_GENERIC_ID_IGMP_V2_REPORT = 34, DEVLINK_TRAP_GENERIC_ID_IGMP_V3_REPORT = 35, DEVLINK_TRAP_GENERIC_ID_IGMP_V2_LEAVE = 36, DEVLINK_TRAP_GENERIC_ID_MLD_QUERY = 37, DEVLINK_TRAP_GENERIC_ID_MLD_V1_REPORT = 38, DEVLINK_TRAP_GENERIC_ID_MLD_V2_REPORT = 39, DEVLINK_TRAP_GENERIC_ID_MLD_V1_DONE = 40, DEVLINK_TRAP_GENERIC_ID_IPV4_DHCP = 41, DEVLINK_TRAP_GENERIC_ID_IPV6_DHCP = 42, DEVLINK_TRAP_GENERIC_ID_ARP_REQUEST = 43, DEVLINK_TRAP_GENERIC_ID_ARP_RESPONSE = 44, DEVLINK_TRAP_GENERIC_ID_ARP_OVERLAY = 45, DEVLINK_TRAP_GENERIC_ID_IPV6_NEIGH_SOLICIT = 46, DEVLINK_TRAP_GENERIC_ID_IPV6_NEIGH_ADVERT = 47, DEVLINK_TRAP_GENERIC_ID_IPV4_BFD = 48, DEVLINK_TRAP_GENERIC_ID_IPV6_BFD = 49, DEVLINK_TRAP_GENERIC_ID_IPV4_OSPF = 50, DEVLINK_TRAP_GENERIC_ID_IPV6_OSPF = 51, DEVLINK_TRAP_GENERIC_ID_IPV4_BGP = 52, DEVLINK_TRAP_GENERIC_ID_IPV6_BGP = 53, DEVLINK_TRAP_GENERIC_ID_IPV4_VRRP = 54, DEVLINK_TRAP_GENERIC_ID_IPV6_VRRP = 55, DEVLINK_TRAP_GENERIC_ID_IPV4_PIM = 56, DEVLINK_TRAP_GENERIC_ID_IPV6_PIM = 57, DEVLINK_TRAP_GENERIC_ID_UC_LB = 58, DEVLINK_TRAP_GENERIC_ID_LOCAL_ROUTE = 59, DEVLINK_TRAP_GENERIC_ID_EXTERNAL_ROUTE = 60, DEVLINK_TRAP_GENERIC_ID_IPV6_UC_DIP_LINK_LOCAL_SCOPE = 61, DEVLINK_TRAP_GENERIC_ID_IPV6_DIP_ALL_NODES = 62, DEVLINK_TRAP_GENERIC_ID_IPV6_DIP_ALL_ROUTERS = 63, DEVLINK_TRAP_GENERIC_ID_IPV6_ROUTER_SOLICIT = 64, DEVLINK_TRAP_GENERIC_ID_IPV6_ROUTER_ADVERT = 65, DEVLINK_TRAP_GENERIC_ID_IPV6_REDIRECT = 66, DEVLINK_TRAP_GENERIC_ID_IPV4_ROUTER_ALERT = 67, DEVLINK_TRAP_GENERIC_ID_IPV6_ROUTER_ALERT = 68, DEVLINK_TRAP_GENERIC_ID_PTP_EVENT = 69, DEVLINK_TRAP_GENERIC_ID_PTP_GENERAL = 70, DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_SAMPLE = 71, DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_TRAP = 72, DEVLINK_TRAP_GENERIC_ID_EARLY_DROP = 73, DEVLINK_TRAP_GENERIC_ID_VXLAN_PARSING = 74, DEVLINK_TRAP_GENERIC_ID_LLC_SNAP_PARSING = 75, DEVLINK_TRAP_GENERIC_ID_VLAN_PARSING = 76, DEVLINK_TRAP_GENERIC_ID_PPPOE_PPP_PARSING = 77, DEVLINK_TRAP_GENERIC_ID_MPLS_PARSING = 78, DEVLINK_TRAP_GENERIC_ID_ARP_PARSING = 79, DEVLINK_TRAP_GENERIC_ID_IP_1_PARSING = 80, DEVLINK_TRAP_GENERIC_ID_IP_N_PARSING = 81, DEVLINK_TRAP_GENERIC_ID_GRE_PARSING = 82, DEVLINK_TRAP_GENERIC_ID_UDP_PARSING = 83, DEVLINK_TRAP_GENERIC_ID_TCP_PARSING = 84, DEVLINK_TRAP_GENERIC_ID_IPSEC_PARSING = 85, DEVLINK_TRAP_GENERIC_ID_SCTP_PARSING = 86, DEVLINK_TRAP_GENERIC_ID_DCCP_PARSING = 87, DEVLINK_TRAP_GENERIC_ID_GTP_PARSING = 88, DEVLINK_TRAP_GENERIC_ID_ESP_PARSING = 89, 
DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_NEXTHOP = 90, DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER = 91, DEVLINK_TRAP_GENERIC_ID_EAPOL = 92, DEVLINK_TRAP_GENERIC_ID_LOCKED_PORT = 93, __DEVLINK_TRAP_GENERIC_ID_MAX = 94, DEVLINK_TRAP_GENERIC_ID_MAX = 93, }; enum devlink_trap_group_generic_id { DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS = 0, DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS = 1, DEVLINK_TRAP_GROUP_GENERIC_ID_L3_EXCEPTIONS = 2, DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS = 3, DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS = 4, DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_DROPS = 5, DEVLINK_TRAP_GROUP_GENERIC_ID_STP = 6, DEVLINK_TRAP_GROUP_GENERIC_ID_LACP = 7, DEVLINK_TRAP_GROUP_GENERIC_ID_LLDP = 8, DEVLINK_TRAP_GROUP_GENERIC_ID_MC_SNOOPING = 9, DEVLINK_TRAP_GROUP_GENERIC_ID_DHCP = 10, DEVLINK_TRAP_GROUP_GENERIC_ID_NEIGH_DISCOVERY = 11, DEVLINK_TRAP_GROUP_GENERIC_ID_BFD = 12, DEVLINK_TRAP_GROUP_GENERIC_ID_OSPF = 13, DEVLINK_TRAP_GROUP_GENERIC_ID_BGP = 14, DEVLINK_TRAP_GROUP_GENERIC_ID_VRRP = 15, DEVLINK_TRAP_GROUP_GENERIC_ID_PIM = 16, DEVLINK_TRAP_GROUP_GENERIC_ID_UC_LB = 17, DEVLINK_TRAP_GROUP_GENERIC_ID_LOCAL_DELIVERY = 18, DEVLINK_TRAP_GROUP_GENERIC_ID_EXTERNAL_DELIVERY = 19, DEVLINK_TRAP_GROUP_GENERIC_ID_IPV6 = 20, DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_EVENT = 21, DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_GENERAL = 22, DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_SAMPLE = 23, DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_TRAP = 24, DEVLINK_TRAP_GROUP_GENERIC_ID_PARSER_ERROR_DROPS = 25, DEVLINK_TRAP_GROUP_GENERIC_ID_EAPOL = 26, __DEVLINK_TRAP_GROUP_GENERIC_ID_MAX = 27, DEVLINK_TRAP_GROUP_GENERIC_ID_MAX = 26, }; struct devlink_trap_policer_item; struct devlink_stats; struct devlink_trap_group_item { const struct devlink_trap_group *group; struct devlink_trap_policer_item *policer_item; struct list_head list; struct devlink_stats __attribute__((btf_type_tag("percpu"))) *stats; }; struct devlink_trap_policer_item { const struct devlink_trap_policer *policer; u64 rate; u64 burst; struct list_head list; }; struct devlink_stats { u64_stats_t rx_bytes; u64_stats_t rx_packets; struct u64_stats_sync syncp; }; struct devlink_trap_item { const struct devlink_trap *trap; struct devlink_trap_group_item *group_item; struct list_head list; enum devlink_trap_action action; struct devlink_stats __attribute__((btf_type_tag("percpu"))) *stats; void *priv; }; struct devlink_linecard_type { const char *type; const void *priv; }; enum vlan_flags { VLAN_FLAG_REORDER_HDR = 1, VLAN_FLAG_GVRP = 2, VLAN_FLAG_LOOSE_BINDING = 4, VLAN_FLAG_MVRP = 8, VLAN_FLAG_BRIDGE_BINDING = 16, }; enum vlan_protos { VLAN_PROTO_8021Q = 0, VLAN_PROTO_8021AD = 1, VLAN_PROTO_NUM = 2, }; struct vlan_pcpu_stats { u64_stats_t rx_packets; u64_stats_t rx_bytes; u64_stats_t rx_multicast; u64_stats_t tx_packets; u64_stats_t tx_bytes; struct u64_stats_sync syncp; u32 rx_errors; u32 tx_dropped; }; struct vlan_vid_info { struct list_head list; __be16 proto; u16 vid; int refcount; }; struct vlan_priority_tci_mapping; struct vlan_dev_priv { unsigned int nr_ingress_mappings; u32 ingress_priority_map[8]; unsigned int nr_egress_mappings; struct vlan_priority_tci_mapping *egress_priority_map[16]; __be16 vlan_proto; u16 vlan_id; u16 flags; struct net_device *real_dev; netdevice_tracker dev_tracker; unsigned char real_dev_addr[6]; struct proc_dir_entry *dent; struct vlan_pcpu_stats __attribute__((btf_type_tag("percpu"))) *vlan_pcpu_stats; }; struct vlan_priority_tci_mapping { u32 priority; u16 vlan_qos; struct vlan_priority_tci_mapping *next; }; enum nl80211_chan_width { NL80211_CHAN_WIDTH_20_NOHT = 0, 
NL80211_CHAN_WIDTH_20 = 1, NL80211_CHAN_WIDTH_40 = 2, NL80211_CHAN_WIDTH_80 = 3, NL80211_CHAN_WIDTH_80P80 = 4, NL80211_CHAN_WIDTH_160 = 5, NL80211_CHAN_WIDTH_5 = 6, NL80211_CHAN_WIDTH_10 = 7, NL80211_CHAN_WIDTH_1 = 8, NL80211_CHAN_WIDTH_2 = 9, NL80211_CHAN_WIDTH_4 = 10, NL80211_CHAN_WIDTH_8 = 11, NL80211_CHAN_WIDTH_16 = 12, NL80211_CHAN_WIDTH_320 = 13, }; enum ieee80211_edmg_bw_config { IEEE80211_EDMG_BW_CONFIG_4 = 4, IEEE80211_EDMG_BW_CONFIG_5 = 5, IEEE80211_EDMG_BW_CONFIG_6 = 6, IEEE80211_EDMG_BW_CONFIG_7 = 7, IEEE80211_EDMG_BW_CONFIG_8 = 8, IEEE80211_EDMG_BW_CONFIG_9 = 9, IEEE80211_EDMG_BW_CONFIG_10 = 10, IEEE80211_EDMG_BW_CONFIG_11 = 11, IEEE80211_EDMG_BW_CONFIG_12 = 12, IEEE80211_EDMG_BW_CONFIG_13 = 13, IEEE80211_EDMG_BW_CONFIG_14 = 14, IEEE80211_EDMG_BW_CONFIG_15 = 15, }; struct ieee80211_edmg { u8 channels; enum ieee80211_edmg_bw_config bw_config; }; struct ieee80211_channel; struct cfg80211_chan_def { struct ieee80211_channel *chan; enum nl80211_chan_width width; u32 center_freq1; u32 center_freq2; struct ieee80211_edmg edmg; u16 freq1_offset; }; struct cfg80211_internal_bss; enum nl80211_iftype { NL80211_IFTYPE_UNSPECIFIED = 0, NL80211_IFTYPE_ADHOC = 1, NL80211_IFTYPE_STATION = 2, NL80211_IFTYPE_AP = 3, NL80211_IFTYPE_AP_VLAN = 4, NL80211_IFTYPE_WDS = 5, NL80211_IFTYPE_MONITOR = 6, NL80211_IFTYPE_MESH_POINT = 7, NL80211_IFTYPE_P2P_CLIENT = 8, NL80211_IFTYPE_P2P_GO = 9, NL80211_IFTYPE_P2P_DEVICE = 10, NL80211_IFTYPE_OCB = 11, NL80211_IFTYPE_NAN = 12, NUM_NL80211_IFTYPES = 13, NL80211_IFTYPE_MAX = 12, }; struct cfg80211_conn; struct cfg80211_cached_keys; enum ieee80211_bss_type { IEEE80211_BSS_TYPE_ESS = 0, IEEE80211_BSS_TYPE_PBSS = 1, IEEE80211_BSS_TYPE_IBSS = 2, IEEE80211_BSS_TYPE_MBSS = 3, IEEE80211_BSS_TYPE_ANY = 4, }; struct wiphy; struct wiphy_work; typedef void (*wiphy_work_func_t)(struct wiphy *, struct wiphy_work *); struct wiphy_work { struct list_head entry; wiphy_work_func_t func; }; struct cfg80211_cqm_config; struct wireless_dev { struct wiphy *wiphy; enum nl80211_iftype iftype; struct list_head list; struct net_device *netdev; u32 identifier; struct list_head mgmt_registrations; u8 mgmt_registrations_need_update: 1; struct mutex mtx; bool use_4addr; bool is_running; bool registered; bool registering; u8 address[6]; struct cfg80211_conn *conn; struct cfg80211_cached_keys *connect_keys; enum ieee80211_bss_type conn_bss_type; u32 conn_owner_nlportid; struct work_struct disconnect_wk; u8 disconnect_bssid[6]; struct list_head event_list; spinlock_t event_lock; u8 connected: 1; bool ps; int ps_timeout; u32 ap_unexpected_nlportid; u32 owner_nlportid; bool nl_owner_dead; bool cac_started; unsigned long cac_start_time; unsigned int cac_time_ms; struct wiphy_work cqm_rssi_work; struct cfg80211_cqm_config __attribute__((btf_type_tag("rcu"))) *cqm_config; struct list_head pmsr_list; spinlock_t pmsr_lock; struct work_struct pmsr_free_wk; unsigned long unprot_beacon_reported; union { struct { u8 connected_addr[6]; u8 ssid[32]; u8 ssid_len; long: 0; } client; struct { int beacon_interval; struct cfg80211_chan_def preset_chandef; struct cfg80211_chan_def chandef; u8 id[32]; u8 id_len; u8 id_up_len; } mesh; struct { struct cfg80211_chan_def preset_chandef; u8 ssid[32]; u8 ssid_len; } ap; struct { struct cfg80211_internal_bss *current_bss; struct cfg80211_chan_def chandef; int beacon_interval; u8 ssid[32]; u8 ssid_len; } ibss; struct { struct cfg80211_chan_def chandef; } ocb; } u; struct { u8 addr[6]; union { struct { unsigned int beacon_interval; struct cfg80211_chan_def chandef; } 
ap; struct { struct cfg80211_internal_bss *current_bss; } client; }; } links[15]; u16 valid_links; u64 android_kabi_reserved1; u64 android_kabi_reserved2; }; enum cfg80211_signal_type { CFG80211_SIGNAL_TYPE_NONE = 0, CFG80211_SIGNAL_TYPE_MBM = 1, CFG80211_SIGNAL_TYPE_UNSPEC = 2, }; struct rfkill; struct mac_address; struct ieee80211_txrx_stypes; struct ieee80211_iface_combination; struct wiphy_iftype_akm_suites; struct wiphy_wowlan_support; struct cfg80211_wowlan; struct wiphy_iftype_ext_capab; struct ieee80211_supported_band; struct regulatory_request; struct ieee80211_regdomain; struct ieee80211_ht_cap; struct ieee80211_vht_cap; struct wiphy_coalesce_support; struct wiphy_vendor_command; struct nl80211_vendor_cmd_info; struct cfg80211_pmsr_capabilities; struct cfg80211_sar_capa; struct wiphy { struct mutex mtx; u8 perm_addr[6]; u8 addr_mask[6]; struct mac_address *addresses; const struct ieee80211_txrx_stypes *mgmt_stypes; const struct ieee80211_iface_combination *iface_combinations; int n_iface_combinations; u16 software_iftypes; u16 n_addresses; u16 interface_modes; u16 max_acl_mac_addrs; u32 flags; u32 regulatory_flags; u32 features; u8 ext_features[9]; u32 ap_sme_capa; enum cfg80211_signal_type signal_type; int bss_priv_size; u8 max_scan_ssids; u8 max_sched_scan_reqs; u8 max_sched_scan_ssids; u8 max_match_sets; u16 max_scan_ie_len; u16 max_sched_scan_ie_len; u32 max_sched_scan_plans; u32 max_sched_scan_plan_interval; u32 max_sched_scan_plan_iterations; int n_cipher_suites; const u32 *cipher_suites; int n_akm_suites; const u32 *akm_suites; const struct wiphy_iftype_akm_suites *iftype_akm_suites; unsigned int num_iftype_akm_suites; u8 retry_short; u8 retry_long; u32 frag_threshold; u32 rts_threshold; u8 coverage_class; char fw_version[32]; u32 hw_version; const struct wiphy_wowlan_support *wowlan; struct cfg80211_wowlan *wowlan_config; u16 max_remain_on_channel_duration; u8 max_num_pmkids; u32 available_antennas_tx; u32 available_antennas_rx; u32 probe_resp_offload; const u8 *extended_capabilities; const u8 *extended_capabilities_mask; u8 extended_capabilities_len; const struct wiphy_iftype_ext_capab *iftype_ext_capab; unsigned int num_iftype_ext_capab; const void *privid; struct ieee80211_supported_band *bands[6]; void (*reg_notifier)(struct wiphy *, struct regulatory_request *); const struct ieee80211_regdomain __attribute__((btf_type_tag("rcu"))) *regd; struct device dev; bool registered; struct dentry *debugfsdir; const struct ieee80211_ht_cap *ht_capa_mod_mask; const struct ieee80211_vht_cap *vht_capa_mod_mask; struct list_head wdev_list; possible_net_t _net; const struct wiphy_coalesce_support *coalesce; const struct wiphy_vendor_command *vendor_commands; const struct nl80211_vendor_cmd_info *vendor_events; int n_vendor_commands; int n_vendor_events; u16 max_ap_assoc_sta; u8 max_num_csa_counters; u32 bss_select_support; u8 nan_supported_bands; u32 txq_limit; u32 txq_memory_limit; u32 txq_quantum; unsigned long tx_queue_len; u8 support_mbssid: 1; u8 support_only_he_mbssid: 1; const struct cfg80211_pmsr_capabilities *pmsr_capa; struct { u64 peer; u64 vif; u8 max_retry; } tid_config_support; u8 max_data_retry_count; const struct cfg80211_sar_capa *sar_capa; struct rfkill *rfkill; u8 mbssid_max_interfaces; u8 ema_max_profile_periodicity; u16 max_num_akm_suites; u16 hw_timestamp_max_peers; u64 android_kabi_reserved1; long: 64; char priv[0]; }; struct mac_address { u8 addr[6]; }; struct ieee80211_txrx_stypes { u16 tx; u16 rx; }; struct ieee80211_iface_limit; struct 
ieee80211_iface_combination { const struct ieee80211_iface_limit *limits; u32 num_different_channels; u16 max_interfaces; u8 n_limits; bool beacon_int_infra_match; u8 radar_detect_widths; u8 radar_detect_regions; u32 beacon_int_min_gcd; }; struct ieee80211_iface_limit { u16 max; u16 types; }; struct wiphy_iftype_akm_suites { u16 iftypes_mask; const u32 *akm_suites; int n_akm_suites; }; struct wiphy_wowlan_tcp_support; struct wiphy_wowlan_support { u32 flags; int n_patterns; int pattern_max_len; int pattern_min_len; int max_pkt_offset; int max_nd_match_sets; const struct wiphy_wowlan_tcp_support *tcp; }; struct nl80211_wowlan_tcp_data_token_feature; struct wiphy_wowlan_tcp_support { const struct nl80211_wowlan_tcp_data_token_feature *tok; u32 data_payload_max; u32 data_interval_max; u32 wake_payload_max; bool seq; }; struct nl80211_wowlan_tcp_data_token_feature { __u32 min_len; __u32 max_len; __u32 bufsize; }; struct cfg80211_pkt_pattern; struct cfg80211_wowlan_tcp; struct cfg80211_sched_scan_request; struct cfg80211_wowlan { bool any; bool disconnect; bool magic_pkt; bool gtk_rekey_failure; bool eap_identity_req; bool four_way_handshake; bool rfkill_release; struct cfg80211_pkt_pattern *patterns; struct cfg80211_wowlan_tcp *tcp; int n_patterns; struct cfg80211_sched_scan_request *nd_config; }; struct cfg80211_pkt_pattern { const u8 *mask; const u8 *pattern; int pattern_len; int pkt_offset; }; struct nl80211_wowlan_tcp_data_seq { __u32 start; __u32 offset; __u32 len; }; struct nl80211_wowlan_tcp_data_token { __u32 offset; __u32 len; __u8 token_stream[0]; }; struct cfg80211_wowlan_tcp { struct socket *sock; __be32 src; __be32 dst; u16 src_port; u16 dst_port; u8 dst_mac[6]; int payload_len; const u8 *payload; struct nl80211_wowlan_tcp_data_seq payload_seq; u32 data_interval; u32 wake_len; const u8 *wake_data; const u8 *wake_mask; u32 tokens_size; struct nl80211_wowlan_tcp_data_token payload_tok; }; enum nl80211_band { NL80211_BAND_2GHZ = 0, NL80211_BAND_5GHZ = 1, NL80211_BAND_60GHZ = 2, NL80211_BAND_6GHZ = 3, NL80211_BAND_S1GHZ = 4, NL80211_BAND_LC = 5, NUM_NL80211_BANDS = 6, }; struct cfg80211_bss_select_adjust { enum nl80211_band band; s8 delta; }; struct cfg80211_ssid; struct cfg80211_match_set; struct cfg80211_sched_scan_plan; struct cfg80211_sched_scan_request { u64 reqid; struct cfg80211_ssid *ssids; int n_ssids; u32 n_channels; const u8 *ie; size_t ie_len; u32 flags; struct cfg80211_match_set *match_sets; int n_match_sets; s32 min_rssi_thold; u32 delay; struct cfg80211_sched_scan_plan *scan_plans; int n_scan_plans; u8 mac_addr[6]; u8 mac_addr_mask[6]; bool relative_rssi_set; s8 relative_rssi; struct cfg80211_bss_select_adjust rssi_adjust; struct wiphy *wiphy; struct net_device *dev; unsigned long scan_start; bool report_results; struct callback_head callback_head; u32 owner_nlportid; bool nl_owner_dead; struct list_head list; u64 android_kabi_reserved1; struct ieee80211_channel *channels[0]; }; struct cfg80211_ssid { u8 ssid[32]; u8 ssid_len; }; struct cfg80211_match_set { struct cfg80211_ssid ssid; u8 bssid[6]; s32 rssi_thold; s32 per_band_rssi_thold[6]; }; struct cfg80211_sched_scan_plan { u32 interval; u32 iterations; }; enum nl80211_dfs_state { NL80211_DFS_USABLE = 0, NL80211_DFS_UNAVAILABLE = 1, NL80211_DFS_AVAILABLE = 2, }; struct ieee80211_channel { enum nl80211_band band; u32 center_freq; u16 freq_offset; u16 hw_value; u32 flags; int max_antenna_gain; int max_power; int max_reg_power; bool beacon_found; u32 orig_flags; int orig_mag; int orig_mpwr; enum nl80211_dfs_state 
dfs_state; unsigned long dfs_state_entered; unsigned int dfs_cac_ms; s8 psd; }; struct wiphy_iftype_ext_capab { enum nl80211_iftype iftype; const u8 *extended_capabilities; const u8 *extended_capabilities_mask; u8 extended_capabilities_len; u16 eml_capabilities; u16 mld_capa_and_ops; }; struct ieee80211_mcs_info { u8 rx_mask[10]; __le16 rx_highest; u8 tx_params; u8 reserved[3]; }; struct ieee80211_sta_ht_cap { u16 cap; bool ht_supported; u8 ampdu_factor; u8 ampdu_density; struct ieee80211_mcs_info mcs; short: 0; } __attribute__((packed)); struct ieee80211_vht_mcs_info { __le16 rx_mcs_map; __le16 rx_highest; __le16 tx_mcs_map; __le16 tx_highest; }; struct ieee80211_sta_vht_cap { bool vht_supported; u32 cap; struct ieee80211_vht_mcs_info vht_mcs; }; struct ieee80211_sta_s1g_cap { bool s1g; u8 cap[10]; u8 nss_mcs[5]; }; struct ieee80211_rate; struct ieee80211_sband_iftype_data; struct ieee80211_supported_band { struct ieee80211_channel *channels; struct ieee80211_rate *bitrates; enum nl80211_band band; int n_channels; int n_bitrates; struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap; struct ieee80211_sta_s1g_cap s1g_cap; struct ieee80211_edmg edmg_cap; u16 n_iftype_data; const struct ieee80211_sband_iftype_data *iftype_data; }; struct ieee80211_rate { u32 flags; u16 bitrate; u16 hw_value; u16 hw_value_short; }; struct ieee80211_he_cap_elem { u8 mac_cap_info[6]; u8 phy_cap_info[11]; }; struct ieee80211_he_mcs_nss_supp { __le16 rx_mcs_80; __le16 tx_mcs_80; __le16 rx_mcs_160; __le16 tx_mcs_160; __le16 rx_mcs_80p80; __le16 tx_mcs_80p80; }; struct ieee80211_sta_he_cap { bool has_he; struct ieee80211_he_cap_elem he_cap_elem; struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp; u8 ppe_thres[25]; } __attribute__((packed)); struct ieee80211_he_6ghz_capa { __le16 capa; }; struct ieee80211_eht_cap_elem_fixed { u8 mac_cap_info[2]; u8 phy_cap_info[9]; }; struct ieee80211_eht_mcs_nss_supp_20mhz_only { union { struct { u8 rx_tx_mcs7_max_nss; u8 rx_tx_mcs9_max_nss; u8 rx_tx_mcs11_max_nss; u8 rx_tx_mcs13_max_nss; }; u8 rx_tx_max_nss[4]; }; }; struct ieee80211_eht_mcs_nss_supp_bw { union { struct { u8 rx_tx_mcs9_max_nss; u8 rx_tx_mcs11_max_nss; u8 rx_tx_mcs13_max_nss; }; u8 rx_tx_max_nss[3]; }; }; struct ieee80211_eht_mcs_nss_supp { union { struct ieee80211_eht_mcs_nss_supp_20mhz_only only_20mhz; struct { struct ieee80211_eht_mcs_nss_supp_bw _80; struct ieee80211_eht_mcs_nss_supp_bw _160; struct ieee80211_eht_mcs_nss_supp_bw _320; } bw; }; }; struct ieee80211_sta_eht_cap { bool has_eht; struct ieee80211_eht_cap_elem_fixed eht_cap_elem; struct ieee80211_eht_mcs_nss_supp eht_mcs_nss_supp; u8 eht_ppe_thres[32]; }; struct ieee80211_sband_iftype_data { u16 types_mask; struct ieee80211_sta_he_cap he_cap; struct ieee80211_he_6ghz_capa he_6ghz_capa; struct ieee80211_sta_eht_cap eht_cap; struct { const u8 *data; unsigned int len; } vendor_elems; } __attribute__((packed)); enum nl80211_reg_initiator { NL80211_REGDOM_SET_BY_CORE = 0, NL80211_REGDOM_SET_BY_USER = 1, NL80211_REGDOM_SET_BY_DRIVER = 2, NL80211_REGDOM_SET_BY_COUNTRY_IE = 3, }; enum nl80211_user_reg_hint_type { NL80211_USER_REG_HINT_USER = 0, NL80211_USER_REG_HINT_CELL_BASE = 1, NL80211_USER_REG_HINT_INDOOR = 2, }; enum nl80211_dfs_regions { NL80211_DFS_UNSET = 0, NL80211_DFS_FCC = 1, NL80211_DFS_ETSI = 2, NL80211_DFS_JP = 3, }; enum environment_cap { ENVIRON_ANY = 0, ENVIRON_INDOOR = 1, ENVIRON_OUTDOOR = 2, }; struct regulatory_request { struct callback_head callback_head; int wiphy_idx; enum nl80211_reg_initiator initiator; enum 
nl80211_user_reg_hint_type user_reg_hint_type; char alpha2[3]; enum nl80211_dfs_regions dfs_region; bool intersect; bool processed; enum environment_cap country_ie_env; struct list_head list; }; struct ieee80211_freq_range { u32 start_freq_khz; u32 end_freq_khz; u32 max_bandwidth_khz; }; struct ieee80211_power_rule { u32 max_antenna_gain; u32 max_eirp; }; struct ieee80211_wmm_ac { u16 cw_min; u16 cw_max; u16 cot; u8 aifsn; }; struct ieee80211_wmm_rule { struct ieee80211_wmm_ac client[4]; struct ieee80211_wmm_ac ap[4]; }; struct ieee80211_reg_rule { struct ieee80211_freq_range freq_range; struct ieee80211_power_rule power_rule; struct ieee80211_wmm_rule wmm_rule; u32 flags; u32 dfs_cac_ms; bool has_wmm; s8 psd; }; struct ieee80211_regdomain { struct callback_head callback_head; u32 n_reg_rules; char alpha2[3]; enum nl80211_dfs_regions dfs_region; struct ieee80211_reg_rule reg_rules[0]; }; struct ieee80211_ht_cap { __le16 cap_info; u8 ampdu_params_info; struct ieee80211_mcs_info mcs; __le16 extended_ht_cap_info; __le32 tx_BF_cap_info; u8 antenna_selection_info; } __attribute__((packed)); struct ieee80211_vht_cap { __le32 vht_cap_info; struct ieee80211_vht_mcs_info supp_mcs; }; struct wiphy_coalesce_support { int n_rules; int max_delay; int n_patterns; int pattern_max_len; int pattern_min_len; int max_pkt_offset; }; struct nl80211_vendor_cmd_info { __u32 vendor_id; __u32 subcmd; }; struct wiphy_vendor_command { struct nl80211_vendor_cmd_info info; u32 flags; int (*doit)(struct wiphy *, struct wireless_dev *, const void *, int); int (*dumpit)(struct wiphy *, struct wireless_dev *, struct sk_buff *, const void *, int, unsigned long *); const struct nla_policy *policy; unsigned int maxattr; u64 android_kabi_reserved1; }; struct cfg80211_pmsr_capabilities { unsigned int max_peers; u8 report_ap_tsf: 1; u8 randomize_mac_addr: 1; struct { u32 preambles; u32 bandwidths; s8 max_bursts_exponent; u8 max_ftms_per_burst; u8 supported: 1; u8 asap: 1; u8 non_asap: 1; u8 request_lci: 1; u8 request_civicloc: 1; u8 trigger_based: 1; u8 non_trigger_based: 1; } ftm; }; enum nl80211_sar_type { NL80211_SAR_TYPE_POWER = 0, NUM_NL80211_SAR_TYPE = 1, }; struct cfg80211_sar_freq_ranges; struct cfg80211_sar_capa { enum nl80211_sar_type type; u32 num_freq_ranges; const struct cfg80211_sar_freq_ranges *freq_ranges; }; struct cfg80211_sar_freq_ranges { u32 start_freq; u32 end_freq; }; struct iw_ioctl_description { __u8 header_type; __u8 token_type; __u16 token_size; __u16 min_tokens; __u16 max_tokens; __u32 flags; }; struct compat_iw_point { compat_caddr_t pointer; __u16 length; __u16 flags; }; struct iwreq { union { char ifrn_name[16]; } ifr_ifrn; union iwreq_data u; }; typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *, unsigned int, struct iw_request_info *, iw_handler); struct __compat_iw_event { __u16 len; __u16 cmd; union { compat_caddr_t pointer; struct { struct {} __empty_ptr_bytes; __u8 ptr_bytes[0]; }; }; }; struct iw_event { __u16 len; __u16 cmd; union iwreq_data u; }; struct iw_encode_ext { __u32 ext_flags; __u8 tx_seq[8]; __u8 rx_seq[8]; struct sockaddr addr; __u16 alg; __u16 key_len; __u8 key[0]; }; struct iw_thrspy { struct sockaddr addr; struct iw_quality qual; struct iw_quality low; struct iw_quality high; }; struct vsock_tap { struct net_device *dev; struct module *module; struct list_head list; }; struct vsock_diag_msg { __u8 vdiag_family; __u8 vdiag_type; __u8 vdiag_state; __u8 vdiag_shutdown; __u32 vdiag_src_cid; __u32 vdiag_src_port; __u32 vdiag_dst_cid; __u32 vdiag_dst_port; __u32 
vdiag_ino; __u32 vdiag_cookie[2]; }; struct vsock_diag_req { __u8 sdiag_family; __u8 sdiag_protocol; __u16 pad; __u32 vdiag_states; __u32 vdiag_ino; __u32 vdiag_show; __u32 vdiag_cookie[2]; }; typedef void (*btf_trace_virtio_transport_alloc_pkt)(void *, __u32, __u32, __u32, __u32, __u32, __u16, __u16, __u32); typedef void (*btf_trace_virtio_transport_recv_pkt)(void *, __u32, __u32, __u32, __u32, __u32, __u16, __u16, __u32, __u32, __u32); enum virtio_vsock_op { VIRTIO_VSOCK_OP_INVALID = 0, VIRTIO_VSOCK_OP_REQUEST = 1, VIRTIO_VSOCK_OP_RESPONSE = 2, VIRTIO_VSOCK_OP_RST = 3, VIRTIO_VSOCK_OP_SHUTDOWN = 4, VIRTIO_VSOCK_OP_RW = 5, VIRTIO_VSOCK_OP_CREDIT_UPDATE = 6, VIRTIO_VSOCK_OP_CREDIT_REQUEST = 7, }; enum virtio_vsock_shutdown { VIRTIO_VSOCK_SHUTDOWN_RCV = 1, VIRTIO_VSOCK_SHUTDOWN_SEND = 2, }; enum af_vsockmon_transport { AF_VSOCK_TRANSPORT_UNKNOWN = 0, AF_VSOCK_TRANSPORT_NO_INFO = 1, AF_VSOCK_TRANSPORT_VIRTIO = 2, }; enum af_vsockmon_op { AF_VSOCK_OP_UNKNOWN = 0, AF_VSOCK_OP_CONNECT = 1, AF_VSOCK_OP_DISCONNECT = 2, AF_VSOCK_OP_CONTROL = 3, AF_VSOCK_OP_PAYLOAD = 4, }; enum virtio_vsock_type { VIRTIO_VSOCK_TYPE_STREAM = 1, VIRTIO_VSOCK_TYPE_SEQPACKET = 2, }; struct trace_event_raw_virtio_transport_alloc_pkt { struct trace_entry ent; __u32 src_cid; __u32 src_port; __u32 dst_cid; __u32 dst_port; __u32 len; __u16 type; __u16 op; __u32 flags; char __data[0]; }; struct trace_event_raw_virtio_transport_recv_pkt { struct trace_entry ent; __u32 src_cid; __u32 src_port; __u32 dst_cid; __u32 dst_port; __u32 len; __u16 type; __u16 op; __u32 flags; __u32 buf_alloc; __u32 fwd_cnt; char __data[0]; }; struct virtio_vsock_sock { struct vsock_sock *vsk; spinlock_t tx_lock; spinlock_t rx_lock; u32 tx_cnt; u32 peer_fwd_cnt; u32 peer_buf_alloc; u32 fwd_cnt; u32 last_fwd_cnt; u32 rx_bytes; u32 buf_alloc; struct sk_buff_head rx_queue; u32 msg_count; }; struct virtio_vsock_pkt_info { u32 remote_cid; u32 remote_port; struct vsock_sock *vsk; struct msghdr *msg; u32 pkt_len; u16 type; u16 op; u32 flags; bool reply; }; struct trace_event_data_offsets_virtio_transport_alloc_pkt {}; struct trace_event_data_offsets_virtio_transport_recv_pkt {}; struct af_vsockmon_hdr { __le64 src_cid; __le64 dst_cid; __le32 src_port; __le32 dst_port; __le16 op; __le16 transport; __le16 len; __u8 reserved[2]; }; struct vsock_loopback { struct workqueue_struct *workqueue; struct sk_buff_head pkt_queue; struct work_struct pkt_work; }; struct xdp_ring; struct xsk_queue { u32 ring_mask; u32 nentries; u32 cached_prod; u32 cached_cons; struct xdp_ring *ring; u64 invalid_descs; u64 queue_empty_descs; size_t ring_vmalloc_size; }; struct xdp_ring { u32 producer; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; u32 pad1; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; u32 consumer; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; u32 pad2; u32 flags; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; u32 pad3; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct xdp_rxtx_ring { struct xdp_ring ptrs; struct xdp_desc desc[0]; }; struct xdp_umem_ring { struct xdp_ring ptrs; u64 desc[0]; }; struct xsk_map; struct xsk_map_node { struct list_head node; struct xsk_map *map; struct xdp_sock __attribute__((btf_type_tag("rcu"))) **map_entry; }; struct xsk_map { struct bpf_map map; spinlock_t lock; atomic_t count; struct xdp_sock __attribute__((btf_type_tag("rcu"))) *xsk_map[0]; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; 
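/*
 * Editor's note (illustrative, not part of the BTF dump): struct xsk_queue
 * above follows the kernel's usual AF_XDP ring convention, where nentries is
 * assumed to be a power of two, ring_mask is assumed to equal nentries - 1,
 * and cached_prod/cached_cons are free-running counters. Under those
 * assumptions a slot index and the fill level fall out of simple masking and
 * subtraction; hypothetical helpers, shown only as a sketch:
 *
 *	static inline u32 xskq_slot_of(const struct xsk_queue *q, u32 counter)
 *	{
 *		return counter & q->ring_mask;	// wraps within [0, nentries)
 *	}
 *
 *	static inline u32 xskq_cached_entries(const struct xsk_queue *q)
 *	{
 *		return q->cached_prod - q->cached_cons;	// holds across wrap-around
 *	}
 */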
struct sockaddr_xdp { __u16 sxdp_family; __u16 sxdp_flags; __u32 sxdp_ifindex; __u32 sxdp_queue_id; __u32 sxdp_shared_umem_fd; }; struct xdp_ring_offset_v1 { __u64 producer; __u64 consumer; __u64 desc; }; struct parsed_desc { u32 mb; u32 valid; }; struct xdp_umem_reg { __u64 addr; __u64 len; __u32 chunk_size; __u32 headroom; __u32 flags; }; struct xdp_ring_offset { __u64 producer; __u64 consumer; __u64 desc; __u64 flags; }; struct xdp_mmap_offsets { struct xdp_ring_offset rx; struct xdp_ring_offset tx; struct xdp_ring_offset fr; struct xdp_ring_offset cr; }; struct xdp_options { __u32 flags; }; struct xdp_mmap_offsets_v1 { struct xdp_ring_offset_v1 rx; struct xdp_ring_offset_v1 tx; struct xdp_ring_offset_v1 fr; struct xdp_ring_offset_v1 cr; }; struct xdp_statistics { __u64 rx_dropped; __u64 rx_invalid_descs; __u64 tx_invalid_descs; __u64 rx_ring_full; __u64 rx_fill_ring_empty_descs; __u64 tx_ring_empty_descs; }; struct xsk_dma_map { dma_addr_t *dma_pages; struct device *dev; struct net_device *netdev; refcount_t users; struct list_head list; u32 dma_pages_cnt; bool dma_need_sync; }; typedef unsigned long cycles_t; typedef struct elf32_hdr Elf32_Ehdr; typedef struct elf32_phdr Elf32_Phdr; typedef struct elf64_phdr Elf64_Phdr; typedef struct elf32_note Elf32_Nhdr; struct compress_format { unsigned char magic[2]; const char *name; decompress_fn decompressor; }; enum cpio_fields { C_MAGIC = 0, C_INO = 1, C_MODE = 2, C_UID = 3, C_GID = 4, C_NLINK = 5, C_MTIME = 6, C_FILESIZE = 7, C_MAJ = 8, C_MIN = 9, C_RMAJ = 10, C_RMIN = 11, C_NAMESIZE = 12, C_CHKSUM = 13, C_NFIELDS = 14, }; struct cpio_data { void *data; size_t size; char name[18]; }; struct fdt_errtabent { const char *str; }; struct fprop_local_single { unsigned long events; unsigned int period; raw_spinlock_t lock; }; struct ida_bitmap { unsigned long bitmap[16]; }; struct klist_waiter { struct list_head list; struct klist_node *node; struct task_struct *process; int woken; }; struct uevent_sock { struct list_head list; struct sock *sk; }; typedef void (*btf_trace_ma_op)(void *, const char *, struct ma_state *); typedef void (*btf_trace_ma_read)(void *, const char *, struct ma_state *); typedef void (*btf_trace_ma_write)(void *, const char *, struct ma_state *, unsigned long, void *); enum maple_type { maple_dense = 0, maple_leaf_64 = 1, maple_range_64 = 2, maple_arange_64 = 3, }; struct maple_pnode; struct maple_metadata { unsigned char end; unsigned char gap; }; struct maple_range_64 { struct maple_pnode *parent; unsigned long pivot[15]; union { void __attribute__((btf_type_tag("rcu"))) *slot[16]; struct { void __attribute__((btf_type_tag("rcu"))) *pad[15]; struct maple_metadata meta; }; }; }; struct maple_arange_64 { struct maple_pnode *parent; unsigned long pivot[9]; void __attribute__((btf_type_tag("rcu"))) *slot[10]; unsigned long gap[10]; struct maple_metadata meta; }; struct maple_node { union { struct { struct maple_pnode *parent; void __attribute__((btf_type_tag("rcu"))) *slot[31]; }; struct { void *pad; struct callback_head rcu; struct maple_enode *piv_parent; unsigned char parent_slot; enum maple_type type; unsigned char slot_len; unsigned int ma_flags; }; struct maple_range_64 mr64; struct maple_arange_64 ma64; struct maple_alloc alloc; }; }; struct trace_event_raw_ma_op { struct trace_entry ent; const char *fn; unsigned long min; unsigned long max; unsigned long index; unsigned long last; void *node; char __data[0]; }; struct trace_event_raw_ma_read { struct trace_entry ent; const char *fn; unsigned long min; unsigned long 
max; unsigned long index; unsigned long last; void *node; char __data[0]; }; struct trace_event_raw_ma_write { struct trace_entry ent; const char *fn; unsigned long min; unsigned long max; unsigned long index; unsigned long last; unsigned long piv; void *val; void *node; char __data[0]; }; struct maple_topiary { struct maple_pnode *parent; struct maple_enode *next; }; struct ma_wr_state { struct ma_state *mas; struct maple_node *node; unsigned long r_min; unsigned long r_max; enum maple_type type; unsigned char offset_end; unsigned char node_end; unsigned long *pivots; unsigned long end_piv; void __attribute__((btf_type_tag("rcu"))) **slots; void *entry; void *content; }; struct maple_big_node { struct maple_pnode *parent; unsigned long pivot[33]; union { struct maple_enode *slot[34]; struct { unsigned long padding[21]; unsigned long gap[21]; }; }; unsigned char b_end; enum maple_type type; }; struct ma_topiary; struct maple_subtree_state { struct ma_state *orig_l; struct ma_state *orig_r; struct ma_state *l; struct ma_state *m; struct ma_state *r; struct ma_topiary *free; struct ma_topiary *destroy; struct maple_big_node *bn; }; struct ma_topiary { struct maple_enode *head; struct maple_enode *tail; struct maple_tree *mtree; }; struct trace_event_data_offsets_ma_op {}; struct trace_event_data_offsets_ma_read {}; struct trace_event_data_offsets_ma_write {}; struct radix_tree_preload { local_lock_t lock; unsigned int nr; struct xa_node *nodes; }; struct printf_spec { unsigned int type: 8; int field_width: 24; unsigned int flags: 8; unsigned int base: 8; int precision: 16; }; struct page_flags_fields { int width; int shift; int mask; const struct printf_spec *spec; const char *name; }; enum format_type { FORMAT_TYPE_NONE = 0, FORMAT_TYPE_WIDTH = 1, FORMAT_TYPE_PRECISION = 2, FORMAT_TYPE_CHAR = 3, FORMAT_TYPE_STR = 4, FORMAT_TYPE_PTR = 5, FORMAT_TYPE_PERCENT_CHAR = 6, FORMAT_TYPE_INVALID = 7, FORMAT_TYPE_LONG_LONG = 8, FORMAT_TYPE_ULONG = 9, FORMAT_TYPE_LONG = 10, FORMAT_TYPE_UBYTE = 11, FORMAT_TYPE_BYTE = 12, FORMAT_TYPE_USHORT = 13, FORMAT_TYPE_SHORT = 14, FORMAT_TYPE_UINT = 15, FORMAT_TYPE_INT = 16, FORMAT_TYPE_SIZE_T = 17, FORMAT_TYPE_PTRDIFF = 18, }; struct efi_generic_dev_path { u8 type; u8 sub_type; u16 length; }; typedef union { struct { u32 revision; efi_handle_t parent_handle; efi_system_table_t *system_table; efi_handle_t device_handle; void *file_path; void *reserved; u32 load_options_size; void *load_options; void *image_base; __u64 image_size; unsigned int image_code_type; unsigned int image_data_type; efi_status_t (*unload)(efi_handle_t); }; struct { u32 revision; u32 parent_handle; u32 system_table; u32 device_handle; u32 file_path; u32 reserved; u32 load_options_size; u32 load_options; u32 image_base; __u64 image_size; u32 image_code_type; u32 image_data_type; u32 unload; } mixed_mode; } efi_loaded_image_t; struct efi_vendor_dev_path { struct efi_generic_dev_path header; efi_guid_t vendorguid; u8 vendordata[0]; }; enum efistub_event { EFISTUB_EVT_INITRD = 0, EFISTUB_EVT_LOAD_OPTIONS = 1, EFISTUB_EVT_COUNT = 2, }; struct efi_tcg2_event { u32 event_size; struct { u32 header_size; u16 header_version; u32 pcr_index; u32 event_type; } __attribute__((packed)) event_header; } __attribute__((packed)); typedef struct efi_tcg2_event efi_tcg2_event_t; struct efi_tcg2_tagged_event { u32 tagged_event_id; u32 tagged_event_data_size; }; typedef struct efi_tcg2_tagged_event efi_tcg2_tagged_event_t; struct efi_measured_event { efi_tcg2_event_t event_data; efi_tcg2_tagged_event_t tagged_event; 
u8 tagged_event_data[0]; } __attribute__((packed)); struct efi_boot_memmap { unsigned long map_size; unsigned long desc_size; u32 desc_ver; unsigned long map_key; unsigned long buff_size; efi_memory_desc_t map[0]; }; union efi_load_file_protocol; typedef union efi_load_file_protocol efi_load_file_protocol_t; union efi_load_file_protocol { struct { efi_status_t (*load_file)(efi_load_file_protocol_t *, efi_device_path_protocol_t *, bool, unsigned long *, void *); }; struct { u32 load_file; } mixed_mode; }; typedef union efi_load_file_protocol efi_load_file2_protocol_t; typedef struct { u32 attributes; u16 file_path_list_length; const efi_char16_t *description; const efi_device_path_protocol_t *file_path_list; u32 optional_data_size; const void *optional_data; } efi_load_option_unpacked_t; typedef struct { u32 attributes; u16 file_path_list_length; u8 variable_data[0]; } __attribute__((packed)) efi_load_option_t; typedef u32 efi_tcg2_event_log_format; union efi_tcg2_protocol; typedef union efi_tcg2_protocol efi_tcg2_protocol_t; union efi_tcg2_protocol { struct { void *get_capability; efi_status_t (*get_event_log)(efi_tcg2_protocol_t *, efi_tcg2_event_log_format, efi_physical_addr_t *, efi_physical_addr_t *, efi_bool_t *); efi_status_t (*hash_log_extend_event)(efi_tcg2_protocol_t *, u64, efi_physical_addr_t, u64, const efi_tcg2_event_t *); void *submit_command; void *get_active_pcr_banks; void *set_active_pcr_banks; void *get_result_of_set_active_pcr_banks; }; struct { u32 get_capability; u32 get_event_log; u32 hash_log_extend_event; u32 submit_command; u32 get_active_pcr_banks; u32 set_active_pcr_banks; u32 get_result_of_set_active_pcr_banks; } mixed_mode; }; typedef efi_status_t (*efi_exit_boot_map_processing)(struct efi_boot_memmap *, void *); union efi_memory_attribute_protocol; typedef union efi_memory_attribute_protocol efi_memory_attribute_protocol_t; union efi_memory_attribute_protocol { struct { efi_status_t (*get_memory_attributes)(efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64 *); efi_status_t (*set_memory_attributes)(efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64); efi_status_t (*clear_memory_attributes)(efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64); }; struct { u32 get_memory_attributes; u32 set_memory_attributes; u32 clear_memory_attributes; } mixed_mode; }; struct exit_boot_struct { struct efi_boot_memmap *boot_memmap; efi_memory_desc_t *runtime_map; int runtime_entry_count; void *new_fdt_addr; }; union efi_device_path_from_text_protocol { struct { efi_device_path_protocol_t * (*convert_text_to_device_node)(const efi_char16_t *); efi_device_path_protocol_t * (*convert_text_to_device_path)(const efi_char16_t *); }; struct { u32 convert_text_to_device_node; u32 convert_text_to_device_path; } mixed_mode; }; typedef union efi_device_path_from_text_protocol efi_device_path_from_text_protocol_t; struct efi_file_path_dev_path { struct efi_generic_dev_path header; efi_char16_t filename[0]; }; union efi_file_protocol; typedef union efi_file_protocol efi_file_protocol_t; union efi_file_protocol { struct { u64 revision; efi_status_t (*open)(efi_file_protocol_t *, efi_file_protocol_t **, efi_char16_t *, u64, u64); efi_status_t (*close)(efi_file_protocol_t *); efi_status_t (*delete)(efi_file_protocol_t *); efi_status_t (*read)(efi_file_protocol_t *, unsigned long *, void *); efi_status_t (*write)(efi_file_protocol_t *, unsigned long, void *); efi_status_t (*get_position)(efi_file_protocol_t *, u64 *); efi_status_t 
(*set_position)(efi_file_protocol_t *, u64); efi_status_t (*get_info)(efi_file_protocol_t *, efi_guid_t *, unsigned long *, void *); efi_status_t (*set_info)(efi_file_protocol_t *, efi_guid_t *, unsigned long, void *); efi_status_t (*flush)(efi_file_protocol_t *); }; struct { u64 revision; u32 open; u32 close; u32 delete; u32 read; u32 write; u32 get_position; u32 set_position; u32 get_info; u32 set_info; u32 flush; } mixed_mode; }; typedef struct { u64 size; u64 file_size; u64 phys_size; efi_time_t create_time; efi_time_t last_access_time; efi_time_t modification_time; __u64 attribute; efi_char16_t filename[0]; } efi_file_info_t; struct finfo { efi_file_info_t info; efi_char16_t filename[256]; }; union efi_simple_file_system_protocol; typedef union efi_simple_file_system_protocol efi_simple_file_system_protocol_t; union efi_simple_file_system_protocol { struct { u64 revision; efi_status_t (*open_volume)(efi_simple_file_system_protocol_t *, efi_file_protocol_t **); }; struct { u64 revision; u32 open_volume; } mixed_mode; }; enum efi_cmdline_option { EFI_CMDLINE_NONE = 0, EFI_CMDLINE_MODE_NUM = 1, EFI_CMDLINE_RES = 2, EFI_CMDLINE_AUTO = 3, EFI_CMDLINE_LIST = 4, }; typedef struct { u32 red_mask; u32 green_mask; u32 blue_mask; u32 reserved_mask; } efi_pixel_bitmask_t; typedef struct { u32 version; u32 horizontal_resolution; u32 vertical_resolution; int pixel_format; efi_pixel_bitmask_t pixel_information; u32 pixels_per_scan_line; } efi_graphics_output_mode_info_t; union efi_graphics_output_protocol; typedef union efi_graphics_output_protocol efi_graphics_output_protocol_t; union efi_graphics_output_protocol_mode; typedef union efi_graphics_output_protocol_mode efi_graphics_output_protocol_mode_t; union efi_graphics_output_protocol { struct { efi_status_t (*query_mode)(efi_graphics_output_protocol_t *, u32, unsigned long *, efi_graphics_output_mode_info_t **); efi_status_t (*set_mode)(efi_graphics_output_protocol_t *, u32); void *blt; efi_graphics_output_protocol_mode_t *mode; }; struct { u32 query_mode; u32 set_mode; u32 blt; u32 mode; } mixed_mode; }; union efi_graphics_output_protocol_mode { struct { u32 max_mode; u32 mode; efi_graphics_output_mode_info_t *info; unsigned long size_of_info; efi_physical_addr_t frame_buffer_base; unsigned long frame_buffer_size; }; struct { u32 max_mode; u32 mode; u32 info; u32 size_of_info; u64 frame_buffer_base; u32 frame_buffer_size; } mixed_mode; }; typedef struct { void *read; void *write; } efi_pci_io_protocol_access_t; typedef enum { EfiPciIoWidthUint8 = 0, EfiPciIoWidthUint16 = 1, EfiPciIoWidthUint32 = 2, EfiPciIoWidthUint64 = 3, EfiPciIoWidthFifoUint8 = 4, EfiPciIoWidthFifoUint16 = 5, EfiPciIoWidthFifoUint32 = 6, EfiPciIoWidthFifoUint64 = 7, EfiPciIoWidthFillUint8 = 8, EfiPciIoWidthFillUint16 = 9, EfiPciIoWidthFillUint32 = 10, EfiPciIoWidthFillUint64 = 11, EfiPciIoWidthMaximum = 12, } EFI_PCI_IO_PROTOCOL_WIDTH; union efi_pci_io_protocol; typedef union efi_pci_io_protocol efi_pci_io_protocol_t; typedef efi_status_t (*efi_pci_io_protocol_cfg_t)(efi_pci_io_protocol_t *, EFI_PCI_IO_PROTOCOL_WIDTH, u32, unsigned long, void *); typedef struct { efi_pci_io_protocol_cfg_t read; efi_pci_io_protocol_cfg_t write; } efi_pci_io_protocol_config_access_t; typedef struct { u32 read; u32 write; } efi_pci_io_protocol_access_32_t; union efi_pci_io_protocol { struct { void *poll_mem; void *poll_io; efi_pci_io_protocol_access_t mem; efi_pci_io_protocol_access_t io; efi_pci_io_protocol_config_access_t pci; void *copy_mem; void *map; void *unmap; void *allocate_buffer; 
void *free_buffer; void *flush; efi_status_t (*get_location)(efi_pci_io_protocol_t *, unsigned long *, unsigned long *, unsigned long *, unsigned long *); void *attributes; void *get_bar_attributes; void *set_bar_attributes; uint64_t romsize; void *romimage; }; struct { u32 poll_mem; u32 poll_io; efi_pci_io_protocol_access_32_t mem; efi_pci_io_protocol_access_32_t io; efi_pci_io_protocol_access_32_t pci; u32 copy_mem; u32 map; u32 unmap; u32 allocate_buffer; u32 free_buffer; u32 flush; u32 get_location; u32 attributes; u32 get_bar_attributes; u32 set_bar_attributes; u64 romsize; u32 romimage; } mixed_mode; }; union efi_rng_protocol; typedef union efi_rng_protocol efi_rng_protocol_t; union efi_rng_protocol { struct { efi_status_t (*get_info)(efi_rng_protocol_t *, unsigned long *, efi_guid_t *); efi_status_t (*get_rng)(efi_rng_protocol_t *, efi_guid_t *, unsigned long, u8 *); }; struct { u32 get_info; u32 get_rng; } mixed_mode; }; struct tcpa_event { u32 pcr_index; u32 event_type; u8 pcr_value[20]; u32 event_size; u8 event_data[0]; };
#ifndef BPF_NO_PRESERVE_ACCESS_INDEX
#pragma clang attribute pop
#endif

#endif /* __VMLINUX_H__ */
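/*
 * Usage sketch (illustrative; it would live in a separate BPF object file,
 * and the file name, program name, and printed fields are assumptions): this
 * dump is normally consumed by including it from a CO-RE BPF program, for
 * example one that attaches to the vsock tracepoint whose raw layout is
 * declared above as struct trace_event_raw_virtio_transport_alloc_pkt:
 *
 *	// vsock_trace.bpf.c (hypothetical)
 *	#include "vmlinux.h"
 *	#include <bpf/bpf_helpers.h>
 *
 *	char LICENSE[] SEC("license") = "GPL";
 *
 *	SEC("tracepoint/vsock/virtio_transport_alloc_pkt")
 *	int handle_alloc_pkt(struct trace_event_raw_virtio_transport_alloc_pkt *ctx)
 *	{
 *		// The context layout matches the generated struct: the common
 *		// trace_entry header followed by the event's own fields.
 *		bpf_printk("vsock pkt %u -> %u len %u",
 *			   ctx->src_cid, ctx->dst_cid, ctx->len);
 *		return 0;
 *	}
 */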