Lines Matching defs:gve_priv

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
	dma_addr_t irq_db_indices_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 max_tx_desc_cnt;
	u16 max_rx_desc_cnt;
	u16 min_tx_desc_cnt;
	u16 min_rx_desc_cnt;
	bool modify_ring_size_enabled;
	bool default_min_ring_size;
	u16 tx_pages_per_qpl; /* suggested number of pages per QPL for TX queues, provided by the NIC */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	struct bpf_prog *xdp_prog; /* XDP BPF program */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	u16 num_xdp_queues;
	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable; /* level for netif* netdev print macros */
	struct pci_dev *pdev;

	u32 tx_timeo_cnt;

	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	struct dma_pool *adminq_pool;
	struct mutex adminq_lock; /* Protects adminq command execution */
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmd timeouts */

	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;
	u32 adminq_get_ptype_map_cnt;
	u32 adminq_verify_driver_compatibility_cnt;
	u32 adminq_query_flow_rules_cnt;
	u32 adminq_cfg_flow_rule_cnt;
	u32 adminq_cfg_rss_cnt;
	u32 adminq_query_rss_cnt;

	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of resets */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	u32 suspend_cnt; /* count of times suspended */
	u32 resume_cnt; /* count of times resumed */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	u64 link_speed;
	bool up_before_suspend; /* True if dev was up before suspend */

	struct gve_ptype_lut *ptype_lut_dqo;

	u16 data_buffer_size_dqo;
	u16 max_rx_buffer_size; /* device limit */

	enum gve_queue_format queue_format;

	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;

	u16 header_buf_size; /* device configured, header-split supported if non-zero */
	bool header_split_enabled; /* True if header split is enabled by the user */

	u32 max_flow_rules;
	u32 num_flow_rules;

	struct gve_flow_rules_cache flow_rules_cache;
};
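
The per-queue pointers at the top of the struct are plain arrays whose lengths come from the adjacent comments: tx is tx_cfg.num_queues entries, rx is rx_cfg.num_queues entries, and tx_desc_cnt / rx_desc_cnt give the descriptors per ring. A minimal sketch of how those fields relate, assuming struct gve_queue_config exposes a num_queues member as the comments imply; the helper name is hypothetical and not part of the driver:

	/*
	 * Hypothetical helper, not part of gve: totals the descriptors backing
	 * all rings. priv->tx and priv->rx are arrays of tx_cfg.num_queues and
	 * rx_cfg.num_queues entries, each ring holding tx_desc_cnt or
	 * rx_desc_cnt descriptors (per the struct comments above).
	 */
	static u32 gve_total_desc_example(const struct gve_priv *priv)
	{
		return priv->tx_cfg.num_queues * priv->tx_desc_cnt +
		       priv->rx_cfg.num_queues * priv->rx_desc_cnt;
	}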
static inline bool gve_get_do_reset(struct gve_priv *priv) /* gve_priv referenced as a function argument */
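
The line above shows gve_priv used as the argument of gve_get_do_reset(), one of the inline flag accessors that follow the struct; the driver keeps its run-state bits in the unsigned long words service_task_flags, state_flags, and ethtool_flags. A minimal sketch of that accessor pattern, assuming a GVE_PRIV_FLAGS_DO_RESET bit in service_task_flags; the real bit definitions live in gve.h and are not part of this listing:

	#include <linux/bitops.h>

	/* Assumed flag bit for illustration; the driver defines the real
	 * values in an enum in gve.h, which this listing does not show. */
	#define GVE_PRIV_FLAGS_DO_RESET 1

	static inline bool gve_get_do_reset(struct gve_priv *priv)
	{
		/* Test the "do reset" bit in the service task flag word. */
		return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
	}

	static inline void gve_set_do_reset(struct gve_priv *priv)
	{
		/* Atomically mark that the service task should reset the device. */
		set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
	}

	static inline void gve_clear_do_reset(struct gve_priv *priv)
	{
		clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
	}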