/* SPDX-License-Identifier: GPL-2.0-only */
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File hw_atl_llh.h: Declarations of bitfield and register access functions for
 * Atlantic registers.
 */

#ifndef HW_ATL_LLH_H
#define HW_ATL_LLH_H

#include <linux/types.h>

struct aq_hw_s;

/* set temperature sense reset */
void hw_atl_ts_reset_set(struct aq_hw_s *aq_hw, u32 val);

/* set temperature sense power down */
void hw_atl_ts_power_down_set(struct aq_hw_s *aq_hw, u32 val);

/* get temperature sense power down */
u32 hw_atl_ts_power_down_get(struct aq_hw_s *aq_hw);

/* get temperature sense ready */
u32 hw_atl_ts_ready_get(struct aq_hw_s *aq_hw);

/* get temperature sense ready latch high */
u32 hw_atl_ts_ready_latch_high_get(struct aq_hw_s *aq_hw);

/* get temperature sense data */
u32 hw_atl_ts_data_get(struct aq_hw_s *aq_hw);

/* SMBUS0 bus busy */
u32 hw_atl_smb0_bus_busy_get(struct aq_hw_s *aq_hw);

/* SMBUS0 byte transfer complete */
u32 hw_atl_smb0_byte_transfer_complete_get(struct aq_hw_s *aq_hw);

/* SMBUS0 receive acknowledged */
u32 hw_atl_smb0_receive_acknowledged_get(struct aq_hw_s *aq_hw);

/* SMBUS0 set transmitted data (only leftmost byte of data valid) */
void hw_atl_smb0_tx_data_set(struct aq_hw_s *aq_hw, u32 data);

/* SMBUS0 provisioning2 command register */
void hw_atl_smb0_provisioning2_set(struct aq_hw_s *aq_hw, u32 data);

/* SMBUS0 repeated start detect */
u32 hw_atl_smb0_repeated_start_detect_get(struct aq_hw_s *aq_hw);

/* SMBUS0 received data register */
u32 hw_atl_smb0_rx_data_get(struct aq_hw_s *aq_hw);

/* global */

/* set global microprocessor semaphore */
void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
				u32 semaphore);

/* get global microprocessor semaphore */
u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);

/* set global register reset disable */
void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);

/* set soft reset */
void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);

/* get soft reset */
u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw);

/* stats */

/* get rx dma packet drop counter */
u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);

/* get rx dma good octet counter */
u64 hw_atl_stats_rx_dma_good_octet_counter_get(struct aq_hw_s *aq_hw);

/* get rx dma good packet counter */
u64 hw_atl_stats_rx_dma_good_pkt_counter_get(struct aq_hw_s *aq_hw);

/* get tx dma good octet counter */
u64 hw_atl_stats_tx_dma_good_octet_counter_get(struct aq_hw_s *aq_hw);

/* get tx dma good packet counter */
u64 hw_atl_stats_tx_dma_good_pkt_counter_get(struct aq_hw_s *aq_hw);

/* get msm rx errors counter register */
u32 hw_atl_reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);

/* get msm rx unicast frames counter register */
u32 hw_atl_reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);

/* get msm rx multicast frames counter register */
u32 hw_atl_reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);

/* get msm rx broadcast frames counter register */
u32 hw_atl_reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);

/* get msm rx broadcast octets counter register 1 */
u32 hw_atl_reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);

/* get msm rx unicast octets counter register 0 */
u32 hw_atl_reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);

/* get msm tx errors counter register */
u32 hw_atl_reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);

/* get msm tx unicast frames counter register */
u32 hw_atl_reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);

/* get msm tx multicast frames counter register */
u32 hw_atl_reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);

/* get msm tx broadcast frames counter register */
u32 hw_atl_reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);

/* get msm tx multicast octets counter register 1 */
u32 hw_atl_reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);

/* get msm tx broadcast octets counter register 1 */
u32 hw_atl_reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);

/* get msm tx unicast octets counter register 0 */
u32 hw_atl_reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);

/* get global mif identification */
u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw);

/* interrupt */

/* set interrupt auto mask lsw */
void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw,
				     u32 irq_auto_masklsw);

/* set interrupt mapping enable rx */
void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx,
				  u32 rx);

/* set interrupt mapping enable tx */
void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx,
				  u32 tx);

/* set interrupt mapping rx */
void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);

/* set interrupt mapping tx */
void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);

/* set interrupt mask clear lsw */
void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
				     u32 irq_msk_clearlsw);

/* set interrupt mask set lsw */
void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);

/* set interrupt register reset disable */
void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);

/* set interrupt status clear lsw */
void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
					u32 irq_status_clearlsw);

/* get interrupt status lsw */
u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);

/* get reset interrupt */
u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw);

/* set reset interrupt */
void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);

/* set RSC interrupt */
void hw_atl_itr_rsc_en_set(struct aq_hw_s *aq_hw, u32 enable);

/* set RSC delay */
void hw_atl_itr_rsc_delay_set(struct aq_hw_s *aq_hw, u32 delay);

/* rdm */

/* set cpu id */
void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);

/* set rx dca enable */
void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);

/* set rx dca mode */
void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);

/* set rx descriptor data buffer size */
void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
					   u32 rx_desc_data_buff_size,
					   u32 descriptor);

/* set rx descriptor dca enable */
void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
				   u32 dca);

/* set rx descriptor enable */
void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
			       u32 descriptor);

/* set rx descriptor header splitting */
void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
					   u32 rx_desc_head_splitting,
					   u32 descriptor);

/* get rx descriptor head pointer */
u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);

/* set rx descriptor length */
void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
				u32 descriptor);

/* set rx descriptor write-back interrupt enable */
void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
					 u32 rx_desc_wr_wb_irq_en);

/* set rx header dca enable */
void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
				   u32 dca);

/* set rx payload dca enable */
void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
				  u32 dca);

/* set rx descriptor header buffer size */
void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
					   u32 rx_desc_head_buff_size,
					   u32 descriptor);

/* set rx descriptor reset */
void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
				u32 descriptor);

/* Set RDM Interrupt Moderation Enable */
void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
				      u32 rdm_intr_moder_en);

/* reg */

/* set general interrupt mapping register */
void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
				u32 regidx);

/* get general interrupt status register */
u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw);

/* set interrupt global control register */
void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);

/* set interrupt throttle register */
void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);

/* set rx dma descriptor base address lsw */
void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
					       u32 rx_dma_desc_base_addrlsw,
					       u32 descriptor);

/* set rx dma descriptor base address msw */
void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
					       u32 rx_dma_desc_base_addrmsw,
					       u32 descriptor);

/* get rx dma descriptor status register */
u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);

/* set rx dma descriptor tail pointer register */
void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
					 u32 rx_dma_desc_tail_ptr,
					 u32 descriptor);

/* set rx filter multicast filter mask register */
void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
					u32 rx_flr_mcst_flr_msk);

/* set rx filter multicast filter register */
void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
				    u32 filter);

/* set rx filter rss control register 1 */
void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
				       u32 rx_flr_rss_control1);

/* Set RX Filter Control Register 2 */
void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);

/* Set RX Interrupt Moderation Control Register */
void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
				       u32 rx_intr_moderation_ctl,
				       u32 queue);

/* set tx dma debug control */
void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
				     u32 tx_dma_debug_ctl);

/* set tx dma descriptor base address lsw */
void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
					       u32 tx_dma_desc_base_addrlsw,
					       u32 descriptor);

/* set tx dma descriptor base address msw */
void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
					       u32 tx_dma_desc_base_addrmsw,
					       u32 descriptor);

/* set tx dma descriptor tail pointer register */
void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
					 u32 tx_dma_desc_tail_ptr,
					 u32 descriptor);

/* Set TX Interrupt Moderation Control Register */
void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
				       u32 tx_intr_moderation_ctl,
				       u32 queue);

/* set global microprocessor scratch pad */
void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
					u32 glb_cpu_scratch_scp,
					u32 scratch_scp);

/* rpb */

/* set dma system loopback */
void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);

/* set dma network loopback */
void hw_atl_rpb_dma_net_lbk_set(struct aq_hw_s *aq_hw, u32 dma_net_lbk);

/* set rx traffic class mode */
void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
					   u32 rx_traf_class_mode);

/* get rx traffic class mode */
u32 hw_atl_rpb_rpf_rx_traf_class_mode_get(struct aq_hw_s *aq_hw);

/* set rx buffer enable */
void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);

/* set rx buffer high threshold (per tc) */
void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
						u32 rx_buff_hi_threshold_per_tc,
						u32 buffer);

/* set rx buffer low threshold (per tc) */
void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
						u32 rx_buff_lo_threshold_per_tc,
						u32 buffer);

/* set rx flow control mode */
void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw,
				     u32 rx_flow_ctl_mode);

/* set rx packet buffer size (per tc) */
void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
					    u32 rx_pkt_buff_size_per_tc,
					    u32 buffer);

/* toggle rdm rx dma descriptor cache init */
void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw);

/* get rdm rx dma descriptor cache init done */
u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw);

/* set rx xoff enable (per tc) */
void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw,
				      u32 rx_xoff_en_per_tc,
				      u32 buffer);

/* rpf */

/* set l2 broadcast count threshold */
void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
					       u32 l2broadcast_count_threshold);

/* set l2 broadcast enable */
void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);

/* set l2 broadcast filter action */
void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
				       u32 l2broadcast_flr_act);

/* set l2 multicast filter enable */
void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
				      u32 l2multicast_flr_en,
				      u32 filter);

/* get l2 promiscuous mode enable */
u32 hw_atl_rpfl2promiscuous_mode_en_get(struct aq_hw_s *aq_hw);

/* set l2 promiscuous mode enable */
void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
					 u32 l2promiscuous_mode_en);

/* set l2 unicast filter action */
void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw,
				     u32 l2unicast_flr_act,
				     u32 filter);

/* set l2 unicast filter enable */
void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
				u32 filter);

/* set l2 unicast destination address lsw */
void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
					     u32 l2unicast_dest_addresslsw,
					     u32 filter);

/* set l2 unicast destination address msw */
void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
					     u32 l2unicast_dest_addressmsw,
					     u32 filter);

/* Set L2 Accept all Multicast packets */
void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
					    u32 l2_accept_all_mc_packets);

/* set user-priority tc mapping */
void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
					     u32 user_priority_tc_map, u32 tc);

/* set rss key address */
void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);

/* set rss key write data */
void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);

/* get rss key write enable */
u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);

/* set rss key write enable */
void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);

/* set rss redirection table address */
void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
				       u32 rss_redir_tbl_addr);

/* set rss redirection table write data */
void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
					  u32 rss_redir_tbl_wr_data);

/* get rss redirection write enable */
u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);

/* set rss redirection write enable */
void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);

/* set tpo to rpf system loopback */
void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
				       u32 tpo_to_rpf_sys_lbk);

/* set vlan inner ethertype */
void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);

/* set vlan outer ethertype */
void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);

/* set vlan promiscuous mode enable */
void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
				      u32 vlan_prom_mode_en);

/* Get VLAN promiscuous mode enable */
u32 hw_atl_rpf_vlan_prom_mode_en_get(struct aq_hw_s *aq_hw);

/* Set VLAN untagged action */
void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
				      u32 vlan_untagged_act);

/* Set VLAN accept untagged packets */
void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
						 u32 vlan_acc_untagged_packets);

/* Set VLAN filter enable */
void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
				u32 filter);

/* Set VLAN Filter Action */
void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
				 u32 filter);

/* Set VLAN ID Filter */
void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
				u32 filter);

/* Set VLAN RX queue assignment enable */
void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en,
				    u32 filter);

/* Set VLAN RX queue */
void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq,
				 u32 filter);

/* set ethertype filter enable */
void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
				u32 filter);

/* set ethertype user-priority enable */
void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
					  u32 etht_user_priority_en,
					  u32 filter);

/* set ethertype rx queue enable */
void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
				     u32 etht_rx_queue_en,
				     u32 filter);

/* set ethertype rx queue */
void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
				  u32 filter);

/* set ethertype user-priority */
void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
				       u32 etht_user_priority,
				       u32 filter);

/* set ethertype management queue */
void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
				   u32 filter);

/* set ethertype filter action */
void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
				 u32 filter);

/* set ethertype filter */
void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);

/* set L4 source port */
void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter);

/* set L4 destination port */
void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter);

/* rpo */

/* set ipv4 header checksum offload enable */
void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
					      u32 ipv4header_crc_offload_en);

/* set rx descriptor vlan stripping */
void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
					   u32 rx_desc_vlan_stripping,
					   u32 descriptor);

/* set outer vlan tag mode
 * NOTE(review): takes a bare void *context rather than struct aq_hw_s *
 * like its siblings — presumably intentional for the caller's sake; confirm
 * against the .c implementation before "fixing".
 */
void hw_atl_rpo_outer_vlan_tag_mode_set(void *context,
					u32 outervlantagmode);

/* get outer vlan tag mode (see note on the setter about void *context) */
u32 hw_atl_rpo_outer_vlan_tag_mode_get(void *context);

/* set tcp/udp checksum offload enable */
void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
					   u32 tcp_udp_crc_offload_en);

/* Set LRO Patch Optimization Enable. */
void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
					      u32 lro_patch_optimization_en);

/* Set Large Receive Offload Enable */
void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);

/* Set LRO Q Sessions Limit */
void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
				      u32 lro_qsessions_lim);

/* Set LRO Total Descriptor Limit */
void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw,
				       u32 lro_total_desc_lim);

/* Set LRO Min Payload of First Packet */
void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
					     u32 lro_min_pld_of_first_pkt);

/* Set LRO Packet Limit */
void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);

/* Set LRO Max Number of Descriptors */
void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
					       u32 lro_max_desc_num, u32 lro);

/* Set LRO Time Base Divider */
void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
					  u32 lro_time_base_divider);

/* Set LRO Inactive Interval */
void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
					  u32 lro_inactive_interval);

/* Set LRO Max Coalescing Interval */
void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
						u32 lro_max_coal_interval);

/* rx */

/* set rx register reset disable */
void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);

/* tdm */

/* set cpu id */
void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);

/* set large send offload enable */
void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
					  u32 large_send_offload_en);

/* set tx descriptor enable */
void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
			       u32 descriptor);

/* set tx dca enable */
void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);

/* set tx dca mode */
void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);

/* set tx descriptor dca enable */
void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
				   u32 dca);

/* get tx descriptor head pointer */
u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);

/* set tx descriptor length */
void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
				u32 descriptor);

/* set tx descriptor write-back interrupt enable */
void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
					 u32 tx_desc_wr_wb_irq_en);

/* set tx descriptor write-back threshold */
void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
					    u32 tx_desc_wr_wb_threshold,
					    u32 descriptor);

/* Set TDM Interrupt Moderation Enable */
void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
				      u32 tdm_irq_moderation_en);
/* thm */

/* set lso tcp flag of first packet */
void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
					      u32 lso_tcp_flag_of_first_pkt);

/* set lso tcp flag of last packet */
void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
					     u32 lso_tcp_flag_of_last_pkt);

/* set lso tcp flag of middle packet */
void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
					       u32 lso_tcp_flag_of_middle_pkt);

/* tpb */

/* set TX Traffic Class Mode */
void hw_atl_tpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
				   u32 tx_traf_class_mode);

/* get TX Traffic Class Mode */
u32 hw_atl_tpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw);

/* set tx buffer enable */
void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);

/* set tx buffer high threshold (per tc) */
void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
						u32 tx_buff_hi_threshold_per_tc,
						u32 buffer);

/* set tx buffer low threshold (per tc) */
void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
						u32 tx_buff_lo_threshold_per_tc,
						u32 buffer);

/* set tx dma system loopback enable */
void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);

/* set tx dma network loopback enable */
void hw_atl_tpb_tx_dma_net_lbk_en_set(struct aq_hw_s *aq_hw,
				      u32 tx_dma_net_lbk_en);

/* set tx clock gating enable */
void hw_atl_tpb_tx_tx_clk_gate_en_set(struct aq_hw_s *aq_hw,
				      u32 tx_clk_gate_en);

/* set tx packet buffer size (per tc) */
void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
					    u32 tx_pkt_buff_size_per_tc,
					    u32 buffer);

/* set tx path pad insert enable */
void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);

/* tpo */

/* set ipv4 header checksum offload enable */
void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
					      u32 ipv4header_crc_offload_en);

/* set tcp/udp checksum offload enable */
void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
					   u32 tcp_udp_crc_offload_en);

/* set tx pkt system loopback enable */
void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
				      u32 tx_pkt_sys_lbk_en);

/* tps */

/* set tx packet scheduler data arbitration mode */
void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
					      u32 tx_pkt_shed_data_arb_mode);

/* set tx packet scheduler descriptor rate current time reset */
void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
							u32 curr_time_res);

/* set tx packet scheduler descriptor rate limit */
void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
					      u32 tx_pkt_shed_desc_rate_lim);

/* set tx packet scheduler descriptor tc arbitration mode */
void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
						 u32 arb_mode);

/* set tx packet scheduler descriptor tc max credit */
void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
						   const u32 tc,
						   const u32 max_credit);

/* set tx packet scheduler descriptor tc weight */
void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
					       const u32 tc,
					       const u32 weight);

/* set tx packet scheduler descriptor vm arbitration mode */
void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
						 u32 arb_mode);

/* set tx packet scheduler tc data max credit */
void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
						   const u32 tc,
						   const u32 max_credit);

/* set tx packet scheduler tc data weight */
void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
					       const u32 tc,
					       const u32 weight);

/* set tx descriptor rate mode */
void hw_atl_tps_tx_desc_rate_mode_set(struct aq_hw_s *aq_hw,
				      const u32 rate_mode);

/* set tx packet scheduler descriptor rate enable */
void hw_atl_tps_tx_desc_rate_en_set(struct aq_hw_s *aq_hw, const u32 desc,
				    const u32 enable);

/* set tx packet scheduler descriptor rate integral value */
void hw_atl_tps_tx_desc_rate_x_set(struct aq_hw_s *aq_hw, const u32 desc,
				   const u32 rate_int);

/* set tx packet scheduler descriptor rate fractional value */
void hw_atl_tps_tx_desc_rate_y_set(struct aq_hw_s *aq_hw, const u32 desc,
				   const u32 rate_frac);

/* tx */

/* set tx register reset disable */
void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);

/* msm */

/* get register access status */
u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw);

/* set register address for indirect address */
void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
					       u32 reg_addr_for_indirect_addr);

/* set register read strobe */
void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);

/* get register read data */
u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw);

/* set register write data */
void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);

/* set register write strobe */
void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);

/* pci */

/* set pci register reset disable */
void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);

/* pcs */

/* enable reading of the PTP clock registers */
void hw_atl_pcs_ptp_clock_read_enable(struct aq_hw_s *aq_hw,
				      u32 ptp_clock_read_enable);

/* read one word of the latched PTP clock value */
u32 hw_atl_pcs_ptp_clock_get(struct aq_hw_s *aq_hw, u32 index);

/* set uP Force Interrupt */
void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr);

/* clear ipv4 filter destination address */
void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location);

/* clear ipv4 filter source address */
void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location);

/* clear command for filter l3-l4 */
void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location);

/* clear ipv6 filter destination address */
void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location);

/* clear ipv6 filter source address */
void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location);

/* set ipv4 filter destination address */
void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
				       u32 ipv4_dest);

/* set ipv4 filter source address */
void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
				      u32 ipv4_src);

/* set command for filter l3-l4 */
void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd);

/* set ipv6 filter source address (ipv6_src points at the address words) */
void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
				      u32 *ipv6_src);

/* set ipv6 filter destination address (ipv6_dest points at the address words) */
void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
				       u32 *ipv6_dest);

/* set Global MDIO Interface 1 */
void hw_atl_glb_mdio_iface1_set(struct aq_hw_s *hw, u32 value);

/* get Global MDIO Interface 1 */
u32 hw_atl_glb_mdio_iface1_get(struct aq_hw_s *hw);

/* set Global MDIO Interface 2 */
void hw_atl_glb_mdio_iface2_set(struct aq_hw_s *hw, u32 value);

/* get Global MDIO Interface 2 */
u32 hw_atl_glb_mdio_iface2_get(struct aq_hw_s *hw);

/* set Global MDIO Interface 3 */
void hw_atl_glb_mdio_iface3_set(struct aq_hw_s *hw, u32 value);

/* get Global MDIO Interface 3 */
u32 hw_atl_glb_mdio_iface3_get(struct aq_hw_s *hw);

/* set Global MDIO Interface 4 */
void hw_atl_glb_mdio_iface4_set(struct aq_hw_s *hw, u32 value);

/* get Global MDIO Interface 4 */
u32 hw_atl_glb_mdio_iface4_get(struct aq_hw_s *hw);

/* set Global MDIO Interface 5 */
void hw_atl_glb_mdio_iface5_set(struct aq_hw_s *hw, u32 value);

/* get Global MDIO Interface 5 */
u32 hw_atl_glb_mdio_iface5_get(struct aq_hw_s *hw);

/* get MDIO busy status */
u32 hw_atl_mdio_busy_get(struct aq_hw_s *aq_hw);

/* get global microprocessor ram semaphore */
u32 hw_atl_sem_ram_get(struct aq_hw_s *self);

/* get global microprocessor mdio semaphore */
u32 hw_atl_sem_mdio_get(struct aq_hw_s *self);

/* get reset semaphores 1 and 2 */
u32 hw_atl_sem_reset1_get(struct aq_hw_s *self);
u32 hw_atl_sem_reset2_get(struct aq_hw_s *self);

/* get global microprocessor scratch pad register */
u32 hw_atl_scrpad_get(struct aq_hw_s *aq_hw, u32 scratch_scp);

/* get global microprocessor scratch pad 12 register */
u32 hw_atl_scrpad12_get(struct aq_hw_s *self);

/* get global microprocessor scratch pad 25 register */
u32 hw_atl_scrpad25_get(struct aq_hw_s *self);

#endif /* HW_ATL_LLH_H */