Lines Matching +full:non +full:- +full:urgent
1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
3 * Copyright(c) 2020-2023 Cornelis Networks, Inc.
4 * Copyright(c) 2015-2020 Intel Corporation.
13 #include <linux/dma-mapping.h>
27 #include <linux/i2c-algo-bit.h>
74 /* Offline Disabled Reason is 4-bits */
98  * per driver stats, neither device- nor port-specific, or
108 __u64 sps_txerrs; /* tx-related packet errors */
109 __u64 sps_rcverrs; /* non-crc rcv packet errors */
124 * First-cut criterion for "device is active" is
126 * 5-second interval. SMA packets are 64 dwords,
233 /* Timer for re-enabling ASPM if interrupt activity quiets down */
235 /* per-context configuration flags */
284 /* per-context event flags for fileops/intr communication */
292 /* total number of polled urgent packets */
293 u32 urgent; member
294 /* saved total number of polled urgent packets for poll edge trigger */
298 /* non-zero if ctxt is being shared. */
303 * non-zero if ctxt can be shared, and defines the maximum number of
304 * sub-contexts for this device context.
314 * rcvhdrq_size - return total size in bytes for header queue
322 return PAGE_ALIGN(rcd->rcvhdrq_cnt * in rcvhdrq_size()
323 rcd->rcvhdrqentsize * sizeof(u32)); in rcvhdrq_size()
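For a sense of scale, a worked example of the size computation above; the values are illustrative, not defaults taken from this header:

	/*
	 * Hypothetical context: rcvhdrq_cnt = 2048 entries, rcvhdrqentsize = 32 dwords.
	 *   2048 * 32 * sizeof(u32) = 262144 bytes = 256 KiB
	 *   PAGE_ALIGN(262144) = 262144 with 4 KiB pages (already page aligned)
	 */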
415 return (u8)(hdr->lrh[2] & OPA_16B_L4_MASK); in hfi1_16B_get_l4()
420 return (u8)((hdr->lrh[1] & OPA_16B_SC_MASK) >> OPA_16B_SC_SHIFT); in hfi1_16B_get_sc()
425 return (u32)((hdr->lrh[1] & OPA_16B_LID_MASK) | in hfi1_16B_get_dlid()
426 (((hdr->lrh[2] & OPA_16B_DLID_MASK) >> in hfi1_16B_get_dlid()
432 return (u32)((hdr->lrh[0] & OPA_16B_LID_MASK) | in hfi1_16B_get_slid()
433 (((hdr->lrh[2] & OPA_16B_SLID_MASK) >> in hfi1_16B_get_slid()
439 return (u8)((hdr->lrh[0] & OPA_16B_BECN_MASK) >> OPA_16B_BECN_SHIFT); in hfi1_16B_get_becn()
444 return (u8)((hdr->lrh[1] & OPA_16B_FECN_MASK) >> OPA_16B_FECN_SHIFT); in hfi1_16B_get_fecn()
449 return (u8)((hdr->lrh[1] & OPA_16B_L2_MASK) >> OPA_16B_L2_SHIFT); in hfi1_16B_get_l2()
454 return (u16)((hdr->lrh[2] & OPA_16B_PKEY_MASK) >> OPA_16B_PKEY_SHIFT); in hfi1_16B_get_pkey()
459 return (u8)((hdr->lrh[1] & OPA_16B_RC_MASK) >> OPA_16B_RC_SHIFT); in hfi1_16B_get_rc()
464 return (u8)((hdr->lrh[3] & OPA_16B_AGE_MASK) >> OPA_16B_AGE_SHIFT); in hfi1_16B_get_age()
469 return (u16)((hdr->lrh[0] & OPA_16B_LEN_MASK) >> OPA_16B_LEN_SHIFT); in hfi1_16B_get_len()
474 return (u16)(hdr->lrh[3] & OPA_16B_ENTROPY_MASK); in hfi1_16B_get_entropy()
485 return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_PAD_SHIFT) & in hfi1_16B_bth_get_pad()
495 return be32_to_cpu(mgmt->dest_qpn) & OPA_16B_MGMT_QPN_MASK; in hfi1_16B_get_dest_qpn()
500 return be32_to_cpu(mgmt->src_qpn) & OPA_16B_MGMT_QPN_MASK; in hfi1_16B_get_src_qpn()
506 mgmt->dest_qpn = cpu_to_be32(dest_qp & OPA_16B_MGMT_QPN_MASK); in hfi1_16B_set_qpn()
507 mgmt->src_qpn = cpu_to_be32(src_qp & OPA_16B_MGMT_QPN_MASK); in hfi1_16B_set_qpn()
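Taken together, the helpers above give field-level access to the OPA 16B header and its management variant. A minimal reader-side sketch, assuming hdr points at a received struct hfi1_16b_header; the function and variable names here are illustrative, not from the driver:

	static void show_16b_fields(struct hfi1_16b_header *hdr)
	{
		u32 dlid = hfi1_16B_get_dlid(hdr);	/* destination LID */
		u32 slid = hfi1_16B_get_slid(hdr);	/* source LID */
		u16 pkey = hfi1_16B_get_pkey(hdr);	/* partition key */
		u8 sc = hfi1_16B_get_sc(hdr);		/* service class */
		u8 l4 = hfi1_16B_get_l4(hdr);		/* L4 (next header) type */

		pr_debug("16B: dlid %u slid %u pkey 0x%x sc %u l4 %u\n",
			 dlid, slid, pkey, sc, l4);
	}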
511 * hfi1_get_rc_ohdr - get extended header
512  * @opah: the opa header
522 if (opah->hdr_type == HFI1_PKT_TYPE_9B) { in hfi1_get_rc_ohdr()
523 hdr = &opah->ibh; in hfi1_get_rc_ohdr()
525 ohdr = &hdr->u.oth; in hfi1_get_rc_ohdr()
527 ohdr = &hdr->u.l.oth; in hfi1_get_rc_ohdr()
531 hdr_16b = &opah->opah; in hfi1_get_rc_ohdr()
534 ohdr = &hdr_16b->u.oth; in hfi1_get_rc_ohdr()
536 ohdr = &hdr_16b->u.l.oth; in hfi1_get_rc_ohdr()
544 * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
549 #define HFI1_IB_CFG_LWID_DG_ENB 1 /* allowed Link-width downgrade */
550 #define HFI1_IB_CFG_LWID_ENB 2 /* allowed Link-width */
551 #define HFI1_IB_CFG_LWID 3 /* currently active Link-width */
554 #define HFI1_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
555 #define HFI1_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
575 * as bits for easy multi-state checking. The actual state can only be
620 #define FM_TBL_SC2VLNT 4 /* Get/set SC->VLnt */
662 #define CNTR_INVALID_VL -1 /* Specifies invalid VL */
675 if (*cntr < (u64)-1LL) in incr_cntr64()
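The guard above makes incr_cntr64() a saturating increment: the increment itself (not among the matched lines) presumably only happens while the counter is below U64_MAX, so software error counters stick at their maximum rather than wrapping to zero. A tiny illustration:

	u64 errs = (u64)-1LL;	/* counter already at U64_MAX */

	incr_cntr64(&errs);	/* guard fails, value stays at U64_MAX instead of wrapping */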
696 /* per-SL CCA information */
699 struct hfi1_pportdata *ppd; /* read-only */
700 int sl; /* read-only */
701 u16 ccti; /* read/write - current value of CCTI */
706 * SMA-facing value. Should be set from .latest when
707 * HLS_UP_* -> HLS_DN_* transition actually occurs.
729 * port-numbers are one-based. The first or only port is port1.
761 * this address is mapped read-only into user processes so they can
816  u32 port; /* IB port number; index into dd->pports is port - 1 */
851 * cca_timer_lock protects access to the per-SL cca_timer
865 * cc_state_lock protects (write) access to the per-port
1008 /* device data struct now contains only "general per-device" info.
1084 /* mem-mapped pointer to base of PIO buffers */
1087 * write-combining mem-mapped pointer to base of RcvArray
1092 * credit return base - a per-NUMA range of DMA address that
1093 * the chip will use to update the per-context free counter
1141 * mapped read-only into user processes so they can get unit and
1199 * credits are to be kept at 0 and set when handling the link-up
1231 /* MSI-X information */
1250 * per-port counters
1304 /* hfi1_pportdata, points to array of (physical) port-specific
1305 * data structs, indexed by pidx (0..n-1)
1392 /* for cpu affinity; -1 if none */
1411 return (uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) * in uctxt_offset()
1460  * hfi1_rcd_head - return the current rcd head
1465 return rcd->head; in hfi1_rcd_head()
1469  * hfi1_set_rcd_head - set the rcd head
1475 rcd->head = head; in hfi1_set_rcd_head()
1481 return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->rhf_offset; in get_rhf_addr()
1487 return !!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL); in get_dma_rtail_setting()
1491 * hfi1_seq_incr_wrap - wrapping increment for sequence
1504 * hfi1_seq_cnt - return seq_cnt member
1511 return rcd->seq_cnt; in hfi1_seq_cnt()
1515  * hfi1_set_seq_cnt - set seq_cnt member
1522 rcd->seq_cnt = cnt; in hfi1_set_seq_cnt()
1526  * last_rcv_seq - return true if seq does not match the expected sequence count
1534 return seq != rcd->seq_cnt; in last_rcv_seq()
1538  * hfi1_seq_incr - increment context sequence number
1546 rcd->seq_cnt = hfi1_seq_incr_wrap(rcd->seq_cnt); in hfi1_seq_incr()
1551 * get_hdrqentsize - return hdrq entry size
1556 return rcd->rcvhdrqentsize; in get_hdrqentsize()
1560 * get_hdrq_cnt - return hdrq count
1565 return rcd->rcvhdrq_cnt; in get_hdrq_cnt()
1569 * hfi1_is_slowpath - check if this context is slow path
1574 return rcd->do_interrupt == rcd->slow_handler; in hfi1_is_slowpath()
1578 * hfi1_is_fastpath - check if this context is fast path
1583 if (rcd->ctxt == HFI1_CTRL_CTXT) in hfi1_is_fastpath()
1586 return rcd->do_interrupt == rcd->fast_handler; in hfi1_is_fastpath()
1590 * hfi1_set_fast - change to the fast handler
1598 rcd->do_interrupt = rcd->fast_handler; in hfi1_set_fast()
1612 #define HFI1_JKEY_MASK (BIT(16) - 1)
1617 * 0 - 31 - users with administrator privileges
1618 * 32 - 63 - kernel protocols using KDETH packets
1619 * 64 - 65535 - all other users using KDETH packets
1626 jkey &= HFI1_ADMIN_JKEY_RANGE - 1; in generate_jkey()
1628 jkey |= BIT(HFI1_JKEY_WIDTH - 1); in generate_jkey()
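Reading the two masked assignments together with the range comment above, a rough worked example of the J_KEY mapping; HFI1_ADMIN_JKEY_RANGE is assumed to be 32 here (matching the 0 - 31 range), and the exact privilege checks are not among the matched lines:

	/*
	 * Privileged caller, uid 1005:   1005 & (32 - 1) = 13 -> J_KEY 13 (admin range 0 - 31)
	 * Unprivileged caller, uid 1005: (1005 & HFI1_JKEY_MASK) | BIT(15) = 0x83ed -> upper half of the key space
	 */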
1640 u16 link_speed = ppd->link_speed_active; in active_egress_rate()
1641 u16 link_width = ppd->link_width_active; in active_egress_rate()
1683 * --------------------------------------------------- in egress_cycles()
1707 #define PKEY_CHECK_INVALID -1
1721 * sc_to_vlt() - reverse lookup sc to vl
1722  * @dd: devdata
1723  * @sc5: 5 bit sc
1734 seq = read_seqbegin(&dd->sc2vl_lock); in sc_to_vlt()
1735 rval = *(((u8 *)dd->sc2vl) + sc5); in sc_to_vlt()
1736 } while (read_seqretry(&dd->sc2vl_lock, seq)); in sc_to_vlt()
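The fragments above are the standard seqlock read pattern: sample the writer sequence, perform the lockless table read, and retry if a writer updated sc2vl in the meantime. A generic sketch of the same pattern (the lock and table names below are placeholders, not the driver's):

	unsigned int start;
	u8 vl;

	do {
		start = read_seqbegin(&map_lock);	/* placeholder for dd->sc2vl_lock */
		vl = sc_to_vl_table[sc5];		/* placeholder for the dd->sc2vl read */
	} while (read_seqretry(&map_lock, start));	/* retry if a writer raced the read */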
1745 * ingress_pkey_matches_entry - return 1 if the pkey matches ent (ent
1769 * ingress_pkey_table_search - search the entire pkey table for
1778 if (ingress_pkey_matches_entry(pkey, ppd->pkeys[i])) in ingress_pkey_table_search()
1785 * ingress_pkey_table_fail - record a failure of ingress pkey validation,
1792 struct hfi1_devdata *dd = ppd->dd; in ingress_pkey_table_fail()
1794 incr_cntr64(&ppd->port_rcv_constraint_errors); in ingress_pkey_table_fail()
1795 if (!(dd->err_info_rcv_constraint.status & OPA_EI_STATUS_SMASK)) { in ingress_pkey_table_fail()
1796 dd->err_info_rcv_constraint.status |= OPA_EI_STATUS_SMASK; in ingress_pkey_table_fail()
1797 dd->err_info_rcv_constraint.slid = slid; in ingress_pkey_table_fail()
1798 dd->err_info_rcv_constraint.pkey = pkey; in ingress_pkey_table_fail()
1803 * ingress_pkey_check - Return 0 if the ingress pkey is valid, return 1
1813 if (!(force) && !(ppd->part_enforce & HFI1_PART_ENFORCE_IN)) in ingress_pkey_check()
1825 if (ingress_pkey_matches_entry(pkey, ppd->pkeys[idx])) in ingress_pkey_check()
1828 /* no match - try the whole table */ in ingress_pkey_check()
1838 * rcv_pkey_check - Return 0 if the ingress pkey is valid, return 1
1846 if (!(ppd->part_enforce & HFI1_PART_ENFORCE_IN)) in rcv_pkey_check()
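Both pkey checks return 0 when the packet's pkey is acceptable and 1 when it must be dropped, with ingress_pkey_table_fail() having already recorded the constraint error. A minimal caller sketch; the (ppd, pkey, sc5, slid) argument list is inferred for illustration and is not shown in the matched lines:

	if (rcv_pkey_check(ppd, pkey, sc5, slid)) {
		/* pkey rejected: error already accounted, drop the packet */
		return;
	}
	/* otherwise continue normal receive processing */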
1861 /* MTU enumeration, 256-4k match IB */
1904 return ppd->dd; in dd_from_ppd()
1930  u32 pidx = port - 1; /* IB numbers ports from 1, hw from 0 */ in to_iport()
1932 WARN_ON(pidx >= dd->num_pports); in to_iport()
1933 return &dd->pport[pidx].ibport_data; in to_iport()
1938 return &rcd->ppd->ibport_data; in rcd_to_iport()
1942 * hfi1_may_ecn - Check whether FECN or BECN processing should be done
1956 if (pkt->etype == RHF_RCV_TYPE_BYPASS) { in hfi1_may_ecn()
1957 fecn = hfi1_16B_get_fecn(pkt->hdr); in hfi1_may_ecn()
1958 becn = hfi1_16B_get_becn(pkt->hdr); in hfi1_may_ecn()
1960 fecn = ib_bth_get_fecn(pkt->ohdr); in hfi1_may_ecn()
1961 becn = ib_bth_get_becn(pkt->ohdr); in hfi1_may_ecn()
1986 if (index >= ARRAY_SIZE(ppd->pkeys)) in hfi1_get_pkey()
1989 ret = ppd->pkeys[index]; in hfi1_get_pkey()
2002 return cpu_to_be64(ppd->guids[index]); in get_sguid()
2010 return rcu_dereference(ppd->cc_state); in get_cc_state()
2019 return rcu_dereference_protected(ppd->cc_state, in get_cc_state_protected()
2020 lockdep_is_held(&ppd->cc_state_lock)); in get_cc_state_protected()
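get_cc_state() is the RCU reader-side accessor and get_cc_state_protected() the writer-side one, legal only with cc_state_lock held as the lockdep expression documents. A typical reader, sketched for illustration (struct cc_state is the type these accessors return):

	struct cc_state *cc_state;

	rcu_read_lock();
	cc_state = get_cc_state(ppd);
	if (cc_state) {
		/* consult the congestion-control tables under RCU protection */
	}
	rcu_read_unlock();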
2024 * values for dd->flags (_device_ related flags)
2035 #define HFI1_PBC_LENGTH_MASK ((1 << 11) - 1)
2044 /* waiting for an urgent packet to arrive */
2072 * ---
2075 * For a 64-byte cache line, KDETH would need to be 36 bytes or 9 DWORDS
2088 * ---
2104 * hfi1_rcvhdrtail_kvaddr - return tail kvaddr
2105  * @rcd: the receive context
2109 return (__le64 *)rcd->rcvhdrtail_kvaddr; in hfi1_rcvhdrtail_kvaddr()
2131 if (likely(!rcd->rcvhdrtail_kvaddr)) { in hfi1_packet_present()
2260 /* turn on send-side job key checks if !A0 */ in hfi1_pkt_default_send_ctxt_mask()
2295 /* turn on send-side job key checks if !A0 */ in hfi1_pkt_base_sdma_integrity()
2304 dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
2305 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
2308 dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
2309 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
2312 dev_err_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
2313 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
2317 dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
2318 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
2321 dev_warn_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
2322 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
2326 dev_info(&(dd)->pcidev->dev, "%s: " fmt, \
2327 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
2330 dev_info_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
2331 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
2335 dev_dbg(&(dd)->pcidev->dev, "%s: " fmt, \
2336 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
2339 dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
2340 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (port), ##__VA_ARGS__)
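Each of these wrappers prefixes the message with the PCI device and the rdmavt device name, and the ratelimited variants additionally throttle message floods. Illustrative usage, assuming the conventional dd_dev_err()/dd_dev_warn() names for the definitions above (the message text and variables are made up):

	dd_dev_err(dd, "send context allocation failed: %d\n", ret);
	dd_dev_warn(dd, "port %u: link width downgrade detected\n", port);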
2352 dd->z_int_counter = get_all_cpu_total(dd->int_counter); in hfi1_reset_cpu_counters()
2353 dd->z_rcv_limit = get_all_cpu_total(dd->rcv_limit); in hfi1_reset_cpu_counters()
2354 dd->z_send_schedule = get_all_cpu_total(dd->send_schedule); in hfi1_reset_cpu_counters()
2357 for (i = 0; i < dd->num_pports; i++, ppd++) { in hfi1_reset_cpu_counters()
2358 ppd->ibport_data.rvp.z_rc_acks = in hfi1_reset_cpu_counters()
2359 get_all_cpu_total(ppd->ibport_data.rvp.rc_acks); in hfi1_reset_cpu_counters()
2360 ppd->ibport_data.rvp.z_rc_qacks = in hfi1_reset_cpu_counters()
2361 get_all_cpu_total(ppd->ibport_data.rvp.rc_qacks); in hfi1_reset_cpu_counters()
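The reset above works by baseline rather than by clearing: the current per-CPU totals are captured into the z_* fields, so a subsequent read reports the delta since the reset. A hypothetical read-side counterpart for one of the counters captured above:

	/* interrupts observed since the last hfi1_reset_cpu_counters() call */
	u64 ints_since_reset =
		get_all_cpu_total(dd->int_counter) - dd->z_int_counter;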
2383 return i2c_target(dd->hfi1_id); in qsfp_resource()
2389 return dd->pcidev->device == PCI_DEVICE_ID_INTEL1; in is_integrated()
2393 * hfi1_need_drop - detect need for drop
2394  * @dd: the device
2402 if (unlikely(dd->do_drop && in hfi1_need_drop()
2403 atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) == in hfi1_need_drop()
2405 dd->do_drop = false; in hfi1_need_drop()
2413 #define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev))
2430 (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))) && in hfi1_update_ah_attr()
2437 rdma_ah_set_subnet_prefix(attr, ibp->rvp.gid_prefix); in hfi1_update_ah_attr()
2442  * hfi1_check_mcast - Check if the given lid is
2467 return (lid - opa_get_mcast_base(OPA_MCAST_NR) + in __opa_get_lid()
2472 return (lid - opa_get_mcast_base(OPA_MCAST_NR) + in __opa_get_lid()
2477 return (lid - in __opa_get_lid()
2508 if (ib_is_opa_gid(&grh->dgid)) in hfi1_make_opa_lid()
2509 dlid = opa_get_lid_from_gid(&grh->dgid); in hfi1_make_opa_lid()
2513 dlid = dlid - be16_to_cpu(IB_MULTICAST_LID_BASE) + in hfi1_make_opa_lid()
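The arithmetic in __opa_get_lid() and hfi1_make_opa_lid() rebases multicast LIDs between the OPA 32-bit space and the 9B 16-bit space. A worked example, taking opa_get_mcast_base(OPA_MCAST_NR) as 0xF0000000 and be16_to_cpu(IB_MULTICAST_LID_BASE) as 0xC000; those numeric bases are assumptions for illustration, not values shown in the matched lines:

	/*
	 * OPA multicast LID 0xF0000001 -> 0xF0000001 - 0xF0000000 + 0xC000 = 0xC001 (9B multicast LID)
	 * 9B multicast LID 0xC001      -> 0xC001 - 0xC000 + 0xF0000000 = 0xF0000001 (back to the OPA LID)
	 */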
2544 return (ib_is_opa_gid(&rdma_ah_read_grh(attr)->dgid)) ? in hfi1_get_hdr_type()
2561 struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data; in hfi1_make_ext_grh()
2567 grh->hop_limit = 1; in hfi1_make_ext_grh()
2568 grh->sgid.global.subnet_prefix = ibp->rvp.gid_prefix; in hfi1_make_ext_grh()
2570 grh->sgid.global.interface_id = in hfi1_make_ext_grh()
2573 grh->sgid.global.interface_id = OPA_MAKE_ID(slid); in hfi1_make_ext_grh()
2582 grh->dgid.global.subnet_prefix = ibp->rvp.gid_prefix; in hfi1_make_ext_grh()
2583 grh->dgid.global.interface_id = in hfi1_make_ext_grh()
2584 cpu_to_be64(ppd->guids[HFI1_PORT_GUID_INDEX]); in hfi1_make_ext_grh()
2589 return -(hdr_size + payload + (SIZE_OF_CRC << 2) + in hfi1_get_16b_padding()
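The return statement above is the negate-and-mask idiom: for a packet whose header, payload, and trailing CRC bytes sum to x, the expression -x & 0x7 gives the number of pad bytes needed to round x up to the next 8-byte boundary. Worked examples:

	/*
	 * x = 91 bytes: -91 & 0x7 = 5, and 91 + 5 = 96, a multiple of 8
	 * x = 96 bytes: -96 & 0x7 = 0, no padding needed
	 */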
2597 hdr->lrh[0] = cpu_to_be16(lrh0); in hfi1_make_ib_hdr()
2598 hdr->lrh[1] = cpu_to_be16(dlid); in hfi1_make_ib_hdr()
2599 hdr->lrh[2] = cpu_to_be16(len); in hfi1_make_ib_hdr()
2600 hdr->lrh[3] = cpu_to_be16(slid); in hfi1_make_ib_hdr()
2627 hdr->lrh[0] = lrh0; in hfi1_make_16b_hdr()
2628 hdr->lrh[1] = lrh1; in hfi1_make_16b_hdr()
2629 hdr->lrh[2] = lrh2; in hfi1_make_16b_hdr()
2630 hdr->lrh[3] = lrh3; in hfi1_make_16b_hdr()