/*
 * Matched lines from the Linux CXL CDAT parsing code (cdat.c), grouped by
 * the function they were found in; elided spans are marked with "...".
 */
// SPDX-License-Identifier: GPL-2.0-only
/* cdat_dsmas_handler() */
	struct acpi_cdat_header *hdr = &header->cdat;
	...
	len = le16_to_cpu((__force __le16)hdr->length);
	...
		return -EINVAL;
	...
		return -ENOMEM;
	...
	dent->handle = dsmas->dsmad_handle;
	dent->dpa_range.start = le64_to_cpu((__force __le64)dsmas->dpa_base_address);
	dent->dpa_range.end = le64_to_cpu((__force __le64)dsmas->dpa_base_address) +
			      le64_to_cpu((__force __le64)dsmas->dpa_length) - 1;
	...
	rc = xa_insert(dsmas_xa, dent->handle, dent, GFP_KERNEL);
/* __cxl_access_coordinate_set(): one assignment group per ACPI_HMAT_* data
 * type; the case labels below are inferred from the paired assignments */
		coord->read_latency = val;	/* ACPI_HMAT_ACCESS_LATENCY */
		coord->write_latency = val;
	...
		coord->read_latency = val;	/* ACPI_HMAT_READ_LATENCY */
	...
		coord->write_latency = val;	/* ACPI_HMAT_WRITE_LATENCY */
	...
		coord->read_bandwidth = val;	/* ACPI_HMAT_ACCESS_BANDWIDTH */
		coord->write_bandwidth = val;
	...
		coord->read_bandwidth = val;	/* ACPI_HMAT_READ_BANDWIDTH */
	...
		coord->write_bandwidth = val;	/* ACPI_HMAT_WRITE_BANDWIDTH */
/* cdat_dslbis_handler() */
	struct acpi_cdat_header *hdr = &header->cdat;
	...
	len = le16_to_cpu((__force __le16)hdr->length);
	...
		return -EINVAL;
	...
	/* unrecognized data type, skip */
	if (dslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
		return 0;
	...
	/* not a memory type, skip */
	if ((dslbis->flags & ACPI_HMAT_MEMORY_HIERARCHY) != ACPI_HMAT_MEMORY)
		return 0;
	...
	dent = xa_load(dsmas_xa, dslbis->handle);
	...
	le_base = (__force __le64)dslbis->entry_base_unit;
	le_val = (__force __le16)dslbis->entry[0];
	val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
			     dslbis->data_type);
	cxl_access_coordinate_set(dent->cdat_coord, dslbis->data_type, val);
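/*
 * Illustrative only (not part of the file): what the cdat_normalize() call
 * above computes, per my reading of the CDAT/HMAT conventions -- a DSLBIS
 * cell is entry * entry_base_unit, and latency-type values (reported in
 * picoseconds) are rounded up to nanoseconds; bandwidth values are used
 * as-is. A minimal user-space check of that arithmetic:
 */
#include <assert.h>
#include <stdint.h>

static uint32_t normalize(uint16_t entry, uint64_t base, int is_latency)
{
	uint64_t v = (uint64_t)entry * base;	/* scale by the base unit */

	if (is_latency)
		v = (v + 999) / 1000;		/* ps -> ns, rounded up */
	return (uint32_t)v;
}

int main(void)
{
	assert(normalize(85, 100, 1) == 9);	/* 8500ps reads back as 9ns */
	assert(normalize(16, 256, 0) == 4096);	/* 16 * 256 = 4096 MB/s */
	return 0;
}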
/* cdat_table_parse_output() */
	...
		return -ENOENT;
/* cxl_cdat_endpoint_process(): parse DSMAS, then DSLBIS, from the port's CDAT */
	rc = cdat_table_parse(ACPI_CDAT_TYPE_DSMAS, cdat_dsmas_handler,
			      dsmas_xa, port->cdat.table, port->cdat.length);
	...
	rc = cdat_table_parse(ACPI_CDAT_TYPE_DSLBIS, cdat_dslbis_handler,
			      dsmas_xa, port->cdat.table, port->cdat.length);
/* cxl_port_perf_data_calculate() */
	...
		dev_dbg(&port->dev, "Failed to retrieve ep perf coordinates.\n");
	...
	if (!cxl_root)
		return -ENODEV;

	if (!cxl_root->ops || !cxl_root->ops->qos_class)
		return -EOPNOTSUPP;

	xa_for_each(dsmas_xa, index, dent) {
		int qos_class;

		cxl_coordinates_combine(dent->coord, dent->cdat_coord, ep_c);
		dent->entries = 1;
		rc = cxl_root->ops->qos_class(cxl_root,
					      &dent->coord[ACCESS_COORDINATE_CPU],
					      1, &qos_class);
		...
		dent->qos_class = qos_class;
	}
	...
		return -ENOENT;
/* update_perf_entry() */
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		dpa_perf->coord[i] = dent->coord[i];
		dpa_perf->cdat_coord[i] = dent->cdat_coord[i];
	}
	dpa_perf->dpa_range = dent->dpa_range;
	dpa_perf->qos_class = dent->qos_class;
	dev_dbg(dev,
		...	/* format string elided */
		&dent->dpa_range, dpa_perf->qos_class,
		dent->coord[ACCESS_COORDINATE_CPU].read_bandwidth,
		dent->coord[ACCESS_COORDINATE_CPU].write_bandwidth,
		dent->coord[ACCESS_COORDINATE_CPU].read_latency,
		dent->coord[ACCESS_COORDINATE_CPU].write_latency);
/* cxl_memdev_set_qos_class(): route each DSMAS entry to the partition it falls in */
	struct device *dev = cxlds->dev;
	struct range pmem_range = {
		.start = cxlds->pmem_res.start,
		.end = cxlds->pmem_res.end,
	};
	struct range ram_range = {
		.start = cxlds->ram_res.start,
		.end = cxlds->ram_res.end,
	};
	...
		if (resource_size(&cxlds->ram_res) &&
		    range_contains(&ram_range, &dent->dpa_range))
			update_perf_entry(dev, dent, &mds->ram_perf);
		else if (resource_size(&cxlds->pmem_res) &&
			 range_contains(&pmem_range, &dent->dpa_range))
			update_perf_entry(dev, dent, &mds->pmem_perf);
		else
			dev_dbg(dev,
				...	/* "no partition" message elided */
				&dent->dpa_range);
/* match_cxlrd_qos_class() */
	if (cxlrd->qos_class == CXL_QOS_CLASS_INVALID)
		return 0;

	if (cxlrd->qos_class == dev_qos_class)
		return 1;
/* cxl_qos_match() */
	if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
		return false;

	if (!device_for_each_child(&root_port->dev, &dpa_perf->qos_class,
				   match_cxlrd_qos_class))
		return false;
/* match_cxlrd_hb() */
	cxlsd = &cxlrd->cxlsd;
	...
	for (int i = 0; i < cxlsd->nr_targets; i++) {
		if (host_bridge == cxlsd->target[i]->dport_dev)
			return 1;
	}
/* cxl_qos_class_verify() */
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	...
	struct cxl_root *cxl_root __free(put_cxl_root) =
		find_cxl_root(cxlmd->endpoint);

	if (!cxl_root)
		return -ENODEV;

	root_port = &cxl_root->port;

	/* check the device QoS classes against the root decoders */
	if (!cxl_qos_match(root_port, &mds->ram_perf))
		reset_dpa_perf(&mds->ram_perf);
	if (!cxl_qos_match(root_port, &mds->pmem_perf))
		reset_dpa_perf(&mds->pmem_perf);

	/* verify the device's host bridge is under a root decoder */
	rc = device_for_each_child(&root_port->dev,
				   cxlmd->endpoint->host_bridge, match_cxlrd_hb);
	if (!rc) {
		reset_dpa_perf(&mds->ram_perf);
		reset_dpa_perf(&mds->pmem_perf);
	}
/* cxl_endpoint_parse_cdat() */
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	...
	if (!port->cdat.table)
		return;
	...
		dev_dbg(&port->dev, "Failed to parse CDAT: %d\n", rc);
	...
		dev_dbg(&port->dev, "Failed to do perf coord calculations.\n");
/* cdat_sslbis_handler() */
	int size = sizeof(header->cdat) + sizeof(tbl->sslbis_header);
	struct device *dev = &port->dev;
	...
	len = le16_to_cpu((__force __le16)header->cdat.length);
	remain = len - size;
	if (!remain || remain % sizeof(tbl->entries[0]) ||
	    ...)
		return -EINVAL;
	...
	sslbis = &tbl->sslbis_header;
	if (sslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
		return 0;	/* unrecognized data type, skip */
	entries = remain / sizeof(tbl->entries[0]);
	...
		return -EINVAL;
	...
		u16 x = le16_to_cpu((__force __le16)tbl->entries[i].portx_id);
		u16 y = le16_to_cpu((__force __le16)tbl->entries[i].porty_id);
		...
		switch (x) {
		case SSLBIS_US_PORT:
			dsp_id = y;
			break;
		case SSLBIS_ANY_PORT:
			switch (y) {
			...
			default:
				dsp_id = y;
				break;
			}
			break;
		...
		}
		le_base = (__force __le64)tbl->sslbis_header.entry_base_unit;
		le_val = (__force __le16)tbl->entries[i].latency_or_bandwidth;
		val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
				     sslbis->data_type);
		xa_for_each(&port->dports, index, dport) {
			if (... ||
			    dsp_id == dport->port_id) {
				cxl_access_coordinate_set(dport->coord,
							  sslbis->data_type,
							  val);
/* cxl_switch_parse_cdat() */
	if (!port->cdat.table)
		return;

	rc = cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS, cdat_sslbis_handler,
			      port, port->cdat.table, port->cdat.length);
	...
		dev_dbg(&port->dev, "Failed to parse SSLBIS: %d\n", rc);
/* __cxl_coordinates_combine(): bandwidth takes the min of the two inputs,
 * latency takes the sum */
	if (c1->write_bandwidth && c2->write_bandwidth)
		out->write_bandwidth = min(c1->write_bandwidth,
					   c2->write_bandwidth);
	out->write_latency = c1->write_latency + c2->write_latency;

	if (c1->read_bandwidth && c2->read_bandwidth)
		out->read_bandwidth = min(c1->read_bandwidth,
					  c2->read_bandwidth);
	out->read_latency = c1->read_latency + c2->read_latency;
/**
 * cxl_coordinates_combine - Combine the two input coordinates
 * ...
 */
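/*
 * Illustrative only (not part of the file): a user-space check of the
 * combine rule implemented above -- across two hops, bandwidth bottlenecks
 * (min) while latency accumulates (sum).
 */
#include <assert.h>

struct coord { unsigned int bw, lat; };

static struct coord combine(struct coord a, struct coord b)
{
	struct coord out = {
		.bw = a.bw < b.bw ? a.bw : b.bw,	/* min of the two hops */
		.lat = a.lat + b.lat,			/* latencies add up */
	};
	return out;
}

int main(void)
{
	struct coord ep = { .bw = 8192, .lat = 100 };	/* device-reported */
	struct coord link = { .bw = 4096, .lat = 50 };	/* upstream hop */
	struct coord c = combine(ep, link);

	assert(c.bw == 4096 && c.lat == 150);
	return 0;
}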
/* dpa_perf_contains() */
	struct range dpa = {
		.start = dpa_res->start,
		.end = dpa_res->end,
	};

	return range_contains(&perf->dpa_range, &dpa);
/* cxled_get_dpa_perf(): pick the perf data for the decoder's partition */
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	...
		perf = &mds->ram_perf;		/* RAM partition */
	...
		perf = &mds->pmem_perf;		/* PMEM partition */
	...
		return ERR_PTR(-EINVAL);

	if (!dpa_perf_contains(perf, cxled->dpa_res))
		return ERR_PTR(-EINVAL);
/**
 * cxl_endpoint_gather_bandwidth - collect all the endpoint bandwidth in an xarray
 * ...
 * Return: 0 for success or -errno
 *
 * Collects the aggregated endpoint bandwidth and stores the bandwidth in
 * an xarray indexed by the upstream device of the switch or the RP
 * device. ... If the
 * device is directly connected to an RP, then no SSLBIS is involved.
 * (An illustrative sketch of the aggregation pattern follows the
 * function body below.)
 */
/* cxl_endpoint_gather_bandwidth() */
	struct cxl_port *endpoint = to_cxl_port(cxled->cxld.dev.parent);
	struct cxl_port *parent_port = to_cxl_port(endpoint->dev.parent);
	struct cxl_port *gp_port = to_cxl_port(parent_port->dev.parent);
	...
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	...
	if (!dev_is_pci(cxlds->dev))
		return -ENODEV;

	if (cxlds->rcd)
		return -ENODEV;	/* no bandwidth calculation for RCD mode */

	perf = cxled_get_dpa_perf(cxled, cxlr->mode);
	...
	gp_port = to_cxl_port(parent_port->dev.parent);
	...
	/* index by the RP if the grandparent is the CXL root, otherwise by
	 * the parent switch upstream device */
	if (*gp_is_root)
		index = (unsigned long)endpoint->parent_dport->dport_dev;
	else
		index = (unsigned long)parent_port->uport_dev;
	...
		return -ENOMEM;
	...
	perf_ctx->port = parent_port;
	...
	/* min of the upstream link bandwidth and the CDAT coordinates */
	cxl_coordinates_combine(ep_coord, pci_coord, perf->cdat_coord);
	...
	/* add this endpoint to the running total
	 * of the endpoints with the same switch upstream device or RP */
	cxl_bandwidth_add(perf_ctx->coord, perf_ctx->coord, ep_coord);
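/*
 * Illustrative sketch (not part of the file): the load-or-create pattern the
 * gather functions use, keying the xarray by the upstream struct device
 * pointer. "my_ctx" and "my_accumulate" are made-up names standing in for
 * struct cxl_perf_ctx and the aggregation step above.
 */
struct my_ctx {
	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
	struct cxl_port *port;
};

static int my_accumulate(struct xarray *xa, struct device *us_dev,
			 struct access_coordinate *contrib,
			 struct cxl_port *parent_port)
{
	unsigned long index = (unsigned long)us_dev;	/* pointer as key */
	struct my_ctx *ctx = xa_load(xa, index);

	if (!ctx) {
		void *old;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;
		/* xa_store() returns the displaced entry or an xa_err value */
		old = xa_store(xa, index, ctx, GFP_KERNEL);
		if (xa_is_err(old)) {
			kfree(ctx);
			return xa_err(old);
		}
		ctx->port = parent_port;
	}
	/* endpoints sharing this upstream device pool their bandwidth */
	cxl_bandwidth_add(ctx->coord, ctx->coord, contrib);
	return 0;
}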
/**
 * cxl_switch_gather_bandwidth - collect all the bandwidth at switch level in an xarray
 * ...
 * Return: ... or ERR_PTR(-errno)
 *
 * ... Sum the resulting bandwidth under the switch upstream
 * device or an RP device. The function can be iterated over multiple switches
 * if the device is connected to multiple switches. (An illustrative
 * iteration sketch follows the function body below.)
 */
/* cxl_switch_gather_bandwidth() */
	...
		return ERR_PTR(-ENOMEM);
	...
		struct cxl_port *port = ctx->port;
		struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
		struct cxl_port *gp_port = to_cxl_port(parent_port->dev.parent);
		struct cxl_dport *dport = port->parent_dport;
		...
		/* index by the RP when the grandparent is the CXL root */
		if (is_root)
			us_index = (unsigned long)port->parent_dport->dport_dev;
		else
			us_index = (unsigned long)parent_port->uport_dev;
		...
			return ERR_PTR(-ENOMEM);
		...
		us_ctx->port = parent_port;
		...
			return ERR_PTR(-EINVAL);
		...
			return ERR_PTR(-ENXIO);
		/* min of the downstream total and the upstream link bandwidth */
		cxl_coordinates_combine(coords, coords, ctx->coord);
		/* fold in the upstream switch SSLBIS, if there is one */
		cxl_coordinates_combine(coords, coords, dport->coord);
		/* aggregate under the common upstream device */
		cxl_bandwidth_add(us_ctx->coord, us_ctx->coord, coords);
	...
		dev_dbg(&cxlr->dev,
			...);	/* asymmetric topology, bail out */
		return ERR_PTR(-EOPNOTSUPP);
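/*
 * Illustrative sketch (not part of the file): how the iteration the doc
 * comment above describes might look -- feed each level's output xarray back
 * in until every context sits directly under a root port. "walk_switches" is
 * a made-up name, and the free_perf_xa() cleanup helper name is assumed from
 * the DEFINE_FREE() usage in this file.
 */
static struct xarray *walk_switches(struct cxl_region *cxlr,
				    struct xarray *usp_xa)
{
	bool is_root = false;

	while (!is_root) {
		struct xarray *next =
			cxl_switch_gather_bandwidth(cxlr, usp_xa, &is_root);

		if (IS_ERR(next))
			return next;	/* caller still owns usp_xa */
		free_perf_xa(usp_xa);	/* this level is consumed */
		usp_xa = next;
	}
	return usp_xa;	/* entries now keyed by RP devices */
}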
/**
 * cxl_rp_gather_bandwidth - handle the root port level bandwidth collection
 * ...
 * Return: xarray that holds cxl_perf_ctx per host bridge or ERR_PTR(-errno)
 */
	...
		return ERR_PTR(-ENOMEM);
	...
		struct cxl_port *port = ctx->port;
		unsigned long hb_index = (unsigned long)port->uport_dev;
		...
			return ERR_PTR(-ENOMEM);
		...
		hb_ctx->port = port;
		...
		cxl_bandwidth_add(hb_ctx->coord, hb_ctx->coord, ctx->coord);
/**
 * cxl_hb_gather_bandwidth - handle the host bridge level bandwidth collection
 * ...
 * Return: xarray that holds cxl_perf_ctx per ACPI0017 device or ERR_PTR(-errno)
 */
	...
		return ERR_PTR(-ENOMEM);
	...
		struct cxl_port *port = ctx->port;
		...
		parent_port = to_cxl_port(port->dev.parent);
		mw_index = (unsigned long)parent_port->uport_dev;
		...
			return ERR_PTR(-ENOMEM);
		...
		/* fold in the dport coordinates, then aggregate per ACPI0017 */
		dport = port->parent_dport;
		cxl_coordinates_combine(ctx->coord, ctx->coord, dport->coord);
		cxl_bandwidth_add(mw_ctx->coord, mw_ctx->coord, ctx->coord);
/**
 * cxl_region_update_bandwidth - Update the bandwidth access coordinates of a region
 * ...
 */
	...
		cxl_bandwidth_add(coord, coord, ctx->coord);

	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		cxlr->coord[i].read_bandwidth = coord[i].read_bandwidth;
		cxlr->coord[i].write_bandwidth = coord[i].write_bandwidth;
	}
/**
 * cxl_region_shared_upstream_bandwidth_update - Recalculate the bandwidth for
 *						 the region
 * ...
 * (An illustrative sketch of the full pipeline follows this excerpt.)
 */
	...
	for (int i = 0; i < cxlr->params.nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = cxlr->params.targets[i];
	...
	/* all targets must sit under the root, or none of them */
	if (root_count && root_count != cxlr->params.nr_targets) {
		dev_dbg(&cxlr->dev,
			...);
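/*
 * Illustrative sketch (not part of the file): the bottom-up pipeline the
 * gather functions form. "region_bw_pipeline" is a made-up wrapper reusing
 * the walk_switches() sketch above; the cxl_rp_gather_bandwidth() and
 * cxl_hb_gather_bandwidth() signatures are inferred from their doc comments,
 * and error handling is omitted.
 */
static void region_bw_pipeline(struct cxl_region *cxlr, struct xarray *ep_xa)
{
	struct xarray *xa;

	xa = walk_switches(cxlr, ep_xa);	/* climb switch levels to the RPs */
	xa = cxl_rp_gather_bandwidth(xa);	/* RPs -> host bridges */
	xa = cxl_hb_gather_bandwidth(xa);	/* host bridges -> ACPI0017 */
	cxl_region_update_bandwidth(cxlr, xa);	/* sum into cxlr->coord[] */
}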
/* cxl_region_perf_data_calculate() */
	perf = cxled_get_dpa_perf(cxled, cxlr->mode);
	...
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		/* total bandwidth and the worst latency for the region */
		cxlr->coord[i].read_latency = max_t(unsigned int,
						    cxlr->coord[i].read_latency,
						    perf->coord[i].read_latency);
		cxlr->coord[i].write_latency = max_t(unsigned int,
						     cxlr->coord[i].write_latency,
						     perf->coord[i].write_latency);
		cxlr->coord[i].read_bandwidth += perf->coord[i].read_bandwidth;
		cxlr->coord[i].write_bandwidth += perf->coord[i].write_bandwidth;
	}
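/*
 * Illustrative only (not part of the file): the per-target accumulation
 * above in user-space form -- interleaved targets add bandwidth, while the
 * region latency is the worst (max) of its targets.
 */
#include <assert.h>

int main(void)
{
	unsigned int bw[] = { 4096, 4096, 2048 };	/* MB/s per target */
	unsigned int lat[] = { 150, 170, 300 };		/* ns per target */
	unsigned int region_bw = 0, region_lat = 0;

	for (int i = 0; i < 3; i++) {
		region_bw += bw[i];			/* bandwidth sums */
		if (lat[i] > region_lat)
			region_lat = lat[i];		/* latency is the max */
	}
	assert(region_bw == 10240 && region_lat == 300);
	return 0;
}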
/* cxl_update_hmat_access_coordinates() */
	...
	return hmat_update_target_coordinates(nid, &cxlr->coord[access], access);