Lines Matching +full:interleave +full:- +full:mode

1 // SPDX-License-Identifier: GPL-2.0-only
12 #include <linux/memory-tiers.h>
28 * 1. Interleave granularity
29 * 2. Interleave size
30 * 3. Decoder targets
36 .attr = { .name = __stringify(_name), .mode = 0444 }, \
50 if (cxlr->coord[level].attrib == 0) \
51 return -ENOENT; \
53 return sysfs_emit(buf, "%u\n", cxlr->coord[level].attrib); \
94 cxlr->coord[level].read_latency == 0) \
98 cxlr->coord[level].write_latency == 0) \
102 cxlr->coord[level].read_bandwidth == 0) \
106 cxlr->coord[level].write_bandwidth == 0) \
109 return a->mode; \
141 struct cxl_region_params *p = &cxlr->params; in uuid_show()
147 if (cxlr->mode != CXL_DECODER_PMEM) in uuid_show()
150 rc = sysfs_emit(buf, "%pUb\n", &p->uuid); in uuid_show()
167 p = &cxlr->params; in is_dup()
169 if (uuid_equal(&p->uuid, uuid)) { in is_dup()
171 return -EBUSY; in is_dup()
181 struct cxl_region_params *p = &cxlr->params; in uuid_store()
186 return -EINVAL; in uuid_store()
193 return -EINVAL; in uuid_store()
199 if (uuid_equal(&p->uuid, &temp)) in uuid_store()
202 rc = -EBUSY; in uuid_store()
203 if (p->state >= CXL_CONFIG_ACTIVE) in uuid_store()
210 uuid_copy(&p->uuid, &temp); in uuid_store()
223 return xa_load(&port->regions, (unsigned long)cxlr); in cxl_rr_load()
231 &cxlr->dev, in cxl_region_invalidate_memregion()
235 dev_WARN(&cxlr->dev, in cxl_region_invalidate_memregion()
237 return -ENXIO; in cxl_region_invalidate_memregion()
247 struct cxl_region_params *p = &cxlr->params; in cxl_region_decode_reset()
257 for (i = count - 1; i >= 0; i--) { in cxl_region_decode_reset()
258 struct cxl_endpoint_decoder *cxled = p->targets[i]; in cxl_region_decode_reset()
261 struct cxl_dev_state *cxlds = cxlmd->cxlds; in cxl_region_decode_reset()
264 if (cxlds->rcd) in cxl_region_decode_reset()
267 while (!is_cxl_root(to_cxl_port(iter->dev.parent))) in cxl_region_decode_reset()
268 iter = to_cxl_port(iter->dev.parent); in cxl_region_decode_reset()
271 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) { in cxl_region_decode_reset()
276 cxld = cxl_rr->decoder; in cxl_region_decode_reset()
277 if (cxld->reset) in cxl_region_decode_reset()
278 cxld->reset(cxld); in cxl_region_decode_reset()
279 set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); in cxl_region_decode_reset()
283 cxled->cxld.reset(&cxled->cxld); in cxl_region_decode_reset()
284 set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); in cxl_region_decode_reset()
288 clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); in cxl_region_decode_reset()
295 if (cxld->commit) in commit_decoder()
296 return cxld->commit(cxld); in commit_decoder()
298 if (is_switch_decoder(&cxld->dev)) in commit_decoder()
299 cxlsd = to_cxl_switch_decoder(&cxld->dev); in commit_decoder()
301 if (dev_WARN_ONCE(&cxld->dev, !cxlsd || cxlsd->nr_targets > 1, in commit_decoder()
302 "->commit() is required\n")) in commit_decoder()
303 return -ENXIO; in commit_decoder()
309 struct cxl_region_params *p = &cxlr->params; in cxl_region_decode_commit()
312 for (i = 0; i < p->nr_targets; i++) { in cxl_region_decode_commit()
313 struct cxl_endpoint_decoder *cxled = p->targets[i]; in cxl_region_decode_commit()
322 iter = to_cxl_port(iter->dev.parent)) { in cxl_region_decode_commit()
324 cxld = cxl_rr->decoder; in cxl_region_decode_commit()
333 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) { in cxl_region_decode_commit()
335 cxld = cxl_rr->decoder; in cxl_region_decode_commit()
336 if (cxld->reset) in cxl_region_decode_commit()
337 cxld->reset(cxld); in cxl_region_decode_commit()
340 cxled->cxld.reset(&cxled->cxld); in cxl_region_decode_commit()
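The two loops above program a region's decoders from the endpoint up toward the CXL root and, when programming fails partway, unwind from the failed port back down. A toy sketch of those two traversal orders, assuming a simple parent/next linked chain (types and names here are stand-ins, not kernel API):

#include <stdio.h>

struct port {
	const char *name;
	struct port *parent;	/* toward the CXL root */
	struct port *next;	/* toward the endpoint, like the cxl_ep_load() walk */
};

/* commit bottom up: endpoint decoder first, root-most decoder last */
static void commit_bottom_up(struct port *endpoint)
{
	for (struct port *iter = endpoint; iter; iter = iter->parent)
		printf("commit %s\n", iter->name);
}

/* teardown top down: root-most decoder first, endpoint decoder last */
static void reset_top_down(struct port *topmost)
{
	for (struct port *iter = topmost; iter; iter = iter->next)
		printf("reset %s\n", iter->name);
}

int main(void)
{
	struct port host_bridge = { "host-bridge decoder", NULL, NULL };
	struct port endpoint = { "endpoint decoder", &host_bridge, NULL };

	host_bridge.next = &endpoint;
	commit_bottom_up(&endpoint);	/* endpoint, then host-bridge */
	reset_top_down(&host_bridge);	/* host-bridge, then endpoint */
	return 0;
}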
357 struct cxl_region_params *p = &cxlr->params; in commit_store()
370 if (commit && p->state >= CXL_CONFIG_COMMIT) in commit_store()
372 if (!commit && p->state < CXL_CONFIG_COMMIT) in commit_store()
376 if (commit && p->state < CXL_CONFIG_ACTIVE) { in commit_store()
377 rc = -ENXIO; in commit_store()
392 p->state = CXL_CONFIG_COMMIT; in commit_store()
394 p->state = CXL_CONFIG_RESET_PENDING; in commit_store()
396 device_release_driver(&cxlr->dev); in commit_store()
403 if (p->state == CXL_CONFIG_RESET_PENDING) { in commit_store()
404 cxl_region_decode_reset(cxlr, p->interleave_ways); in commit_store()
405 p->state = CXL_CONFIG_ACTIVE; in commit_store()
421 struct cxl_region_params *p = &cxlr->params; in commit_show()
427 rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT); in commit_show()
441 * Support tooling that expects to find a 'uuid' attribute for all in cxl_region_visible()
442 * regions regardless of mode. in cxl_region_visible()
444 if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM) in cxl_region_visible()
446 return a->mode; in cxl_region_visible()
453 struct cxl_region_params *p = &cxlr->params; in interleave_ways_show()
459 rc = sysfs_emit(buf, "%d\n", p->interleave_ways); in interleave_ways_show()
471 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent); in interleave_ways_store()
472 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; in interleave_ways_store()
474 struct cxl_region_params *p = &cxlr->params; in interleave_ways_store()
488 * Even for x3, x6, and x12 interleaves the region interleave must be a in interleave_ways_store()
489 * power of 2 multiple of the host bridge interleave. in interleave_ways_store()
491 if (!is_power_of_2(val / cxld->interleave_ways) || in interleave_ways_store()
492 (val % cxld->interleave_ways)) { in interleave_ways_store()
493 dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val); in interleave_ways_store()
494 return -EINVAL; in interleave_ways_store()
500 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) { in interleave_ways_store()
501 rc = -EBUSY; in interleave_ways_store()
505 save = p->interleave_ways; in interleave_ways_store()
506 p->interleave_ways = val; in interleave_ways_store()
507 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group()); in interleave_ways_store()
509 p->interleave_ways = save; in interleave_ways_store()
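The power-of-2 check above constrains the region's interleave ways to the root decoder's ways times a power of 2, so a x3 host-bridge interleave admits x3, x6, and x12 regions. A minimal userspace sketch of that rule (helper names are illustrative, not kernel API):

#include <stdbool.h>
#include <stdio.h>

static bool is_power_of_2(unsigned int n)
{
	return n && !(n & (n - 1));
}

/* mirrors: !is_power_of_2(val / root_ways) || (val % root_ways) */
static bool valid_region_ways(unsigned int val, unsigned int root_ways)
{
	if (val % root_ways)
		return false;
	return is_power_of_2(val / root_ways);
}

int main(void)
{
	/* with a x3 root interleave only x3, x6, x12 pass */
	for (unsigned int ways = 1; ways <= 16; ways++)
		if (valid_region_ways(ways, 3))
			printf("x%u ok\n", ways);
	return 0;
}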
523 struct cxl_region_params *p = &cxlr->params; in interleave_granularity_show()
529 rc = sysfs_emit(buf, "%d\n", p->interleave_granularity); in interleave_granularity_show()
539 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent); in interleave_granularity_store()
540 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; in interleave_granularity_store()
542 struct cxl_region_params *p = &cxlr->params; in interleave_granularity_store()
555 * When the host-bridge is interleaved, disallow region granularity != in interleave_granularity_store()
556 * root granularity. Regions with a granularity less than the root in interleave_granularity_store()
557 * interleave result in needing multiple endpoints to support a single in interleave_granularity_store()
558 * slot in the interleave (possible to support in the future). Regions in interleave_granularity_store()
559 * with a granularity greater than the root interleave result in invalid in interleave_granularity_store()
560 * DPA translations (invalid to support). in interleave_granularity_store()
562 if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity) in interleave_granularity_store()
563 return -EINVAL; in interleave_granularity_store()
568 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) { in interleave_granularity_store()
569 rc = -EBUSY; in interleave_granularity_store()
573 p->interleave_granularity = val; in interleave_granularity_store()
586 struct cxl_region_params *p = &cxlr->params; in resource_show()
587 u64 resource = -1ULL; in resource_show()
593 if (p->res) in resource_show()
594 resource = p->res->start; in resource_show()
607 return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode)); in mode_show()
609 static DEVICE_ATTR_RO(mode);
613 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); in alloc_hpa()
614 struct cxl_region_params *p = &cxlr->params; in alloc_hpa()
621 if (p->res && resource_size(p->res) == size) in alloc_hpa()
625 if (p->res) in alloc_hpa()
626 return -EBUSY; in alloc_hpa()
628 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) in alloc_hpa()
629 return -EBUSY; in alloc_hpa()
632 if (!p->interleave_ways || !p->interleave_granularity || in alloc_hpa()
633 (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid))) in alloc_hpa()
634 return -ENXIO; in alloc_hpa()
636 div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder); in alloc_hpa()
638 return -EINVAL; in alloc_hpa()
640 res = alloc_free_mem_region(cxlrd->res, size, SZ_256M, in alloc_hpa()
641 dev_name(&cxlr->dev)); in alloc_hpa()
643 dev_dbg(&cxlr->dev, in alloc_hpa()
645 PTR_ERR(res), &size, cxlrd->res->name, cxlrd->res); in alloc_hpa()
649 p->res = res; in alloc_hpa()
650 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE; in alloc_hpa()
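alloc_hpa() above rejects sizes that are not a whole multiple of SZ_256M times the interleave ways, so every position contributes complete 256MB-aligned chunks. A sketch of the arithmetic (SZ_256M open-coded; names illustrative):

#include <stdint.h>
#include <stdio.h>

#define SZ_256M (256ULL << 20)

/* mirrors the div64_u64_rem(size, SZ_256M * ways, &remainder) check */
static int hpa_size_ok(uint64_t size, unsigned int interleave_ways)
{
	return size % (SZ_256M * interleave_ways) == 0;
}

int main(void)
{
	printf("%d\n", hpa_size_ok(2ULL << 30, 4)); /* 2GB, x4 -> 1 (ok) */
	printf("%d\n", hpa_size_ok(1ULL << 30, 3)); /* 1GB, x3 -> 0 (rejected) */
	return 0;
}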
657 struct cxl_region_params *p = &cxlr->params; in cxl_region_iomem_release()
659 if (device_is_registered(&cxlr->dev)) in cxl_region_iomem_release()
661 if (p->res) { in cxl_region_iomem_release()
666 if (p->res->parent) in cxl_region_iomem_release()
667 remove_resource(p->res); in cxl_region_iomem_release()
668 kfree(p->res); in cxl_region_iomem_release()
669 p->res = NULL; in cxl_region_iomem_release()
675 struct cxl_region_params *p = &cxlr->params; in free_hpa()
679 if (!p->res) in free_hpa()
682 if (p->state >= CXL_CONFIG_ACTIVE) in free_hpa()
683 return -EBUSY; in free_hpa()
686 p->state = CXL_CONFIG_IDLE; in free_hpa()
721 struct cxl_region_params *p = &cxlr->params; in size_show()
728 if (p->res) in size_show()
729 size = resource_size(p->res); in size_show()
755 struct cxl_region_params *p = &cxlr->params; in show_targetN()
763 if (pos >= p->interleave_ways) { in show_targetN()
764 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos, in show_targetN()
765 p->interleave_ways); in show_targetN()
766 rc = -ENXIO; in show_targetN()
770 cxled = p->targets[pos]; in show_targetN()
774 rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev)); in show_targetN()
786 * if port->commit_end is not the only free decoder, then out of in check_commit_order()
787 * order shutdown has occurred, block further allocations until in check_commit_order()
788 * that is resolved in check_commit_order()
790 if (((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) in check_commit_order()
791 return -EBUSY; in check_commit_order()
797 struct cxl_port *port = to_cxl_port(dev->parent); in match_free_decoder()
806 if (cxld->id != port->commit_end + 1) in match_free_decoder()
809 if (cxld->region) { in match_free_decoder()
810 dev_dbg(dev->parent, in match_free_decoder()
812 dev_name(dev), dev_name(&cxld->region->dev)); in match_free_decoder()
816 rc = device_for_each_child_reverse_from(dev->parent, dev, NULL, in match_free_decoder()
819 dev_dbg(dev->parent, in match_free_decoder()
837 r = &cxld->hpa_range; in match_auto_decoder()
839 if (p->res && p->res->start == r->start && p->res->end == r->end) in match_auto_decoder()
853 return &cxled->cxld; in cxl_region_find_decoder()
855 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) in cxl_region_find_decoder()
856 dev = device_find_child(&port->dev, &cxlr->params, in cxl_region_find_decoder()
859 dev = device_find_child(&port->dev, NULL, match_free_decoder); in cxl_region_find_decoder()
876 struct cxl_decoder *cxld_iter = rr->decoder; in auto_order_ok()
879 * Allow the out of order assembly of auto-discovered regions. in auto_order_ok()
884 dev_dbg(&cxld->dev, "check for HPA violation %s:%d < %s:%d\n", in auto_order_ok()
885 dev_name(&cxld->dev), cxld->id, in auto_order_ok()
886 dev_name(&cxld_iter->dev), cxld_iter->id); in auto_order_ok()
888 if (cxld_iter->id > cxld->id) in auto_order_ok()
898 struct cxl_region_params *p = &cxlr->params; in alloc_region_ref()
903 xa_for_each(&port->regions, index, iter) { in alloc_region_ref()
904 struct cxl_region_params *ip = &iter->region->params; in alloc_region_ref()
906 if (!ip->res || ip->res->start < p->res->start) in alloc_region_ref()
909 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) { in alloc_region_ref()
913 if (auto_order_ok(port, iter->region, cxld)) in alloc_region_ref()
916 dev_dbg(&cxlr->dev, "%s: HPA order violation %s:%pr vs %pr\n", in alloc_region_ref()
917 dev_name(&port->dev), in alloc_region_ref()
918 dev_name(&iter->region->dev), ip->res, p->res); in alloc_region_ref()
920 return ERR_PTR(-EBUSY); in alloc_region_ref()
925 return ERR_PTR(-ENOMEM); in alloc_region_ref()
926 cxl_rr->port = port; in alloc_region_ref()
927 cxl_rr->region = cxlr; in alloc_region_ref()
928 cxl_rr->nr_targets = 1; in alloc_region_ref()
929 xa_init(&cxl_rr->endpoints); in alloc_region_ref()
931 rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL); in alloc_region_ref()
933 dev_dbg(&cxlr->dev, in alloc_region_ref()
935 dev_name(&port->dev), rc); in alloc_region_ref()
945 struct cxl_region *cxlr = cxl_rr->region; in cxl_rr_free_decoder()
946 struct cxl_decoder *cxld = cxl_rr->decoder; in cxl_rr_free_decoder()
951 dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n"); in cxl_rr_free_decoder()
952 if (cxld->region == cxlr) { in cxl_rr_free_decoder()
953 cxld->region = NULL; in cxl_rr_free_decoder()
954 put_device(&cxlr->dev); in cxl_rr_free_decoder()
960 struct cxl_port *port = cxl_rr->port; in free_region_ref()
961 struct cxl_region *cxlr = cxl_rr->region; in free_region_ref()
964 xa_erase(&port->regions, (unsigned long)cxlr); in free_region_ref()
965 xa_destroy(&cxl_rr->endpoints); in free_region_ref()
973 struct cxl_port *port = cxl_rr->port; in cxl_rr_ep_add()
974 struct cxl_region *cxlr = cxl_rr->region; in cxl_rr_ep_add()
975 struct cxl_decoder *cxld = cxl_rr->decoder; in cxl_rr_ep_add()
979 rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep, in cxl_rr_ep_add()
984 cxl_rr->nr_eps++; in cxl_rr_ep_add()
986 if (!cxld->region) { in cxl_rr_ep_add()
987 cxld->region = cxlr; in cxl_rr_ep_add()
988 get_device(&cxlr->dev); in cxl_rr_ep_add()
1002 dev_dbg(&cxlr->dev, "%s: no decoder available\n", in cxl_rr_alloc_decoder()
1003 dev_name(&port->dev)); in cxl_rr_alloc_decoder()
1004 return -EBUSY; in cxl_rr_alloc_decoder()
1007 if (cxld->region) { in cxl_rr_alloc_decoder()
1008 dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n", in cxl_rr_alloc_decoder()
1009 dev_name(&port->dev), dev_name(&cxld->dev), in cxl_rr_alloc_decoder()
1010 dev_name(&cxld->region->dev)); in cxl_rr_alloc_decoder()
1011 return -EBUSY; in cxl_rr_alloc_decoder()
1015 * Endpoints should already match the region type, but backstop that in cxl_rr_alloc_decoder()
1016 * assumption with an assertion. Switch-decoders change mapping-type in cxl_rr_alloc_decoder()
1017 * based on what is mapped when they are assigned to a region. in cxl_rr_alloc_decoder()
1019 dev_WARN_ONCE(&cxlr->dev, in cxl_rr_alloc_decoder()
1021 cxld->target_type != cxlr->type, in cxl_rr_alloc_decoder()
1022 "%s:%s mismatch decoder type %d -> %d\n", in cxl_rr_alloc_decoder()
1023 dev_name(&cxled_to_memdev(cxled)->dev), in cxl_rr_alloc_decoder()
1024 dev_name(&cxld->dev), cxld->target_type, cxlr->type); in cxl_rr_alloc_decoder()
1025 cxld->target_type = cxlr->type; in cxl_rr_alloc_decoder()
1026 cxl_rr->decoder = cxld; in cxl_rr_alloc_decoder()
1031 * cxl_port_attach_region() - track a region's interest in a port by endpoint
1035 * @pos: interleave position of @cxled in @cxlr
1043 - validate that there are no other regions with a higher HPA already
1044   associated with @port
1045 - establish a region reference if one is not already present
1047 - additionally allocate a decoder instance that will host @cxlr on
1048   @port
1050 - pin the region reference by the endpoint
1051 - account for how many entries in @port's target list are needed to
1052   cover all of the added endpoints.
1064 int rc = -EBUSY; in cxl_port_attach_region()
1079 xa_for_each(&cxl_rr->endpoints, index, ep_iter) { in cxl_port_attach_region()
1082 if (ep_iter->next == ep->next) { in cxl_port_attach_region()
1092 if (!found || !ep->next) { in cxl_port_attach_region()
1093 cxl_rr->nr_targets++; in cxl_port_attach_region()
1099 dev_dbg(&cxlr->dev, in cxl_port_attach_region()
1101 dev_name(&port->dev)); in cxl_port_attach_region()
1110 cxld = cxl_rr->decoder; in cxl_port_attach_region()
1116 if (is_switch_decoder(&cxld->dev)) { in cxl_port_attach_region()
1119 cxlsd = to_cxl_switch_decoder(&cxld->dev); in cxl_port_attach_region()
1120 if (cxl_rr->nr_targets > cxlsd->nr_targets) { in cxl_port_attach_region()
1121 dev_dbg(&cxlr->dev, in cxl_port_attach_region()
1123 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_attach_region()
1124 dev_name(&cxld->dev), dev_name(&cxlmd->dev), in cxl_port_attach_region()
1125 dev_name(&cxled->cxld.dev), pos, in cxl_port_attach_region()
1126 cxlsd->nr_targets); in cxl_port_attach_region()
1127 rc = -ENXIO; in cxl_port_attach_region()
1134 dev_dbg(&cxlr->dev, in cxl_port_attach_region()
1136 dev_name(&port->dev), dev_name(&cxlmd->dev), in cxl_port_attach_region()
1137 dev_name(&cxld->dev)); in cxl_port_attach_region()
1141 dev_dbg(&cxlr->dev, in cxl_port_attach_region()
1143 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_attach_region()
1144 dev_name(&cxld->dev), dev_name(&cxlmd->dev), in cxl_port_attach_region()
1145 dev_name(&cxled->cxld.dev), pos, in cxl_port_attach_region()
1146 ep ? ep->next ? dev_name(ep->next->uport_dev) : in cxl_port_attach_region()
1147 dev_name(&cxlmd->dev) : in cxl_port_attach_region()
1149 cxl_rr->nr_eps, cxl_rr->nr_targets); in cxl_port_attach_region()
1154 cxl_rr->nr_targets--; in cxl_port_attach_region()
1155 if (cxl_rr->nr_eps == 0) in cxl_port_attach_region()
1177 if (cxl_rr->decoder == &cxled->cxld) in cxl_port_detach_region()
1178 cxl_rr->nr_eps--; in cxl_port_detach_region()
1180 ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled); in cxl_port_detach_region()
1186 cxl_rr->nr_eps--; in cxl_port_detach_region()
1187 xa_for_each(&cxl_rr->endpoints, index, ep_iter) { in cxl_port_detach_region()
1188 if (ep_iter->next == ep->next) { in cxl_port_detach_region()
1194 cxl_rr->nr_targets--; in cxl_port_detach_region()
1197 if (cxl_rr->nr_eps == 0) in cxl_port_detach_region()
1206 struct cxl_region *cxlr = cxl_rr->region; in check_last_peer()
1207 struct cxl_region_params *p = &cxlr->params; in check_last_peer()
1209 struct cxl_port *port = cxl_rr->port; in check_last_peer()
1212 int pos = cxled->pos; in check_last_peer()
1215 * If this position wants to share a dport with the last endpoint mapped in check_last_peer()
1216 * then that endpoint, at index 'position - distance', must also be in check_last_peer()
1217 * mapped by this dport. in check_last_peer()
1220 dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n", in check_last_peer()
1221 dev_name(port->uport_dev), dev_name(&port->dev), in check_last_peer()
1222 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos); in check_last_peer()
1223 return -ENXIO; in check_last_peer()
1225 cxled_peer = p->targets[pos - distance]; in check_last_peer()
1228 if (ep->dport != ep_peer->dport) { in check_last_peer()
1229 dev_dbg(&cxlr->dev, in check_last_peer()
1231 dev_name(port->uport_dev), dev_name(&port->dev), in check_last_peer()
1232 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos, in check_last_peer()
1233 dev_name(&cxlmd_peer->dev), in check_last_peer()
1234 dev_name(&cxled_peer->cxld.dev)); in check_last_peer()
1235 return -ENXIO; in check_last_peer()
1243 struct cxl_port *port = to_cxl_port(cxld->dev.parent); in check_interleave_cap()
1244 struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); in check_interleave_cap()
1250 if (!test_bit(iw, &cxlhdm->iw_cap_mask)) in check_interleave_cap()
1251 return -ENXIO; in check_interleave_cap()
1259 * interleave bits are none. in check_interleave_cap()
1266 * interleave bits are none. in check_interleave_cap()
1274 high_pos = eiw + eig - 1; in check_interleave_cap()
1279 if (interleave_mask & ~cxlhdm->interleave_mask) in check_interleave_cap()
1280 return -ENXIO; in check_interleave_cap()
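check_interleave_cap() works on the spec-encoded interleave values: eig encodes granularity as log2(gran) - 8, and eiw encodes ways as log2(ways) for power-of-2 counts or log2(ways / 3) + 8 for x3/x6/x12. A userspace sketch of those encodings, mirroring the behaviour of the kernel's ways_to_eiw()/granularity_to_eig() helpers (error handling trimmed for the sketch):

#include <stdint.h>
#include <stdio.h>

static int ilog2_u32(uint32_t v)
{
	int r = -1;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

static int ways_to_eiw(unsigned int ways, uint8_t *eiw)
{
	if (!ways)
		return -1;
	if (!(ways & (ways - 1))) {		/* power of 2: x1..x16 */
		*eiw = ilog2_u32(ways);
		return 0;
	}
	if (ways % 3)
		return -1;
	ways /= 3;
	if (ways & (ways - 1))
		return -1;
	*eiw = ilog2_u32(ways) + 8;		/* x3 -> 8, x6 -> 9, x12 -> 10 */
	return 0;
}

static int granularity_to_eig(unsigned int gran, uint16_t *eig)
{
	if (gran < 256 || (gran & (gran - 1)))
		return -1;
	*eig = ilog2_u32(gran) - 8;		/* 256B -> 0, 512B -> 1, ... */
	return 0;
}

int main(void)
{
	uint8_t eiw;
	uint16_t eig;

	ways_to_eiw(6, &eiw);			/* eiw = 9 */
	granularity_to_eig(1024, &eig);		/* eig = 2 */
	printf("eiw=%u eig=%u\n", eiw, eig);
	return 0;
}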
1289 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); in cxl_port_setup_targets()
1290 int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos; in cxl_port_setup_targets()
1291 struct cxl_port *parent_port = to_cxl_port(port->dev.parent); in cxl_port_setup_targets()
1295 struct cxl_region_params *p = &cxlr->params; in cxl_port_setup_targets()
1296 struct cxl_decoder *cxld = cxl_rr->decoder; in cxl_port_setup_targets()
1306 if (!is_power_of_2(cxl_rr->nr_targets)) { in cxl_port_setup_targets()
1307 dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n", in cxl_port_setup_targets()
1308 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1309 cxl_rr->nr_targets); in cxl_port_setup_targets()
1310 return -EINVAL; in cxl_port_setup_targets()
1313 cxlsd = to_cxl_switch_decoder(&cxld->dev); in cxl_port_setup_targets()
1314 if (cxl_rr->nr_targets_set) { in cxl_port_setup_targets()
1320 * endpoint positions in the region interleave a given port can in cxl_port_setup_targets()
1324 * always 1 as every index targets a different host-bridge. At in cxl_port_setup_targets()
1330 distance *= cxl_rr_iter->nr_targets; in cxl_port_setup_targets()
1331 iter = to_cxl_port(iter->dev.parent); in cxl_port_setup_targets()
1333 distance *= cxlrd->cxlsd.cxld.interleave_ways; in cxl_port_setup_targets()
1335 for (i = 0; i < cxl_rr->nr_targets_set; i++) in cxl_port_setup_targets()
1336 if (ep->dport == cxlsd->target[i]) { in cxl_port_setup_targets()
1351 * does not allow interleaved host-bridges with in cxl_port_setup_targets()
1354 parent_ig = p->interleave_granularity; in cxl_port_setup_targets()
1355 parent_iw = cxlrd->cxlsd.cxld.interleave_ways; in cxl_port_setup_targets()
1357 * For purposes of address bit routing, use power-of-2 math for in cxl_port_setup_targets()
1367 parent_cxld = parent_rr->decoder; in cxl_port_setup_targets()
1368 parent_ig = parent_cxld->interleave_granularity; in cxl_port_setup_targets()
1369 parent_iw = parent_cxld->interleave_ways; in cxl_port_setup_targets()
1374 dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n", in cxl_port_setup_targets()
1375 dev_name(parent_port->uport_dev), in cxl_port_setup_targets()
1376 dev_name(&parent_port->dev), parent_ig); in cxl_port_setup_targets()
1382 dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n", in cxl_port_setup_targets()
1383 dev_name(parent_port->uport_dev), in cxl_port_setup_targets()
1384 dev_name(&parent_port->dev), parent_iw); in cxl_port_setup_targets()
1388 iw = cxl_rr->nr_targets; in cxl_port_setup_targets()
1391 dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n", in cxl_port_setup_targets()
1392 dev_name(port->uport_dev), dev_name(&port->dev), iw); in cxl_port_setup_targets()
1397 * Interleave granularity is a multiple of @parent_port granularity. in cxl_port_setup_targets()
1398 * Multiplier is the parent port interleave ways. in cxl_port_setup_targets()
1402 dev_dbg(&cxlr->dev, in cxl_port_setup_targets()
1404 dev_name(&parent_port->dev), parent_ig, parent_iw); in cxl_port_setup_targets()
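The step above derives this port's granularity from the parent decoder: each level down must stride past the address bits the parent already consumes, so the multiplier is the parent's interleave ways. A trivial worked sketch (values hypothetical):

#include <stdio.h>

int main(void)
{
	int parent_ig = 256;	/* parent interleave granularity, bytes */
	int parent_iw = 2;	/* parent interleave ways */

	/* this port decodes at the parent's granularity times its ways */
	printf("port granularity: %d\n", parent_ig * parent_iw); /* 512 */
	return 0;
}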
1410 dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n", in cxl_port_setup_targets()
1411 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1416 if (iw > 8 || iw > cxlsd->nr_targets) { in cxl_port_setup_targets()
1417 dev_dbg(&cxlr->dev, in cxl_port_setup_targets()
1419 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1420 dev_name(&cxld->dev), iw, cxlsd->nr_targets); in cxl_port_setup_targets()
1421 return -ENXIO; in cxl_port_setup_targets()
1424 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) { in cxl_port_setup_targets()
1425 if (cxld->interleave_ways != iw || in cxl_port_setup_targets()
1426 cxld->interleave_granularity != ig || in cxl_port_setup_targets()
1427 cxld->hpa_range.start != p->res->start || in cxl_port_setup_targets()
1428 cxld->hpa_range.end != p->res->end || in cxl_port_setup_targets()
1429 ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) { in cxl_port_setup_targets()
1430 dev_err(&cxlr->dev, in cxl_port_setup_targets()
1432 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1433 __func__, iw, ig, p->res); in cxl_port_setup_targets()
1434 dev_err(&cxlr->dev, in cxl_port_setup_targets()
1436 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1437 __func__, cxld->interleave_ways, in cxl_port_setup_targets()
1438 cxld->interleave_granularity, in cxl_port_setup_targets()
1439 (cxld->flags & CXL_DECODER_F_ENABLE) ? in cxl_port_setup_targets()
1442 cxld->hpa_range.start, cxld->hpa_range.end); in cxl_port_setup_targets()
1443 return -ENXIO; in cxl_port_setup_targets()
1448 dev_dbg(&cxlr->dev, in cxl_port_setup_targets()
1450 dev_name(port->uport_dev), in cxl_port_setup_targets()
1451 dev_name(&port->dev), iw, ig); in cxl_port_setup_targets()
1455 cxld->interleave_ways = iw; in cxl_port_setup_targets()
1456 cxld->interleave_granularity = ig; in cxl_port_setup_targets()
1457 cxld->hpa_range = (struct range) { in cxl_port_setup_targets()
1458 .start = p->res->start, in cxl_port_setup_targets()
1459 .end = p->res->end, in cxl_port_setup_targets()
1462 dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport_dev), in cxl_port_setup_targets()
1463 dev_name(&port->dev), iw, ig); in cxl_port_setup_targets()
1465 if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) { in cxl_port_setup_targets()
1466 dev_dbg(&cxlr->dev, in cxl_port_setup_targets()
1468 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1469 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos); in cxl_port_setup_targets()
1470 return -ENXIO; in cxl_port_setup_targets()
1472 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) { in cxl_port_setup_targets()
1473 if (cxlsd->target[cxl_rr->nr_targets_set] != ep->dport) { in cxl_port_setup_targets()
1474 dev_dbg(&cxlr->dev, "%s:%s: %s expected %s at %d\n", in cxl_port_setup_targets()
1475 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1476 dev_name(&cxlsd->cxld.dev), in cxl_port_setup_targets()
1477 dev_name(ep->dport->dport_dev), in cxl_port_setup_targets()
1478 cxl_rr->nr_targets_set); in cxl_port_setup_targets()
1479 return -ENXIO; in cxl_port_setup_targets()
1482 cxlsd->target[cxl_rr->nr_targets_set] = ep->dport; in cxl_port_setup_targets()
1485 cxl_rr->nr_targets_set += inc; in cxl_port_setup_targets()
1486 dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n", in cxl_port_setup_targets()
1487 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1488 cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport_dev), in cxl_port_setup_targets()
1489 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos); in cxl_port_setup_targets()
1506 cxl_rr->nr_targets_set = 0; in cxl_port_reset_targets()
1508 cxld = cxl_rr->decoder; in cxl_port_reset_targets()
1509 cxld->hpa_range = (struct range) { in cxl_port_reset_targets()
1511 .end = -1, in cxl_port_reset_targets()
1517 struct cxl_region_params *p = &cxlr->params; in cxl_region_teardown_targets()
1526 * In the auto-discovery case skip automatic teardown since the in cxl_region_teardown_targets()
1527 * address space is already active in cxl_region_teardown_targets()
1529 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) in cxl_region_teardown_targets()
1532 for (i = 0; i < p->nr_targets; i++) { in cxl_region_teardown_targets()
1533 cxled = p->targets[i]; in cxl_region_teardown_targets()
1535 cxlds = cxlmd->cxlds; in cxl_region_teardown_targets()
1537 if (cxlds->rcd) in cxl_region_teardown_targets()
1541 while (!is_cxl_root(to_cxl_port(iter->dev.parent))) in cxl_region_teardown_targets()
1542 iter = to_cxl_port(iter->dev.parent); in cxl_region_teardown_targets()
1545 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) in cxl_region_teardown_targets()
1552 struct cxl_region_params *p = &cxlr->params; in cxl_region_setup_targets()
1560 for (i = 0; i < p->nr_targets; i++) { in cxl_region_setup_targets()
1561 cxled = p->targets[i]; in cxl_region_setup_targets()
1563 cxlds = cxlmd->cxlds; in cxl_region_setup_targets()
1566 if (!cxlds->rcd) { in cxl_region_setup_targets()
1574 while (!is_cxl_root(to_cxl_port(iter->dev.parent))) in cxl_region_setup_targets()
1575 iter = to_cxl_port(iter->dev.parent); in cxl_region_setup_targets()
1582 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) { in cxl_region_setup_targets()
1592 dev_err(&cxlr->dev, "mismatched CXL topologies detected\n"); in cxl_region_setup_targets()
1594 return -ENXIO; in cxl_region_setup_targets()
1605 struct cxl_region_params *p = &cxlr->params; in cxl_region_validate_position()
1608 if (pos < 0 || pos >= p->interleave_ways) { in cxl_region_validate_position()
1609 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos, in cxl_region_validate_position()
1610 p->interleave_ways); in cxl_region_validate_position()
1611 return -ENXIO; in cxl_region_validate_position()
1614 if (p->targets[pos] == cxled) in cxl_region_validate_position()
1617 if (p->targets[pos]) { in cxl_region_validate_position()
1618 struct cxl_endpoint_decoder *cxled_target = p->targets[pos]; in cxl_region_validate_position()
1621 dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n", in cxl_region_validate_position()
1622 pos, dev_name(&cxlmd_target->dev), in cxl_region_validate_position()
1623 dev_name(&cxled_target->cxld.dev)); in cxl_region_validate_position()
1624 return -EBUSY; in cxl_region_validate_position()
1627 for (i = 0; i < p->interleave_ways; i++) { in cxl_region_validate_position()
1631 cxled_target = p->targets[i]; in cxl_region_validate_position()
1637 dev_dbg(&cxlr->dev, in cxl_region_validate_position()
1639 dev_name(&cxlmd->dev), pos, in cxl_region_validate_position()
1640 dev_name(&cxled_target->cxld.dev)); in cxl_region_validate_position()
1641 return -EBUSY; in cxl_region_validate_position()
1654 struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd; in cxl_region_attach_position()
1655 struct cxl_decoder *cxld = &cxlsd->cxld; in cxl_region_attach_position()
1656 int iw = cxld->interleave_ways; in cxl_region_attach_position()
1660 if (dport != cxlrd->cxlsd.target[pos % iw]) { in cxl_region_attach_position()
1661 dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n", in cxl_region_attach_position()
1662 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in cxl_region_attach_position()
1663 dev_name(&cxlrd->cxlsd.cxld.dev)); in cxl_region_attach_position()
1664 return -ENXIO; in cxl_region_attach_position()
1668 iter = to_cxl_port(iter->dev.parent)) { in cxl_region_attach_position()
1678 iter = to_cxl_port(iter->dev.parent)) in cxl_region_attach_position()
1686 struct cxl_region_params *p = &cxlr->params; in cxl_region_attach_auto()
1688 if (cxled->state != CXL_DECODER_STATE_AUTO) { in cxl_region_attach_auto()
1689 dev_err(&cxlr->dev, in cxl_region_attach_auto()
1691 dev_name(&cxled->cxld.dev)); in cxl_region_attach_auto()
1692 return -EINVAL; in cxl_region_attach_auto()
1696 dev_dbg(&cxlr->dev, "%s: expected auto position, not %d\n", in cxl_region_attach_auto()
1697 dev_name(&cxled->cxld.dev), pos); in cxl_region_attach_auto()
1698 return -EINVAL; in cxl_region_attach_auto()
1701 if (p->nr_targets >= p->interleave_ways) { in cxl_region_attach_auto()
1702 dev_err(&cxlr->dev, "%s: no more target slots available\n", in cxl_region_attach_auto()
1703 dev_name(&cxled->cxld.dev)); in cxl_region_attach_auto()
1704 return -ENXIO; in cxl_region_attach_auto()
1713 pos = p->nr_targets; in cxl_region_attach_auto()
1714 p->targets[pos] = cxled; in cxl_region_attach_auto()
1715 cxled->pos = pos; in cxl_region_attach_auto()
1716 p->nr_targets++; in cxl_region_attach_auto()
1726 return cxled_a->pos - cxled_b->pos; in cmp_interleave_pos()
1731 if (!port->parent_dport) in next_port()
1733 return port->parent_dport->port; in next_port()
1747 r1 = &cxlsd->cxld.hpa_range; in match_switch_decoder_by_range()
1751 return (r1->start == r2->start && r1->end == r2->end); in match_switch_decoder_by_range()
1760 int rc = -ENXIO; in find_pos_and_ways()
1766 dev = device_find_child(&parent->dev, range, in find_pos_and_ways()
1769 dev_err(port->uport_dev, in find_pos_and_ways()
1770 "failed to find decoder mapping %#llx-%#llx\n", in find_pos_and_ways()
1771 range->start, range->end); in find_pos_and_ways()
1775 *ways = cxlsd->cxld.interleave_ways; in find_pos_and_ways()
1778 if (cxlsd->target[i] == port->parent_dport) { in find_pos_and_ways()
1790 * cxl_calc_interleave_pos() - calculate an endpoint position in a region
1802 * -ENXIO on failure
1808 struct range *range = &cxled->cxld.hpa_range; in cxl_calc_interleave_pos()
1813 * Example: the expected interleave order of the 4-way region shown in cxl_calc_interleave_pos()
1823 * uses the mem position in the host-bridge and the ways of the host- in cxl_calc_interleave_pos()
1825 * iteration uses the host-bridge position in the root_port and the ways in cxl_calc_interleave_pos()
1853 dev_dbg(&cxlmd->dev, in cxl_calc_interleave_pos()
1854 "decoder:%s parent:%s port:%s range:%#llx-%#llx pos:%d\n", in cxl_calc_interleave_pos()
1855 dev_name(&cxled->cxld.dev), dev_name(cxlmd->dev.parent), in cxl_calc_interleave_pos()
1856 dev_name(&port->dev), range->start, range->end, pos); in cxl_calc_interleave_pos()
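cxl_calc_interleave_pos() refines the position level by level while walking from the endpoint toward the root, computing pos = pos * parent_ways + parent_pos at each hop. A self-contained sketch of that accumulation for a hypothetical x4 region built from two x2 host bridges under a x2 root decoder (structure and names are stand-ins):

#include <stdio.h>

struct level {
	int pos;	/* index in the parent decoder's target list */
	int ways;	/* parent decoder's interleave ways */
};

/* levels[] ordered endpoint-first, mirroring the upward walk */
static int calc_interleave_pos(const struct level *levels, int nr_levels)
{
	int pos = 0;

	for (int i = 0; i < nr_levels; i++)
		pos = pos * levels[i].ways + levels[i].pos;
	return pos;
}

int main(void)
{
	/* mem2: slot 0 in host_bridge_1 (x2), and host_bridge_1 is
	 * slot 1 in the x2 root decoder -> region position 1 */
	struct level mem2[] = { { 0, 2 }, { 1, 2 } };

	printf("pos=%d\n", calc_interleave_pos(mem2, 2));
	return 0;
}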
1863 struct cxl_region_params *p = &cxlr->params; in cxl_region_sort_targets()
1866 for (i = 0; i < p->nr_targets; i++) { in cxl_region_sort_targets()
1867 struct cxl_endpoint_decoder *cxled = p->targets[i]; in cxl_region_sort_targets()
1869 cxled->pos = cxl_calc_interleave_pos(cxled); in cxl_region_sort_targets()
1872 * cxled->pos so that follow-on code paths can reliably in cxl_region_sort_targets()
1873 * do p->targets[cxled->pos] to self-reference their entry. in cxl_region_sort_targets()
1875 if (cxled->pos < 0) in cxl_region_sort_targets()
1876 rc = -ENXIO; in cxl_region_sort_targets()
1878 /* Keep the cxlr target list in interleave position order */ in cxl_region_sort_targets()
1879 sort(p->targets, p->nr_targets, sizeof(p->targets[0]), in cxl_region_sort_targets()
1882 dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful"); in cxl_region_sort_targets()
1889 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); in cxl_region_attach()
1891 struct cxl_region_params *p = &cxlr->params; in cxl_region_attach()
1894 int rc = -ENXIO; in cxl_region_attach()
1896 rc = check_interleave_cap(&cxled->cxld, p->interleave_ways, in cxl_region_attach()
1897 p->interleave_granularity); in cxl_region_attach()
1899 dev_dbg(&cxlr->dev, "%s iw: %d ig: %d is not supported\n", in cxl_region_attach()
1900 dev_name(&cxled->cxld.dev), p->interleave_ways, in cxl_region_attach()
1901 p->interleave_granularity); in cxl_region_attach()
1905 if (cxled->mode != cxlr->mode) { in cxl_region_attach()
1906 dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n", in cxl_region_attach()
1907 dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode); in cxl_region_attach()
1908 return -EINVAL; in cxl_region_attach()
1911 if (cxled->mode == CXL_DECODER_DEAD) { in cxl_region_attach()
1912 dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev)); in cxl_region_attach()
1913 return -ENODEV; in cxl_region_attach()
1916 /* all full of members, or interleave config not established? */ in cxl_region_attach()
1917 if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) { in cxl_region_attach()
1918 dev_dbg(&cxlr->dev, "region already active\n"); in cxl_region_attach()
1919 return -EBUSY; in cxl_region_attach()
1920 } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) { in cxl_region_attach()
1921 dev_dbg(&cxlr->dev, "interleave config missing\n"); in cxl_region_attach()
1922 return -ENXIO; in cxl_region_attach()
1925 if (p->nr_targets >= p->interleave_ways) { in cxl_region_attach()
1926 dev_dbg(&cxlr->dev, "region already has %d endpoints\n", in cxl_region_attach()
1927 p->nr_targets); in cxl_region_attach()
1928 return -EINVAL; in cxl_region_attach()
1933 dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge); in cxl_region_attach()
1935 dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n", in cxl_region_attach()
1936 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in cxl_region_attach()
1937 dev_name(cxlr->dev.parent)); in cxl_region_attach()
1938 return -ENXIO; in cxl_region_attach()
1941 if (cxled->cxld.target_type != cxlr->type) { in cxl_region_attach()
1942 dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n", in cxl_region_attach()
1943 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in cxl_region_attach()
1944 cxled->cxld.target_type, cxlr->type); in cxl_region_attach()
1945 return -ENXIO; in cxl_region_attach()
1948 if (!cxled->dpa_res) { in cxl_region_attach()
1949 dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n", in cxl_region_attach()
1950 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev)); in cxl_region_attach()
1951 return -ENXIO; in cxl_region_attach()
1954 if (resource_size(cxled->dpa_res) * p->interleave_ways != in cxl_region_attach()
1955 resource_size(p->res)) { in cxl_region_attach()
1956 dev_dbg(&cxlr->dev, in cxl_region_attach()
1957 "%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n", in cxl_region_attach()
1958 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in cxl_region_attach()
1959 (u64)resource_size(cxled->dpa_res), p->interleave_ways, in cxl_region_attach()
1960 (u64)resource_size(p->res)); in cxl_region_attach()
1961 return -EINVAL; in cxl_region_attach()
1966 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) { in cxl_region_attach()
1974 if (p->nr_targets < p->interleave_ways) in cxl_region_attach()
1986 for (i = 0; i < p->nr_targets; i++) { in cxl_region_attach()
1987 cxled = p->targets[i]; in cxl_region_attach()
1990 ep_port->host_bridge); in cxl_region_attach()
2005 p->state = CXL_CONFIG_COMMIT; in cxl_region_attach()
2019 p->targets[pos] = cxled; in cxl_region_attach()
2020 cxled->pos = pos; in cxl_region_attach()
2021 p->nr_targets++; in cxl_region_attach()
2023 if (p->nr_targets == p->interleave_ways) { in cxl_region_attach()
2027 p->state = CXL_CONFIG_ACTIVE; in cxl_region_attach()
2031 cxled->cxld.interleave_ways = p->interleave_ways; in cxl_region_attach()
2032 cxled->cxld.interleave_granularity = p->interleave_granularity; in cxl_region_attach()
2033 cxled->cxld.hpa_range = (struct range) { in cxl_region_attach()
2034 .start = p->res->start, in cxl_region_attach()
2035 .end = p->res->end, in cxl_region_attach()
2038 if (p->nr_targets != p->interleave_ways) in cxl_region_attach()
2042 * Test the auto-discovery position calculator function in cxl_region_attach()
2043 * against this successfully created user-defined region. in cxl_region_attach()
2044 * A fail message here means that this interleave config in cxl_region_attach()
2045 * will fail when presented as CXL_REGION_F_AUTO. in cxl_region_attach()
2047 for (int i = 0; i < p->nr_targets; i++) { in cxl_region_attach()
2048 struct cxl_endpoint_decoder *cxled = p->targets[i]; in cxl_region_attach()
2052 dev_dbg(&cxled->cxld.dev, in cxl_region_attach()
2053 "Test cxl_calc_interleave_pos(): %s test_pos:%d cxled->pos:%d\n", in cxl_region_attach()
2054 (test_pos == cxled->pos) ? "success" : "fail", in cxl_region_attach()
2055 test_pos, cxled->pos); in cxl_region_attach()
2064 struct cxl_region *cxlr = cxled->cxld.region; in cxl_region_detach()
2073 p = &cxlr->params; in cxl_region_detach()
2074 get_device(&cxlr->dev); in cxl_region_detach()
2076 if (p->state > CXL_CONFIG_ACTIVE) { in cxl_region_detach()
2077 cxl_region_decode_reset(cxlr, p->interleave_ways); in cxl_region_detach()
2078 p->state = CXL_CONFIG_ACTIVE; in cxl_region_detach()
2082 iter = to_cxl_port(iter->dev.parent)) in cxl_region_detach()
2085 if (cxled->pos < 0 || cxled->pos >= p->interleave_ways || in cxl_region_detach()
2086 p->targets[cxled->pos] != cxled) { in cxl_region_detach()
2089 dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n", in cxl_region_detach()
2090 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in cxl_region_detach()
2091 cxled->pos); in cxl_region_detach()
2095 if (p->state == CXL_CONFIG_ACTIVE) { in cxl_region_detach()
2096 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE; in cxl_region_detach()
2099 p->targets[cxled->pos] = NULL; in cxl_region_detach()
2100 p->nr_targets--; in cxl_region_detach()
2101 cxled->cxld.hpa_range = (struct range) { in cxl_region_detach()
2103 .end = -1, in cxl_region_detach()
2108 device_release_driver(&cxlr->dev); in cxl_region_detach()
2111 put_device(&cxlr->dev); in cxl_region_detach()
2118 cxled->mode = CXL_DECODER_DEAD; in cxl_decoder_kill_region()
2145 struct cxl_region_params *p = &cxlr->params; in detach_target()
2152 if (pos >= p->interleave_ways) { in detach_target()
2153 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos, in detach_target()
2154 p->interleave_ways); in detach_target()
2155 rc = -ENXIO; in detach_target()
2159 if (!p->targets[pos]) { in detach_target()
2164 rc = cxl_region_detach(p->targets[pos]); in detach_target()
2182 return -ENODEV; in store_targetN()
2185 rc = -EINVAL; in store_targetN()
2256 struct cxl_region_params *p = &cxlr->params; in cxl_region_target_visible()
2258 if (n < p->interleave_ways) in cxl_region_target_visible()
2259 return a->mode; in cxl_region_target_visible()
2284 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent); in cxl_region_release()
2286 int id = atomic_read(&cxlrd->region_id); in cxl_region_release()
2293 if (cxlr->id < id) in cxl_region_release()
2294 if (atomic_try_cmpxchg(&cxlrd->region_id, &id, cxlr->id)) { in cxl_region_release()
2299 memregion_free(cxlr->id); in cxl_region_release()
2301 put_device(dev->parent); in cxl_region_release()
2313 return dev->type == &cxl_region_type; in is_cxl_region()
2319 if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type, in to_cxl_region()
2329 struct cxl_region_params *p = &cxlr->params; in unregister_region()
2332 device_del(&cxlr->dev); in unregister_region()
2336 * read-only, so no need to hold the region rwsem to access the in unregister_region()
2339 for (i = 0; i < p->interleave_ways; i++) in unregister_region()
2343 put_device(&cxlr->dev); in unregister_region()
2356 return ERR_PTR(-ENOMEM); in cxl_region_alloc()
2359 dev = &cxlr->dev; in cxl_region_alloc()
2361 lockdep_set_class(&dev->mutex, &cxl_region_key); in cxl_region_alloc()
2362 dev->parent = &cxlrd->cxlsd.cxld.dev; in cxl_region_alloc()
2367 get_device(dev->parent); in cxl_region_alloc()
2369 dev->bus = &cxl_bus_type; in cxl_region_alloc()
2370 dev->type = &cxl_region_type; in cxl_region_alloc()
2371 cxlr->id = id; in cxl_region_alloc()
2382 if (cxlr->coord[i].read_bandwidth) { in cxl_region_update_coordinates()
2385 node_set_perf_attrs(nid, &cxlr->coord[i], i); in cxl_region_update_coordinates()
2397 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_access0_group()); in cxl_region_update_coordinates()
2399 dev_dbg(&cxlr->dev, "Failed to update access0 group\n"); in cxl_region_update_coordinates()
2401 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_access1_group()); in cxl_region_update_coordinates()
2403 dev_dbg(&cxlr->dev, "Failed to update access1 group\n"); in cxl_region_update_coordinates()
2414 int nid = mnb->status_change_nid; in cxl_region_perf_attrs_callback()
2424 region_nid = phys_to_target_node(cxlr->params.res->start); in cxl_region_perf_attrs_callback()
2447 region_nid = phys_to_target_node(cxlr->params.res->start); in cxl_region_calculate_adistance()
2451 perf = &cxlr->coord[ACCESS_COORDINATE_CPU]; in cxl_region_calculate_adistance()
2460 * devm_cxl_add_region - Adds a region to a decoder
2463 * @mode: mode for the endpoint decoders of this region
2464 * @type: select whether this is an expander or accelerator (type-2 or type-3)
2474 enum cxl_decoder_mode mode, in devm_cxl_add_region() argument
2477 struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent); in devm_cxl_add_region()
2485 cxlr->mode = mode; in devm_cxl_add_region()
2486 cxlr->type = type; in devm_cxl_add_region()
2488 dev = &cxlr->dev; in devm_cxl_add_region()
2497 rc = devm_add_action_or_reset(port->uport_dev, unregister_region, cxlr); in devm_cxl_add_region()
2501 dev_dbg(port->uport_dev, "%s: created %s\n", in devm_cxl_add_region()
2502 dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev)); in devm_cxl_add_region()
2512 return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id)); in __create_region_show()
2528 enum cxl_decoder_mode mode, int id) in __create_region() argument
2532 switch (mode) { in __create_region()
2537 dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode); in __create_region()
2538 return ERR_PTR(-EINVAL); in __create_region()
2545 if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) { in __create_region()
2547 return ERR_PTR(-EBUSY); in __create_region()
2550 return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM); in __create_region()
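__create_region() pairs with __create_region_show() above: the root decoder advertises the next region id, userspace echoes it back, and the compare-and-swap lets exactly one writer claim that id while installing a freshly allocated one for the next caller. A userspace sketch of the claim protocol, with C11 atomics standing in for atomic_t and a bump counter standing in for memregion_alloc()/memregion_free():

#include <stdatomic.h>
#include <stdio.h>

static atomic_int region_id;		/* advertised next id */
static int next_free_id = 1;		/* stand-in for the memregion allocator */

static int create_region(int id_from_user)
{
	int expected = id_from_user;
	int fresh = next_free_id++;	/* stand-in for memregion_alloc() */

	/* a stale id loses the race; the driver maps this to -EBUSY */
	if (!atomic_compare_exchange_strong(&region_id, &expected, fresh)) {
		next_free_id--;		/* stand-in for memregion_free() */
		return -1;
	}
	return id_from_user;		/* caller now owns region<id> */
}

int main(void)
{
	printf("%d\n", create_region(0));	/* 0: claims region0 */
	printf("%d\n", create_region(0));	/* -1: advertised id moved on */
	return 0;
}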
2554 size_t len, enum cxl_decoder_mode mode) in create_region_store() argument
2562 return -EINVAL; in create_region_store()
2564 cxlr = __create_region(cxlrd, mode, id); in create_region_store()
2597 if (cxld->region) in region_show()
2598 rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev)); in region_show()
2610 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; in cxl_find_region_by_name()
2613 region_dev = device_find_child_by_name(&cxld->dev, name); in cxl_find_region_by_name()
2615 return ERR_PTR(-ENODEV); in cxl_find_region_by_name()
2625 struct cxl_port *port = to_cxl_port(dev->parent); in delete_region_store()
2632 devm_release_action(port->uport_dev, unregister_region, cxlr); in delete_region_store()
2633 put_device(&cxlr->dev); in delete_region_store()
2644 for (i = 0; i < cxlr_pmem->nr_mappings; i++) { in cxl_pmem_region_release()
2645 struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd; in cxl_pmem_region_release()
2647 put_device(&cxlmd->dev); in cxl_pmem_region_release()
2666 return dev->type == &cxl_pmem_region_type; in is_cxl_pmem_region()
2681 enum cxl_decoder_mode mode; member
2688 struct cxl_dev_state *cxlds = cxlmd->cxlds; in cxl_get_poison_unmapped()
2697 * for unmapped resources based on the last decoder's mode: in cxl_get_poison_unmapped()
2698 * ram: scan remains of ram range, then any pmem range in cxl_get_poison_unmapped()
2699 * pmem: scan remains of pmem range in cxl_get_poison_unmapped()
2702 if (ctx->mode == CXL_DECODER_RAM) { in cxl_get_poison_unmapped()
2703 offset = ctx->offset; in cxl_get_poison_unmapped()
2704 length = resource_size(&cxlds->ram_res) - offset; in cxl_get_poison_unmapped()
2706 if (rc == -EFAULT) in cxl_get_poison_unmapped()
2711 if (ctx->mode == CXL_DECODER_PMEM) { in cxl_get_poison_unmapped()
2712 offset = ctx->offset; in cxl_get_poison_unmapped()
2713 length = resource_size(&cxlds->dpa_res) - offset; in cxl_get_poison_unmapped()
2716 } else if (resource_size(&cxlds->pmem_res)) { in cxl_get_poison_unmapped()
2717 offset = cxlds->pmem_res.start; in cxl_get_poison_unmapped()
2718 length = resource_size(&cxlds->pmem_res); in cxl_get_poison_unmapped()
2738 if (!cxled->dpa_res || !resource_size(cxled->dpa_res)) in poison_by_decoder()
2742 * Regions are only created with single mode decoders: pmem or ram. in poison_by_decoder()
2743 * Linux does not support mixed mode decoders. This means that in poison_by_decoder()
2744 * reading poison per endpoint decoder adheres to the requirement in poison_by_decoder()
2745 * that poison reads of pmem and ram must be separated. in poison_by_decoder()
2746 * CXL 3.0 Spec 8.2.9.8.4.1 in poison_by_decoder()
2748 if (cxled->mode == CXL_DECODER_MIXED) { in poison_by_decoder()
2749 dev_dbg(dev, "poison list read unsupported in mixed mode\n"); in poison_by_decoder()
2754 if (cxled->skip) { in poison_by_decoder()
2755 offset = cxled->dpa_res->start - cxled->skip; in poison_by_decoder()
2756 length = cxled->skip; in poison_by_decoder()
2758 if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM) in poison_by_decoder()
2764 offset = cxled->dpa_res->start; in poison_by_decoder()
2765 length = cxled->dpa_res->end - offset + 1; in poison_by_decoder()
2766 rc = cxl_mem_get_poison(cxlmd, offset, length, cxled->cxld.region); in poison_by_decoder()
2767 if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM) in poison_by_decoder()
2773 if (cxled->cxld.id == ctx->port->commit_end) { in poison_by_decoder()
2774 ctx->offset = cxled->dpa_res->end + 1; in poison_by_decoder()
2775 ctx->mode = cxled->mode; in poison_by_decoder()
2791 rc = device_for_each_child(&port->dev, &ctx, poison_by_decoder); in cxl_get_poison_by_endpoint()
2793 rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport_dev), in cxl_get_poison_by_endpoint()
2809 u64 dpa = ctx->dpa; in __cxl_dpa_to_region()
2815 if (!cxled || !cxled->dpa_res || !resource_size(cxled->dpa_res)) in __cxl_dpa_to_region()
2818 if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start) in __cxl_dpa_to_region()
2826 cxlr = cxled->cxld.region; in __cxl_dpa_to_region()
2829 dev_name(&cxlr->dev)); in __cxl_dpa_to_region()
2834 ctx->cxlr = cxlr; in __cxl_dpa_to_region()
2847 port = cxlmd->endpoint; in cxl_dpa_to_region()
2849 device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region); in cxl_dpa_to_region()
2856 struct cxl_region_params *p = &cxlr->params; in cxl_is_hpa_in_chunk()
2857 int gran = p->interleave_granularity; in cxl_is_hpa_in_chunk()
2858 int ways = p->interleave_ways; in cxl_is_hpa_in_chunk()
2861 /* Is the hpa in an expected chunk for its pos(-ition) */ in cxl_is_hpa_in_chunk()
2862 offset = hpa - p->res->start; in cxl_is_hpa_in_chunk()
2867 dev_dbg(&cxlr->dev, in cxl_is_hpa_in_chunk()
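cxl_is_hpa_in_chunk() sanity-checks a translated HPA: within each (granularity * ways) interleave cycle, position pos owns bytes [pos * gran, (pos + 1) * gran). A sketch of that containment test (name illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool hpa_in_chunk(uint64_t hpa, uint64_t region_start,
			 unsigned int gran, unsigned int ways, int pos)
{
	uint64_t offset = (hpa - region_start) % ((uint64_t)gran * ways);

	return offset >= (uint64_t)pos * gran &&
	       offset < ((uint64_t)pos + 1) * gran;
}

int main(void)
{
	/* x2 at 256B: bytes 256..511 of every 512B cycle belong to pos 1 */
	printf("%d\n", hpa_in_chunk(0x100, 0, 256, 2, 1));	/* 1 */
	printf("%d\n", hpa_in_chunk(0x200, 0, 256, 2, 1));	/* 0 */
	return 0;
}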
2876 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); in cxl_dpa_to_hpa()
2878 struct cxl_region_params *p = &cxlr->params; in cxl_dpa_to_hpa()
2884 for (int i = 0; i < p->nr_targets; i++) { in cxl_dpa_to_hpa()
2885 cxled = p->targets[i]; in cxl_dpa_to_hpa()
2892 pos = cxled->pos; in cxl_dpa_to_hpa()
2893 ways_to_eiw(p->interleave_ways, &eiw); in cxl_dpa_to_hpa()
2894 granularity_to_eig(p->interleave_granularity, &eig); in cxl_dpa_to_hpa()
2897 * The device position in the region interleave set was removed in cxl_dpa_to_hpa()
2898 * from the offset at HPA->DPA translation. To reconstruct the in cxl_dpa_to_hpa()
2901 * The placement of 'pos' in the HPA is determined by interleave in cxl_dpa_to_hpa()
2907 dpa_offset = dpa - cxl_dpa_resource_start(cxled); in cxl_dpa_to_hpa()
2917 hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8); in cxl_dpa_to_hpa()
2924 hpa = hpa_offset + p->res->start; in cxl_dpa_to_hpa()
2927 if (cxlrd->hpa_to_spa) in cxl_dpa_to_hpa()
2928 hpa = cxlrd->hpa_to_spa(cxlrd, hpa); in cxl_dpa_to_hpa()
2930 if (hpa < p->res->start || hpa > p->res->end) { in cxl_dpa_to_hpa()
2931 dev_dbg(&cxlr->dev, in cxl_dpa_to_hpa()
2937 if (!cxlrd->hpa_to_spa && (!cxl_is_hpa_in_chunk(hpa, cxlr, pos))) in cxl_dpa_to_hpa()
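cxl_dpa_to_hpa() rebuilds the HPA by re-inserting the device position that the HPA->DPA translation stripped out. For the power-of-2 case (eiw < 8) the bits above the granule boundary shift up by eiw, pos lands at bit eig + 8, and the low granule-offset bits pass through unchanged; the x3/x6/x12 branch shown above additionally multiplies the upper bits by 3. A worked sketch of the power-of-2 path:

#include <stdint.h>
#include <stdio.h>

static uint64_t dpa_to_hpa_offset_pow2(uint64_t dpa_offset, int pos,
				       int eig, int eiw)
{
	uint64_t granule_mask = (1ULL << (eig + 8)) - 1;
	uint64_t hpa_offset;

	hpa_offset = (dpa_offset & ~granule_mask) << eiw; /* upper bits */
	hpa_offset |= (uint64_t)pos << (eig + 8);	  /* device position */
	hpa_offset |= dpa_offset & granule_mask;	  /* low bits unchanged */
	return hpa_offset;
}

int main(void)
{
	/* x2 (eiw=1) at 256B (eig=0): DPA offset 0x110 on position 1
	 * maps to HPA offset 0x310: cycle 1 * 512 + pos 1 * 256 + 0x10 */
	printf("%#llx\n",
	       (unsigned long long)dpa_to_hpa_offset_pow2(0x110, 1, 0, 1));
	return 0;
}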
2947 struct cxl_region_params *p = &cxlr->params; in cxl_pmem_region_alloc()
2953 if (p->state != CXL_CONFIG_COMMIT) in cxl_pmem_region_alloc()
2954 return -ENXIO; in cxl_pmem_region_alloc()
2957 kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), GFP_KERNEL); in cxl_pmem_region_alloc()
2959 return -ENOMEM; in cxl_pmem_region_alloc()
2961 cxlr_pmem->hpa_range.start = p->res->start; in cxl_pmem_region_alloc()
2962 cxlr_pmem->hpa_range.end = p->res->end; in cxl_pmem_region_alloc()
2965 cxlr_pmem->nr_mappings = p->nr_targets; in cxl_pmem_region_alloc()
2966 for (i = 0; i < p->nr_targets; i++) { in cxl_pmem_region_alloc()
2967 struct cxl_endpoint_decoder *cxled = p->targets[i]; in cxl_pmem_region_alloc()
2969 struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i]; in cxl_pmem_region_alloc()
2976 cxl_nvb = cxl_find_nvdimm_bridge(cxlmd->endpoint); in cxl_pmem_region_alloc()
2978 return -ENODEV; in cxl_pmem_region_alloc()
2979 cxlr->cxl_nvb = cxl_nvb; in cxl_pmem_region_alloc()
2981 m->cxlmd = cxlmd; in cxl_pmem_region_alloc()
2982 get_device(&cxlmd->dev); in cxl_pmem_region_alloc()
2983 m->start = cxled->dpa_res->start; in cxl_pmem_region_alloc()
2984 m->size = resource_size(cxled->dpa_res); in cxl_pmem_region_alloc()
2985 m->position = i; in cxl_pmem_region_alloc()
2988 dev = &cxlr_pmem->dev; in cxl_pmem_region_alloc()
2990 lockdep_set_class(&dev->mutex, &cxl_pmem_region_key); in cxl_pmem_region_alloc()
2992 dev->parent = &cxlr->dev; in cxl_pmem_region_alloc()
2993 dev->bus = &cxl_bus_type; in cxl_pmem_region_alloc()
2994 dev->type = &cxl_pmem_region_type; in cxl_pmem_region_alloc()
2995 cxlr_pmem->cxlr = cxlr; in cxl_pmem_region_alloc()
2996 cxlr->cxlr_pmem = no_free_ptr(cxlr_pmem); in cxl_pmem_region_alloc()
3021 return dev->type == &cxl_dax_region_type; in is_cxl_dax_region()
3037 struct cxl_region_params *p = &cxlr->params; in cxl_dax_region_alloc()
3042 if (p->state != CXL_CONFIG_COMMIT) { in cxl_dax_region_alloc()
3043 cxlr_dax = ERR_PTR(-ENXIO); in cxl_dax_region_alloc()
3049 cxlr_dax = ERR_PTR(-ENOMEM); in cxl_dax_region_alloc()
3053 cxlr_dax->hpa_range.start = p->res->start; in cxl_dax_region_alloc()
3054 cxlr_dax->hpa_range.end = p->res->end; in cxl_dax_region_alloc()
3056 dev = &cxlr_dax->dev; in cxl_dax_region_alloc()
3057 cxlr_dax->cxlr = cxlr; in cxl_dax_region_alloc()
3059 lockdep_set_class(&dev->mutex, &cxl_dax_region_key); in cxl_dax_region_alloc()
3061 dev->parent = &cxlr->dev; in cxl_dax_region_alloc()
3062 dev->bus = &cxl_bus_type; in cxl_dax_region_alloc()
3063 dev->type = &cxl_dax_region_type; in cxl_dax_region_alloc()
3073 struct cxl_region *cxlr = cxlr_pmem->cxlr; in cxlr_pmem_unregister()
3074 struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb; in cxlr_pmem_unregister()
3077 * Either the bridge is in ->remove() context under the device_lock(), in cxlr_pmem_unregister()
3082 device_lock_assert(&cxl_nvb->dev); in cxlr_pmem_unregister()
3083 cxlr->cxlr_pmem = NULL; in cxlr_pmem_unregister()
3084 cxlr_pmem->cxlr = NULL; in cxlr_pmem_unregister()
3085 device_unregister(&cxlr_pmem->dev); in cxlr_pmem_unregister()
3091 struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb; in cxlr_release_nvdimm()
3093 scoped_guard(device, &cxl_nvb->dev) { in cxlr_release_nvdimm()
3094 if (cxlr->cxlr_pmem) in cxlr_release_nvdimm()
3095 devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister, in cxlr_release_nvdimm()
3096 cxlr->cxlr_pmem); in cxlr_release_nvdimm()
3098 cxlr->cxl_nvb = NULL; in cxlr_release_nvdimm()
3099 put_device(&cxl_nvb->dev); in cxlr_release_nvdimm()
3103 * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
3118 cxlr_pmem = cxlr->cxlr_pmem; in devm_cxl_add_pmem_region()
3119 cxl_nvb = cxlr->cxl_nvb; in devm_cxl_add_pmem_region()
3121 dev = &cxlr_pmem->dev; in devm_cxl_add_pmem_region()
3122 rc = dev_set_name(dev, "pmem_region%d", cxlr->id); in devm_cxl_add_pmem_region()
3130 dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent), in devm_cxl_add_pmem_region()
3133 scoped_guard(device, &cxl_nvb->dev) { in devm_cxl_add_pmem_region()
3134 if (cxl_nvb->dev.driver) in devm_cxl_add_pmem_region()
3135 rc = devm_add_action_or_reset(&cxl_nvb->dev, in devm_cxl_add_pmem_region()
3139 rc = -ENXIO; in devm_cxl_add_pmem_region()
3146 return devm_add_action_or_reset(&cxlr->dev, cxlr_release_nvdimm, cxlr); in devm_cxl_add_pmem_region()
3151 put_device(&cxl_nvb->dev); in devm_cxl_add_pmem_region()
3152 cxlr->cxl_nvb = NULL; in devm_cxl_add_pmem_region()
3160 device_unregister(&cxlr_dax->dev); in cxlr_dax_unregister()
3173 dev = &cxlr_dax->dev; in devm_cxl_add_dax_region()
3174 rc = dev_set_name(dev, "dax_region%d", cxlr->id); in devm_cxl_add_dax_region()
3182 dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent), in devm_cxl_add_dax_region()
3185 return devm_add_action_or_reset(&cxlr->dev, cxlr_dax_unregister, in devm_cxl_add_dax_region()
3202 r1 = &cxlrd->cxlsd.cxld.hpa_range; in match_root_decoder_by_range()
3217 p = &cxlr->params; in match_region_by_range()
3220 if (p->res && p->res->start == r->start && p->res->end == r->end) in match_region_by_range()
3233 struct range *hpa = &cxled->cxld.hpa_range; in construct_region()
3240 cxlr = __create_region(cxlrd, cxled->mode, in construct_region()
3241 atomic_read(&cxlrd->region_id)); in construct_region()
3242 } while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY); in construct_region()
3245 dev_err(cxlmd->dev.parent, in construct_region()
3247 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in construct_region()
3253 p = &cxlr->params; in construct_region()
3254 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) { in construct_region()
3255 dev_err(cxlmd->dev.parent, in construct_region()
3257 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in construct_region()
3259 rc = -EBUSY; in construct_region()
3263 set_bit(CXL_REGION_F_AUTO, &cxlr->flags); in construct_region()
3267 rc = -ENOMEM; in construct_region()
3271 *res = DEFINE_RES_MEM_NAMED(hpa->start, range_len(hpa), in construct_region()
3272 dev_name(&cxlr->dev)); in construct_region()
3273 rc = insert_resource(cxlrd->res, res); in construct_region()
3276 * Platform-firmware may not have split resources like "System in construct_region()
3277 * RAM" on CXL window boundaries see cxl_region_iomem_release() in construct_region()
3279 dev_warn(cxlmd->dev.parent, in construct_region()
3281 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in construct_region()
3282 __func__, dev_name(&cxlr->dev)); in construct_region()
3285 p->res = res; in construct_region()
3286 p->interleave_ways = cxled->cxld.interleave_ways; in construct_region()
3287 p->interleave_granularity = cxled->cxld.interleave_granularity; in construct_region()
3288 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE; in construct_region()
3290 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group()); in construct_region()
3294 dev_dbg(cxlmd->dev.parent, "%s:%s: %s %s res: %pr iw: %d ig: %d\n", in construct_region()
3295 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), __func__, in construct_region()
3296 dev_name(&cxlr->dev), p->res, p->interleave_ways, in construct_region()
3297 p->interleave_granularity); in construct_region()
3300 get_device(&cxlr->dev); in construct_region()
3307 devm_release_action(port->uport_dev, unregister_region, cxlr); in construct_region()
3314 struct range *hpa = &cxled->cxld.hpa_range; in cxl_add_to_region()
3315 struct cxl_decoder *cxld = &cxled->cxld; in cxl_add_to_region()
3323 cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range, in cxl_add_to_region()
3326 dev_err(cxlmd->dev.parent, in cxl_add_to_region()
3328 dev_name(&cxlmd->dev), dev_name(&cxld->dev), in cxl_add_to_region()
3329 cxld->hpa_range.start, cxld->hpa_range.end); in cxl_add_to_region()
3330 return -ENXIO; in cxl_add_to_region()
3339 mutex_lock(&cxlrd->range_lock); in cxl_add_to_region()
3340 region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa, in cxl_add_to_region()
3344 region_dev = &cxlr->dev; in cxl_add_to_region()
3347 mutex_unlock(&cxlrd->range_lock); in cxl_add_to_region()
3353 attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE); in cxl_add_to_region()
3356 p = &cxlr->params; in cxl_add_to_region()
3357 attach = p->state == CXL_CONFIG_COMMIT; in cxl_add_to_region()
3362 * If device_attach() fails the range may still be active via in cxl_add_to_region()
3363 * the platform-firmware memory map, otherwise the driver for in cxl_add_to_region()
3364 * regions is local to this file, so driver matching can't fail. in cxl_add_to_region()
3366 if (device_attach(&cxlr->dev) < 0) in cxl_add_to_region()
3367 dev_err(&cxlr->dev, "failed to enable, range: %pr\n", in cxl_add_to_region()
3368 p->res); in cxl_add_to_region()
3381 struct cxl_region_params *p = &cxlr->params; in is_system_ram()
3383 dev_dbg(&cxlr->dev, "%pr has System RAM: %pr\n", p->res, res); in is_system_ram()
3391 unregister_memory_notifier(&cxlr->memory_notifier); in shutdown_notifiers()
3392 unregister_mt_adistance_algorithm(&cxlr->adist_notifier); in shutdown_notifiers()
3398 struct cxl_region_params *p = &cxlr->params; in cxl_region_probe()
3403 dev_dbg(&cxlr->dev, "probe interrupted\n"); in cxl_region_probe()
3407 if (p->state < CXL_CONFIG_COMMIT) { in cxl_region_probe()
3408 dev_dbg(&cxlr->dev, "config state: %d\n", p->state); in cxl_region_probe()
3409 rc = -ENXIO; in cxl_region_probe()
3413 if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) { in cxl_region_probe()
3414 dev_err(&cxlr->dev, in cxl_region_probe()
3415 "failed to activate, re-commit region and retry\n"); in cxl_region_probe()
3416 rc = -ENXIO; in cxl_region_probe()
3430 cxlr->memory_notifier.notifier_call = cxl_region_perf_attrs_callback; in cxl_region_probe()
3431 cxlr->memory_notifier.priority = CXL_CALLBACK_PRI; in cxl_region_probe()
3432 register_memory_notifier(&cxlr->memory_notifier); in cxl_region_probe()
3434 cxlr->adist_notifier.notifier_call = cxl_region_calculate_adistance; in cxl_region_probe()
3435 cxlr->adist_notifier.priority = 100; in cxl_region_probe()
3436 register_mt_adistance_algorithm(&cxlr->adist_notifier); in cxl_region_probe()
3438 rc = devm_add_action_or_reset(&cxlr->dev, shutdown_notifiers, cxlr); in cxl_region_probe()
3442 switch (cxlr->mode) { in cxl_region_probe()
3452 p->res->start, p->res->end, cxlr, in cxl_region_probe()
3457 dev_dbg(&cxlr->dev, "unsupported region mode: %d\n", in cxl_region_probe()
3458 cxlr->mode); in cxl_region_probe()
3459 return -ENXIO; in cxl_region_probe()