// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021-2023 Intel Corporation
 */

#include <linux/io-64-nonatomic-lo-hi.h>

static void tiles_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	int id;

	for_each_remote_tile(tile, xe, id)
		tile->mmio.regs = NULL;
}

/*
 * On multi-tile devices, partition the BAR space for MMIO on each tile,
 * possibly accounting for register override on the number of tiles available.
 * The resulting memory layout is as below:
 *
 *	.----------------------. <- tile_count * tile_mmio_size
 *	|         ....         |
 *	|----------------------| <- 2 * tile_mmio_size
 *	|   tile1 GGTT + ...   |
 *	|----------------------| <- 1 * tile_mmio_size + 4MB
 *	|   tile1->mmio.regs   |
 *	|----------------------| <- 1 * tile_mmio_size
 *	|   tile0 GGTT + ...   |
 *	|----------------------| <- 4MB
 *	|   tile0->mmio.regs   |
 *	'----------------------' <- 0MB
 *
 * Set up the tile[]->mmio pointers/sizes.
 */
static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
{
	struct xe_tile *tile;
	void __iomem *regs;
	u8 id;

	/*
	 * Nothing to be done as tile 0 has already been setup earlier with the
	 * entire BAR mapped - see xe_mmio_init()
	 */
	if (xe->info.tile_count == 1)
		return;

	/* Possibly override the tile count based on the MTCFG register */
	if (!xe->info.skip_mtcfg) {
		struct xe_mmio *mmio = xe_root_tile_mmio(xe);
		u8 tile_count;
		u32 mtcfg;

		/*
		 * Although the per-tile mmio regs are not yet initialized, this
		 * read is fine as it goes through the root tile's mmio, which is
		 * guaranteed to have been initialized earlier in xe_mmio_init().
		 */
		mtcfg = xe_mmio_read64_2x32(mmio, XEHP_MTCFG_ADDR);
		tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;

		if (tile_count < xe->info.tile_count) {
			drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
				 xe->info.tile_count, tile_count);
			xe->info.tile_count = tile_count;

			/*
			 * FIXME: Needs some work for standalone media, but
			 * should be impossible with multi-tile for now:
			 * multi-tile platform with standalone media doesn't
			 * exist
			 */
			xe->info.gt_count = xe->info.tile_count;
		}
	}

	regs = xe->mmio.regs;
	for_each_tile(tile, xe, id) {
		tile->mmio.regs_size = SZ_4M;
		tile->mmio.regs = regs;
		tile->mmio.tile = tile;
		regs += tile_mmio_size;
	}
}
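
/*
 * Editor's sketch (not part of the driver, helper name is hypothetical):
 * with the partitioning above, a register at offset @addr inside tile
 * @tile_id's 4MB register window lives at the following offset from the
 * start of the GTTMMADR BAR mapping.
 */
static inline unsigned long example_tile_reg_bar_offset(u8 tile_id, u32 addr,
							size_t tile_mmio_size)
{
	/* skip @tile_id whole tile windows, then index into the regs area */
	return (unsigned long)tile_id * tile_mmio_size + addr;
}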

/*
 * On top of all the multi-tile MMIO space there can be a platform-dependent
 * extension for each tile, resulting in a layout like below:
 *
 *	.----------------------. <- ext_base + tile_count * tile_mmio_ext_size
 *	|         ....         |
 *	|----------------------| <- ext_base + 2 * tile_mmio_ext_size
 *	| tile1->mmio_ext.regs |
 *	|----------------------| <- ext_base + 1 * tile_mmio_ext_size
 *	| tile0->mmio_ext.regs |
 *	|======================| <- ext_base = tile_count * tile_mmio_size
 *	|                      |
 *	|      main MMIO       |
 *	|                      |
 *	'----------------------' <- 0MB
 *
 * Set up the tile[]->mmio_ext pointers/sizes.
 */
static void mmio_extension_setup(struct xe_device *xe, size_t tile_mmio_size,
				 size_t tile_mmio_ext_size)
{
	struct xe_tile *tile;
	void __iomem *regs;
	u8 id;

	if (!xe->info.has_mmio_ext)
		return;

	regs = xe->mmio.regs + tile_mmio_size * xe->info.tile_count;
	for_each_tile(tile, xe, id) {
		tile->mmio_ext.regs_size = tile_mmio_ext_size;
		tile->mmio_ext.regs = regs;
		tile->mmio_ext.tile = tile;
		regs += tile_mmio_ext_size;
	}
}
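
/*
 * Editor's sketch (hypothetical helper, illustration only): mirroring the
 * diagram above, tile @tile_id's extension registers start at ext_base plus
 * a whole number of extension windows.
 */
static inline unsigned long example_tile_ext_bar_offset(u8 tile_id, u8 tile_count,
							size_t tile_mmio_size,
							size_t tile_mmio_ext_size)
{
	unsigned long ext_base = (unsigned long)tile_count * tile_mmio_size;

	return ext_base + (unsigned long)tile_id * tile_mmio_ext_size;
}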

int xe_mmio_probe_tiles(struct xe_device *xe)
{
	size_t tile_mmio_size = SZ_16M;
	size_t tile_mmio_ext_size = xe->info.tile_mmio_ext_size;

	mmio_multi_tile_setup(xe, tile_mmio_size);
	mmio_extension_setup(xe, tile_mmio_size, tile_mmio_ext_size);

	return devm_add_action_or_reset(xe->drm.dev, tiles_fini, xe);
}
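
/*
 * Editor's sketch (not driver code): the probe-time ordering implied by the
 * comments above. xe_mmio_init() runs first and maps the whole BAR for the
 * root tile; xe_mmio_probe_tiles() later re-partitions that mapping once the
 * tile count is known. Cleanup is automatic: devm_add_action_or_reset()
 * registers tiles_fini()/mmio_fini() to run on driver unbind, and invokes
 * the action immediately if registration itself fails.
 */
static int example_mmio_probe_flow(struct xe_device *xe)
{
	int err;

	err = xe_mmio_init(xe);		/* map BAR, set up root tile */
	if (err)
		return err;

	/* ... discover xe->info.tile_count ... */

	return xe_mmio_probe_tiles(xe);	/* partition BAR across tiles */
}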

static void mmio_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *root_tile = xe_device_get_root_tile(xe);

	pci_iounmap(to_pci_dev(xe->drm.dev), xe->mmio.regs);
	xe->mmio.regs = NULL;
	root_tile->mmio.regs = NULL;
}

int xe_mmio_init(struct xe_device *xe)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(xe);
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);

	/*
	 * Map the entire BAR, which on a per-tile basis includes the
	 * registers (0-4MB), reserved space (4MB-8MB) and GGTT (8MB-16MB).
	 */
	xe->mmio.size = pci_resource_len(pdev, GTTMMADR_BAR);
	xe->mmio.regs = pci_iomap(pdev, GTTMMADR_BAR, 0);
	if (xe->mmio.regs == NULL) {
		drm_err(&xe->drm, "failed to map registers\n");
		return -EIO;
	}

	/* Set up the root tile now; remote tiles, if any, are set up later */
	root_tile->mmio.regs_size = SZ_4M;
	root_tile->mmio.regs = xe->mmio.regs;
	root_tile->mmio.tile = root_tile;

	return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
}
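
/*
 * Editor's sketch (hypothetical helper): given the per-tile layout named in
 * the comment above (regs 0-4MB, reserved 4-8MB, GGTT 8-16MB) and the 16MB
 * tile stride used by xe_mmio_probe_tiles(), tile @tile_id's GGTT starts at
 * this BAR offset.
 */
static inline unsigned long example_tile_ggtt_bar_offset(u8 tile_id)
{
	return (unsigned long)tile_id * SZ_16M + SZ_8M;
}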

static void mmio_flush_pending_writes(struct xe_mmio *mmio)
{
	int i;

	if (mmio->tile->xe->info.platform != XE_LUNARLAKE)
		return;

	/* 4 dummy writes to flush any pending posted writes */
	for (i = 0; i < 4; i++)
		writel(0, mmio->regs + DUMMY_REG_OFFSET);
}
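
/*
 * Editor's sketch (illustration only, not driver code): why the read paths
 * below flush first on Lunar Lake. A read may otherwise observe stale
 * contents while earlier posted writes are still in flight; the dummy writes
 * above appear intended to drain them before the read is issued.
 */
static u32 example_fresh_read(struct xe_mmio *mmio, struct xe_reg reg)
{
	mmio_flush_pending_writes(mmio);	/* drain posted writes first */
	return readl(mmio->regs + reg.addr);	/* then read fresh data */
}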

u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
	u8 val;

	mmio_flush_pending_writes(mmio);
	val = readb(mmio->regs + addr);

	return val;
}

u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
	u16 val;

	mmio_flush_pending_writes(mmio);
	val = readw(mmio->regs + addr);

	return val;
}

void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);

	if (!reg.vf && mmio->sriov_vf_gt)
		xe_gt_sriov_vf_write32(mmio->sriov_vf_gt, reg, val);
	else
		writel(val, mmio->regs + addr);
}

u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
	u32 val;

	mmio_flush_pending_writes(mmio);

	if (!reg.vf && mmio->sriov_vf_gt)
		val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt, reg);
	else
		val = readl(mmio->regs + addr);

	return val;
}
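
/*
 * Editor's sketch (illustration only): a read-modify-write built from the
 * two accessors above. The driver exposes a helper of this shape
 * (xe_mmio_rmw32()); this standalone version just shows the pattern and,
 * like the accessors, transparently covers the SR-IOV VF redirection.
 */
static u32 example_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set)
{
	u32 old = xe_mmio_read32(mmio, reg);

	xe_mmio_write32(mmio, reg, (old & ~clr) | set);

	return old;	/* callers often want the previous value */
}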

int xe_mmio_write32_and_verify(struct xe_mmio *mmio,
			       struct xe_reg reg, u32 val, u32 mask, u32 eval)
{
	u32 reg_val;

	xe_mmio_write32(mmio, reg, val);
	reg_val = xe_mmio_read32(mmio, reg);

	return (reg_val & mask) != eval ? -EINVAL : 0;
}

bool xe_mmio_in_range(const struct xe_mmio *mmio,
		      const struct xe_mmio_range *range, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);

	return range && addr >= range->start && addr <= range->end;
}
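
/*
 * Editor's usage sketch (hypothetical range values): callers typically test
 * a register against a table of ranges, e.g. to decide whether special
 * handling is needed. Note both bounds are inclusive.
 */
static const struct xe_mmio_range example_range = {
	.start = 0xb000,
	.end = 0xbfff,
};

/* ... if (xe_mmio_in_range(mmio, &example_range, reg)) ... */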

/**
 * xe_mmio_read64_2x32() - Read a 64-bit register as two 32-bit reads
 * @mmio: MMIO target
 * @reg: register to read value from
 *
 * Although Intel GPUs have some 64-bit registers, the hardware officially
 * only supports GTTMMADR register reads of 32 bits or smaller. Even if a
 * 64-bit read happens to work on a given platform, it is not covered by the
 * spec, shouldn't be relied upon, and all 64-bit register reads should be
 * performed as two 32-bit reads of the upper and lower dwords.
 *
 * When reading registers that may be changing (such as free-running
 * counters), a rollover of the lower dword between the two 32-bit reads
 * would yield a corrupt value. This function ensures the upper dword has
 * stabilized before returning the 64-bit value.
 *
 * Note that because this function may re-read the register multiple times
 * while waiting for the value to stabilize, it should not be used on
 * registers where read operations have side effects.
 *
 * Returns the value of the 64-bit register.
 */
u64 xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg)
{
	struct xe_reg reg_udw = { .addr = reg.addr + 0x4 };
	u32 ldw, udw, oldudw, retries;

	reg.addr = xe_mmio_adjusted_addr(mmio, reg.addr);
	reg_udw.addr = xe_mmio_adjusted_addr(mmio, reg_udw.addr);

	/* we shouldn't adjust just one register address */
	xe_tile_assert(mmio->tile, reg_udw.addr == reg.addr + 0x4);

	oldudw = xe_mmio_read32(mmio, reg_udw);
	for (retries = 5; retries; --retries) {
		ldw = xe_mmio_read32(mmio, reg);
		udw = xe_mmio_read32(mmio, reg_udw);

		if (udw == oldudw)
			break;

		oldudw = udw;
	}

	drm_WARN(&mmio->tile->xe->drm, retries == 0,
		 "64-bit read of %#x did not stabilize\n", reg.addr);

	return (u64)udw << 32 | ldw;
}

static int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask,
			    u32 val, u32 timeout_us, u32 *out_val,
			    bool atomic, bool expect_match)
{
	int ret = -ETIMEDOUT;

	/* ... poll with exponential backoff; set ret = 0 on match ... */

	return ret;
}

/**
 * xe_mmio_wait32() - Wait for a register to match the desired masked value
 * @mmio: MMIO target
 * @reg: register to read value from
 * @mask: mask to be applied to the value read from the register
 * @val: desired value after applying the mask
 * @timeout_us: time out after this period of time
 * @out_val: if not NULL, points where to store the last unmasked value
 * @atomic: needs to be true if calling from an atomic context
 *
 * This function polls for the desired masked value and returns zero on success
 * or -ETIMEDOUT if timed out.
 *
 * Note that the real wait time can be longer than
 * @timeout_us for different reasons, especially in non-atomic contexts. Thus,
 * @timeout_us should be treated as a lower bound on the total wait time.
 */
int xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val,
		   u32 timeout_us, u32 *out_val, bool atomic)
{
	return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val,
				atomic, true);
}
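
/*
 * Editor's sketch (illustration only, simplified from the description
 * above): the general shape of a masked poll with exponential backoff. The
 * real __xe_mmio_wait32() additionally honors @atomic and can record the
 * last value read.
 */
static int example_poll_masked(struct xe_mmio *mmio, struct xe_reg reg,
			       u32 mask, u32 val, u32 timeout_us)
{
	u32 wait_us = 10;

	for (;;) {
		if ((xe_mmio_read32(mmio, reg) & mask) == val)
			return 0;

		if (!timeout_us)
			return -ETIMEDOUT;

		wait_us = min(wait_us, timeout_us);	/* don't overshoot */
		usleep_range(wait_us, wait_us << 1);
		timeout_us -= wait_us;
		wait_us <<= 1;				/* exponential backoff */
	}
}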

/**
 * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
 * @mmio: MMIO target
 * @reg: register to read value from
 * @mask: mask to be applied to the value read from the register
 * @val: value not to be matched after applying the mask
 * @timeout_us: time out after this period of time
 * @out_val: if not NULL, points where to store the last unmasked value
 * @atomic: needs to be true if calling from an atomic context
 *
 * This function works exactly like xe_mmio_wait32() with the exception that
 * @val is expected not to be matched.
 */
int xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask,
		       u32 val, u32 timeout_us, u32 *out_val, bool atomic)
{
	return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val,
				atomic, false);
}