Lines matching "mmio" (full-text search)

33 		tile->mmio.regs = NULL;  in tiles_fini()
37 * On multi-tile devices, partition the BAR space for MMIO on each tile,
48 * | tile1->mmio.regs |
52 * | tile0->mmio.regs |
70 struct xe_mmio *mmio = xe_root_tile_mmio(xe); in mmio_multi_tile_setup() local
75 * Although the per-tile mmio regs are not yet initialized, this in mmio_multi_tile_setup()
76 * is fine as it's going to the root tile's mmio, that's in mmio_multi_tile_setup()
79 mtcfg = xe_mmio_read64_2x32(mmio, XEHP_MTCFG_ADDR); in mmio_multi_tile_setup()
97 regs = xe->mmio.regs; in mmio_multi_tile_setup()
99 tile->mmio.regs_size = SZ_4M; in mmio_multi_tile_setup()
100 tile->mmio.regs = regs; in mmio_multi_tile_setup()
101 tile->mmio.tile = tile; in mmio_multi_tile_setup()
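
Taken together, the mmio_multi_tile_setup() matches above outline how the root tile's BAR mapping is carved into per-tile windows: MTCFG is read through the root tile's not-yet-partitioned mmio to learn the tile count, then each tile's regs pointer is aimed at its own slice. A minimal sketch of that loop, assembled from the fragments only; the iteration macro, the id variable and the tile_mmio_size stride are assumptions, and only the three per-tile assignments come from the listing.

	u8 __iomem *regs = xe->mmio.regs;
	struct xe_tile *tile;
	u8 id;

	for_each_tile(tile, xe, id) {		/* iteration macro assumed */
		tile->mmio.regs_size = SZ_4M;	/* registers occupy the first 4 MiB of each slice */
		tile->mmio.regs = regs;
		tile->mmio.tile = tile;
		regs += tile_mmio_size;		/* stride between tile windows, value not shown above */
	}
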
107 * On top of all the multi-tile MMIO space there can be a platform-dependent
118 * | mmio.regs |
134 regs = xe->mmio.regs + tile_mmio_size * xe->info.tile_count; in mmio_extension_setup()
159 pci_iounmap(to_pci_dev(xe->drm.dev), xe->mmio.regs); in mmio_fini()
160 xe->mmio.regs = NULL; in mmio_fini()
161 root_tile->mmio.regs = NULL; in mmio_fini()
174 xe->mmio.size = pci_resource_len(pdev, GTTMMADR_BAR); in xe_mmio_init()
175 xe->mmio.regs = pci_iomap(pdev, GTTMMADR_BAR, 0); in xe_mmio_init()
176 if (xe->mmio.regs == NULL) { in xe_mmio_init()
182 root_tile->mmio.regs_size = SZ_4M; in xe_mmio_init()
183 root_tile->mmio.regs = xe->mmio.regs; in xe_mmio_init()
184 root_tile->mmio.tile = root_tile; in xe_mmio_init()
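
The xe_mmio_init() matches show the probe-time mapping: the whole GTTMMADR BAR is ioremapped once and the root tile simply aliases its first 4 MiB, so single-tile devices need no further setup. A minimal sketch of that sequence; the root-tile lookup helper, error message and error code are assumptions that are not part of the matches.

	struct xe_tile *root_tile = xe_device_get_root_tile(xe);	/* lookup helper assumed */
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);

	xe->mmio.size = pci_resource_len(pdev, GTTMMADR_BAR);
	xe->mmio.regs = pci_iomap(pdev, GTTMMADR_BAR, 0);
	if (xe->mmio.regs == NULL) {
		drm_err(&xe->drm, "failed to map registers\n");		/* message assumed */
		return -EIO;						/* error code assumed */
	}

	/* the root tile's register window is just the first 4 MiB of the BAR */
	root_tile->mmio.regs_size = SZ_4M;
	root_tile->mmio.regs = xe->mmio.regs;
	root_tile->mmio.tile = root_tile;
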
189 static void mmio_flush_pending_writes(struct xe_mmio *mmio) in mmio_flush_pending_writes() argument
194 if (mmio->tile->xe->info.platform != XE_LUNARLAKE) in mmio_flush_pending_writes()
199 writel(0, mmio->regs + DUMMY_REG_OFFSET); in mmio_flush_pending_writes()
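
The mmio_flush_pending_writes() matches appear to implement a Lunar Lake-only workaround: a write to a dummy register is posted so that writes still pending in the device are flushed before the read paths below (lines 208, 222 and 248) do the actual read. Reconstructed from just the three matched lines; anything between them is not shown here.

	static void mmio_flush_pending_writes(struct xe_mmio *mmio)
	{
		/* the workaround is only needed on Lunar Lake */
		if (mmio->tile->xe->info.platform != XE_LUNARLAKE)
			return;

		/* DUMMY_REG_OFFSET is defined elsewhere in the file */
		writel(0, mmio->regs + DUMMY_REG_OFFSET);
	}
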
202 u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg) in xe_mmio_read8() argument
204 u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); in xe_mmio_read8()
208 mmio_flush_pending_writes(mmio); in xe_mmio_read8()
210 val = readb(mmio->regs + addr); in xe_mmio_read8()
211 trace_xe_reg_rw(mmio, false, addr, val, sizeof(val)); in xe_mmio_read8()
216 u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg) in xe_mmio_read16() argument
218 u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); in xe_mmio_read16()
222 mmio_flush_pending_writes(mmio); in xe_mmio_read16()
224 val = readw(mmio->regs + addr); in xe_mmio_read16()
225 trace_xe_reg_rw(mmio, false, addr, val, sizeof(val)); in xe_mmio_read16()
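
The read8/read16 matches follow one shape: adjust the offset for the target, apply the flush workaround, do the narrow read, trace it. Filling in only the obviously missing pieces (the local variable and the return), xe_mmio_read8() presumably reads:

	u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
	{
		u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
		u8 val;

		/* see the Lunar Lake workaround above */
		mmio_flush_pending_writes(mmio);

		val = readb(mmio->regs + addr);
		trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));

		return val;
	}
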
230 void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val) in xe_mmio_write32() argument
232 u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); in xe_mmio_write32()
234 trace_xe_reg_rw(mmio, true, addr, val, sizeof(val)); in xe_mmio_write32()
236 if (!reg.vf && mmio->sriov_vf_gt) in xe_mmio_write32()
237 xe_gt_sriov_vf_write32(mmio->sriov_vf_gt, reg, val); in xe_mmio_write32()
239 writel(val, mmio->regs + addr); in xe_mmio_write32()
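
The write path traces the access first and then dispatches: registers a VF is not allowed to touch directly go through the SR-IOV helper, everything else goes straight to writel(). The else branch is implied by the fragments rather than shown; a sketch with it filled in:

	void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val)
	{
		u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);

		trace_xe_reg_rw(mmio, true, addr, val, sizeof(val));

		if (!reg.vf && mmio->sriov_vf_gt)
			xe_gt_sriov_vf_write32(mmio->sriov_vf_gt, reg, val);
		else
			writel(val, mmio->regs + addr);
	}
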
242 u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg) in xe_mmio_read32() argument
244 u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); in xe_mmio_read32()
248 mmio_flush_pending_writes(mmio); in xe_mmio_read32()
250 if (!reg.vf && mmio->sriov_vf_gt) in xe_mmio_read32()
251 val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt, reg); in xe_mmio_read32()
253 val = readl(mmio->regs + addr); in xe_mmio_read32()
255 trace_xe_reg_rw(mmio, false, addr, val, sizeof(val)); in xe_mmio_read32()
260 u32 xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set) in xe_mmio_rmw32() argument
264 old = xe_mmio_read32(mmio, reg); in xe_mmio_rmw32()
266 xe_mmio_write32(mmio, reg, reg_val); in xe_mmio_rmw32()
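
xe_mmio_rmw32() is a plain read-modify-write built on the two accessors above; the clear/set combination and the return value are implied by the signature but not captured by the search. A sketch with both assumed:

	u32 xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set)
	{
		u32 old, reg_val;

		old = xe_mmio_read32(mmio, reg);
		reg_val = (old & ~clr) | set;	/* assumed: clear the clr bits, then apply the set bits */
		xe_mmio_write32(mmio, reg, reg_val);

		return old;			/* assumed: the pre-modification value is returned */
	}
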
271 int xe_mmio_write32_and_verify(struct xe_mmio *mmio, in xe_mmio_write32_and_verify() argument
276 xe_mmio_write32(mmio, reg, val); in xe_mmio_write32_and_verify()
277 reg_val = xe_mmio_read32(mmio, reg); in xe_mmio_write32_and_verify()
282 bool xe_mmio_in_range(const struct xe_mmio *mmio, in xe_mmio_in_range() argument
286 u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); in xe_mmio_in_range()
293 * @mmio: MMIO target
313 u64 xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg) in xe_mmio_read64_2x32() argument
318 reg.addr = xe_mmio_adjusted_addr(mmio, reg.addr); in xe_mmio_read64_2x32()
319 reg_udw.addr = xe_mmio_adjusted_addr(mmio, reg_udw.addr); in xe_mmio_read64_2x32()
322 xe_tile_assert(mmio->tile, reg_udw.addr == reg.addr + 0x4); in xe_mmio_read64_2x32()
324 oldudw = xe_mmio_read32(mmio, reg_udw); in xe_mmio_read64_2x32()
326 ldw = xe_mmio_read32(mmio, reg); in xe_mmio_read64_2x32()
327 udw = xe_mmio_read32(mmio, reg_udw); in xe_mmio_read64_2x32()
335 drm_WARN(&mmio->tile->xe->drm, retries == 0, in xe_mmio_read64_2x32()
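
xe_mmio_read64_2x32() reads a 64-bit register as two 32-bit halves and guards against a torn read: the upper dword is sampled before and after the lower one, and the pair is re-read while the upper half keeps changing, with a drm_WARN once the retries run out. The loop itself is not in the match list; a sketch of it, assuming a small fixed retry budget and the warning text:

	u32 oldudw, ldw, udw;
	int retries = 3;				/* retry budget assumed */

	oldudw = xe_mmio_read32(mmio, reg_udw);
	do {
		ldw = xe_mmio_read32(mmio, reg);
		udw = xe_mmio_read32(mmio, reg_udw);

		if (udw == oldudw)
			break;				/* upper half stable across the lower read: not torn */

		oldudw = udw;
	} while (--retries);

	drm_WARN(&mmio->tile->xe->drm, retries == 0,
		 "64-bit read of %#x still torn after retries\n", reg.addr);	/* message assumed */

	return (u64)udw << 32 | ldw;
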
341 static int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout… in __xe_mmio_wait32() argument
352 read = xe_mmio_read32(mmio, reg); in __xe_mmio_wait32()
378 read = xe_mmio_read32(mmio, reg); in __xe_mmio_wait32()
396 * @mmio: MMIO target
413 int xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, in xe_mmio_wait32() argument
416 return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, true); in xe_mmio_wait32()
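
xe_mmio_wait32() above and xe_mmio_wait32_not() below are thin wrappers around a common poller that re-reads the register until the masked value matches, or stops matching, the expected value within timeout_us; the atomic flag presumably selects busy-waiting for callers in atomic context. A hypothetical caller, with the register and bit names invented purely for illustration:

	/* hypothetical: wait up to 10 ms for STATUS_READY to assert in MY_STATUS_REG */
	u32 val;
	int err;

	err = xe_mmio_wait32(mmio, MY_STATUS_REG, STATUS_READY, STATUS_READY,
			     10 * USEC_PER_MSEC, &val, false);
	if (err)
		drm_err(&mmio->tile->xe->drm, "device never became ready, last status %#x\n", val);
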
421 * @mmio: MMIO target
432 int xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, in xe_mmio_wait32_not() argument
435 return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, false); in xe_mmio_wait32_not()