1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2021-2024 NVIDIA CORPORATION & AFFILIATES. */
3 
4 #define dev_fmt(fmt) "tegra241_cmdqv: " fmt
5 
6 #include <linux/acpi.h>
7 #include <linux/debugfs.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/interrupt.h>
10 #include <linux/iommu.h>
11 #include <linux/iopoll.h>
12 
13 #include <acpi/acpixf.h>
14 
15 #include "arm-smmu-v3.h"
16 
17 /* CMDQV register page base and size defines */
18 #define TEGRA241_CMDQV_CONFIG_BASE	(0)
19 #define TEGRA241_CMDQV_CONFIG_SIZE	(SZ_64K)
20 #define TEGRA241_VCMDQ_PAGE0_BASE	(TEGRA241_CMDQV_CONFIG_BASE + SZ_64K)
21 #define TEGRA241_VCMDQ_PAGE1_BASE	(TEGRA241_VCMDQ_PAGE0_BASE + SZ_64K)
22 #define TEGRA241_VINTF_PAGE_BASE	(TEGRA241_VCMDQ_PAGE1_BASE + SZ_64K)
23 
24 /* CMDQV global base regs */
25 #define TEGRA241_CMDQV_CONFIG		0x0000
26 #define  CMDQV_EN			BIT(0)
27 
28 #define TEGRA241_CMDQV_PARAM		0x0004
29 #define  CMDQV_NUM_VINTF_LOG2		GENMASK(11, 8)
30 #define  CMDQV_NUM_VCMDQ_LOG2		GENMASK(7, 4)
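/* Illustrative: a LOG2 field value of 6 encodes 64 queues (decoded in __tegra241_cmdqv_probe()) */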
31 
32 #define TEGRA241_CMDQV_STATUS		0x0008
33 #define  CMDQV_ENABLED			BIT(0)
34 
35 #define TEGRA241_CMDQV_VINTF_ERR_MAP	0x0014
36 #define TEGRA241_CMDQV_VINTF_INT_MASK	0x001C
37 #define TEGRA241_CMDQV_CMDQ_ERR_MAP(m)  (0x0024 + 0x4*(m))
38 
39 #define TEGRA241_CMDQV_CMDQ_ALLOC(q)	(0x0200 + 0x4*(q))
40 #define  CMDQV_CMDQ_ALLOC_VINTF		GENMASK(20, 15)
41 #define  CMDQV_CMDQ_ALLOC_LVCMDQ	GENMASK(7, 1)
42 #define  CMDQV_CMDQ_ALLOCATED		BIT(0)
43 
44 /* VINTF base regs */
45 #define TEGRA241_VINTF(v)		(0x1000 + 0x100*(v))
46 
47 #define TEGRA241_VINTF_CONFIG		0x0000
48 #define  VINTF_HYP_OWN			BIT(17)
49 #define  VINTF_VMID			GENMASK(16, 1)
50 #define  VINTF_EN			BIT(0)
51 
52 #define TEGRA241_VINTF_STATUS		0x0004
53 #define  VINTF_STATUS			GENMASK(3, 1)
54 #define  VINTF_ENABLED			BIT(0)
55 
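/* Two 64-bit registers, carrying one error bit per LVCMDQ of the VINTF */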
56 #define TEGRA241_VINTF_LVCMDQ_ERR_MAP_64(m) \
57 					(0x00C0 + 0x8*(m))
58 #define  LVCMDQ_ERR_MAP_NUM_64		2
59 
60 /* VCMDQ base regs */
61 /* -- PAGE0 -- */
62 #define TEGRA241_VCMDQ_PAGE0(q)		(TEGRA241_VCMDQ_PAGE0_BASE + 0x80*(q))
63 
64 #define TEGRA241_VCMDQ_CONS		0x00000
65 #define  VCMDQ_CONS_ERR			GENMASK(30, 24)
66 
67 #define TEGRA241_VCMDQ_PROD		0x00004
68 
69 #define TEGRA241_VCMDQ_CONFIG		0x00008
70 #define  VCMDQ_EN			BIT(0)
71 
72 #define TEGRA241_VCMDQ_STATUS		0x0000C
73 #define  VCMDQ_ENABLED			BIT(0)
74 
75 #define TEGRA241_VCMDQ_GERROR		0x00010
76 #define TEGRA241_VCMDQ_GERRORN		0x00014
77 
78 /* -- PAGE1 -- */
79 #define TEGRA241_VCMDQ_PAGE1(q)		(TEGRA241_VCMDQ_PAGE1_BASE + 0x80*(q))
80 #define  VCMDQ_ADDR			GENMASK(47, 5)
81 #define  VCMDQ_LOG2SIZE			GENMASK(4, 0)
82 
83 #define TEGRA241_VCMDQ_BASE		0x00000
84 #define TEGRA241_VCMDQ_CONS_INDX_BASE	0x00008
85 
86 /* VINTF logical-VCMDQ pages */
87 #define TEGRA241_VINTFi_PAGE0(i)	(TEGRA241_VINTF_PAGE_BASE + SZ_128K*(i))
88 #define TEGRA241_VINTFi_PAGE1(i)	(TEGRA241_VINTFi_PAGE0(i) + SZ_64K)
89 #define TEGRA241_VINTFi_LVCMDQ_PAGE0(i, q) \
90 					(TEGRA241_VINTFi_PAGE0(i) + 0x80*(q))
91 #define TEGRA241_VINTFi_LVCMDQ_PAGE1(i, q) \
92 					(TEGRA241_VINTFi_PAGE1(i) + 0x80*(q))
93 
94 /* MMIO helpers */
95 #define REG_CMDQV(_cmdqv, _regname) \
96 	((_cmdqv)->base + TEGRA241_CMDQV_##_regname)
97 #define REG_VINTF(_vintf, _regname) \
98 	((_vintf)->base + TEGRA241_VINTF_##_regname)
99 #define REG_VCMDQ_PAGE0(_vcmdq, _regname) \
100 	((_vcmdq)->page0 + TEGRA241_VCMDQ_##_regname)
101 #define REG_VCMDQ_PAGE1(_vcmdq, _regname) \
102 	((_vcmdq)->page1 + TEGRA241_VCMDQ_##_regname)
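/*
 * Illustrative example: REG_VINTF(vintf, CONFIG) expands to
 * vintf->base + TEGRA241_VINTF_CONFIG, i.e. that VINTF's CONFIG register.
 */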
103 
104 
105 static bool disable_cmdqv;
106 module_param(disable_cmdqv, bool, 0444);
107 MODULE_PARM_DESC(disable_cmdqv,
108 	"Disable the CMDQV hardware and fall back to the default SMMU internal CMDQ.");
109 
110 static bool bypass_vcmdq;
111 module_param(bypass_vcmdq, bool, 0444);
112 MODULE_PARM_DESC(bypass_vcmdq,
113 	"Bypass the VCMDQs, for debugging use or performance comparison.");
114 
115 /**
116  * struct tegra241_vcmdq - Virtual Command Queue
117  * @idx: Global index in the CMDQV
118  * @lidx: Local index in the VINTF
119  * @enabled: Enable status
120  * @cmdqv: Parent CMDQV pointer
121  * @vintf: Parent VINTF pointer
122  * @cmdq: Command Queue struct
123  * @page0: MMIO Page0 base address
124  * @page1: MMIO Page1 base address
125  */
126 struct tegra241_vcmdq {
127 	u16 idx;
128 	u16 lidx;
129 
130 	bool enabled;
131 
132 	struct tegra241_cmdqv *cmdqv;
133 	struct tegra241_vintf *vintf;
134 	struct arm_smmu_cmdq cmdq;
135 
136 	void __iomem *page0;
137 	void __iomem *page1;
138 };
139 
140 /**
141  * struct tegra241_vintf - Virtual Interface
142  * @idx: Global index in the CMDQV
143  * @enabled: Enable status
144  * @hyp_own: Owned by hypervisor (in-kernel)
145  * @cmdqv: Parent CMDQV pointer
146  * @lvcmdqs: List of logical VCMDQ pointers
147  * @base: MMIO base address
148  */
149 struct tegra241_vintf {
150 	u16 idx;
151 
152 	bool enabled;
153 	bool hyp_own;
154 
155 	struct tegra241_cmdqv *cmdqv;
156 	struct tegra241_vcmdq **lvcmdqs;
157 
158 	void __iomem *base;
159 };
160 
161 /**
162  * struct tegra241_cmdqv - CMDQ-V for SMMUv3
163  * @smmu: SMMUv3 device
164  * @dev: CMDQV device
165  * @base: MMIO base address
166  * @irq: IRQ number
167  * @num_vintfs: Total number of VINTFs
168  * @num_vcmdqs: Total number of VCMDQs
169  * @num_lvcmdqs_per_vintf: Number of logical VCMDQs per VINTF
170  * @vintf_ids: VINTF id allocator
171  * @vintfs: List of VINTFs
172  */
173 struct tegra241_cmdqv {
174 	struct arm_smmu_device smmu;
175 	struct device *dev;
176 
177 	void __iomem *base;
178 	int irq;
179 
180 	/* CMDQV Hardware Params */
181 	u16 num_vintfs;
182 	u16 num_vcmdqs;
183 	u16 num_lvcmdqs_per_vintf;
184 
185 	struct ida vintf_ids;
186 
187 	struct tegra241_vintf **vintfs;
188 };
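/*
 * Note (per the static_assert in __tegra241_cmdqv_probe()): @smmu must remain
 * the first member, so the devm_krealloc()'d arm_smmu_device and its containing
 * tegra241_cmdqv share the same base address.
 */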
189 
190 /* Config and Polling Helpers */
191 
192 static inline int tegra241_cmdqv_write_config(struct tegra241_cmdqv *cmdqv,
193 					      void __iomem *addr_config,
194 					      void __iomem *addr_status,
195 					      u32 regval, const char *header,
196 					      bool *out_enabled)
197 {
198 	bool en = regval & BIT(0);
199 	int ret;
200 
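	/*
	 * BIT(0) is the EN bit of the CMDQV/VINTF/VCMDQ CONFIG registers and
	 * the ENABLED bit of the corresponding STATUS registers, so poll the
	 * status until it matches the requested enable state.
	 */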
201 	writel(regval, addr_config);
202 	ret = readl_poll_timeout(addr_status, regval,
203 				 en ? regval & BIT(0) : !(regval & BIT(0)),
204 				 1, ARM_SMMU_POLL_TIMEOUT_US);
205 	if (ret)
206 		dev_err(cmdqv->dev, "%sfailed to %sable, STATUS=0x%08X\n",
207 			header, en ? "en" : "dis", regval);
208 	if (out_enabled)
209 		WRITE_ONCE(*out_enabled, regval & BIT(0));
210 	return ret;
211 }
212 
213 static inline int cmdqv_write_config(struct tegra241_cmdqv *cmdqv, u32 regval)
214 {
215 	return tegra241_cmdqv_write_config(cmdqv,
216 					   REG_CMDQV(cmdqv, CONFIG),
217 					   REG_CMDQV(cmdqv, STATUS),
218 					   regval, "CMDQV: ", NULL);
219 }
220 
221 static inline int vintf_write_config(struct tegra241_vintf *vintf, u32 regval)
222 {
223 	char header[16];
224 
225 	snprintf(header, 16, "VINTF%u: ", vintf->idx);
226 	return tegra241_cmdqv_write_config(vintf->cmdqv,
227 					   REG_VINTF(vintf, CONFIG),
228 					   REG_VINTF(vintf, STATUS),
229 					   regval, header, &vintf->enabled);
230 }
231 
232 static inline char *lvcmdq_error_header(struct tegra241_vcmdq *vcmdq,
233 					char *header, int hlen)
234 {
235 	WARN_ON(hlen < 64);
236 	if (WARN_ON(!vcmdq->vintf))
237 		return "";
238 	snprintf(header, hlen, "VINTF%u: VCMDQ%u/LVCMDQ%u: ",
239 		 vcmdq->vintf->idx, vcmdq->idx, vcmdq->lidx);
240 	return header;
241 }
242 
243 static inline int vcmdq_write_config(struct tegra241_vcmdq *vcmdq, u32 regval)
244 {
245 	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
246 
247 	return tegra241_cmdqv_write_config(vcmdq->cmdqv,
248 					   REG_VCMDQ_PAGE0(vcmdq, CONFIG),
249 					   REG_VCMDQ_PAGE0(vcmdq, STATUS),
250 					   regval, h, &vcmdq->enabled);
251 }
252 
253 /* ISR Functions */
254 
255 static void tegra241_vintf0_handle_error(struct tegra241_vintf *vintf)
256 {
257 	int i;
258 
259 	for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++) {
260 		u64 map = readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i)));
261 
262 		while (map) {
263 			unsigned long lidx = __ffs64(map);
264 			struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx];
265 			u32 gerror = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR));
266 
267 			__arm_smmu_cmdq_skip_err(&vintf->cmdqv->smmu, &vcmdq->cmdq);
268 			writel(gerror, REG_VCMDQ_PAGE0(vcmdq, GERRORN));
269 			map &= ~BIT_ULL(lidx);
270 		}
271 	}
272 }
273 
274 static irqreturn_t tegra241_cmdqv_isr(int irq, void *devid)
275 {
276 	struct tegra241_cmdqv *cmdqv = (struct tegra241_cmdqv *)devid;
277 	void __iomem *reg_vintf_map = REG_CMDQV(cmdqv, VINTF_ERR_MAP);
278 	char err_str[256];
279 	u64 vintf_map;
280 
281 	/* Use readl_relaxed() as register addresses are not 64-bit aligned */
282 	vintf_map = (u64)readl_relaxed(reg_vintf_map + 0x4) << 32 |
283 		    (u64)readl_relaxed(reg_vintf_map);
284 
285 	snprintf(err_str, sizeof(err_str),
286 		 "vintf_map: %016llx, vcmdq_map %08x:%08x:%08x:%08x", vintf_map,
287 		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(3))),
288 		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(2))),
289 		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(1))),
290 		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(0))));
291 
292 	dev_warn(cmdqv->dev, "unexpected error reported. %s\n", err_str);
293 
294 	/* Handle VINTF0 and its LVCMDQs */
295 	if (vintf_map & BIT_ULL(0)) {
296 		tegra241_vintf0_handle_error(cmdqv->vintfs[0]);
297 		vintf_map &= ~BIT_ULL(0);
298 	}
299 
300 	return IRQ_HANDLED;
301 }
302 
303 /* Command Queue Function */
304 
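/*
 * A guest-owned VINTF has HYP_OWN wired to zero (see tegra241_vintf_hw_init()),
 * and such VCMDQ HW only accepts a restricted command set, hence this short list.
 */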
305 static bool tegra241_guest_vcmdq_supports_cmd(struct arm_smmu_cmdq_ent *ent)
306 {
307 	switch (ent->opcode) {
308 	case CMDQ_OP_TLBI_NH_ASID:
309 	case CMDQ_OP_TLBI_NH_VA:
310 	case CMDQ_OP_ATC_INV:
311 		return true;
312 	default:
313 		return false;
314 	}
315 }
316 
317 static struct arm_smmu_cmdq *
318 tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu,
319 			struct arm_smmu_cmdq_ent *ent)
320 {
321 	struct tegra241_cmdqv *cmdqv =
322 		container_of(smmu, struct tegra241_cmdqv, smmu);
323 	struct tegra241_vintf *vintf = cmdqv->vintfs[0];
324 	struct tegra241_vcmdq *vcmdq;
325 	u16 lidx;
326 
327 	if (READ_ONCE(bypass_vcmdq))
328 		return NULL;
329 
330 	/* Use SMMU CMDQ if VINTF0 is uninitialized */
331 	if (!READ_ONCE(vintf->enabled))
332 		return NULL;
333 
334 	/*
335 	 * Select an LVCMDQ to use. Here we use a temporary solution to
336 	 * balance out traffic on cmdq issuing: each cmdq has its own
337 	 * lock, so if all CPUs issue cmdlists using the same cmdq, only
338 	 * one CPU at a time can enter the process, while the others
339 	 * spin on the same lock.
340 	 */
341 	lidx = raw_smp_processor_id() % cmdqv->num_lvcmdqs_per_vintf;
342 	vcmdq = vintf->lvcmdqs[lidx];
343 	if (!vcmdq || !READ_ONCE(vcmdq->enabled))
344 		return NULL;
345 
346 	/* Unsupported CMDs fall back to the smmu->cmdq pathway */
347 	if (!arm_smmu_cmdq_supports_cmd(&vcmdq->cmdq, ent))
348 		return NULL;
349 	return &vcmdq->cmdq;
350 }
351 
352 /* HW Reset Functions */
353 
354 static void tegra241_vcmdq_hw_deinit(struct tegra241_vcmdq *vcmdq)
355 {
356 	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
357 	u32 gerrorn, gerror;
358 
359 	if (vcmdq_write_config(vcmdq, 0)) {
360 		dev_err(vcmdq->cmdqv->dev,
361 			"%sGERRORN=0x%X, GERROR=0x%X, CONS=0x%X\n", h,
362 			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN)),
363 			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR)),
364 			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, CONS)));
365 	}
366 	writel_relaxed(0, REG_VCMDQ_PAGE0(vcmdq, PROD));
367 	writel_relaxed(0, REG_VCMDQ_PAGE0(vcmdq, CONS));
368 	writeq_relaxed(0, REG_VCMDQ_PAGE1(vcmdq, BASE));
369 	writeq_relaxed(0, REG_VCMDQ_PAGE1(vcmdq, CONS_INDX_BASE));
370 
371 	gerrorn = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN));
372 	gerror = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR));
373 	if (gerror != gerrorn) {
374 		dev_warn(vcmdq->cmdqv->dev,
375 			 "%suncleared error detected, resetting\n", h);
376 		writel(gerror, REG_VCMDQ_PAGE0(vcmdq, GERRORN));
377 	}
378 
379 	dev_dbg(vcmdq->cmdqv->dev, "%sdeinited\n", h);
380 }
381 
382 static int tegra241_vcmdq_hw_init(struct tegra241_vcmdq *vcmdq)
383 {
384 	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
385 	int ret;
386 
387 	/* Reset VCMDQ */
388 	tegra241_vcmdq_hw_deinit(vcmdq);
389 
390 	/* Configure and enable VCMDQ */
391 	writeq_relaxed(vcmdq->cmdq.q.q_base, REG_VCMDQ_PAGE1(vcmdq, BASE));
392 
393 	ret = vcmdq_write_config(vcmdq, VCMDQ_EN);
394 	if (ret) {
395 		dev_err(vcmdq->cmdqv->dev,
396 			"%sGERRORN=0x%X, GERROR=0x%X, CONS=0x%X\n", h,
397 			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN)),
398 			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR)),
399 			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, CONS)));
400 		return ret;
401 	}
402 
403 	dev_dbg(vcmdq->cmdqv->dev, "%sinited\n", h);
404 	return 0;
405 }
406 
407 static void tegra241_vintf_hw_deinit(struct tegra241_vintf *vintf)
408 {
409 	u16 lidx;
410 
411 	for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++)
412 		if (vintf->lvcmdqs && vintf->lvcmdqs[lidx])
413 			tegra241_vcmdq_hw_deinit(vintf->lvcmdqs[lidx]);
414 	vintf_write_config(vintf, 0);
415 }
416 
417 static int tegra241_vintf_hw_init(struct tegra241_vintf *vintf, bool hyp_own)
418 {
419 	u32 regval;
420 	u16 lidx;
421 	int ret;
422 
423 	/* Reset VINTF */
424 	tegra241_vintf_hw_deinit(vintf);
425 
426 	/* Configure and enable VINTF */
427 	/*
428 	 * Note that the HYP_OWN bit is wired to zero when running in a guest
429 	 * kernel, regardless of whether it is set here, and that !HYP_OWN cmdq
430 	 * HW only supports a restricted set of commands.
431 	 */
432 	regval = FIELD_PREP(VINTF_HYP_OWN, hyp_own);
433 	writel(regval, REG_VINTF(vintf, CONFIG));
434 
435 	ret = vintf_write_config(vintf, regval | VINTF_EN);
436 	if (ret)
437 		return ret;
438 	/*
439 	 * As mentioned above, the HYP_OWN bit is wired to zero for a guest
440 	 * kernel, so read it back from HW to make sure hyp_own reflects it.
441 	 */
442 	vintf->hyp_own = !!(VINTF_HYP_OWN & readl(REG_VINTF(vintf, CONFIG)));
443 
444 	for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++) {
445 		if (vintf->lvcmdqs && vintf->lvcmdqs[lidx]) {
446 			ret = tegra241_vcmdq_hw_init(vintf->lvcmdqs[lidx]);
447 			if (ret) {
448 				tegra241_vintf_hw_deinit(vintf);
449 				return ret;
450 			}
451 		}
452 	}
453 
454 	return 0;
455 }
456 
457 static int tegra241_cmdqv_hw_reset(struct arm_smmu_device *smmu)
458 {
459 	struct tegra241_cmdqv *cmdqv =
460 		container_of(smmu, struct tegra241_cmdqv, smmu);
461 	u16 qidx, lidx, idx;
462 	u32 regval;
463 	int ret;
464 
465 	/* Reset CMDQV */
466 	regval = readl_relaxed(REG_CMDQV(cmdqv, CONFIG));
467 	ret = cmdqv_write_config(cmdqv, regval & ~CMDQV_EN);
468 	if (ret)
469 		return ret;
470 	ret = cmdqv_write_config(cmdqv, regval | CMDQV_EN);
471 	if (ret)
472 		return ret;
473 
474 	/* Assign preallocated global VCMDQs to each VINTF as LVCMDQs */
475 	for (idx = 0, qidx = 0; idx < cmdqv->num_vintfs; idx++) {
476 		for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) {
477 			regval  = FIELD_PREP(CMDQV_CMDQ_ALLOC_VINTF, idx);
478 			regval |= FIELD_PREP(CMDQV_CMDQ_ALLOC_LVCMDQ, lidx);
479 			regval |= CMDQV_CMDQ_ALLOCATED;
480 			writel_relaxed(regval,
481 				       REG_CMDQV(cmdqv, CMDQ_ALLOC(qidx++)));
482 		}
483 	}
484 
485 	return tegra241_vintf_hw_init(cmdqv->vintfs[0], true);
486 }
487 
488 /* VCMDQ Resource Helpers */
489 
490 static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
491 {
492 	struct arm_smmu_device *smmu = &vcmdq->cmdqv->smmu;
493 	struct arm_smmu_cmdq *cmdq = &vcmdq->cmdq;
494 	struct arm_smmu_queue *q = &cmdq->q;
495 	char name[16];
496 	u32 regval;
497 	int ret;
498 
499 	snprintf(name, 16, "vcmdq%u", vcmdq->idx);
500 
501 	/* Cap queue size to SMMU's IDR1.CMDQS and ensure natural alignment */
502 	regval = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
503 	q->llq.max_n_shift =
504 		min_t(u32, CMDQ_MAX_SZ_SHIFT, FIELD_GET(IDR1_CMDQS, regval));
505 
506 	/* Use the common helper to init the VCMDQ, and then... */
507 	ret = arm_smmu_init_one_queue(smmu, q, vcmdq->page0,
508 				      TEGRA241_VCMDQ_PROD, TEGRA241_VCMDQ_CONS,
509 				      CMDQ_ENT_DWORDS, name);
510 	if (ret)
511 		return ret;
512 
513 	/* ...override q_base to write VCMDQ_BASE registers */
514 	q->q_base = q->base_dma & VCMDQ_ADDR;
515 	q->q_base |= FIELD_PREP(VCMDQ_LOG2SIZE, q->llq.max_n_shift);
516 
517 	if (!vcmdq->vintf->hyp_own)
518 		cmdq->supports_cmd = tegra241_guest_vcmdq_supports_cmd;
519 
520 	return arm_smmu_cmdq_init(smmu, cmdq);
521 }
522 
523 /* VINTF Logical VCMDQ Resource Helpers */
524 
525 static void tegra241_vintf_deinit_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
526 {
527 	vintf->lvcmdqs[lidx] = NULL;
528 }
529 
530 static int tegra241_vintf_init_lvcmdq(struct tegra241_vintf *vintf, u16 lidx,
531 				      struct tegra241_vcmdq *vcmdq)
532 {
533 	struct tegra241_cmdqv *cmdqv = vintf->cmdqv;
534 	u16 idx = vintf->idx;
535 
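	/* Global VCMDQ index: each VINTF owns a contiguous block of LVCMDQs */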
536 	vcmdq->idx = idx * cmdqv->num_lvcmdqs_per_vintf + lidx;
537 	vcmdq->lidx = lidx;
538 	vcmdq->cmdqv = cmdqv;
539 	vcmdq->vintf = vintf;
540 	vcmdq->page0 = cmdqv->base + TEGRA241_VINTFi_LVCMDQ_PAGE0(idx, lidx);
541 	vcmdq->page1 = cmdqv->base + TEGRA241_VINTFi_LVCMDQ_PAGE1(idx, lidx);
542 
543 	vintf->lvcmdqs[lidx] = vcmdq;
544 	return 0;
545 }
546 
547 static void tegra241_vintf_free_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
548 {
549 	struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx];
550 	char header[64];
551 
552 	/* Note that the lvcmdq queue memory space is managed by devres */
553 
554 	tegra241_vintf_deinit_lvcmdq(vintf, lidx);
555 
556 	dev_dbg(vintf->cmdqv->dev,
557 		"%sdeallocated\n", lvcmdq_error_header(vcmdq, header, 64));
558 	kfree(vcmdq);
559 }
560 
561 static struct tegra241_vcmdq *
562 tegra241_vintf_alloc_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
563 {
564 	struct tegra241_cmdqv *cmdqv = vintf->cmdqv;
565 	struct tegra241_vcmdq *vcmdq;
566 	char header[64];
567 	int ret;
568 
569 	vcmdq = kzalloc(sizeof(*vcmdq), GFP_KERNEL);
570 	if (!vcmdq)
571 		return ERR_PTR(-ENOMEM);
572 
573 	ret = tegra241_vintf_init_lvcmdq(vintf, lidx, vcmdq);
574 	if (ret)
575 		goto free_vcmdq;
576 
577 	/* Build an arm_smmu_cmdq for each LVCMDQ */
578 	ret = tegra241_vcmdq_alloc_smmu_cmdq(vcmdq);
579 	if (ret)
580 		goto deinit_lvcmdq;
581 
582 	dev_dbg(cmdqv->dev,
583 		"%sallocated\n", lvcmdq_error_header(vcmdq, header, 64));
584 	return vcmdq;
585 
586 deinit_lvcmdq:
587 	tegra241_vintf_deinit_lvcmdq(vintf, lidx);
588 free_vcmdq:
589 	kfree(vcmdq);
590 	return ERR_PTR(ret);
591 }
592 
593 /* VINTF Resource Helpers */
594 
595 static void tegra241_cmdqv_deinit_vintf(struct tegra241_cmdqv *cmdqv, u16 idx)
596 {
597 	kfree(cmdqv->vintfs[idx]->lvcmdqs);
598 	ida_free(&cmdqv->vintf_ids, idx);
599 	cmdqv->vintfs[idx] = NULL;
600 }
601 
602 static int tegra241_cmdqv_init_vintf(struct tegra241_cmdqv *cmdqv, u16 max_idx,
603 				     struct tegra241_vintf *vintf)
604 {
605 
606 	u16 idx;
607 	int ret;
608 
609 	ret = ida_alloc_max(&cmdqv->vintf_ids, max_idx, GFP_KERNEL);
610 	if (ret < 0)
611 		return ret;
612 	idx = ret;
613 
614 	vintf->idx = idx;
615 	vintf->cmdqv = cmdqv;
616 	vintf->base = cmdqv->base + TEGRA241_VINTF(idx);
617 
618 	vintf->lvcmdqs = kcalloc(cmdqv->num_lvcmdqs_per_vintf,
619 				 sizeof(*vintf->lvcmdqs), GFP_KERNEL);
620 	if (!vintf->lvcmdqs) {
621 		ida_free(&cmdqv->vintf_ids, idx);
622 		return -ENOMEM;
623 	}
624 
625 	cmdqv->vintfs[idx] = vintf;
626 	return ret;
627 }
628 
629 /* Remove Helpers */
630 
631 static void tegra241_vintf_remove_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
632 {
633 	tegra241_vcmdq_hw_deinit(vintf->lvcmdqs[lidx]);
634 	tegra241_vintf_free_lvcmdq(vintf, lidx);
635 }
636 
637 static void tegra241_cmdqv_remove_vintf(struct tegra241_cmdqv *cmdqv, u16 idx)
638 {
639 	struct tegra241_vintf *vintf = cmdqv->vintfs[idx];
640 	u16 lidx;
641 
642 	/* Remove LVCMDQ resources */
643 	for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++)
644 		if (vintf->lvcmdqs[lidx])
645 			tegra241_vintf_remove_lvcmdq(vintf, lidx);
646 
647 	/* Remove VINTF resources */
648 	tegra241_vintf_hw_deinit(vintf);
649 
650 	dev_dbg(cmdqv->dev, "VINTF%u: deallocated\n", vintf->idx);
651 	tegra241_cmdqv_deinit_vintf(cmdqv, idx);
652 	kfree(vintf);
653 }
654 
655 static void tegra241_cmdqv_remove(struct arm_smmu_device *smmu)
656 {
657 	struct tegra241_cmdqv *cmdqv =
658 		container_of(smmu, struct tegra241_cmdqv, smmu);
659 	u16 idx;
660 
661 	/* Remove VINTF resources */
662 	for (idx = 0; idx < cmdqv->num_vintfs; idx++) {
663 		if (cmdqv->vintfs[idx]) {
664 			/* Only vintf0 should remain at this stage */
665 			WARN_ON(idx > 0);
666 			tegra241_cmdqv_remove_vintf(cmdqv, idx);
667 		}
668 	}
669 
670 	/* Remove cmdqv resources */
671 	ida_destroy(&cmdqv->vintf_ids);
672 
673 	if (cmdqv->irq > 0)
674 		free_irq(cmdqv->irq, cmdqv);
675 	iounmap(cmdqv->base);
676 	kfree(cmdqv->vintfs);
677 	put_device(cmdqv->dev); /* smmu->impl_dev */
678 }
679 
680 static struct arm_smmu_impl_ops tegra241_cmdqv_impl_ops = {
681 	.get_secondary_cmdq = tegra241_cmdqv_get_cmdq,
682 	.device_reset = tegra241_cmdqv_hw_reset,
683 	.device_remove = tegra241_cmdqv_remove,
684 };
685 
686 /* Probe Functions */
687 
688 static int tegra241_cmdqv_acpi_is_memory(struct acpi_resource *res, void *data)
689 {
690 	struct resource_win win;
691 
692 	return !acpi_dev_resource_address_space(res, &win);
693 }
694 
695 static int tegra241_cmdqv_acpi_get_irqs(struct acpi_resource *ares, void *data)
696 {
697 	struct resource r;
698 	int *irq = data;
699 
700 	if (*irq <= 0 && acpi_dev_resource_interrupt(ares, 0, &r))
701 		*irq = r.start;
702 	return 1; /* No need to add resource to the list */
703 }
704 
705 static struct resource *
706 tegra241_cmdqv_find_acpi_resource(struct device *dev, int *irq)
707 {
708 	struct acpi_device *adev = to_acpi_device(dev);
709 	struct list_head resource_list;
710 	struct resource_entry *rentry;
711 	struct resource *res = NULL;
712 	int ret;
713 
714 	INIT_LIST_HEAD(&resource_list);
715 	ret = acpi_dev_get_resources(adev, &resource_list,
716 				     tegra241_cmdqv_acpi_is_memory, NULL);
717 	if (ret < 0) {
718 		dev_err(dev, "failed to get memory resource: %d\n", ret);
719 		return NULL;
720 	}
721 
722 	rentry = list_first_entry_or_null(&resource_list,
723 					  struct resource_entry, node);
724 	if (!rentry) {
725 		dev_err(dev, "failed to get memory resource entry\n");
726 		goto free_list;
727 	}
728 
729 	/* Caller must free the res */
730 	res = kzalloc(sizeof(*res), GFP_KERNEL);
731 	if (!res)
732 		goto free_list;
733 
734 	*res = *rentry->res;
735 
736 	acpi_dev_free_resource_list(&resource_list);
737 
738 	INIT_LIST_HEAD(&resource_list);
739 
740 	if (irq)
741 		ret = acpi_dev_get_resources(adev, &resource_list,
742 					     tegra241_cmdqv_acpi_get_irqs, irq);
743 	if (ret < 0 || !irq || *irq <= 0)
744 		dev_warn(dev, "no interrupt. errors will not be reported\n");
745 
746 free_list:
747 	acpi_dev_free_resource_list(&resource_list);
748 	return res;
749 }
750 
751 static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu)
752 {
753 	struct tegra241_cmdqv *cmdqv =
754 		container_of(smmu, struct tegra241_cmdqv, smmu);
755 	struct tegra241_vintf *vintf;
756 	int lidx;
757 	int ret;
758 
759 	vintf = kzalloc(sizeof(*vintf), GFP_KERNEL);
760 	if (!vintf)
761 		return -ENOMEM;
762 
763 	/* Init VINTF0 for in-kernel use */
764 	ret = tegra241_cmdqv_init_vintf(cmdqv, 0, vintf);
765 	if (ret) {
766 		dev_err(cmdqv->dev, "failed to init vintf0: %d\n", ret);
767 		return ret;
768 	}
769 
770 	/* Preallocate logical VCMDQs to VINTF0 */
771 	for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) {
772 		struct tegra241_vcmdq *vcmdq;
773 
774 		vcmdq = tegra241_vintf_alloc_lvcmdq(vintf, lidx);
775 		if (IS_ERR(vcmdq))
776 			return PTR_ERR(vcmdq);
777 	}
778 
779 	/* Now, we are ready to run all the impl ops */
780 	smmu->impl_ops = &tegra241_cmdqv_impl_ops;
781 	return 0;
782 }
783 
784 #ifdef CONFIG_IOMMU_DEBUGFS
785 static struct dentry *cmdqv_debugfs_dir;
786 #endif
787 
788 static struct arm_smmu_device *
789 __tegra241_cmdqv_probe(struct arm_smmu_device *smmu, struct resource *res,
790 		       int irq)
791 {
792 	static const struct arm_smmu_impl_ops init_ops = {
793 		.init_structures = tegra241_cmdqv_init_structures,
794 		.device_remove = tegra241_cmdqv_remove,
795 	};
796 	struct tegra241_cmdqv *cmdqv = NULL;
797 	struct arm_smmu_device *new_smmu;
798 	void __iomem *base;
799 	u32 regval;
800 	int ret;
801 
802 	static_assert(offsetof(struct tegra241_cmdqv, smmu) == 0);
803 
804 	base = ioremap(res->start, resource_size(res));
805 	if (!base) {
806 		dev_err(smmu->dev, "failed to ioremap\n");
807 		return NULL;
808 	}
809 
810 	regval = readl(base + TEGRA241_CMDQV_CONFIG);
811 	if (disable_cmdqv) {
812 		dev_info(smmu->dev, "Detected disable_cmdqv=true\n");
813 		writel(regval & ~CMDQV_EN, base + TEGRA241_CMDQV_CONFIG);
814 		goto iounmap;
815 	}
816 
817 	cmdqv = devm_krealloc(smmu->dev, smmu, sizeof(*cmdqv), GFP_KERNEL);
818 	if (!cmdqv)
819 		goto iounmap;
820 	new_smmu = &cmdqv->smmu;
821 
822 	cmdqv->irq = irq;
823 	cmdqv->base = base;
824 	cmdqv->dev = smmu->impl_dev;
825 
826 	if (cmdqv->irq > 0) {
827 		ret = request_irq(irq, tegra241_cmdqv_isr, 0, "tegra241-cmdqv",
828 				  cmdqv);
829 		if (ret) {
830 			dev_err(cmdqv->dev, "failed to request irq (%d): %d\n",
831 				cmdqv->irq, ret);
832 			goto iounmap;
833 		}
834 	}
835 
836 	regval = readl_relaxed(REG_CMDQV(cmdqv, PARAM));
837 	cmdqv->num_vintfs = 1 << FIELD_GET(CMDQV_NUM_VINTF_LOG2, regval);
838 	cmdqv->num_vcmdqs = 1 << FIELD_GET(CMDQV_NUM_VCMDQ_LOG2, regval);
839 	cmdqv->num_lvcmdqs_per_vintf = cmdqv->num_vcmdqs / cmdqv->num_vintfs;
840 
841 	cmdqv->vintfs =
842 		kcalloc(cmdqv->num_vintfs, sizeof(*cmdqv->vintfs), GFP_KERNEL);
843 	if (!cmdqv->vintfs)
844 		goto free_irq;
845 
846 	ida_init(&cmdqv->vintf_ids);
847 
848 #ifdef CONFIG_IOMMU_DEBUGFS
849 	if (!cmdqv_debugfs_dir) {
850 		cmdqv_debugfs_dir =
851 			debugfs_create_dir("tegra241_cmdqv", iommu_debugfs_dir);
852 		debugfs_create_bool("bypass_vcmdq", 0644, cmdqv_debugfs_dir,
853 				    &bypass_vcmdq);
854 	}
855 #endif
856 
857 	/* Provide init-level ops only, until tegra241_cmdqv_init_structures */
858 	new_smmu->impl_ops = &init_ops;
859 
860 	return new_smmu;
861 
862 free_irq:
863 	if (cmdqv->irq > 0)
864 		free_irq(cmdqv->irq, cmdqv);
865 iounmap:
866 	iounmap(base);
867 	return NULL;
868 }
869 
870 struct arm_smmu_device *tegra241_cmdqv_probe(struct arm_smmu_device *smmu)
871 {
872 	struct arm_smmu_device *new_smmu;
873 	struct resource *res = NULL;
874 	int irq;
875 
876 	if (!smmu->dev->of_node)
877 		res = tegra241_cmdqv_find_acpi_resource(smmu->impl_dev, &irq);
878 	if (!res)
879 		goto out_fallback;
880 
881 	new_smmu = __tegra241_cmdqv_probe(smmu, res, irq);
882 	kfree(res);
883 
884 	if (new_smmu)
885 		return new_smmu;
886 
887 out_fallback:
888 	dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n");
889 	smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV;
890 	put_device(smmu->impl_dev);
891 	return ERR_PTR(-ENODEV);
892 }
893