/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2024 Intel Corporation */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/overflow.h>
#include <linux/regmap.h>
#include <linux/scatterlist.h>

#include "intel-thc-dev.h"
#include "intel-thc-dma.h"
#include "intel-thc-hw.h"

static void dma_set_prd_base_addr(struct thc_device *dev, u64 physical_addr,
				  struct thc_dma_configuration *dma_config)
{
	u32 addr_high, addr_low;

	if (!dma_config->is_enabled)
		return;

	addr_high = upper_32_bits(physical_addr);
	addr_low = lower_32_bits(physical_addr);

	regmap_write(dev->thc_regmap, dma_config->prd_base_addr_high, addr_high);
	regmap_write(dev->thc_regmap, dma_config->prd_base_addr_low, addr_low);
}

static void dma_set_start_bit(struct thc_device *dev,
			      struct thc_dma_configuration *dma_config)
{
	u32 ctrl, mask, mbits, data, offset;

	if (!dma_config->is_enabled)
		return;

	switch (dma_config->dma_channel) {
	case THC_RXDMA1:
	case THC_RXDMA2:
		if (dma_config->dma_channel == THC_RXDMA2) {
			mbits = FIELD_PREP(THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_DATA_VAL,
					   THC_BITMASK_INTERRUPT_TYPE_DATA);
			mask = THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_DATA_VAL;
			regmap_write_bits(dev->thc_regmap,
					  THC_M_PRT_DEVINT_CFG_1_OFFSET, mask, mbits);
		}

		mbits = THC_M_PRT_READ_DMA_CNTRL_IE_EOF |
			THC_M_PRT_READ_DMA_CNTRL_SOO |
			THC_M_PRT_READ_DMA_CNTRL_IE_STALL |
			THC_M_PRT_READ_DMA_CNTRL_IE_ERROR |
			THC_M_PRT_READ_DMA_CNTRL_START;

		mask = THC_M_PRT_READ_DMA_CNTRL_TPCWP | mbits;
		mask |= THC_M_PRT_READ_DMA_CNTRL_INT_SW_DMA_EN;
		ctrl = FIELD_PREP(THC_M_PRT_READ_DMA_CNTRL_TPCWP, THC_POINTER_WRAPAROUND) | mbits;
		offset = dma_config->dma_channel == THC_RXDMA1 ?
			 THC_M_PRT_READ_DMA_CNTRL_1_OFFSET : THC_M_PRT_READ_DMA_CNTRL_2_OFFSET;
		regmap_write_bits(dev->thc_regmap, offset, mask, ctrl);
		break;

	case THC_SWDMA:
		mbits = THC_M_PRT_READ_DMA_CNTRL_IE_DMACPL |
			THC_M_PRT_READ_DMA_CNTRL_IE_IOC |
			THC_M_PRT_READ_DMA_CNTRL_SOO |
			THC_M_PRT_READ_DMA_CNTRL_START;

		mask = THC_M_PRT_READ_DMA_CNTRL_TPCWP | mbits;
		ctrl = FIELD_PREP(THC_M_PRT_READ_DMA_CNTRL_TPCWP, THC_POINTER_WRAPAROUND) | mbits;
		regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET,
				  mask, ctrl);
		break;

	case THC_TXDMA:
		regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_INT_STS_OFFSET,
				  THC_M_PRT_WRITE_INT_STS_THC_WRDMA_CMPL_STATUS,
				  THC_M_PRT_WRITE_INT_STS_THC_WRDMA_CMPL_STATUS);

		/* Select interrupt or polling method upon write completion */
		if (dev->dma_ctx->use_write_interrupts)
			data = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL;
		else
			data = 0;

		data |= THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_START;
		mask = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL |
		       THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_START;
		regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_DMA_CNTRL_OFFSET,
				  mask, data);
		break;

	default:
		break;
	}
}

static void dma_set_prd_control(struct thc_device *dev, u8 entry_count, u8 cb_depth,
				struct thc_dma_configuration *dma_config)
{
	u32 ctrl, mask;

	if (!dma_config->is_enabled)
		return;

	if (dma_config->dma_channel == THC_TXDMA) {
		mask = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_PTEC;
		ctrl = FIELD_PREP(THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_PTEC, entry_count);
	} else {
		mask = THC_M_PRT_RPRD_CNTRL_PTEC | THC_M_PRT_RPRD_CNTRL_PCD;
		ctrl = FIELD_PREP(THC_M_PRT_RPRD_CNTRL_PTEC, entry_count) |
		       FIELD_PREP(THC_M_PRT_RPRD_CNTRL_PCD, cb_depth);
	}

	regmap_write_bits(dev->thc_regmap, dma_config->prd_cntrl, mask, ctrl);
}

static void dma_clear_prd_control(struct thc_device *dev,
				  struct thc_dma_configuration *dma_config)
{
	u32 mask;

	if (!dma_config->is_enabled)
		return;

	if (dma_config->dma_channel == THC_TXDMA)
		mask = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_PTEC;
	else
		mask = THC_M_PRT_RPRD_CNTRL_PTEC | THC_M_PRT_RPRD_CNTRL_PCD;

	regmap_write_bits(dev->thc_regmap, dma_config->prd_cntrl, mask, 0);
}

static u8 dma_get_read_pointer(struct thc_device *dev,
			       struct thc_dma_configuration *dma_config)
{
	u32 ctrl, read_pointer;

	regmap_read(dev->thc_regmap, dma_config->dma_cntrl, &ctrl);
	read_pointer = FIELD_GET(THC_M_PRT_READ_DMA_CNTRL_TPCRP, ctrl);

	dev_dbg(dev->dev, "THC_M_PRT_READ_DMA_CNTRL 0x%x offset 0x%x TPCRP 0x%x\n",
		ctrl, dma_config->dma_cntrl, read_pointer);

	return read_pointer;
}

static u8 dma_get_write_pointer(struct thc_device *dev,
				struct thc_dma_configuration *dma_config)
{
	u32 ctrl, write_pointer;

	regmap_read(dev->thc_regmap, dma_config->dma_cntrl, &ctrl);
	write_pointer = FIELD_GET(THC_M_PRT_READ_DMA_CNTRL_TPCWP, ctrl);

	dev_dbg(dev->dev, "THC_M_PRT_READ_DMA_CNTRL 0x%x offset 0x%x TPCWP 0x%x\n",
		ctrl, dma_config->dma_cntrl, write_pointer);

	return write_pointer;
}

static void dma_set_write_pointer(struct thc_device *dev, u8 value,
				  struct thc_dma_configuration *dma_config)
{
	u32 ctrl, mask;

	mask = THC_M_PRT_READ_DMA_CNTRL_TPCWP;
	ctrl = FIELD_PREP(THC_M_PRT_READ_DMA_CNTRL_TPCWP, value);
	regmap_write_bits(dev->thc_regmap, dma_config->dma_cntrl, mask, ctrl);
}

static size_t dma_get_max_packet_size(struct thc_device *dev,
				      struct thc_dma_configuration *dma_config)
{
	return dma_config->max_packet_size;
}

static void dma_set_max_packet_size(struct thc_device *dev, size_t size,
				    struct thc_dma_configuration *dma_config)
{
	if (size) {
		dma_config->max_packet_size = ALIGN(size, SZ_4K);
		dma_config->is_enabled = true;
	}
}

static void thc_copy_one_sgl_to_prd(struct thc_device *dev,
				    struct thc_dma_configuration *config,
				    unsigned int ind)
{
	struct thc_prd_table *prd_tbl;
	struct scatterlist *sg;
	int j;

	prd_tbl = &config->prd_tbls[ind];

	for_each_sg(config->sgls[ind], sg, config->sgls_nent[ind], j) {
		prd_tbl->entries[j].dest_addr =
			sg_dma_address(sg) >> THC_ADDRESS_SHIFT;
		prd_tbl->entries[j].len = sg_dma_len(sg);
		prd_tbl->entries[j].hw_status = 0;
		prd_tbl->entries[j].end_of_prd = 0;
	}

	/* Set the end_of_prd flag in the last filled entry */
	if (j > 0)
		prd_tbl->entries[j - 1].end_of_prd = 1;
}

static void thc_copy_sgls_to_prd(struct thc_device *dev,
				 struct thc_dma_configuration *config)
{
	unsigned int i;

	memset(config->prd_tbls, 0, array_size(PRD_TABLE_SIZE, config->prd_tbl_num));

	for (i = 0; i < config->prd_tbl_num; i++)
		thc_copy_one_sgl_to_prd(dev, config, i);
}

static int setup_dma_buffers(struct thc_device *dev,
			     struct thc_dma_configuration *config,
			     enum dma_data_direction dir)
{
	size_t prd_tbls_size = array_size(PRD_TABLE_SIZE, config->prd_tbl_num);
	unsigned int i, nent = PRD_ENTRIES_NUM;
	dma_addr_t dma_handle;
	void *cpu_addr;
	size_t buf_sz;
	int count;

	if (!config->is_enabled)
		return 0;

	memset(config->sgls, 0, sizeof(config->sgls));
	memset(config->sgls_nent, 0, sizeof(config->sgls_nent));

	cpu_addr = dma_alloc_coherent(dev->dev, prd_tbls_size,
				      &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	config->prd_tbls = cpu_addr;
	config->prd_tbls_dma_handle = dma_handle;

	buf_sz = dma_get_max_packet_size(dev, config);

	/* Allocate and map the scatter-gather lists, one for each PRD table */
	for (i = 0; i < config->prd_tbl_num; i++) {
		config->sgls[i] = sgl_alloc(buf_sz, GFP_KERNEL, &nent);
		if (!config->sgls[i] || nent > PRD_ENTRIES_NUM) {
			dev_err_once(dev->dev, "sgl_alloc (%uth) failed, nent %u\n",
				     i, nent);
			return -ENOMEM;
		}
		count = dma_map_sg(dev->dev, config->sgls[i], nent, dir);

		config->sgls_nent[i] = count;
	}

	thc_copy_sgls_to_prd(dev, config);

	return 0;
}

static void thc_reset_dma_settings(struct thc_device *dev)
{
	/* Stop all DMA channels and reset DMA read pointers */
	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_START, 0);
	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_START, 0);
	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_START, 0);
	regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_DMA_CNTRL_OFFSET,
			  THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_START, 0);

	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_TPCPR,
			  THC_M_PRT_READ_DMA_CNTRL_TPCPR);
	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_TPCPR,
			  THC_M_PRT_READ_DMA_CNTRL_TPCPR);
	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_TPCPR,
			  THC_M_PRT_READ_DMA_CNTRL_TPCPR);
}

static void release_dma_buffers(struct thc_device *dev,
				struct thc_dma_configuration *config)
{
	size_t prd_tbls_size = array_size(PRD_TABLE_SIZE, config->prd_tbl_num);
	unsigned int i;

	if (!config->is_enabled)
		return;

	for (i = 0; i < config->prd_tbl_num; i++) {
		if (!config->sgls[i] || !config->sgls_nent[i])
			continue;

		dma_unmap_sg(dev->dev, config->sgls[i],
			     config->sgls_nent[i],
			     config->dir);

		sgl_free(config->sgls[i]);
		config->sgls[i] = NULL;
	}

	if (config->prd_tbls) {
		memset(config->prd_tbls, 0, prd_tbls_size);
		dma_free_coherent(dev->dev, prd_tbls_size, config->prd_tbls,
				  config->prd_tbls_dma_handle);
		config->prd_tbls = NULL;
		config->prd_tbls_dma_handle = 0;
	}
}

struct thc_dma_context *thc_dma_init(struct thc_device *dev)
{
	struct thc_dma_context *dma_ctx;

	dma_ctx = devm_kzalloc(dev->dev, sizeof(*dma_ctx), GFP_KERNEL);
	if (!dma_ctx)
		return NULL;

	dev->dma_ctx = dma_ctx;

	dma_ctx->dma_config[THC_RXDMA1].dma_channel = THC_RXDMA1;
	dma_ctx->dma_config[THC_RXDMA2].dma_channel = THC_RXDMA2;
	dma_ctx->dma_config[THC_TXDMA].dma_channel = THC_TXDMA;
	dma_ctx->dma_config[THC_SWDMA].dma_channel = THC_SWDMA;

	dma_ctx->dma_config[THC_RXDMA1].dir = DMA_FROM_DEVICE;
	dma_ctx->dma_config[THC_RXDMA2].dir = DMA_FROM_DEVICE;
	dma_ctx->dma_config[THC_TXDMA].dir = DMA_TO_DEVICE;
	dma_ctx->dma_config[THC_SWDMA].dir = DMA_FROM_DEVICE;

	dma_ctx->dma_config[THC_RXDMA1].prd_tbl_num = PRD_TABLES_NUM;
	dma_ctx->dma_config[THC_RXDMA2].prd_tbl_num = PRD_TABLES_NUM;
	dma_ctx->dma_config[THC_TXDMA].prd_tbl_num = 1;
	dma_ctx->dma_config[THC_SWDMA].prd_tbl_num = 1;

	dma_ctx->dma_config[THC_RXDMA1].prd_base_addr_high = THC_M_PRT_RPRD_BA_HI_1_OFFSET;
	dma_ctx->dma_config[THC_RXDMA2].prd_base_addr_high = THC_M_PRT_RPRD_BA_HI_2_OFFSET;
	dma_ctx->dma_config[THC_TXDMA].prd_base_addr_high = THC_M_PRT_WPRD_BA_HI_OFFSET;
	dma_ctx->dma_config[THC_SWDMA].prd_base_addr_high = THC_M_PRT_RPRD_BA_HI_SW_OFFSET;

	dma_ctx->dma_config[THC_RXDMA1].prd_base_addr_low = THC_M_PRT_RPRD_BA_LOW_1_OFFSET;
	dma_ctx->dma_config[THC_RXDMA2].prd_base_addr_low = THC_M_PRT_RPRD_BA_LOW_2_OFFSET;
	dma_ctx->dma_config[THC_TXDMA].prd_base_addr_low = THC_M_PRT_WPRD_BA_LOW_OFFSET;
	dma_ctx->dma_config[THC_SWDMA].prd_base_addr_low = THC_M_PRT_RPRD_BA_LOW_SW_OFFSET;

	dma_ctx->dma_config[THC_RXDMA1].prd_cntrl = THC_M_PRT_RPRD_CNTRL_1_OFFSET;
	dma_ctx->dma_config[THC_RXDMA2].prd_cntrl = THC_M_PRT_RPRD_CNTRL_2_OFFSET;
	dma_ctx->dma_config[THC_TXDMA].prd_cntrl = THC_M_PRT_WRITE_DMA_CNTRL_OFFSET;
	dma_ctx->dma_config[THC_SWDMA].prd_cntrl = THC_M_PRT_RPRD_CNTRL_SW_OFFSET;

	dma_ctx->dma_config[THC_RXDMA1].dma_cntrl = THC_M_PRT_READ_DMA_CNTRL_1_OFFSET;
	dma_ctx->dma_config[THC_RXDMA2].dma_cntrl = THC_M_PRT_READ_DMA_CNTRL_2_OFFSET;
	dma_ctx->dma_config[THC_TXDMA].dma_cntrl = THC_M_PRT_WRITE_DMA_CNTRL_OFFSET;
	dma_ctx->dma_config[THC_SWDMA].dma_cntrl = THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET;

	/* Enable write DMA completion interrupt by default */
	dma_ctx->use_write_interrupts = 1;

	return dma_ctx;
}

/**
 * thc_dma_set_max_packet_sizes - Set max packet sizes for all DMA engines
 *
 * @dev: Pointer to the THC private device context
 * @mps_read1: RxDMA1 max packet size
 * @mps_read2: RxDMA2 max packet size
 * @mps_write: TxDMA max packet size
 * @mps_swdma: Software DMA max packet size
 *
 * A non-zero max packet size means the corresponding DMA channel is in use,
 * so the channel's enable flag is set accordingly.
 *
 * Return: 0 on success, other error codes on failure.
 */
int thc_dma_set_max_packet_sizes(struct thc_device *dev, size_t mps_read1,
				 size_t mps_read2, size_t mps_write,
				 size_t mps_swdma)
{
	if (!dev->dma_ctx) {
		dev_err_once(dev->dev,
			     "Cannot set max packet sizes because DMA context is NULL!\n");
		return -EINVAL;
	}

	dma_set_max_packet_size(dev, mps_read1, &dev->dma_ctx->dma_config[THC_RXDMA1]);
	dma_set_max_packet_size(dev, mps_read2, &dev->dma_ctx->dma_config[THC_RXDMA2]);
	dma_set_max_packet_size(dev, mps_write, &dev->dma_ctx->dma_config[THC_TXDMA]);
	dma_set_max_packet_size(dev, mps_swdma, &dev->dma_ctx->dma_config[THC_SWDMA]);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(thc_dma_set_max_packet_sizes, "INTEL_THC");
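/*
 * Usage sketch (hypothetical caller; thc_dev and the 4 KB sizes are
 * illustrative, not taken from this file):
 *
 *	ret = thc_dma_set_max_packet_sizes(thc_dev, SZ_4K, SZ_4K, SZ_4K, 0);
 *	if (ret)
 *		return ret;
 *
 * Passing 0 for @mps_swdma leaves the SWDMA channel disabled, since
 * dma_set_max_packet_size() only sets is_enabled for non-zero sizes.
 */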

/**
 * thc_dma_allocate - Allocate DMA buffers for all DMA engines
 *
 * @dev: Pointer to the THC private device context
 *
 * Return: 0 on success, other error codes on failure.
 */
int thc_dma_allocate(struct thc_device *dev)
{
	int ret, chan;

	for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++) {
		ret = setup_dma_buffers(dev, &dev->dma_ctx->dma_config[chan],
					dev->dma_ctx->dma_config[chan].dir);
		if (ret < 0) {
			dev_err_once(dev->dev, "DMA setup failed for DMA channel %d\n", chan);
			goto release_bufs;
		}
	}

	return 0;

release_bufs:
	while (chan--)
		release_dma_buffers(dev, &dev->dma_ctx->dma_config[chan]);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(thc_dma_allocate, "INTEL_THC");
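/*
 * Allocation is expected once per device, after the max packet sizes have
 * been set. A hypothetical caller sketch (thc_dev is illustrative):
 *
 *	ret = thc_dma_allocate(thc_dev);
 *	if (ret)
 *		return ret;
 *
 * On failure, buffers of the channels that were already set up have been
 * released by the function itself, so the caller needs no extra cleanup.
 */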

/**
 * thc_dma_release - Release DMA buffers for all DMA engines
 *
 * @dev: Pointer to the THC private device context
 */
void thc_dma_release(struct thc_device *dev)
{
	int chan;

	for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++)
		release_dma_buffers(dev, &dev->dma_ctx->dma_config[chan]);
}
EXPORT_SYMBOL_NS_GPL(thc_dma_release, "INTEL_THC");

static int calc_prd_entries_num(struct thc_prd_table *prd_tbl,
				size_t mes_len, u8 *nent)
{
	*nent = DIV_ROUND_UP(mes_len, THC_MIN_BYTES_PER_SG_LIST_ENTRY);
	if (*nent > PRD_ENTRIES_NUM)
		return -EMSGSIZE;

	return 0;
}

static size_t calc_message_len(struct thc_prd_table *prd_tbl, u8 *nent)
{
	size_t mes_len = 0;
	unsigned int j;

	for (j = 0; j < PRD_ENTRIES_NUM; j++) {
		mes_len += prd_tbl->entries[j].len;
		if (prd_tbl->entries[j].end_of_prd)
			break;
	}

	*nent = j + 1;

	return mes_len;
}

/**
 * thc_dma_configure - Configure DMA settings for all DMA engines
 *
 * @dev: Pointer to the THC private device context
 *
 * Return: 0 on success, other error codes on failure.
 */
int thc_dma_configure(struct thc_device *dev)
{
	struct thc_dma_context *dma_ctx = dev->dma_ctx;
	int chan;

	thc_reset_dma_settings(dev);

	if (!dma_ctx) {
		dev_err_once(dev->dev, "Cannot do DMA configure because DMA context is NULL\n");
		return -EINVAL;
	}

	for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++) {
		dma_set_prd_base_addr(dev,
				      dma_ctx->dma_config[chan].prd_tbls_dma_handle,
				      &dma_ctx->dma_config[chan]);

		dma_set_prd_control(dev, PRD_ENTRIES_NUM - 1,
				    dma_ctx->dma_config[chan].prd_tbl_num - 1,
				    &dma_ctx->dma_config[chan]);
	}

	/* Start the RxDMA2 engine */
	dma_set_start_bit(dev, &dma_ctx->dma_config[THC_RXDMA2]);

	dev_dbg(dev->dev, "DMA configured successfully!\n");

	return 0;
}
EXPORT_SYMBOL_NS_GPL(thc_dma_configure, "INTEL_THC");
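/*
 * Configuration programs the PRD base/control registers for every channel
 * and kicks off the RxDMA2 engine. A hypothetical bring-up order (names
 * illustrative):
 *
 *	thc_dma_set_max_packet_sizes(thc_dev, mps_r1, mps_r2, mps_w, mps_sw);
 *	ret = thc_dma_allocate(thc_dev);
 *	if (!ret)
 *		ret = thc_dma_configure(thc_dev);
 */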

/**
 * thc_dma_unconfigure - Unconfigure DMA settings for all DMA engines
 *
 * @dev: Pointer to the THC private device context
 */
void thc_dma_unconfigure(struct thc_device *dev)
{
	int chan;

	for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++) {
		dma_set_prd_base_addr(dev, 0, &dev->dma_ctx->dma_config[chan]);
		dma_clear_prd_control(dev, &dev->dma_ctx->dma_config[chan]);
	}

	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_START, 0);

	regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
			  THC_M_PRT_READ_DMA_CNTRL_START, 0);
}
EXPORT_SYMBOL_NS_GPL(thc_dma_unconfigure, "INTEL_THC");
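/*
 * Teardown mirrors bring-up: unconfigure the engines first, then release
 * the buffers. Hypothetical sketch:
 *
 *	thc_dma_unconfigure(thc_dev);
 *	thc_dma_release(thc_dev);
 */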

static int thc_wait_for_dma_pause(struct thc_device *dev, enum thc_dma_channel channel)
{
	u32 ctrl_reg, sts_reg, sts;
	int ret;

	ctrl_reg = (channel == THC_RXDMA1) ? THC_M_PRT_READ_DMA_CNTRL_1_OFFSET :
		   ((channel == THC_RXDMA2) ? THC_M_PRT_READ_DMA_CNTRL_2_OFFSET :
					      THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET);

	regmap_write_bits(dev->thc_regmap, ctrl_reg, THC_M_PRT_READ_DMA_CNTRL_START, 0);

	sts_reg = (channel == THC_RXDMA1) ? THC_M_PRT_READ_DMA_INT_STS_1_OFFSET :
		  ((channel == THC_RXDMA2) ? THC_M_PRT_READ_DMA_INT_STS_2_OFFSET :
					     THC_M_PRT_READ_DMA_INT_STS_SW_OFFSET);

	ret = regmap_read_poll_timeout(dev->thc_regmap, sts_reg, sts,
				       !(sts & THC_M_PRT_READ_DMA_INT_STS_ACTIVE),
				       THC_DEFAULT_RXDMA_POLLING_US_INTERVAL,
				       THC_DEFAULT_RXDMA_POLLING_US_TIMEOUT);

	if (ret) {
		dev_err_once(dev->dev,
			     "Timeout while waiting for DMA %d to stop\n", channel);
		return ret;
	}

	return 0;
}


static int read_dma_buffer(struct thc_device *dev,
			   struct thc_dma_configuration *read_config,
			   u8 prd_table_index, void *read_buff)
{
	struct thc_prd_table *prd_tbl;
	struct scatterlist *sg;
	size_t mes_len, ret;
	u8 nent;

	if (prd_table_index >= read_config->prd_tbl_num) {
		dev_err_once(dev->dev, "PRD table index %d too big\n", prd_table_index);
		return -EINVAL;
	}

	prd_tbl = &read_config->prd_tbls[prd_table_index];
	mes_len = calc_message_len(prd_tbl, &nent);
	if (mes_len > read_config->max_packet_size) {
		dev_err(dev->dev,
			"Message length %zu is bigger than buffer length %zu\n",
			mes_len, read_config->max_packet_size);
		return -EMSGSIZE;
	}

	sg = read_config->sgls[prd_table_index];
	ret = sg_copy_to_buffer(sg, nent, read_buff, mes_len);
	if (ret != mes_len) {
		dev_err_once(dev->dev, "Copied %zu bytes instead of requested %zu\n",
			     ret, mes_len);
		return -EIO;
	}

	return mes_len;
}


static void update_write_pointer(struct thc_device *dev,
				 struct thc_dma_configuration *read_config)
{
	u8 write_ptr = dma_get_write_pointer(dev, read_config);

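	/*
	 * The write pointer makes two passes over the PRD tables, one with
	 * the wraparound flag clear and one with it set
	 * (THC_POINTER_WRAPAROUND) - a common ring-buffer trick so that a
	 * completely full ring can be told apart from an empty one.
	 */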
	if (write_ptr + 1 == THC_WRAPAROUND_VALUE_ODD)
		dma_set_write_pointer(dev, THC_POINTER_WRAPAROUND, read_config);
	else if (write_ptr + 1 == THC_WRAPAROUND_VALUE_EVEN)
		dma_set_write_pointer(dev, 0, read_config);
	else
		dma_set_write_pointer(dev, write_ptr + 1, read_config);
}

static bool is_dma_buf_empty(struct thc_device *dev,
			     struct thc_dma_configuration *read_config,
			     u8 *read_ptr, u8 *write_ptr)
{
	*read_ptr = dma_get_read_pointer(dev, read_config);
	*write_ptr = dma_get_write_pointer(dev, read_config);

	if ((*read_ptr & THC_POINTER_MASK) == (*write_ptr & THC_POINTER_MASK))
		if (*read_ptr != *write_ptr)
			return true;

	return false;
}

static int thc_dma_read(struct thc_device *dev,
			struct thc_dma_configuration *read_config,
			void *read_buff, size_t *read_len, int *read_finished)
{
	u8 read_ptr, write_ptr, prd_table_index;
	int status;

	if (!is_dma_buf_empty(dev, read_config, &read_ptr, &write_ptr)) {
		prd_table_index = write_ptr & THC_POINTER_MASK;

		status = read_dma_buffer(dev, read_config, prd_table_index, read_buff);
		if (status <= 0) {
			dev_err_once(dev->dev, "read DMA buffer failed %d\n", status);
			return -EIO;
		}

		*read_len = status;

		/* Clear the relevant PRD table */
		thc_copy_one_sgl_to_prd(dev, read_config, prd_table_index);

		/* Increment the write pointer to let the HW know we have processed this PRD */
		update_write_pointer(dev, read_config);
	}

	/*
	 * This function only reads one frame from the PRD table per call, so we
	 * need to check whether all DMAed data has been read out and return that
	 * flag to the caller. The caller should repeatedly call thc_dma_read()
	 * until all DMAed data is handled.
	 */
	if (read_finished)
		*read_finished = is_dma_buf_empty(dev, read_config, &read_ptr, &write_ptr) ? 1 : 0;

	return 0;
}

/**
 * thc_rxdma_read - Read data from RxDMA buffer
 *
 * @dev: Pointer to the THC private device context
 * @dma_channel: The RxDMA engine to read data from
 * @read_buff: Pointer to the read data buffer
 * @read_len: Pointer to the read data length
 * @read_finished: Pointer to the flag indicating whether all pending data has been read out
 *
 * Return: 0 on success, other error codes on failure.
 */
int thc_rxdma_read(struct thc_device *dev, enum thc_dma_channel dma_channel,
		   void *read_buff, size_t *read_len, int *read_finished)
{
	struct thc_dma_configuration *dma_config;

	if (dma_channel >= THC_TXDMA) {
		dev_err(dev->dev, "Unsupported DMA channel for RxDMA read, %d\n", dma_channel);
		return -EINVAL;
	}

	if (!read_buff || !read_len) {
		dev_err(dev->dev, "Invalid input parameters, read_buff %p, read_len %p\n",
			read_buff, read_len);
		return -EINVAL;
	}

	dma_config = &dev->dma_ctx->dma_config[dma_channel];

	if (!dma_config->is_enabled) {
		dev_err_once(dev->dev, "The DMA channel %d is not enabled\n", dma_channel);
		return -EINVAL;
	}

	return thc_dma_read(dev, dma_config, read_buff, read_len, read_finished);
}
EXPORT_SYMBOL_NS_GPL(thc_rxdma_read, "INTEL_THC");
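/*
 * thc_dma_read() hands back at most one frame per call, so callers are
 * expected to loop until @read_finished reports the ring is drained. A
 * hypothetical sketch (thc_dev, buf and consume_frame() are illustrative):
 *
 *	int done = 0;
 *
 *	do {
 *		ret = thc_rxdma_read(thc_dev, THC_RXDMA2, buf, &len, &done);
 *		if (ret)
 *			break;
 *		consume_frame(buf, len);
 *	} while (!done);
 */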

static int thc_swdma_read_start(struct thc_device *dev, void *write_buff,
				size_t write_len, u32 *prd_tbl_len)
{
	u32 mask, val, data0 = 0, data1 = 0;
	int ret;

	ret = thc_interrupt_quiesce(dev, true);
	if (ret)
		return ret;

	if (thc_wait_for_dma_pause(dev, THC_RXDMA1) || thc_wait_for_dma_pause(dev, THC_RXDMA2))
		return -EIO;

	thc_reset_dma_settings(dev);

	mask = THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_WBC |
	       THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_RX_DLEN_EN;
	val = FIELD_PREP(THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_WBC, write_len) |
	      ((!prd_tbl_len) ? THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_RX_DLEN_EN : 0);
	regmap_write_bits(dev->thc_regmap, THC_M_PRT_RPRD_CNTRL_SW_OFFSET,
			  mask, val);

	if (prd_tbl_len) {
		mask = THC_M_PRT_SW_DMA_PRD_TABLE_LEN_THC_M_PRT_SW_DMA_PRD_TABLE_LEN;
		val = FIELD_PREP(THC_M_PRT_SW_DMA_PRD_TABLE_LEN_THC_M_PRT_SW_DMA_PRD_TABLE_LEN,
				 *prd_tbl_len);
		regmap_write_bits(dev->thc_regmap, THC_M_PRT_SW_DMA_PRD_TABLE_LEN_OFFSET,
				  mask, val);
	}

	if (write_len <= sizeof(u32)) {
		for (int i = 0; i < write_len; i++)
			data0 |= *(((u8 *)write_buff) + i) << (i * 8);

		regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA0_ADDR_OFFSET, data0);
	} else if (write_len <= 2 * sizeof(u32)) {
		data0 = *(u32 *)write_buff;
		regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA0_ADDR_OFFSET, data0);

		for (int i = 0; i < write_len - sizeof(u32); i++)
			data1 |= *(((u8 *)write_buff) + sizeof(u32) + i) << (i * 8);

		regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA1_OFFSET, data1);
	}

	dma_set_start_bit(dev, &dev->dma_ctx->dma_config[THC_SWDMA]);

	return 0;
}

static int thc_swdma_read_completion(struct thc_device *dev)
{
	int ret;

	ret = thc_wait_for_dma_pause(dev, THC_SWDMA);
	if (ret)
		return ret;

	thc_reset_dma_settings(dev);

	dma_set_start_bit(dev, &dev->dma_ctx->dma_config[THC_RXDMA2]);

	return thc_interrupt_quiesce(dev, false);
}

/**
 * thc_swdma_read - Use software DMA to read data from the touch device
 *
 * @dev: Pointer to the THC private device context
 * @write_buff: Pointer to the write buffer for the SWDMA sequence
 * @write_len: The write data length for the SWDMA sequence
 * @prd_tbl_len: The PRD table length of the SWDMA engine, may be NULL
 * @read_buff: Pointer to the read data buffer
 * @read_len: Pointer to the read data length
 *
 * Return: 0 on success, other error codes on failure.
 */
int thc_swdma_read(struct thc_device *dev, void *write_buff, size_t write_len,
		   u32 *prd_tbl_len, void *read_buff, size_t *read_len)
{
	int ret;

	if (!dev->dma_ctx->dma_config[THC_SWDMA].is_enabled) {
		dev_err_once(dev->dev, "The SWDMA channel is not enabled\n");
		return -EINVAL;
	}

	if (!read_buff || !read_len) {
		dev_err(dev->dev, "Invalid input parameters, read_buff %p, read_len %p\n",
			read_buff, read_len);
		return -EINVAL;
	}

	if (mutex_lock_interruptible(&dev->thc_bus_lock))
		return -EINTR;

	dev->swdma_done = false;

	ret = thc_swdma_read_start(dev, write_buff, write_len, prd_tbl_len);
	if (ret)
		goto end;

	ret = wait_event_interruptible_timeout(dev->swdma_complete_wait, dev->swdma_done, 1 * HZ);
	if (ret <= 0 || !dev->swdma_done) {
		dev_err_once(dev->dev, "Timed out waiting for SWDMA completion\n");
		ret = -ETIMEDOUT;
		goto end;
	}

	ret = thc_dma_read(dev, &dev->dma_ctx->dma_config[THC_SWDMA], read_buff, read_len, NULL);
	if (ret)
		goto end;

	ret = thc_swdma_read_completion(dev);

end:
	mutex_unlock(&dev->thc_bus_lock);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(thc_swdma_read, "INTEL_THC");
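/*
 * A hedged SWDMA usage sketch, e.g. issuing a short command and collecting
 * the reply (cmd, cmd_len, reply and reply_len are illustrative):
 *
 *	ret = thc_swdma_read(thc_dev, cmd, cmd_len, NULL, reply, &reply_len);
 *
 * Passing NULL for @prd_tbl_len selects the RX_DLEN_EN path in
 * thc_swdma_read_start() instead of programming a fixed PRD table length.
 */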

static int write_dma_buffer(struct thc_device *dev,
			    void *buffer, size_t buf_len)
{
	struct thc_dma_configuration *write_config = &dev->dma_ctx->dma_config[THC_TXDMA];
	struct thc_prd_table *prd_tbl;
	struct scatterlist *sg;
	unsigned long len_left;
	size_t ret;
	u8 nent;
	int i;

	/* There is only one PRD table for write */
	prd_tbl = &write_config->prd_tbls[0];

	if (calc_prd_entries_num(prd_tbl, buf_len, &nent) < 0) {
		dev_err(dev->dev, "Tx message length too big (%zu)\n", buf_len);
		return -EOVERFLOW;
	}

	sg = write_config->sgls[0];
	ret = sg_copy_from_buffer(sg, nent, buffer, buf_len);
	if (ret != buf_len) {
		dev_err_once(dev->dev, "Copied %zu bytes instead of requested %zu\n",
			     ret, buf_len);
		return -EIO;
	}

	len_left = buf_len;

	for_each_sg(write_config->sgls[0], sg, write_config->sgls_nent[0], i) {
		if (sg_dma_address(sg) == 0 || sg_dma_len(sg) == 0) {
			dev_err_once(dev->dev, "SGList: zero address or length\n");
			return -EINVAL;
		}

		prd_tbl->entries[i].dest_addr =
			sg_dma_address(sg) >> THC_ADDRESS_SHIFT;

		if (len_left < sg_dma_len(sg)) {
			prd_tbl->entries[i].len = len_left;
			prd_tbl->entries[i].end_of_prd = 1;
			break;
		}

		prd_tbl->entries[i].len = sg_dma_len(sg);
		prd_tbl->entries[i].end_of_prd = 0;

		len_left -= sg_dma_len(sg);
	}

	dma_set_prd_control(dev, i, 0, write_config);

	return 0;
}

static void thc_ensure_performance_limitations(struct thc_device *dev)
{
	unsigned long delay_usec = 0;

	/*
	 * Minimum amount of delay the THC / QUICKSPI driver must wait
	 * between the end of a write operation and the beginning of a read
	 * operation. This value shall be in 10us multiples.
	 */
	if (dev->perf_limit > 0) {
		delay_usec = dev->perf_limit * 10;
		udelay(delay_usec);
	}
}

static void thc_dma_write_completion(struct thc_device *dev)
{
	thc_ensure_performance_limitations(dev);
}

/**
 * thc_dma_write - Use TxDMA to write data to the touch device
 *
 * @dev: Pointer to the THC private device context
 * @buffer: Pointer to the write data buffer
 * @buf_len: The write data length
 *
 * Return: 0 on success, other error codes on failure.
 */
int thc_dma_write(struct thc_device *dev, void *buffer, size_t buf_len)
{
	bool restore_interrupts = false;
	u32 sts, ctrl;
	int ret;

	if (!dev->dma_ctx->dma_config[THC_TXDMA].is_enabled) {
		dev_err_once(dev->dev, "The TxDMA channel is not enabled\n");
		return -EINVAL;
	}

	if (!buffer || !buf_len) {
		dev_err(dev->dev, "Invalid input parameters, buffer %p, buf_len %zu\n",
			buffer, buf_len);
		return -EINVAL;
	}

	regmap_read(dev->thc_regmap, THC_M_PRT_WRITE_INT_STS_OFFSET, &sts);
	if (sts & THC_M_PRT_WRITE_INT_STS_THC_WRDMA_ACTIVE) {
		dev_err_once(dev->dev, "THC TxDMA is still active and can't start again\n");
		return -EBUSY;
	}

	if (mutex_lock_interruptible(&dev->thc_bus_lock))
		return -EINTR;

	regmap_read(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET, &ctrl);

	ret = write_dma_buffer(dev, buffer, buf_len);
	if (ret)
		goto end;

	if (dev->perf_limit && !(ctrl & THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_HW_STS)) {
		ret = thc_interrupt_quiesce(dev, true);
		if (ret)
			goto end;

		restore_interrupts = true;
	}

	dev->write_done = false;

	dma_set_start_bit(dev, &dev->dma_ctx->dma_config[THC_TXDMA]);

	ret = wait_event_interruptible_timeout(dev->write_complete_wait, dev->write_done, 1 * HZ);
	if (ret <= 0 || !dev->write_done) {
		dev_err_once(dev->dev, "Timed out waiting for TxDMA completion\n");
		ret = -ETIMEDOUT;
		goto end;
	}

	thc_dma_write_completion(dev);
	mutex_unlock(&dev->thc_bus_lock);
	return 0;

end:
	mutex_unlock(&dev->thc_bus_lock);

	if (restore_interrupts)
		ret = thc_interrupt_quiesce(dev, false);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(thc_dma_write, "INTEL_THC");
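/*
 * A hypothetical TxDMA usage sketch (report and report_len illustrative):
 *
 *	ret = thc_dma_write(thc_dev, report, report_len);
 *	if (ret)
 *		dev_err(thc_dev->dev, "THC write failed: %d\n", ret);
 *
 * The call sleeps until the write-completion interrupt fires (or a 1 s
 * timeout elapses), so it must not be used from atomic context.
 */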