Lines matching "use", "dma", and "tx" in crypto/async_tx/async_raid6_recov.c (Linux kernel source)

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous RAID-6 recovery calculations ASYNC_TX API.
 */

#include <linux/dma-mapping.h>
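
All of the recovery math below is arithmetic in GF(2^8) with the RAID-6 generator polynomial 0x11d; the kernel precomputes it in the lib/raid6 lookup tables (raid6_gfmul, raid6_gfexp, raid6_gfinv, raid6_gfexi). The standalone sketches interleaved with the excerpts below stand in for those tables with a small illustrative gf_mul() helper; they are sketches of the same field arithmetic, not the kernel implementation:

#include <assert.h>
#include <stdint.h>

/* GF(2^8) multiply for the RAID-6 polynomial x^8+x^4+x^3+x^2+1 (0x1d once
 * the x^8 term is dropped), by shift-and-reduce; a demo stand-in for the
 * kernel's raid6_gfmul table, not kernel code. */
static uint8_t gf_mul(uint8_t a, uint8_t b)
{
    uint8_t p = 0;

    for (; b; b >>= 1) {
        if (b & 1)
            p ^= a;
        a = (a << 1) ^ ((a & 0x80) ? 0x1d : 0);
    }
    return p;
}

int main(void)
{
    uint8_t a = 0x53, b = 0xca, c = 0x0f;

    /* addition in the field is XOR, and multiplication distributes over it */
    assert(gf_mul(a, b ^ c) == (gf_mul(a, b) ^ gf_mul(a, c)));
    /* the generator is g = 2; overflow past x^7 reduces by 0x1d */
    assert(gf_mul(0x80, 2) == 0x1d);
    return 0;
}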
/* From async_sum_product(): dest = coef[0]*srcs[0] + coef[1]*srcs[1] over GF(256) */
    struct dma_device *dma = chan ? chan->device : NULL;
    if (dma)
        unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
    if (unmap) {
        struct device *dev = dma->dev;
        dma_addr_t pq[2];
        struct dma_async_tx_descriptor *tx;
        enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;

        if (submit->flags & ASYNC_TX_FENCE)
            dma_flags |= DMA_PREP_FENCE;
        unmap->addr[0] = dma_map_page(dev, srcs[0], src_offs[0],
                          len, DMA_TO_DEVICE);
        unmap->addr[1] = dma_map_page(dev, srcs[1], src_offs[1],
                          len, DMA_TO_DEVICE);
        unmap->to_cnt = 2;
        unmap->addr[2] = dma_map_page(dev, dest, d_off,
                          len, DMA_BIDIRECTIONAL);
        unmap->bidi_cnt = 1;
        /* engine only looks at Q, but expects it to follow P */
        pq[1] = unmap->addr[2];
        unmap->len = len;
        tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef,
                         len, dma_flags);
        if (tx) {
            dma_set_unmap(tx, unmap);
            async_tx_submit(chan, tx, submit);
            dmaengine_unmap_put(unmap);
            return tx;
        }
        /* could not get a descriptor, fall through to the sync path */
        dmaengine_unmap_put(unmap);
    }

    /* run the operation synchronously */
    async_tx_quiesce(&submit->depend_tx);
    /* ... */
    while (len--) {
        /* byte-wise GF(256) sum of products; see the sketch below */
    }
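
When no channel or descriptor is available, the tail of async_sum_product() computes the same result on the CPU: each destination byte is coef[0]*a XOR coef[1]*b in GF(256), with the products read out of the raid6_gfmul tables. A minimal standalone sketch of that loop, using the illustrative gf_mul() from the first example rather than the kernel tables:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static uint8_t gf_mul(uint8_t a, uint8_t b)
{
    uint8_t p = 0;

    for (; b; b >>= 1) {
        if (b & 1)
            p ^= a;
        a = (a << 1) ^ ((a & 0x80) ? 0x1d : 0);
    }
    return p;
}

/* synchronous equivalent of the two-source PQ descriptor with P disabled:
 * dest[i] = coef0*src0[i] ^ coef1*src1[i] in GF(256) */
static void sum_product(uint8_t *dest, const uint8_t *src0,
            const uint8_t *src1, uint8_t coef0, uint8_t coef1,
            size_t len)
{
    while (len--)
        *dest++ = gf_mul(coef0, *src0++) ^ gf_mul(coef1, *src1++);
}

int main(void)
{
    uint8_t a[4] = { 1, 2, 3, 4 }, b[4] = { 5, 6, 7, 8 }, d[4];

    sum_product(d, a, b, 1, 1, sizeof(d)); /* coefficients of 1: plain XOR */
    assert(d[0] == (1 ^ 5) && d[3] == (4 ^ 8));
    return 0;
}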
/* From async_mult(): dest = coef * src in GF(256), expressed as a
 * single-source PQ operation with the P destination disabled. */
    struct dma_device *dma = chan ? chan->device : NULL;
    if (dma)
        unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
    if (unmap) {
        dma_addr_t dma_dest[2];
        struct device *dev = dma->dev;
        struct dma_async_tx_descriptor *tx;
        enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;

        if (submit->flags & ASYNC_TX_FENCE)
            dma_flags |= DMA_PREP_FENCE;
        unmap->addr[0] = dma_map_page(dev, src, s_off,
                          len, DMA_TO_DEVICE);
        unmap->to_cnt++;
        unmap->addr[1] = dma_map_page(dev, dest, d_off,
                          len, DMA_BIDIRECTIONAL);
        dma_dest[1] = unmap->addr[1];
        unmap->bidi_cnt++;
        unmap->len = len;
        /* the engine looks for Q at dma_dest[1] and ignores
         * dma_dest[0] as a dest due to DMA_PREP_PQ_DISABLE_P */
        tx = dma->device_prep_dma_pq(chan, dma_dest, unmap->addr,
                         1, &coef, len, dma_flags);
        if (tx) {
            dma_set_unmap(tx, unmap);
            dmaengine_unmap_put(unmap);
            async_tx_submit(chan, tx, submit);
            return tx;
        }
        dmaengine_unmap_put(unmap);
    }

    /* no channel or no descriptor: perform the operation synchronously */
    async_tx_quiesce(&submit->depend_tx);
    qmul = raid6_gfmul[coef];
    /* ... */
    while (len--)
        *d++ = qmul[*s++];
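
The synchronous tail of async_mult() degenerates to one table lookup per byte, dest[i] = coef * src[i]. A hedged standalone equivalent, again with the illustrative gf_mul() standing in for the raid6_gfmul[coef] row:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static uint8_t gf_mul(uint8_t a, uint8_t b)
{
    uint8_t p = 0;

    for (; b; b >>= 1) {
        if (b & 1)
            p ^= a;
        a = (a << 1) ^ ((a & 0x80) ? 0x1d : 0);
    }
    return p;
}

int main(void)
{
    uint8_t src[4] = { 1, 2, 4, 8 }, dst[4];

    /* dst = 2 * src in GF(256): one multiply (table lookup) per byte */
    for (size_t i = 0; i < sizeof(src); i++)
        dst[i] = gf_mul(2, src[i]);
    assert(dst[0] == 2 && dst[3] == 16);
    return 0;
}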
/* From __2data_recov_4(): in a 4 disk array with both data disks lost,
 * P + Pxy == P and Q + Qxy == Q, so Dx and Dy come straight from p and q;
 * the algebra is worked through in the sketch after this excerpt. */
    struct dma_async_tx_descriptor *tx = NULL;
    /* ... */
    enum async_tx_flags flags = submit->flags;
    dma_async_tx_callback cb_fn = submit->cb_fn;
    void *cb_param = submit->cb_param;
    void *scribble = submit->scribble;

    p = blocks[disks-2];
    p_off = offs[disks-2];
    q = blocks[disks-1];
    q_off = offs[disks-1];
    /* ... */
    /* Dx = A*(P+Pxy) + B*(Q+Qxy), srcs = {p, q} */
    coef[0] = raid6_gfexi[failb-faila];
    coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
    init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
    tx = async_sum_product(b, b_off, srcs, src_offs, coef, bytes, submit);

    /* Dy = P+Pxy+Dx, srcs = {p, b} */
    init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn,
              cb_param, scribble);
    tx = async_xor_offs(a, a_off, srcs, src_offs, 2, bytes, submit);

    return tx;
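
The algebra behind __2data_recov_4(): with both data disks of a 4-disk array gone, P = Dx ^ Dy and Q = g^x*Dx ^ g^y*Dy form a solvable 2x2 system, and g^x*P ^ Q = (g^x ^ g^y)*Dy, so one multiplication by an inverse recovers Dy, after which Dx falls out of the P equation. The kernel folds this into the two precomputed sum_product coefficients; the sketch below solves the system directly, with illustrative gf_exp2()/gf_inv() helpers that are not kernel APIs:

#include <assert.h>
#include <stdint.h>

static uint8_t gf_mul(uint8_t a, uint8_t b)
{
    uint8_t p = 0;

    for (; b; b >>= 1) {
        if (b & 1)
            p ^= a;
        a = (a << 1) ^ ((a & 0x80) ? 0x1d : 0);
    }
    return p;
}

static uint8_t gf_exp2(unsigned int n)  /* g^n for the generator g = 2 */
{
    uint8_t r = 1;

    while (n--)
        r = gf_mul(r, 2);
    return r;
}

static uint8_t gf_inv(uint8_t a)        /* brute-force inverse, demo only */
{
    for (unsigned int v = 1; v < 256; v++)
        if (gf_mul(a, (uint8_t)v) == 1)
            return (uint8_t)v;
    return 0;
}

int main(void)
{
    /* 4 disks: data Dx, Dy at indices 0 and 1, plus P and Q */
    uint8_t Dx = 0x42, Dy = 0x99;
    uint8_t P = Dx ^ Dy;
    uint8_t Q = gf_mul(gf_exp2(0), Dx) ^ gf_mul(gf_exp2(1), Dy);

    /* g^x*P ^ Q = (g^x ^ g^y)*Dy, so one division recovers Dy ... */
    uint8_t y = gf_mul(gf_inv(gf_exp2(0) ^ gf_exp2(1)),
               gf_mul(gf_exp2(0), P) ^ Q);
    /* ... and Dx is then just P ^ Dy */
    assert(y == Dy && (P ^ y) == Dx);
    return 0;
}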
/* From __2data_recov_5(): a 5 disk array with two of its three data disks
 * lost; exactly one good data block g survives. */
    struct dma_async_tx_descriptor *tx = NULL;
    /* ... */
    enum async_tx_flags flags = submit->flags;
    dma_async_tx_callback cb_fn = submit->cb_fn;
    void *cb_param = submit->cb_param;
    void *scribble = submit->scribble;

    good_srcs = 0;
    good = -1;
    for (i = 0; i < disks-2; i++) {
        if (blocks[i] == NULL || i == faila || i == failb)
            continue;
        good = i;
        good_srcs++;
    }

    p = blocks[disks-2];
    p_off = offs[disks-2];
    q = blocks[disks-1];
    q_off = offs[disks-1];
    /* ... */
    /* Compute the syndrome with zero for the missing data pages.
     * Use the dead data pages as temporary storage for delta p and
     * delta q. */
    init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
    tx = async_memcpy(dp, g, dp_off, g_off, bytes, submit);
    init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
    tx = async_mult(dq, dq_off, g, g_off,
            raid6_gfexp[good], bytes, submit);

    /* compute P + Pxy, srcs = {dp, p} */
    init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
              NULL, NULL, scribble);
    tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit);

    /* compute Q + Qxy, srcs = {dq, q} */
    init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
              NULL, NULL, scribble);
    tx = async_xor_offs(dq, dq_off, srcs, src_offs, 2, bytes, submit);

    /* Dx = A*(P+Pxy) + B*(Q+Qxy), srcs = {dp, dq} */
    coef[0] = raid6_gfexi[failb-faila];
    coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
    init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
    tx = async_sum_product(dq, dq_off, srcs, src_offs, coef, bytes, submit);

    /* Dy = P+Pxy+Dx, srcs = {dp, dq} */
    init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
              cb_param, scribble);
    tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit);

    return tx;
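
__2data_recov_5() reduces the 5-disk case to the same 2x2 system: the memcpy and async_mult seed dp and dq with the lone good block's contribution to P and Q, and the two XORs against p and q cancel it, leaving deltas that depend only on the failed blocks. A byte-sized sketch of that reduction, with the same illustrative helpers as above:

#include <assert.h>
#include <stdint.h>

static uint8_t gf_mul(uint8_t a, uint8_t b)
{
    uint8_t p = 0;

    for (; b; b >>= 1) {
        if (b & 1)
            p ^= a;
        a = (a << 1) ^ ((a & 0x80) ? 0x1d : 0);
    }
    return p;
}

static uint8_t gf_exp2(unsigned int n)
{
    uint8_t r = 1;

    while (n--)
        r = gf_mul(r, 2);
    return r;
}

static uint8_t gf_inv(uint8_t a)
{
    for (unsigned int v = 1; v < 256; v++)
        if (gf_mul(a, (uint8_t)v) == 1)
            return (uint8_t)v;
    return 0;
}

int main(void)
{
    /* 5 disks: data D0..D2 plus P and Q; disks 0 and 1 fail, 2 is good */
    uint8_t D[3] = { 0x11, 0x22, 0x33 };
    uint8_t P = D[0] ^ D[1] ^ D[2];
    uint8_t Q = gf_mul(gf_exp2(0), D[0]) ^ gf_mul(gf_exp2(1), D[1]) ^
            gf_mul(gf_exp2(2), D[2]);

    /* seed the deltas with the good block (the memcpy + mult steps) ... */
    uint8_t dp = D[2];
    uint8_t dq = gf_mul(gf_exp2(2), D[2]);

    /* ... XORing with P and Q leaves only the failed disks' contribution */
    dp ^= P;    /* = D0 ^ D1 */
    dq ^= Q;    /* = g^0*D0 ^ g^1*D1 */

    /* same 2x2 solve as the 4-disk case */
    uint8_t D1r = gf_mul(gf_inv(gf_exp2(0) ^ gf_exp2(1)),
                 gf_mul(gf_exp2(0), dp) ^ dq);
    assert(D1r == D[1] && (dp ^ D1r) == D[0]);
    return 0;
}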
/* From __2data_recov_n(): the general case. Recompute the syndrome with
 * the missing data pages as zero sources, using the dead pages as
 * temporary storage for delta p and delta q. */
    struct dma_async_tx_descriptor *tx = NULL;
    /* ... */
    enum async_tx_flags flags = submit->flags;
    dma_async_tx_callback cb_fn = submit->cb_fn;
    void *cb_param = submit->cb_param;
    void *scribble = submit->scribble;

    p = blocks[disks-2];
    p_off = offs[disks-2];
    q = blocks[disks-1];
    q_off = offs[disks-1];

    dp = blocks[faila];
    dp_off = offs[faila];
    blocks[faila] = NULL;
    blocks[disks-2] = dp;
    offs[disks-2] = dp_off;
    dq = blocks[failb];
    dq_off = offs[failb];
    blocks[failb] = NULL;
    blocks[disks-1] = dq;
    offs[disks-1] = dq_off;

    init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
    tx = async_gen_syndrome(blocks, offs, disks, bytes, submit);

    /* restore the pointer table */
    blocks[faila] = dp;
    offs[faila] = dp_off;
    blocks[failb] = dq;
    offs[failb] = dq_off;
    blocks[disks-2] = p;
    offs[disks-2] = p_off;
    blocks[disks-1] = q;
    offs[disks-1] = q_off;

    /* compute P + Pxy, srcs = {dp, p} */
    init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
              NULL, NULL, scribble);
    tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit);

    /* compute Q + Qxy, srcs = {dq, q} */
    init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
              NULL, NULL, scribble);
    tx = async_xor_offs(dq, dq_off, srcs, src_offs, 2, bytes, submit);

    /* Dx = A*(P+Pxy) + B*(Q+Qxy), srcs = {dp, dq} */
    coef[0] = raid6_gfexi[failb-faila];
    coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
    init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
    tx = async_sum_product(dq, dq_off, srcs, src_offs, coef, bytes, submit);

    /* Dy = P+Pxy+Dx, srcs = {dp, dq} */
    init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
              cb_param, scribble);
    tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit);

    return tx;
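
The general trick in __2data_recov_n(): running the syndrome computation with the failed slots as zero sources, then XORing the result against the stored P and Q, cancels every surviving disk's contribution and leaves P+Pxy and Q+Qxy as functions of the two missing blocks alone. A small demonstration of that cancellation, with the same illustrative helpers:

#include <assert.h>
#include <stdint.h>

static uint8_t gf_mul(uint8_t a, uint8_t b)
{
    uint8_t p = 0;

    for (; b; b >>= 1) {
        if (b & 1)
            p ^= a;
        a = (a << 1) ^ ((a & 0x80) ? 0x1d : 0);
    }
    return p;
}

static uint8_t gf_exp2(unsigned int n)
{
    uint8_t r = 1;

    while (n--)
        r = gf_mul(r, 2);
    return r;
}

int main(void)
{
    uint8_t D[4] = { 0xde, 0xad, 0xbe, 0xef };
    int faila = 1, failb = 2;
    uint8_t P = 0, Q = 0, Pxy = 0, Qxy = 0;

    for (int i = 0; i < 4; i++) {
        P ^= D[i];
        Q ^= gf_mul(gf_exp2(i), D[i]);
    }
    /* syndrome with the failed slots treated as zero pages */
    for (int i = 0; i < 4; i++) {
        if (i == faila || i == failb)
            continue;
        Pxy ^= D[i];
        Qxy ^= gf_mul(gf_exp2(i), D[i]);
    }
    /* the deltas depend only on the failed blocks */
    assert((P ^ Pxy) == (D[faila] ^ D[failb]));
    assert((Q ^ Qxy) == (gf_mul(gf_exp2(faila), D[faila]) ^
                 gf_mul(gf_exp2(failb), D[failb])));
    return 0;
}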
/**
 * async_raid6_2data_recov - asynchronously calculate two missing data blocks
 * @disks: number of disks in the RAID-6 array
 * ...
 */
    void *scribble = submit->scribble;
    /* ... */
    /* if a dma resource is not available or a scribble buffer is not
     * available, punt to the synchronous path.  In the 'dma not
     * available' case be sure to use the scribble buffer to preserve
     * the content of 'blocks', as the synchronous path recovers in place. */
    async_tx_quiesce(&submit->depend_tx);
    /* ... */
    non_zero_srcs = 0;
    for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)
        if (blocks[i])
            non_zero_srcs++;
    /* There must be at least 2 sources - the failed devices. dma devices
     * do not uniformly understand a zero source pq operation (in contrast
     * to the synchronous case), so the 4 disk array with both data disks
     * missing is special-cased (__2data_recov_4); nor a single source pq
     * operation, so the 5 disk case is too (__2data_recov_5); all larger
     * arrays take __2data_recov_n. */
/**
 * async_raid6_datap_recov - asynchronously calculate a data and the 'p' block
 * @disks: number of disks in the RAID-6 array
 * ...
 */
    struct dma_async_tx_descriptor *tx = NULL;
    /* ... */
    enum async_tx_flags flags = submit->flags;
    dma_async_tx_callback cb_fn = submit->cb_fn;
    void *cb_param = submit->cb_param;
    void *scribble = submit->scribble;

    /* if a dma resource is not available or a scribble buffer is not
     * available, punt to the synchronous path.  In the 'dma not
     * available' case be sure to use the scribble buffer to preserve
     * the content of 'blocks', as the synchronous path recovers in place. */
    async_tx_quiesce(&submit->depend_tx);
    /* ... */
    good = -1;
    for (i = 0; i < disks-2; i++) {
        if (i == faila || blocks[i] == NULL)
            continue;
        good = i;
        good_srcs++;
    }
    /* ... */
    p = blocks[disks-2];
    p_off = offs[disks-2];
    q = blocks[disks-1];
    q_off = offs[disks-1];

    /* Compute the syndrome with zero for the missing data page.
     * Use the dead data page as temporary storage for delta q. */
    dq = blocks[faila];
    dq_off = offs[faila];
    blocks[faila] = NULL;
    blocks[disks-1] = dq;
    offs[disks-1] = dq_off;

    /* in the 4-disk case we only need to perform a single source
     * multiplication of the one surviving data block g */
    if (disks == 4) {
        init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
                  scribble);
        tx = async_memcpy(p, g, p_off, g_off, bytes, submit);
        init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
                  scribble);
        tx = async_mult(dq, dq_off, g, g_off,
                raid6_gfexp[good], bytes, submit);
    } else {
        init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
                  scribble);
        tx = async_gen_syndrome(blocks, offs, disks, bytes, submit);
    }

    /* restore the pointer table */
    blocks[faila] = dq;
    offs[faila] = dq_off;
    blocks[disks-1] = q;
    offs[disks-1] = q_off;

    /* calculate g^{-faila} */
    coef = raid6_gfinv[raid6_gfexp[faila]];

    /* Q + Qxy leaves g^faila * Dfaila in dq; srcs = {dq, q} */
    init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
              NULL, NULL, scribble);
    tx = async_xor_offs(dq, dq_off, srcs, src_offs, 2, bytes, submit);

    init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
    tx = async_mult(dq, dq_off, dq, dq_off, coef, bytes, submit);

    /* fold the recovered block back into p; srcs = {p, dq} */
    init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
              cb_param, scribble);
    tx = async_xor_offs(p, p_off, srcs, src_offs, 2, bytes, submit);

    return tx;
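
The chain in async_raid6_datap_recov(), in byte form: recompute the syndrome without the failed block to get Qxy, observe that Q ^ Qxy = g^faila * Dfaila, scale by g^{-faila} to recover the data, then XOR it into the partial parity to rebuild P. A standalone sketch under the same illustrative-helper assumption:

#include <assert.h>
#include <stdint.h>

static uint8_t gf_mul(uint8_t a, uint8_t b)
{
    uint8_t p = 0;

    for (; b; b >>= 1) {
        if (b & 1)
            p ^= a;
        a = (a << 1) ^ ((a & 0x80) ? 0x1d : 0);
    }
    return p;
}

static uint8_t gf_exp2(unsigned int n)
{
    uint8_t r = 1;

    while (n--)
        r = gf_mul(r, 2);
    return r;
}

static uint8_t gf_inv(uint8_t a)
{
    for (unsigned int v = 1; v < 256; v++)
        if (gf_mul(a, (uint8_t)v) == 1)
            return (uint8_t)v;
    return 0;
}

int main(void)
{
    uint8_t D[4] = { 0xca, 0xfe, 0xba, 0xbe };
    int faila = 2;          /* D[2] and the P block are both lost */
    uint8_t Q = 0, Px = 0, Qx = 0;

    for (int i = 0; i < 4; i++)
        Q ^= gf_mul(gf_exp2(i), D[i]);

    /* syndrome without the failed block (the async_gen_syndrome step) */
    for (int i = 0; i < 4; i++) {
        if (i == faila)
            continue;
        Px ^= D[i];
        Qx ^= gf_mul(gf_exp2(i), D[i]);
    }

    /* Q ^ Qx == g^faila * D[faila]; scale by g^{-faila} to recover it */
    uint8_t Dr = gf_mul(gf_inv(gf_exp2(faila)), Q ^ Qx);
    assert(Dr == D[faila]);
    /* P is then just the partial parity XOR the recovered block */
    assert((Px ^ Dr) == (D[0] ^ D[1] ^ D[2] ^ D[3]));
    return 0;
}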
MODULE_DESCRIPTION("asynchronous RAID-6 recovery api");