// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>

#include "core.h"
#include "cipher.h"
#include "sha.h"
#include "aead.h"

#define QCE_MAJOR_VERSION5	0x05
#define QCE_QUEUE_LENGTH	1

#define QCE_DEFAULT_MEM_BANDWIDTH	393600

static const struct qce_algo_ops *qce_ops[] = {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
	&skcipher_ops,
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
	&ahash_ops,
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
	&aead_ops,
#endif
};

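/* Devres action: unregister all previously registered algorithm sets. */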
static void qce_unregister_algs(void *data)
{
	const struct qce_algo_ops *ops;
	struct qce_device *qce = data;
	int i;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ops->unregister_algs(qce);
	}
}

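/*
 * Register all enabled algorithm sets with the crypto API. On failure,
 * unwind the sets registered so far; on success, arm a devres action so
 * they are unregistered automatically when the device goes away.
 */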
static int devm_qce_register_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i, j, ret = -ENODEV;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ret = ops->register_algs(qce);
		if (ret) {
			for (j = i - 1; j >= 0; j--)
				qce_ops[j]->unregister_algs(qce);
			return ret;
		}
	}

	return devm_add_action_or_reset(qce->dev, qce_unregister_algs, qce);
}

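/* Hand the request to the qce_algo_ops entry matching its algorithm type. */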
static int qce_handle_request(struct crypto_async_request *async_req)
{
	int ret = -EINVAL, i;
	const struct qce_algo_ops *ops;
	u32 type = crypto_tfm_alg_type(async_req->tfm);

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		if (type != ops->type)
			continue;
		ret = ops->async_req_handle(async_req);
		break;
	}

	return ret;
}

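/*
 * Enqueue a new request (if any) and, unless a transfer is already in
 * flight, dequeue and start the next one. The engine handles a single
 * request at a time; qce->req tracks the request being processed.
 */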
static int qce_handle_queue(struct qce_device *qce,
			    struct crypto_async_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	int ret = 0, err;

	scoped_guard(mutex, &qce->lock) {
		if (req)
			ret = crypto_enqueue_request(&qce->queue, req);

		/* busy, do not dequeue request */
		if (qce->req)
			return ret;

		backlog = crypto_get_backlog(&qce->queue);
		async_req = crypto_dequeue_request(&qce->queue);
		if (async_req)
			qce->req = async_req;
	}

	if (!async_req)
		return ret;

	if (backlog) {
		scoped_guard(mutex, &qce->lock)
			crypto_request_complete(backlog, -EINPROGRESS);
	}

	err = qce_handle_request(async_req);
	if (err) {
		qce->result = err;
		schedule_work(&qce->done_work);
	}

	return ret;
}

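/* Completion work: report the finished request and kick off the next one. */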
static void qce_req_done_work(struct work_struct *work)
{
	struct qce_device *qce = container_of(work, struct qce_device,
					      done_work);
	struct crypto_async_request *req;

	scoped_guard(mutex, &qce->lock) {
		req = qce->req;
		qce->req = NULL;
	}

	if (req)
		crypto_request_complete(req, qce->result);

	qce_handle_queue(qce, NULL);
}

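/* Entry point used by the algorithm implementations to submit a request. */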
static int qce_async_request_enqueue(struct qce_device *qce,
				     struct crypto_async_request *req)
{
	return qce_handle_queue(qce, req);
}

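/* Called by the algorithm implementations when the hardware has finished. */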
static void qce_async_request_done(struct qce_device *qce, int ret)
{
	qce->result = ret;
	schedule_work(&qce->done_work);
}

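/*
 * Verify that the hardware revision is supported and derive the BAM pipe
 * pair id used for DMA transfers from the rx channel number.
 */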
static int qce_check_version(struct qce_device *qce)
{
	u32 major, minor, step;

	qce_get_version(qce, &major, &minor, &step);

	/*
	 * The driver does not support v5 with minor 0 because that revision
	 * has special alignment requirements.
	 */
	if (major != QCE_MAJOR_VERSION5 || minor == 0)
		return -ENODEV;

	qce->burst_size = QCE_BAM_BURST_SIZE;

	/*
	 * Rx and tx pipes are treated as a pair inside CE.
	 * Pipe pair number depends on the actual BAM dma pipe
	 * that is used for transfers. The BAM dma pipes are passed
	 * from the device tree and used to derive the pipe pair
	 * id in the CE driver as follows.
	 * 	BAM dma pipes(rx, tx)		CE pipe pair id
	 *		0,1				0
	 *		2,3				1
	 *		4,5				2
	 *		6,7				3
	 *		...
	 */
	qce->pipe_pair_id = qce->dma.rxchan->chan_id >> 1;

	dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
		major, minor, step);

	return 0;
}

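/* All resources are acquired with devres helpers, so no remove callback is needed. */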
static int qce_crypto_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qce_device *qce;
	int ret;

	qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
	if (!qce)
		return -ENOMEM;

	qce->dev = dev;
	platform_set_drvdata(pdev, qce);

	qce->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qce->base))
		return PTR_ERR(qce->base);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	qce->core = devm_clk_get_optional_enabled(qce->dev, "core");
	if (IS_ERR(qce->core))
		return PTR_ERR(qce->core);

	qce->iface = devm_clk_get_optional_enabled(qce->dev, "iface");
	if (IS_ERR(qce->iface))
		return PTR_ERR(qce->iface);

	qce->bus = devm_clk_get_optional_enabled(qce->dev, "bus");
	if (IS_ERR(qce->bus))
		return PTR_ERR(qce->bus);

	qce->mem_path = devm_of_icc_get(qce->dev, "memory");
	if (IS_ERR(qce->mem_path))
		return PTR_ERR(qce->mem_path);

	ret = icc_set_bw(qce->mem_path, QCE_DEFAULT_MEM_BANDWIDTH, QCE_DEFAULT_MEM_BANDWIDTH);
	if (ret)
		return ret;

	ret = devm_qce_dma_request(qce->dev, &qce->dma);
	if (ret)
		return ret;

	ret = qce_check_version(qce);
	if (ret)
		return ret;

	ret = devm_mutex_init(qce->dev, &qce->lock);
	if (ret)
		return ret;

	INIT_WORK(&qce->done_work, qce_req_done_work);
	crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);

	qce->async_req_enqueue = qce_async_request_enqueue;
	qce->async_req_done = qce_async_request_done;

	return devm_qce_register_algs(qce);
}

static const struct of_device_id qce_crypto_of_match[] = {
	{ .compatible = "qcom,crypto-v5.1", },
	{ .compatible = "qcom,crypto-v5.4", },
	{ .compatible = "qcom,qce", },
	{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);

static struct platform_driver qce_crypto_driver = {
	.probe = qce_crypto_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qce_crypto_of_match,
	},
};
module_platform_driver(qce_crypto_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm crypto engine driver");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");