// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/*
 * Crypto driver for NVIDIA Security Engine in Tegra Chips
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>

#include <crypto/engine.h>

#include "tegra-se.h"

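/*
 * SE command buffers are handed to host1x as reference-counted buffer
 * objects (struct host1x_bo). The get/put/release helpers below manage
 * that reference count; the backing DMA allocation is released when the
 * last reference is dropped.
 */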
static struct host1x_bo *tegra_se_cmdbuf_get(struct host1x_bo *host_bo)
{
	struct tegra_se_cmdbuf *cmdbuf = container_of(host_bo, struct tegra_se_cmdbuf, bo);

	kref_get(&cmdbuf->ref);

	return host_bo;
}

static void tegra_se_cmdbuf_release(struct kref *ref)
{
	struct tegra_se_cmdbuf *cmdbuf = container_of(ref, struct tegra_se_cmdbuf, ref);

	dma_free_attrs(cmdbuf->dev, cmdbuf->size, cmdbuf->addr,
		       cmdbuf->iova, 0);

	kfree(cmdbuf);
}

static void tegra_se_cmdbuf_put(struct host1x_bo *host_bo)
{
	struct tegra_se_cmdbuf *cmdbuf = container_of(host_bo, struct tegra_se_cmdbuf, bo);

	kref_put(&cmdbuf->ref, tegra_se_cmdbuf_release);
}

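/*
 * Pin a command buffer for use by 'dev': export its coherent DMA
 * allocation as a scatter-gather table and map it in the given
 * direction so host1x can gather commands from it.
 */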
static struct host1x_bo_mapping *
tegra_se_cmdbuf_pin(struct device *dev, struct host1x_bo *bo, enum dma_data_direction direction)
{
	struct tegra_se_cmdbuf *cmdbuf = container_of(bo, struct tegra_se_cmdbuf, bo);
	struct host1x_bo_mapping *map;
	int err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	kref_init(&map->ref);
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;

	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
	if (!map->sgt) {
		err = -ENOMEM;
		goto free;
	}

	err = dma_get_sgtable(dev, map->sgt, cmdbuf->addr,
			      cmdbuf->iova, cmdbuf->words * 4);
	if (err)
		goto free_sgt;

	err = dma_map_sgtable(dev, map->sgt, direction, 0);
	if (err)
		goto free_sgt;

	map->phys = sg_dma_address(map->sgt->sgl);
	map->size = cmdbuf->words * 4;
	map->chunks = err;

	return map;

free_sgt:
	sg_free_table(map->sgt);
	kfree(map->sgt);
free:
	kfree(map);
	return ERR_PTR(err);
}

static void tegra_se_cmdbuf_unpin(struct host1x_bo_mapping *map)
{
	if (!map)
		return;

	dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
	sg_free_table(map->sgt);
	kfree(map->sgt);
	host1x_bo_put(map->bo);

	kfree(map);
}

static void *tegra_se_cmdbuf_mmap(struct host1x_bo *host_bo)
{
	struct tegra_se_cmdbuf *cmdbuf = container_of(host_bo, struct tegra_se_cmdbuf, bo);

	return cmdbuf->addr;
}

static void tegra_se_cmdbuf_munmap(struct host1x_bo *host_bo, void *addr)
{
}

static const struct host1x_bo_ops tegra_se_cmdbuf_ops = {
	.get = tegra_se_cmdbuf_get,
	.put = tegra_se_cmdbuf_put,
	.pin = tegra_se_cmdbuf_pin,
	.unpin = tegra_se_cmdbuf_unpin,
	.mmap = tegra_se_cmdbuf_mmap,
	.munmap = tegra_se_cmdbuf_munmap,
};

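/*
 * Allocate coherent DMA memory for a command buffer from the parent
 * device and wrap it in a reference-counted host1x buffer object.
 */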
static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssize_t size)
{
	struct tegra_se_cmdbuf *cmdbuf;
	struct device *dev = se->dev->parent;

	cmdbuf = kzalloc(sizeof(*cmdbuf), GFP_KERNEL);
	if (!cmdbuf)
		return NULL;

	cmdbuf->addr = dma_alloc_attrs(dev, size, &cmdbuf->iova,
				       GFP_KERNEL, 0);
	if (!cmdbuf->addr) {
		kfree(cmdbuf);
		return NULL;
	}

	cmdbuf->size = size;
	cmdbuf->dev  = dev;

	host1x_bo_init(&cmdbuf->bo, &tegra_se_cmdbuf_ops);
	kref_init(&cmdbuf->ref);

	return cmdbuf;
}

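/*
 * Submit 'size' words from the command buffer to the SE channel as a
 * single host1x gather job and wait for its syncpoint increment, so the
 * submission is synchronous from the caller's point of view.
 */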
int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size)
{
	struct host1x_job *job;
	int ret;

	job = host1x_job_alloc(se->channel, 1, 0, true);
	if (!job) {
		dev_err(se->dev, "failed to allocate host1x job\n");
		return -ENOMEM;
	}

	job->syncpt = host1x_syncpt_get(se->syncpt);
	job->syncpt_incrs = 1;
	job->client = &se->client;
	job->class = se->client.class;
	job->serialize = true;
	job->engine_fallback_streamid = se->stream_id;
	job->engine_streamid_offset = SE_STREAM_ID;

	cmdbuf->words = size;

	host1x_job_add_gather(job, &cmdbuf->bo, size, 0);

	ret = host1x_job_pin(job, se->dev);
	if (ret) {
		dev_err(se->dev, "failed to pin host1x job\n");
		goto job_put;
	}

	ret = host1x_job_submit(job);
	if (ret) {
		dev_err(se->dev, "failed to submit host1x job\n");
		goto job_unpin;
	}

	ret = host1x_syncpt_wait(job->syncpt, job->syncpt_end,
				 MAX_SCHEDULE_TIMEOUT, NULL);
	if (ret) {
		dev_err(se->dev, "host1x job timed out\n");
		/* drop our job reference even when the wait fails */
		goto job_put;
	}

	host1x_job_put(job);
	return 0;

job_unpin:
	host1x_job_unpin(job);
job_put:
	host1x_job_put(job);

	return ret;
}

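/*
 * host1x client init: request a channel and a syncpoint, allocate the
 * command and key buffers, then register the algorithms provided by
 * this SE instance.
 */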
static int tegra_se_client_init(struct host1x_client *client)
{
	struct tegra_se *se = container_of(client, struct tegra_se, client);
	int ret;

	se->channel = host1x_channel_request(&se->client);
	if (!se->channel) {
		dev_err(se->dev, "host1x channel map failed\n");
		return -ENODEV;
	}

	se->syncpt = host1x_syncpt_request(&se->client, 0);
	if (!se->syncpt) {
		dev_err(se->dev, "host1x syncpt allocation failed\n");
		ret = -EINVAL;
		goto channel_put;
	}

	se->syncpt_id = host1x_syncpt_id(se->syncpt);

	se->cmdbuf = tegra_se_host1x_bo_alloc(se, SZ_4K);
	if (!se->cmdbuf) {
		ret = -ENOMEM;
		goto syncpt_put;
	}

	se->keybuf = tegra_se_host1x_bo_alloc(se, SZ_4K);
	if (!se->keybuf) {
		ret = -ENOMEM;
		goto cmdbuf_put;
	}

	ret = se->hw->init_alg(se);
	if (ret) {
		dev_err(se->dev, "failed to register algorithms\n");
		goto keybuf_put;
	}

	return 0;

keybuf_put:
	tegra_se_cmdbuf_put(&se->keybuf->bo);
cmdbuf_put:
	tegra_se_cmdbuf_put(&se->cmdbuf->bo);
syncpt_put:
	host1x_syncpt_put(se->syncpt);
channel_put:
	host1x_channel_put(se->channel);

	return ret;
}

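/* Undo tegra_se_client_init() when the host1x client is removed. */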
static int tegra_se_client_deinit(struct host1x_client *client)
{
	struct tegra_se *se = container_of(client, struct tegra_se, client);

	se->hw->deinit_alg(se);
	tegra_se_cmdbuf_put(&se->keybuf->bo);
	tegra_se_cmdbuf_put(&se->cmdbuf->bo);
	host1x_syncpt_put(se->syncpt);
	host1x_channel_put(se->channel);

	return 0;
}

static const struct host1x_client_ops tegra_se_client_ops = {
	.init = tegra_se_client_init,
	.exit = tegra_se_client_deinit,
};

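/* Describe this SE instance as a host1x client and register it. */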
static int tegra_se_host1x_register(struct tegra_se *se)
{
	INIT_LIST_HEAD(&se->client.list);
	se->client.dev = se->dev;
	se->client.ops = &tegra_se_client_ops;
	se->client.class = se->hw->host1x_class;
	se->client.num_syncpts = 1;

	host1x_client_register(&se->client);

	return 0;
}

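/*
 * Platform probe: map the registers, enable the clock, program the
 * IOMMU stream ID into the engine, start a crypto engine queue and
 * register as a host1x client. Algorithm registration happens later in
 * tegra_se_client_init() once the host1x client is initialized.
 */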
static int tegra_se_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tegra_se *se;
	int ret;

	se = devm_kzalloc(dev, sizeof(*se), GFP_KERNEL);
	if (!se)
		return -ENOMEM;

	se->dev = dev;
	se->owner = TEGRA_GPSE_ID;
	se->hw = device_get_match_data(&pdev->dev);

	se->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(se->base))
		return PTR_ERR(se->base);

	dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
	platform_set_drvdata(pdev, se);

	se->clk = devm_clk_get_enabled(se->dev, NULL);
	if (IS_ERR(se->clk))
		return dev_err_probe(dev, PTR_ERR(se->clk),
				"failed to enable clocks\n");

	if (!tegra_dev_iommu_get_stream_id(dev, &se->stream_id))
		return dev_err_probe(dev, -ENODEV,
				"failed to get IOMMU stream ID\n");

	writel(se->stream_id, se->base + SE_STREAM_ID);

	se->engine = crypto_engine_alloc_init(dev, 0);
	if (!se->engine)
		return dev_err_probe(dev, -ENOMEM, "failed to init crypto engine\n");

	ret = crypto_engine_start(se->engine);
	if (ret) {
		crypto_engine_exit(se->engine);
		return dev_err_probe(dev, ret, "failed to start crypto engine\n");
	}

	ret = tegra_se_host1x_register(se);
	if (ret) {
		crypto_engine_exit(se->engine);
		return dev_err_probe(dev, ret, "failed to init host1x params\n");
	}

	return 0;
}

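/* Platform remove: tear down the crypto engine queue and unregister the host1x client. */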
static void tegra_se_remove(struct platform_device *pdev)
{
	struct tegra_se *se = platform_get_drvdata(pdev);

	crypto_engine_exit(se->engine);
	host1x_client_unregister(&se->client);
}

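/* Per-instance register layout and hooks for the Tegra234 AES and HASH engines. */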
static const struct tegra_se_regs tegra234_aes1_regs = {
	.config = SE_AES1_CFG,
	.op = SE_AES1_OPERATION,
	.last_blk = SE_AES1_LAST_BLOCK,
	.linear_ctr = SE_AES1_LINEAR_CTR,
	.aad_len = SE_AES1_AAD_LEN,
	.cryp_msg_len = SE_AES1_CRYPTO_MSG_LEN,
	.manifest = SE_AES1_KEYMANIFEST,
	.key_addr = SE_AES1_KEY_ADDR,
	.key_data = SE_AES1_KEY_DATA,
	.key_dst = SE_AES1_KEY_DST,
	.result = SE_AES1_CMAC_RESULT,
};

static const struct tegra_se_regs tegra234_hash_regs = {
	.config = SE_SHA_CFG,
	.op = SE_SHA_OPERATION,
	.manifest = SE_SHA_KEYMANIFEST,
	.key_addr = SE_SHA_KEY_ADDR,
	.key_data = SE_SHA_KEY_DATA,
	.key_dst = SE_SHA_KEY_DST,
	.result = SE_SHA_HASH_RESULT,
};

static const struct tegra_se_hw tegra234_aes_hw = {
	.regs = &tegra234_aes1_regs,
	.kac_ver = 1,
	.host1x_class = 0x3b,
	.init_alg = tegra_init_aes,
	.deinit_alg = tegra_deinit_aes,
};

static const struct tegra_se_hw tegra234_hash_hw = {
	.regs = &tegra234_hash_regs,
	.kac_ver = 1,
	.host1x_class = 0x3d,
	.init_alg = tegra_init_hash,
	.deinit_alg = tegra_deinit_hash,
};

static const struct of_device_id tegra_se_of_match[] = {
	{
		.compatible = "nvidia,tegra234-se-aes",
		.data = &tegra234_aes_hw,
	}, {
		.compatible = "nvidia,tegra234-se-hash",
		.data = &tegra234_hash_hw,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_se_of_match);

static struct platform_driver tegra_se_driver = {
	.driver = {
		.name	= "tegra-se",
		.of_match_table = tegra_se_of_match,
	},
	.probe		= tegra_se_probe,
	.remove		= tegra_se_remove,
};

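/*
 * Minimal host1x bus driver: it exists so that the SE devices listed in
 * tegra_se_of_match are initialized as host1x subdevices.
 */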
static int tegra_se_host1x_probe(struct host1x_device *dev)
{
	return host1x_device_init(dev);
}

static int tegra_se_host1x_remove(struct host1x_device *dev)
{
	host1x_device_exit(dev);

	return 0;
}

static struct host1x_driver tegra_se_host1x_driver = {
	.driver = {
		.name = "tegra-se-host1x",
	},
	.probe = tegra_se_host1x_probe,
	.remove = tegra_se_host1x_remove,
	.subdevs = tegra_se_of_match,
};

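/*
 * Register the host1x bus driver before the platform driver that
 * provides its subdevices.
 */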
static int __init tegra_se_module_init(void)
{
	int ret;

	ret = host1x_driver_register(&tegra_se_host1x_driver);
	if (ret)
		return ret;

	return platform_driver_register(&tegra_se_driver);
}

static void __exit tegra_se_module_exit(void)
{
	host1x_driver_unregister(&tegra_se_host1x_driver);
	platform_driver_unregister(&tegra_se_driver);
}

module_init(tegra_se_module_init);
module_exit(tegra_se_module_exit);

MODULE_DESCRIPTION("NVIDIA Tegra Security Engine Driver");
MODULE_AUTHOR("Akhil R <[email protected]>");
MODULE_LICENSE("GPL");