/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#ifndef _CORE_H_
#define _CORE_H_

#include <linux/mutex.h>
#include <linux/workqueue.h>

#include "dma.h"

/**
 * struct qce_device - crypto engine device structure
 * @queue: crypto request queue
 * @lock: mutex protecting @queue and @req
 * @done_work: work item used to complete the current request
 * @req: current active request
 * @result: result of current transform
 * @base: virtual IO base
 * @dev: pointer to device structure
 * @core: core device clock
 * @iface: interface clock
 * @bus: bus clock
 * @mem_path: memory interconnect path
 * @dma: DMA engine data
 * @burst_size: the crypto burst size
 * @pipe_pair_id: which pipe pair id the device is using
 * @async_req_enqueue: invoked by every algorithm to enqueue a request
 * @async_req_done: invoked by every algorithm to finish its request
 */
struct qce_device {
	struct crypto_queue queue;
	struct mutex lock;
	struct work_struct done_work;
	struct crypto_async_request *req;
	int result;
	void __iomem *base;
	struct device *dev;
	struct clk *core, *iface, *bus;
	struct icc_path *mem_path;
	struct qce_dma_data dma;
	int burst_size;
	unsigned int pipe_pair_id;
	int (*async_req_enqueue)(struct qce_device *qce,
				 struct crypto_async_request *req);
	void (*async_req_done)(struct qce_device *qce, int ret);
};

/**
 * struct qce_algo_ops - algorithm operations per crypto type
 * @type: should be CRYPTO_ALG_TYPE_XXX
 * @register_algs: invoked by core to register the algorithms
 * @unregister_algs: invoked by core to unregister the algorithms
 * @async_req_handle: invoked by core to handle enqueued request
 */
struct qce_algo_ops {
	u32 type;
	int (*register_algs)(struct qce_device *qce);
	void (*unregister_algs)(struct qce_device *qce);
	int (*async_req_handle)(struct crypto_async_request *async_req);
};

#endif /* _CORE_H_ */
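
/*
 * Illustrative sketch (not part of the driver): one way an algorithm backend
 * could wire itself to the core through struct qce_algo_ops and the
 * async_req_enqueue()/async_req_done() hooks declared above. All qce_foo_*
 * names below are hypothetical placeholders, not existing driver symbols.
 *
 *	static int qce_foo_async_req_handle(struct crypto_async_request *async_req)
 *	{
 *		// called by the core once the request is dequeued;
 *		// program the engine and start DMA here
 *		return 0;
 *	}
 *
 *	static int qce_foo_register_algs(struct qce_device *qce)
 *	{
 *		// register this backend's algorithms with the crypto API
 *		return 0;
 *	}
 *
 *	static void qce_foo_unregister_algs(struct qce_device *qce)
 *	{
 *		// undo whatever register_algs registered
 *	}
 *
 *	const struct qce_algo_ops qce_foo_ops = {
 *		.type			= CRYPTO_ALG_TYPE_SKCIPHER,
 *		.register_algs		= qce_foo_register_algs,
 *		.unregister_algs	= qce_foo_unregister_algs,
 *		.async_req_handle	= qce_foo_async_req_handle,
 *	};
 *
 * A backend queues work via qce->async_req_enqueue(qce, async_req); when the
 * transform completes, the core reports the result back through
 * qce->async_req_done(qce, ret).
 */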