// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/cc_platform.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/tsm.h>
#include <linux/types.h>

#include <asm/rsi.h>

/**
 * struct arm_cca_token_info - a descriptor for the token buffer.
 * @challenge:		Pointer to the challenge data
 * @challenge_size:	Size of the challenge data
 * @granule:		PA of the granule to which the token will be written
 * @offset:		Offset within granule to start of buffer in bytes
 * @result:		result of rsi_attestation_token_continue operation
 */
struct arm_cca_token_info {
	void           *challenge;
	unsigned long   challenge_size;
	phys_addr_t     granule;
	unsigned long   offset;
	unsigned long   result;
};

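/**
 * arm_cca_attestation_init - Initialise attestation token generation.
 *
 * @param: pointer to the arm_cca_token_info
 *
 * Start token generation with the challenge data supplied by the caller.
 * The return value of rsi_attestation_token_init() (the maximum token size
 * on success) is stored in info->result. Must run on the same CPU on which
 * the subsequent 'continue' calls are scheduled.
 */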
static void arm_cca_attestation_init(void *param)
{
	struct arm_cca_token_info *info;

	info = (struct arm_cca_token_info *)param;

	info->result = rsi_attestation_token_init(info->challenge,
						  info->challenge_size);
}

/**
 * arm_cca_attestation_continue - Retrieve the attestation token data.
 *
 * @param: pointer to the arm_cca_token_info
 *
 * Attestation token generation is a long running operation and therefore
 * the token data may not be retrieved in a single call. Moreover, the
 * token retrieval operation must be requested on the same CPU on which the
 * attestation token generation was initialised.
 * This helper function is therefore scheduled on the same CPU multiple
 * times until the entire token data is retrieved.
 */
static void arm_cca_attestation_continue(void *param)
{
	unsigned long len;
	unsigned long size;
	struct arm_cca_token_info *info;

	info = (struct arm_cca_token_info *)param;

	size = RSI_GRANULE_SIZE - info->offset;
	info->result = rsi_attestation_token_continue(info->granule,
						      info->offset, size, &len);
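	/* Advance the write offset by the number of bytes the RMM produced */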
	info->offset += len;
}

/**
 * arm_cca_report_new - Generate a new attestation token.
 *
 * @report: pointer to the TSM report context information.
 * @data:  pointer to the context specific data for this module.
 *
 * Initialise the attestation token generation using the challenge data
 * passed in the TSM descriptor. Allocate memory for the attestation token
 * and schedule calls to retrieve the attestation token on the same CPU
 * on which the attestation token generation was initialised.
 *
 * The challenge data must be at least 32 bytes and no more than 64 bytes. If
 * less than 64 bytes are provided it will be zero padded to 64 bytes.
 *
 * Return:
 * * %0        - Attestation token generated successfully.
 * * %-EINVAL  - A parameter was not valid.
 * * %-ENOMEM  - Out of memory.
 * * %-EFAULT  - Failed to get IPA for memory page(s).
 * * A negative status code as returned by smp_call_function_single().
 */
static int arm_cca_report_new(struct tsm_report *report, void *data)
{
	int ret;
	int cpu;
	long max_size;
	unsigned long token_size = 0;
	struct arm_cca_token_info info;
	void *buf;
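	/*
	 * Scoped cleanup: the token buffer is freed automatically on any
	 * early return; no_free_ptr() below hands ownership to the report
	 * on success.
	 */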
	u8 *token __free(kvfree) = NULL;
	struct tsm_desc *desc = &report->desc;

	if (desc->inblob_len < 32 || desc->inblob_len > 64)
		return -EINVAL;

	/*
	 * The attestation token 'init' and 'continue' calls must be
	 * performed on the same CPU. smp_call_function_single() is used
	 * instead of simply calling get_cpu() because of the need to
	 * allocate outblob based on the returned value from the 'init'
	 * call and that cannot be done in an atomic context.
	 */
	cpu = smp_processor_id();

	info.challenge = desc->inblob;
	info.challenge_size = desc->inblob_len;

	ret = smp_call_function_single(cpu, arm_cca_attestation_init,
				       &info, true);
	if (ret)
		return ret;
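	/* On success, the 'init' call reports the maximum size of the token */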
	max_size = info.result;

	if (max_size <= 0)
		return -EINVAL;

	/* Allocate outblob */
	token = kvzalloc(max_size, GFP_KERNEL);
	if (!token)
		return -ENOMEM;

	/*
	 * Since the outblob may not be physically contiguous, use a page
	 * to bounce the buffer from RMM.
	 */
	buf = alloc_pages_exact(RSI_GRANULE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Get the PA of the memory page(s) that were allocated */
	info.granule = (unsigned long)virt_to_phys(buf);

	/* Loop until the token is ready or there is an error */
	do {
		/* Retrieve up to RSI_GRANULE_SIZE bytes of token data per iteration */
		info.offset = 0;
		do {
			/*
			 * Schedule a call to retrieve a sub-granule chunk
			 * of data per loop iteration.
			 */
			ret = smp_call_function_single(cpu,
						       arm_cca_attestation_continue,
						       (void *)&info, true);
			if (ret != 0) {
				token_size = 0;
				goto exit_free_granule_page;
			}
		} while (info.result == RSI_INCOMPLETE &&
			 info.offset < RSI_GRANULE_SIZE);

		if (info.result != RSI_SUCCESS) {
			ret = -ENXIO;
			token_size = 0;
			goto exit_free_granule_page;
		}

		/*
		 * Copy the retrieved token data from the granule
		 * to the token buffer, ensuring that the RMM doesn't
		 * overflow the buffer.
		 */
		if (WARN_ON(token_size + info.offset > max_size))
			break;
		memcpy(&token[token_size], buf, info.offset);
		token_size += info.offset;
	} while (info.result == RSI_INCOMPLETE);

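	/*
	 * Success: disarm the __free() cleanup and hand the token buffer
	 * over to the TSM core as the report outblob.
	 */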
	report->outblob = no_free_ptr(token);
exit_free_granule_page:
	report->outblob_len = token_size;
	free_pages_exact(buf, RSI_GRANULE_SIZE);
	return ret;
}

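/*
 * Report generation is driven by the generic TSM report interface
 * (configfs-tsm): userspace supplies the challenge as desc->inblob and
 * receives the CCA attestation token back as report->outblob.
 */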
static const struct tsm_ops arm_cca_tsm_ops = {
	.name = KBUILD_MODNAME,
	.report_new = arm_cca_report_new,
};

/**
 * arm_cca_guest_init - Register with the Trusted Security Module (TSM)
 * interface.
 *
 * Return:
 * * %0        - Registered successfully with the TSM interface.
 * * %-ENODEV  - The execution context is not an Arm Realm.
 * * %-EBUSY   - Already registered.
 */
static int __init arm_cca_guest_init(void)
{
	int ret;

	if (!is_realm_world())
		return -ENODEV;

	ret = tsm_register(&arm_cca_tsm_ops, NULL);
	if (ret < 0)
		pr_err("Error %d registering with TSM\n", ret);

	return ret;
}
module_init(arm_cca_guest_init);

/**
 * arm_cca_guest_exit - unregister with the Trusted Security Module (TSM)
 * interface.
 */
static void __exit arm_cca_guest_exit(void)
{
	tsm_unregister(&arm_cca_tsm_ops);
}
module_exit(arm_cca_guest_exit);

/* modalias, so userspace can autoload this module when RSI is available */
static const struct platform_device_id arm_cca_match[] __maybe_unused = {
	{ RSI_PDEV_NAME, 0},
	{ }
};

MODULE_DEVICE_TABLE(platform, arm_cca_match);
MODULE_AUTHOR("Sami Mujawar <[email protected]>");
MODULE_DESCRIPTION("Arm CCA Guest TSM Driver");
MODULE_LICENSE("GPL");