xref: /openwifi/driver/side_ch/side_ch.c (revision 3696c5e269693e1525db64bf1c2119578c6199b0)
1 /*
2  * openwifi side channel driver
3  * Author: Xianjun Jiao
4  * SPDX-FileCopyrightText: 2019 UGent
5  * SPDX-License-Identifier: AGPL-3.0-or-later
6 */
7 
8 #include <linux/bitops.h>
9 #include <linux/dmapool.h>
10 #include <linux/dma/xilinx_dma.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/io.h>
14 #include <linux/iopoll.h>
15 #include <linux/module.h>
16 #include <linux/of_address.h>
17 #include <linux/of_dma.h>
18 #include <linux/of_platform.h>
19 #include <linux/of_irq.h>
20 #include <linux/slab.h>
21 #include <linux/clk.h>
22 #include <linux/io-64-nonatomic-lo-hi.h>
23 #include <linux/delay.h>
24 #include <linux/dmaengine.h>
25 
26 #include <net/sock.h>
27 #include <linux/netlink.h>
28 #include <linux/skbuff.h>
29 
30 #include "side_ch.h"
31 
/* Module parameters and driver-wide shared state. */
static int num_eq_init = 8; // should be 0~8; number of equalizer outputs appended to CSI
static int iq_len_init = 0; //if iq_len>0, iq capture enabled, csi disabled

module_param(num_eq_init, int, 0);
MODULE_PARM_DESC(num_eq_init, "num_eq_init. 0~8. number of equalizer output (52 each) appended to CSI");

module_param(iq_len_init, int, 0);
MODULE_PARM_DESC(iq_len_init, "iq_len_init. if iq_len_init>0, iq capture enabled, csi disabled");

static void __iomem *base_addr; // to store driver specific base address needed for mmu to translate virtual address to physical address in our FPGA design

struct dma_chan *chan_to_pl = NULL; // PS->PL DMA channel; its request is commented out in dev_probe, so this stays NULL
struct dma_chan *chan_to_ps = NULL; // PL->PS DMA channel used by get_side_info()
u8 *side_info_buf = NULL;           // DMA destination buffer, allocated once in init_side_channel()
dma_cookie_t chan_to_ps_cookie;     // cookie of the most recently submitted chan_to_ps transfer
const int max_side_info_buf_size = MAX_NUM_DMA_SYMBOL*8; // bytes; each DMA symbol is 8 bytes
48 
49 /* IO accessors */
reg_read(u32 reg)50 static inline u32 reg_read(u32 reg)
51 {
52 	return ioread32(base_addr + reg);
53 }
54 
reg_write(u32 reg,u32 value)55 static inline void reg_write(u32 reg, u32 value)
56 {
57 	iowrite32(value, base_addr + reg);
58 }
59 
SIDE_CH_REG_MULTI_RST_write(u32 Data)60 static inline void SIDE_CH_REG_MULTI_RST_write(u32 Data) {
61 	reg_write(SIDE_CH_REG_MULTI_RST_ADDR, Data);
62 }
63 
SIDE_CH_REG_CONFIG_read(void)64 static inline u32 SIDE_CH_REG_CONFIG_read(void){
65 	return reg_read(SIDE_CH_REG_CONFIG_ADDR);
66 }
67 
SIDE_CH_REG_CONFIG_write(u32 value)68 static inline void SIDE_CH_REG_CONFIG_write(u32 value){
69 	reg_write(SIDE_CH_REG_CONFIG_ADDR, value);
70 }
71 
SIDE_CH_REG_NUM_DMA_SYMBOL_read(void)72 static inline u32 SIDE_CH_REG_NUM_DMA_SYMBOL_read(void){
73 	return reg_read(SIDE_CH_REG_NUM_DMA_SYMBOL_ADDR);
74 }
75 
SIDE_CH_REG_NUM_DMA_SYMBOL_write(u32 value)76 static inline void SIDE_CH_REG_NUM_DMA_SYMBOL_write(u32 value){
77 	reg_write(SIDE_CH_REG_NUM_DMA_SYMBOL_ADDR, value);
78 }
79 
SIDE_CH_REG_IQ_CAPTURE_read(void)80 static inline u32 SIDE_CH_REG_IQ_CAPTURE_read(void){
81 	return reg_read(SIDE_CH_REG_IQ_CAPTURE_ADDR);
82 }
83 
SIDE_CH_REG_IQ_CAPTURE_write(u32 value)84 static inline void SIDE_CH_REG_IQ_CAPTURE_write(u32 value){
85 	reg_write(SIDE_CH_REG_IQ_CAPTURE_ADDR, value);
86 }
87 
SIDE_CH_REG_NUM_EQ_read(void)88 static inline u32 SIDE_CH_REG_NUM_EQ_read(void){
89 	return reg_read(SIDE_CH_REG_NUM_EQ_ADDR);
90 }
91 
SIDE_CH_REG_NUM_EQ_write(u32 value)92 static inline void SIDE_CH_REG_NUM_EQ_write(u32 value){
93 	reg_write(SIDE_CH_REG_NUM_EQ_ADDR, value);
94 }
95 
SIDE_CH_REG_FC_TARGET_read(void)96 static inline u32 SIDE_CH_REG_FC_TARGET_read(void){
97 	return reg_read(SIDE_CH_REG_FC_TARGET_ADDR);
98 }
99 
SIDE_CH_REG_FC_TARGET_write(u32 value)100 static inline void SIDE_CH_REG_FC_TARGET_write(u32 value){
101 	reg_write(SIDE_CH_REG_FC_TARGET_ADDR, value);
102 }
103 
SIDE_CH_REG_ADDR1_TARGET_read(void)104 static inline u32 SIDE_CH_REG_ADDR1_TARGET_read(void){
105 	return reg_read(SIDE_CH_REG_ADDR1_TARGET_ADDR);
106 }
107 
SIDE_CH_REG_ADDR1_TARGET_write(u32 value)108 static inline void SIDE_CH_REG_ADDR1_TARGET_write(u32 value){
109 	reg_write(SIDE_CH_REG_ADDR1_TARGET_ADDR, value);
110 }
111 
SIDE_CH_REG_ADDR2_TARGET_read(void)112 static inline u32 SIDE_CH_REG_ADDR2_TARGET_read(void){
113 	return reg_read(SIDE_CH_REG_ADDR2_TARGET_ADDR);
114 }
115 
SIDE_CH_REG_ADDR2_TARGET_write(u32 value)116 static inline void SIDE_CH_REG_ADDR2_TARGET_write(u32 value){
117 	reg_write(SIDE_CH_REG_ADDR2_TARGET_ADDR, value);
118 }
119 
SIDE_CH_REG_IQ_TRIGGER_read(void)120 static inline u32 SIDE_CH_REG_IQ_TRIGGER_read(void){
121 	return reg_read(SIDE_CH_REG_IQ_TRIGGER_ADDR);
122 }
123 
SIDE_CH_REG_IQ_TRIGGER_write(u32 value)124 static inline void SIDE_CH_REG_IQ_TRIGGER_write(u32 value){
125 	reg_write(SIDE_CH_REG_IQ_TRIGGER_ADDR, value);
126 }
127 
SIDE_CH_REG_RSSI_TH_read(void)128 static inline u32 SIDE_CH_REG_RSSI_TH_read(void){
129 	return reg_read(SIDE_CH_REG_RSSI_TH_ADDR);
130 }
131 
SIDE_CH_REG_RSSI_TH_write(u32 value)132 static inline void SIDE_CH_REG_RSSI_TH_write(u32 value){
133 	reg_write(SIDE_CH_REG_RSSI_TH_ADDR, value);
134 }
135 
SIDE_CH_REG_GAIN_TH_read(void)136 static inline u32 SIDE_CH_REG_GAIN_TH_read(void){
137 	return reg_read(SIDE_CH_REG_GAIN_TH_ADDR);
138 }
139 
SIDE_CH_REG_GAIN_TH_write(u32 value)140 static inline void SIDE_CH_REG_GAIN_TH_write(u32 value){
141 	reg_write(SIDE_CH_REG_GAIN_TH_ADDR, value);
142 }
143 
SIDE_CH_REG_PRE_TRIGGER_LEN_read(void)144 static inline u32 SIDE_CH_REG_PRE_TRIGGER_LEN_read(void){
145 	return reg_read(SIDE_CH_REG_PRE_TRIGGER_LEN_ADDR);
146 }
147 
SIDE_CH_REG_PRE_TRIGGER_LEN_write(u32 value)148 static inline void SIDE_CH_REG_PRE_TRIGGER_LEN_write(u32 value){
149 	reg_write(SIDE_CH_REG_PRE_TRIGGER_LEN_ADDR, value);
150 }
151 
SIDE_CH_REG_IQ_LEN_read(void)152 static inline u32 SIDE_CH_REG_IQ_LEN_read(void){
153 	return reg_read(SIDE_CH_REG_IQ_LEN_ADDR);
154 }
155 
SIDE_CH_REG_IQ_LEN_write(u32 value)156 static inline void SIDE_CH_REG_IQ_LEN_write(u32 value){
157 	reg_write(SIDE_CH_REG_IQ_LEN_ADDR, value);
158 }
159 
SIDE_CH_REG_M_AXIS_DATA_COUNT_read(void)160 static inline u32 SIDE_CH_REG_M_AXIS_DATA_COUNT_read(void){
161 	return reg_read(SIDE_CH_REG_M_AXIS_DATA_COUNT_ADDR);
162 }
163 
SIDE_CH_REG_M_AXIS_DATA_COUNT_write(u32 value)164 static inline void SIDE_CH_REG_M_AXIS_DATA_COUNT_write(u32 value){
165 	reg_write(SIDE_CH_REG_M_AXIS_DATA_COUNT_ADDR, value);
166 }
167 
/* Device-tree match table: binds this driver to nodes with compatible = "sdr,side_ch". */
static const struct of_device_id dev_of_ids[] = {
	{ .compatible = "sdr,side_ch", },
	{}
};
MODULE_DEVICE_TABLE(of, dev_of_ids);
173 
/* DMA completion callback: wakes whoever waits on the passed-in completion. */
static void chan_to_ps_callback(void *completion)
{
	struct completion *cmp = completion;

	complete(cmp);
}
178 
#if 0
/*
 * Disabled DMA loopback self-test (compiled out via #if 0).
 * Pushes a pattern buffer PS->PL and reads it back PL->PS through the FPGA,
 * then verifies the round-tripped bytes. Kept for bring-up debugging.
 */
static void chan_to_pl_callback(void *completion)
{
	complete(completion);
}

static int dma_loopback_test(int num_test, int num_dma_symbol) {
	int i, err = 0;

	// -----------dma loop back test-------------------------
	enum dma_status status;
	enum dma_ctrl_flags flags;
	u8 *src_buf, *dst_buf;
	// int num_dma_symbol = 16;
	int test_buf_size = num_dma_symbol*8; // one DMA symbol is 8 bytes
	dma_addr_t src_buf_dma;
	dma_addr_t dst_buf_dma;
	struct dma_device *chan_to_pl_dev = chan_to_pl->device;
	struct dma_device *chan_to_ps_dev = chan_to_ps->device;
	struct scatterlist chan_to_pl_sg[1];
	struct scatterlist chan_to_ps_sg[1];
	dma_cookie_t chan_to_pl_cookie;
	dma_cookie_t chan_to_ps_cookie;
	struct completion chan_to_pl_cmp;
	struct completion chan_to_ps_cmp;
	struct dma_async_tx_descriptor *chan_to_pl_d = NULL;
	struct dma_async_tx_descriptor *chan_to_ps_d = NULL;
	unsigned long chan_to_ps_tmo =	msecs_to_jiffies(300000);
	unsigned long chan_to_pl_tmo =  msecs_to_jiffies(30000);
	int test_idx;

	for (test_idx=0; test_idx<num_test; test_idx++) {
		printk("%s test_idx %d\n", side_ch_compatible_str, test_idx);
		//set number of dma symbols expected to pl and ps
		SIDE_CH_REG_NUM_DMA_SYMBOL_write((num_dma_symbol<<16)|num_dma_symbol);

		src_buf = kmalloc(test_buf_size, GFP_KERNEL);
		if (!src_buf)
			goto err_src_buf;

		dst_buf = kmalloc(test_buf_size, GFP_KERNEL);
		if (!dst_buf)
			goto err_dst_buf;

		// test buf init: src gets a descending pattern, dst zeroed
		for (i=0; i<test_buf_size; i++) {
			src_buf[i] = (test_idx+test_buf_size-i-1);
			dst_buf[i] = 0;
		}

		set_user_nice(current, 10);
		flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

		src_buf_dma = dma_map_single(chan_to_pl_dev->dev, src_buf, test_buf_size, DMA_MEM_TO_DEV);
		if (dma_mapping_error(chan_to_pl_dev->dev, src_buf_dma)) {
			printk("%s dma_loopback_test WARNING chan_to_pl_dev DMA mapping error\n", side_ch_compatible_str);
			goto err_src_buf_dma_mapping;
		}

		dst_buf_dma = dma_map_single(chan_to_ps_dev->dev, dst_buf, test_buf_size, DMA_DEV_TO_MEM);
		if (dma_mapping_error(chan_to_ps_dev->dev, dst_buf_dma)) {
			printk("%s dma_loopback_test WARNING chan_to_ps_dev DMA mapping error\n", side_ch_compatible_str);
			goto err_dst_buf_dma_mapping;
		}

		sg_init_table(chan_to_ps_sg, 1);
		sg_init_table(chan_to_pl_sg, 1);

		sg_dma_address(&chan_to_ps_sg[0]) = dst_buf_dma;
		sg_dma_address(&chan_to_pl_sg[0]) = src_buf_dma;

		sg_dma_len(&chan_to_ps_sg[0]) = test_buf_size;
		sg_dma_len(&chan_to_pl_sg[0]) = test_buf_size;

		chan_to_ps_d = chan_to_ps_dev->device_prep_slave_sg(chan_to_ps, chan_to_ps_sg, 1, DMA_DEV_TO_MEM, flags, NULL);
		chan_to_pl_d = chan_to_pl_dev->device_prep_slave_sg(chan_to_pl, chan_to_pl_sg, 1, DMA_MEM_TO_DEV, flags, NULL);

		if (!chan_to_ps_d || !chan_to_pl_d) {
			printk("%s dma_loopback_test WARNING !chan_to_ps_d || !chan_to_pl_d\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		}

		init_completion(&chan_to_pl_cmp);
		chan_to_pl_d->callback = chan_to_pl_callback;
		chan_to_pl_d->callback_param = &chan_to_pl_cmp;
		chan_to_pl_cookie = chan_to_pl_d->tx_submit(chan_to_pl_d);

		init_completion(&chan_to_ps_cmp);
		chan_to_ps_d->callback = chan_to_ps_callback;
		chan_to_ps_d->callback_param = &chan_to_ps_cmp;
		chan_to_ps_cookie = chan_to_ps_d->tx_submit(chan_to_ps_d);

		if (dma_submit_error(chan_to_pl_cookie) ||	dma_submit_error(chan_to_ps_cookie)) {
			printk("%s dma_loopback_test WARNING dma_submit_error\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		}

		dma_async_issue_pending(chan_to_pl);
		dma_async_issue_pending(chan_to_ps);

		chan_to_pl_tmo = wait_for_completion_timeout(&chan_to_pl_cmp, chan_to_pl_tmo);

		status = dma_async_is_tx_complete(chan_to_pl, chan_to_pl_cookie, NULL, NULL);
		if (chan_to_pl_tmo == 0) {
			printk("%s dma_loopback_test chan_to_pl_tmo == 0\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		} else if (status != DMA_COMPLETE) {
			printk("%s dma_loopback_test chan_to_pl status != DMA_COMPLETE\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		}

		chan_to_ps_tmo = wait_for_completion_timeout(&chan_to_ps_cmp, chan_to_ps_tmo);
		status = dma_async_is_tx_complete(chan_to_ps, chan_to_ps_cookie, NULL, NULL);
		if (chan_to_ps_tmo == 0) {
			printk("%s dma_loopback_test chan_to_ps_tmo == 0\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		} else if (status != DMA_COMPLETE) {
			printk("%s dma_loopback_test chan_to_ps status != DMA_COMPLETE\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		}

		dma_unmap_single(chan_to_pl_dev->dev, src_buf_dma, test_buf_size, DMA_MEM_TO_DEV);
		dma_unmap_single(chan_to_ps_dev->dev, dst_buf_dma, test_buf_size, DMA_DEV_TO_MEM);

		// test buf verification: stop at the first mismatching byte
		for (i=0; i<test_buf_size; i++) {
			//printk("%d ", dst_buf[i]);
			if ( dst_buf[i] != ((test_idx+test_buf_size-i-1)%256) )
				break;
		}
		printk("\n");
		printk("%s dma_loopback_test buf verification end idx %d (test_buf_size %d)\n", side_ch_compatible_str, i, test_buf_size);

		kfree(src_buf);
		kfree(dst_buf);
	}

	printk("%s dma_loopback_test err %d\n", side_ch_compatible_str, err);
	return(err);

// NOTE(review): the labels below fall through into each other, so err set at
// err_dst_buf (-4) is overwritten by err_src_buf (-3), and the unmap labels
// unmap buffers that may not have been mapped. Harmless while compiled out,
// but fix before re-enabling this test.
err_dst_buf_with_unmap:
	dma_unmap_single(chan_to_ps_dev->dev, dst_buf_dma, test_buf_size, DMA_DEV_TO_MEM);

err_dst_buf_dma_mapping:
	dma_unmap_single(chan_to_pl_dev->dev, src_buf_dma, test_buf_size, DMA_MEM_TO_DEV);

err_src_buf_dma_mapping:

err_dst_buf:
	err = -4;
	kfree((void*)dst_buf);

err_src_buf:
	err = -3;
	kfree(src_buf);

	return(err);
}
#endif
338 
init_side_channel(void)339 static int init_side_channel(void) {
340 	side_info_buf = kmalloc(max_side_info_buf_size, GFP_KERNEL);
341 	if (!side_info_buf)
342 		return(-1);
343 
344 	return(0);
345 }
346 
get_side_info(int num_eq,int iq_len)347 static int get_side_info(int num_eq, int iq_len) {
348 	// int err = 0;//, i;
349 	struct scatterlist chan_to_ps_sg[1];
350 	enum dma_status status;
351 	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
352 	int num_dma_symbol, num_dma_symbol_per_trans, side_info_buf_size;
353 	dma_addr_t side_info_buf_dma;
354 	struct dma_device *chan_to_ps_dev = chan_to_ps->device;
355 	struct completion chan_to_ps_cmp;
356 	struct dma_async_tx_descriptor *chan_to_ps_d = NULL;
357 	unsigned long chan_to_ps_tmo =	msecs_to_jiffies(100);
358 
359 	if (side_info_buf==NULL) {
360 		printk("%s get_side_info WARNING side_info_buf==NULL\n", side_ch_compatible_str);
361 		return(-1);
362 	}
363 
364 	status = dma_async_is_tx_complete(chan_to_ps, chan_to_ps_cookie, NULL, NULL);
365 	if (status!=DMA_COMPLETE) {
366 		printk("%s get_side_info WARNING status!=DMA_COMPLETE\n", side_ch_compatible_str);
367 		return(-1);
368 	}
369 
370 	set_user_nice(current, 10);
371 
372 	if (iq_len>0)
373 		num_dma_symbol_per_trans = 1+iq_len;
374 	else
375 		num_dma_symbol_per_trans = HEADER_LEN + CSI_LEN + num_eq*EQUALIZER_LEN;
376 	//set number of dma symbols expected to ps
377 	num_dma_symbol = SIDE_CH_REG_M_AXIS_DATA_COUNT_read();
378 	// printk("%s get_side_info m axis data count %d per trans %d\n", side_ch_compatible_str, num_dma_symbol, num_dma_symbol_per_trans);
379 	num_dma_symbol = num_dma_symbol_per_trans*(num_dma_symbol/num_dma_symbol_per_trans);
380 	// printk("%s get_side_info actual num dma symbol %d\n", side_ch_compatible_str, num_dma_symbol);
381 	if (num_dma_symbol == 0)
382 		return(-2);
383 
384 	side_info_buf_size = num_dma_symbol*8;
385 	side_info_buf_dma = dma_map_single(chan_to_ps_dev->dev, side_info_buf, side_info_buf_size, DMA_DEV_TO_MEM);
386 	if (dma_mapping_error(chan_to_ps_dev->dev, side_info_buf_dma)) {
387 		printk("%s get_side_info WARNING chan_to_ps_dev DMA mapping error\n", side_ch_compatible_str);
388 		return(-3);
389 	}
390 
391 	sg_init_table(chan_to_ps_sg, 1);
392 	sg_dma_address(&chan_to_ps_sg[0]) = side_info_buf_dma;
393 	sg_dma_len(&chan_to_ps_sg[0]) = side_info_buf_size;
394 
395 	chan_to_ps_d = chan_to_ps_dev->device_prep_slave_sg(chan_to_ps, chan_to_ps_sg, 1, DMA_DEV_TO_MEM, flags, NULL);
396 	if (!chan_to_ps_d) {
397 		printk("%s get_side_info WARNING !chan_to_ps_d\n", side_ch_compatible_str);
398 		goto err_dst_buf_with_unmap;
399 	}
400 
401 	init_completion(&chan_to_ps_cmp);
402 	chan_to_ps_d->callback = chan_to_ps_callback;
403 	chan_to_ps_d->callback_param = &chan_to_ps_cmp;
404 
405 	chan_to_ps_cookie = chan_to_ps_d->tx_submit(chan_to_ps_d);
406 	if (dma_submit_error(chan_to_ps_cookie)) {
407 		printk("%s get_side_info WARNING dma_submit_error\n", side_ch_compatible_str);
408 		goto err_dst_buf_with_unmap;
409 	}
410 
411 	SIDE_CH_REG_NUM_DMA_SYMBOL_write(num_dma_symbol); //dma from fpga will start automatically
412 
413 	dma_async_issue_pending(chan_to_ps);
414 
415 	chan_to_ps_tmo = wait_for_completion_timeout(&chan_to_ps_cmp, chan_to_ps_tmo);
416 	status = dma_async_is_tx_complete(chan_to_ps, chan_to_ps_cookie, NULL, NULL);
417 	if (chan_to_ps_tmo == 0) {
418 		printk("%s get_side_info WARNING chan_to_ps_tmo == 0\n", side_ch_compatible_str);
419 		goto err_dst_buf_with_unmap;
420 	} else if (status != DMA_COMPLETE) {
421 		printk("%s get_side_info WARNING chan_to_ps status != DMA_COMPLETE\n", side_ch_compatible_str);
422 		goto err_dst_buf_with_unmap;
423 	}
424 
425 	dma_unmap_single(chan_to_ps_dev->dev, side_info_buf_dma, side_info_buf_size, DMA_DEV_TO_MEM);
426 	return(side_info_buf_size);
427 
428 err_dst_buf_with_unmap:
429 	dma_unmap_single(chan_to_ps_dev->dev, side_info_buf_dma, side_info_buf_size, DMA_DEV_TO_MEM);
430 	return(-100);
431 }
432 
// -----------------netlink recv and send-----------------
// should align with side_ch_ctl.c in user_space
#define ACTION_INVALID       0
#define ACTION_REG_WRITE     1
#define ACTION_REG_READ      2
#define ACTION_SIDE_INFO_GET 3

#define REG_TYPE_INVALID     0
#define REG_TYPE_HARDWARE    1
#define REG_TYPE_SOFTWARE    2

// #define NETLINK_USER 31
struct sock *nl_sk = NULL; // netlink socket, created in dev_probe, released in dev_remove
side_ch_nl_recv_msg(struct sk_buff * skb)446 static void side_ch_nl_recv_msg(struct sk_buff *skb) {
447 	struct nlmsghdr *nlh;
448 	int pid;
449 	struct sk_buff *skb_out;
450 	int msg_size;
451 	int *msg=(int*)side_info_buf;
452 	int action_flag, reg_type, reg_idx;
453 	u32 reg_val, *cmd_buf;
454 	int res;
455 
456 	// printk(KERN_INFO "Entering: %s\n", __FUNCTION__);
457 
458 	// msg_size=strlen(msg);
459 
460 	nlh=(struct nlmsghdr*)skb->data;
461 	cmd_buf = (u32*)nlmsg_data(nlh);
462 	// printk(KERN_INFO "Netlink received msg payload:%s\n",(char*)nlmsg_data(nlh));
463 	action_flag = cmd_buf[0];
464     reg_type = cmd_buf[1];
465     reg_idx = cmd_buf[2];
466     reg_val = cmd_buf[3];
467 	// printk("%s recv msg: len %d action_flag %d reg_type %d reg_idx %d reg_val %u\n", side_ch_compatible_str, nlmsg_len(nlh), action_flag, reg_type, reg_idx, reg_val);
468 
469 	pid = nlh->nlmsg_pid; /*pid of sending process */
470 
471 	if (action_flag==ACTION_SIDE_INFO_GET) {
472 		res = get_side_info(num_eq_init, iq_len_init);
473 		// printk(KERN_INFO "%s recv msg: get_side_info(%d,%d) res %d\n", side_ch_compatible_str, num_eq_init, iq_len_init, res);
474 		if (res>0) {
475 			msg_size = res;
476 			// printk("%s recv msg: %d %d %d %d %d %d %d %d\n", side_ch_compatible_str, msg[0], msg[1], msg[2], msg[3], msg[4], msg[5], msg[6], msg[7]);
477 		} else {
478 			msg_size = 4;
479 			msg[0] = -2;
480 		}
481 	} else if (action_flag==ACTION_REG_READ) {
482 		msg_size = 4;
483 		// if (reg_idx<0 || reg_idx>31) {
484 		// 	msg[0] = -3;
485 		// 	printk("%s recv msg: invalid reg_idx\n", side_ch_compatible_str);
486 		// } else {
487 			msg[0] = reg_read(reg_idx*4);
488 		// }
489 	} else if (action_flag==ACTION_REG_WRITE) {
490 		msg_size = 4;
491 		// if (reg_idx<0 || reg_idx>31) {
492 		// 	msg[0] = -4;
493 		// 	printk("%s recv msg: invalid reg_idx\n", side_ch_compatible_str);
494 		// } else {
495 			msg[0] = 0;
496 			reg_write(reg_idx*4, reg_val);
497 		// }
498 	} else {
499 		msg_size = 4;
500 		msg[0] = -1;
501 		printk("%s recv msg: invalid action_flag\n", side_ch_compatible_str);
502 	}
503 
504 	skb_out = nlmsg_new(msg_size,0);
505 	if(!skb_out)
506 	{
507 		printk(KERN_ERR "Failed to allocate new skb\n");
508 		return;
509 	}
510 	nlh=nlmsg_put(skb_out,0,0,NLMSG_DONE,msg_size,0);
511 	NETLINK_CB(skb_out).dst_group = 0; /* not in mcast group */
512 
513 	memcpy(nlmsg_data(nlh),msg,msg_size);
514 
515 	res=nlmsg_unicast(nl_sk,skb_out,pid);
516 
517 	if(res<0)
518 		printk(KERN_INFO "Error while sending bak to user\n");
519 }
520 
dev_probe(struct platform_device * pdev)521 static int dev_probe(struct platform_device *pdev) {
522 	struct netlink_kernel_cfg cfg = {
523 		.input = side_ch_nl_recv_msg,
524 	};
525 
526 	struct device_node *np = pdev->dev.of_node;
527 	struct resource *io;
528 	int err=1, i;
529 
530 	printk("\n");
531 
532 	if (np) {
533 		const struct of_device_id *match;
534 
535 		match = of_match_node(dev_of_ids, np);
536 		if (match) {
537 			printk("%s dev_probe: match!\n", side_ch_compatible_str);
538 			err = 0;
539 		}
540 	}
541 
542 	if (err)
543 		return err;
544 
545 	/* Request and map I/O memory */
546 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
547 	base_addr = devm_ioremap_resource(&pdev->dev, io);
548 	if (IS_ERR(base_addr))
549 		return PTR_ERR(base_addr);
550 
551 	printk("%s dev_probe: io start 0x%p end 0x%p name %s flags 0x%08x desc %s\n", side_ch_compatible_str, (void*)io->start, (void*)io->end, io->name, (u32)io->flags, (char*)io->desc);
552 	printk("%s dev_probe: base_addr 0x%p\n", side_ch_compatible_str, base_addr);
553 
554 	printk("%s dev_probe: succeed!\n", side_ch_compatible_str);
555 
556 	// --------------initialize netlink--------------
557 	//nl_sk = netlink_kernel_create(&init_net, NETLINK_USER, &cfg);
558 	nl_sk = netlink_kernel_create(&init_net, NETLINK_USERSOCK, &cfg);
559 	if(!nl_sk) {
560 		printk(KERN_ALERT "%s dev_probe: Error creating socket.\n", side_ch_compatible_str);
561 		return -10;
562 	}
563 
564 	//-----------------initialize fpga----------------
565 	printk("%s dev_probe: num_eq_init %d iq_len_init %d\n",side_ch_compatible_str, num_eq_init, iq_len_init);
566 
567 	// disable potential any action from side channel
568 	SIDE_CH_REG_MULTI_RST_write(4);
569 	// SIDE_CH_REG_CONFIG_write(0X6001); // match addr1 and addr2; bit12 FC; bit13 addr1; bit14 addr2
570 	SIDE_CH_REG_CONFIG_write(0x7001); // the most strict condition to prevent side channel action
571 	SIDE_CH_REG_IQ_TRIGGER_write(10); // set iq trigger to rssi, which will never happen when rssi_th is 0
572 	SIDE_CH_REG_NUM_EQ_write(num_eq_init);      // capture CSI + 8*equalizer by default
573 	if (iq_len_init>0) {//initialize the side channel into iq capture mode
574 		//Max UDP 65507 bytes; (65507/8)-1 = 8187
575 		if (iq_len_init>8187) {
576 			iq_len_init = 8187;
577 			printk("%s dev_probe: limit iq_len_init to 8187!\n",side_ch_compatible_str);
578 		}
579 		SIDE_CH_REG_IQ_CAPTURE_write(1);
580 		SIDE_CH_REG_PRE_TRIGGER_LEN_write(8190);
581 		SIDE_CH_REG_IQ_LEN_write(iq_len_init);
582 		SIDE_CH_REG_IQ_TRIGGER_write(0); // trigger is set to fcs ok/nok (both)
583 	}
584 
585 	SIDE_CH_REG_CONFIG_write(0x0001); // allow all packets by default; bit12 FC; bit13 addr1; bit14 addr2
586 
587 	//rst
588 	for (i=0;i<8;i++)
589 		SIDE_CH_REG_MULTI_RST_write(0);
590 	for (i=0;i<32;i++)
591 		SIDE_CH_REG_MULTI_RST_write(0xFFFFFFFF);
592 	for (i=0;i<8;i++)
593 		SIDE_CH_REG_MULTI_RST_write(0);
594 
595 	// chan_to_pl = dma_request_slave_channel(&(pdev->dev), "rx_dma_mm2s");
596 	// if (IS_ERR(chan_to_pl)) {
597 	// 	err = PTR_ERR(chan_to_pl);
598 	// 	pr_err("%s dev_probe: No channel to PL. %d\n",side_ch_compatible_str,err);
599 	// 	goto free_chan_to_pl;
600 	// }
601 
602 	chan_to_ps = dma_request_chan(&(pdev->dev), "tx_dma_s2mm");
603 	if (IS_ERR(chan_to_ps) || chan_to_ps==NULL) {
604 		err = PTR_ERR(chan_to_ps);
605 		if (err != -EPROBE_DEFER) {
606 			pr_err("%s dev_probe: No chan_to_ps ret %d chan_to_ps 0x%p\n",side_ch_compatible_str, err, chan_to_ps);
607 			goto free_chan_to_ps;
608 		}
609 	}
610 
611 	printk("%s dev_probe: DMA channel setup successfully. chan_to_pl 0x%p chan_to_ps 0x%p\n",side_ch_compatible_str, chan_to_pl, chan_to_ps);
612 
613 	// res = dma_loopback_test(3, 512);
614 	// printk(KERN_INFO "dma_loopback_test(3, 512) res %d\n", res);
615 
616 	err = init_side_channel();
617 	printk("%s dev_probe: init_side_channel() err %d\n",side_ch_compatible_str, err);
618 
619 	return(err);
620 
621 	// err = dma_loopback_test(7, 512);
622 	// if (err == 0)
623 	// 	return(err);
624 	// else
625 	// 	dma_release_channel(chan_to_ps);
626 
627 free_chan_to_ps:
628 	err = -2;
629 	dma_release_channel(chan_to_ps);
630 	return err;
631 
632 // free_chan_to_pl:
633 // 	err = -1;
634 // 	dma_release_channel(chan_to_pl);
635 // 	return err;
636 }
637 
dev_remove(struct platform_device * pdev)638 static int dev_remove(struct platform_device *pdev)
639 {
640 	printk("\n");
641 
642 	printk("%s dev_remove: release nl_sk\n", side_ch_compatible_str);
643 	netlink_kernel_release(nl_sk);
644 
645 	pr_info("%s dev_remove: dropped chan_to_pl 0x%p\n", side_ch_compatible_str, chan_to_pl);
646 	if (chan_to_pl != NULL) {
647 		pr_info("%s dev_remove: dropped channel %s\n", side_ch_compatible_str, dma_chan_name(chan_to_pl));
648 		// dmaengine_terminate_all(chan_to_pl); //this also terminate sdr.ko. do not use
649 		dma_release_channel(chan_to_pl);
650 	}
651 
652 	pr_info("%s dev_remove: dropped chan_to_ps 0x%p\n", side_ch_compatible_str, chan_to_ps);
653 	if (chan_to_pl != NULL) {
654 		pr_info("%s dev_remove: dropped channel %s\n", side_ch_compatible_str, dma_chan_name(chan_to_ps));
655 		// dmaengine_terminate_all(chan_to_ps); //this also terminate sdr.ko. do not use
656 		dma_release_channel(chan_to_ps);
657 	}
658 
659 	if (side_info_buf != NULL)
660 		kfree(side_info_buf);
661 
662 	printk("%s dev_remove: base_addr 0x%p\n", side_ch_compatible_str, base_addr);
663 	printk("%s dev_remove: succeed!\n", side_ch_compatible_str);
664 	return 0;
665 }
666 
/* Platform driver registration; module_platform_driver generates init/exit. */
static struct platform_driver dev_driver = {
	.driver = {
		.name = "sdr,side_ch",
		.owner = THIS_MODULE,
		.of_match_table = dev_of_ids,
	},
	.probe = dev_probe,
	.remove = dev_remove,
};

module_platform_driver(dev_driver);

MODULE_AUTHOR("Xianjun Jiao");
MODULE_DESCRIPTION("sdr,side_ch");
MODULE_LICENSE("GPL v2"); // NOTE(review): file header SPDX tag says AGPL-3.0-or-later — confirm which license tag is intended
682