xref: /openwifi/driver/side_ch/side_ch.c (revision a6085186d94dfe08b0e09c18c8d4b1b4fe38ea35)
1 /*
2  * openwifi side channel driver
3  * SPDX-FileCopyrightText: 2019 Jiao Xianjun <[email protected]>
4  * SPDX-License-Identifier: AGPL-3.0-or-later
5 */
6 
7 #include <linux/bitops.h>
8 #include <linux/dmapool.h>
9 #include <linux/dma/xilinx_dma.h>
10 #include <linux/init.h>
11 #include <linux/interrupt.h>
12 #include <linux/io.h>
13 #include <linux/iopoll.h>
14 #include <linux/module.h>
15 #include <linux/of_address.h>
16 #include <linux/of_dma.h>
17 #include <linux/of_platform.h>
18 #include <linux/of_irq.h>
19 #include <linux/slab.h>
20 #include <linux/clk.h>
21 #include <linux/io-64-nonatomic-lo-hi.h>
22 #include <linux/delay.h>
23 #include <linux/dmaengine.h>
24 
25 #include <net/sock.h>
26 #include <linux/netlink.h>
27 #include <linux/skbuff.h>
28 
29 #include "side_ch.h"
30 
// ---- module parameters (set at insmod time) ----
static int num_eq_init = 8; // should be 0~8
static int iq_len_init = 0; //if iq_len>0, iq capture enabled, csi disabled

module_param(num_eq_init, int, 0);
MODULE_PARM_DESC(num_eq_init, "num_eq_init. 0~8. number of equalizer output (52 each) appended to CSI");

module_param(iq_len_init, int, 0);
MODULE_PARM_DESC(iq_len_init, "iq_len_init. if iq_len_init>0, iq capture enabled, csi disabled");

static void __iomem *base_addr; // to store driver specific base address needed for mmu to translate virtual address to physical address in our FPGA design

// ---- DMA / buffer state shared across this file ----
struct dma_chan *chan_to_pl = NULL; // PS -> PL (MEM_TO_DEV) channel; only used by the disabled loopback test below
struct dma_chan *chan_to_ps = NULL; // PL -> PS (DEV_TO_MEM) channel; carries side info up to the host
u8 *side_info_buf = NULL;           // bounce buffer for side info DMA; allocated in init_side_channel()
dma_cookie_t chan_to_ps_cookie;     // cookie of the most recently submitted chan_to_ps descriptor
const int max_side_info_buf_size = MAX_NUM_DMA_SYMBOL*8; // one DMA symbol is 8 bytes
47 
48 /* IO accessors */
// Read a 32-bit FPGA register at byte offset reg from the mapped base address.
static inline u32 reg_read(u32 reg)
{
	return ioread32(base_addr + reg);
}
53 
// Write a 32-bit value to the FPGA register at byte offset reg from the mapped base address.
static inline void reg_write(u32 reg, u32 value)
{
	iowrite32(value, base_addr + reg);
}
58 
// Thin read/write accessors for the side channel FPGA register map.
// Register byte offsets (SIDE_CH_REG_*_ADDR) are defined in side_ch.h.
static inline void SIDE_CH_REG_MULTI_RST_write(u32 Data) {
	reg_write(SIDE_CH_REG_MULTI_RST_ADDR, Data);
}

static inline u32 SIDE_CH_REG_CONFIG_read(void){
	return reg_read(SIDE_CH_REG_CONFIG_ADDR);
}

static inline void SIDE_CH_REG_CONFIG_write(u32 value){
	reg_write(SIDE_CH_REG_CONFIG_ADDR, value);
}

static inline u32 SIDE_CH_REG_NUM_DMA_SYMBOL_read(void){
	return reg_read(SIDE_CH_REG_NUM_DMA_SYMBOL_ADDR);
}

static inline void SIDE_CH_REG_NUM_DMA_SYMBOL_write(u32 value){
	reg_write(SIDE_CH_REG_NUM_DMA_SYMBOL_ADDR, value);
}

static inline u32 SIDE_CH_REG_IQ_CAPTURE_read(void){
	return reg_read(SIDE_CH_REG_IQ_CAPTURE_ADDR);
}

static inline void SIDE_CH_REG_IQ_CAPTURE_write(u32 value){
	reg_write(SIDE_CH_REG_IQ_CAPTURE_ADDR, value);
}

static inline u32 SIDE_CH_REG_NUM_EQ_read(void){
	return reg_read(SIDE_CH_REG_NUM_EQ_ADDR);
}

static inline void SIDE_CH_REG_NUM_EQ_write(u32 value){
	reg_write(SIDE_CH_REG_NUM_EQ_ADDR, value);
}

static inline u32 SIDE_CH_REG_FC_TARGET_read(void){
	return reg_read(SIDE_CH_REG_FC_TARGET_ADDR);
}

static inline void SIDE_CH_REG_FC_TARGET_write(u32 value){
	reg_write(SIDE_CH_REG_FC_TARGET_ADDR, value);
}

static inline u32 SIDE_CH_REG_ADDR1_TARGET_read(void){
	return reg_read(SIDE_CH_REG_ADDR1_TARGET_ADDR);
}

static inline void SIDE_CH_REG_ADDR1_TARGET_write(u32 value){
	reg_write(SIDE_CH_REG_ADDR1_TARGET_ADDR, value);
}

static inline u32 SIDE_CH_REG_ADDR2_TARGET_read(void){
	return reg_read(SIDE_CH_REG_ADDR2_TARGET_ADDR);
}

static inline void SIDE_CH_REG_ADDR2_TARGET_write(u32 value){
	reg_write(SIDE_CH_REG_ADDR2_TARGET_ADDR, value);
}

static inline u32 SIDE_CH_REG_IQ_TRIGGER_read(void){
	return reg_read(SIDE_CH_REG_IQ_TRIGGER_ADDR);
}

static inline void SIDE_CH_REG_IQ_TRIGGER_write(u32 value){
	reg_write(SIDE_CH_REG_IQ_TRIGGER_ADDR, value);
}

static inline u32 SIDE_CH_REG_RSSI_TH_read(void){
	return reg_read(SIDE_CH_REG_RSSI_TH_ADDR);
}

static inline void SIDE_CH_REG_RSSI_TH_write(u32 value){
	reg_write(SIDE_CH_REG_RSSI_TH_ADDR, value);
}

static inline u32 SIDE_CH_REG_GAIN_TH_read(void){
	return reg_read(SIDE_CH_REG_GAIN_TH_ADDR);
}

static inline void SIDE_CH_REG_GAIN_TH_write(u32 value){
	reg_write(SIDE_CH_REG_GAIN_TH_ADDR, value);
}

static inline u32 SIDE_CH_REG_PRE_TRIGGER_LEN_read(void){
	return reg_read(SIDE_CH_REG_PRE_TRIGGER_LEN_ADDR);
}

static inline void SIDE_CH_REG_PRE_TRIGGER_LEN_write(u32 value){
	reg_write(SIDE_CH_REG_PRE_TRIGGER_LEN_ADDR, value);
}

static inline u32 SIDE_CH_REG_IQ_LEN_read(void){
	return reg_read(SIDE_CH_REG_IQ_LEN_ADDR);
}

static inline void SIDE_CH_REG_IQ_LEN_write(u32 value){
	reg_write(SIDE_CH_REG_IQ_LEN_ADDR, value);
}

static inline u32 SIDE_CH_REG_M_AXIS_DATA_COUNT_read(void){
	return reg_read(SIDE_CH_REG_M_AXIS_DATA_COUNT_ADDR);
}

static inline void SIDE_CH_REG_M_AXIS_DATA_COUNT_write(u32 value){
	reg_write(SIDE_CH_REG_M_AXIS_DATA_COUNT_ADDR, value);
}
166 
// Device tree match table: binds this driver to nodes with compatible = "sdr,side_ch".
static const struct of_device_id dev_of_ids[] = {
	{ .compatible = "sdr,side_ch", },
	{}
};
MODULE_DEVICE_TABLE(of, dev_of_ids);
172 
// DMA completion callback for the PL->PS channel: signals the completion
// that get_side_info() is waiting on.
static void chan_to_ps_callback(void *completion)
{
	complete(completion);
}
177 
#if 0
// Compiled-out DMA loopback self-test, kept for debugging reference only.
// NOTE(review): if re-enabled, the error labels at the bottom fall through
// into each other and unconditionally overwrite err / free both buffers
// regardless of which allocation failed — would need fixing before use.
static void chan_to_pl_callback(void *completion)
{
	complete(completion);
}

// Run num_test iterations of a PS->PL->PS DMA round trip of num_dma_symbol
// symbols (8 bytes each) and verify the received bytes match the pattern sent.
static int dma_loopback_test(int num_test, int num_dma_symbol) {
	int i, err = 0;

	// -----------dma loop back test-------------------------
	enum dma_status status;
	enum dma_ctrl_flags flags;
	u8 *src_buf, *dst_buf;
	// int num_dma_symbol = 16;
	int test_buf_size = num_dma_symbol*8;
	dma_addr_t src_buf_dma;
	dma_addr_t dst_buf_dma;
	struct dma_device *chan_to_pl_dev = chan_to_pl->device;
	struct dma_device *chan_to_ps_dev = chan_to_ps->device;
	struct scatterlist chan_to_pl_sg[1];
	struct scatterlist chan_to_ps_sg[1];
	dma_cookie_t chan_to_pl_cookie;
	dma_cookie_t chan_to_ps_cookie;
	struct completion chan_to_pl_cmp;
	struct completion chan_to_ps_cmp;
	struct dma_async_tx_descriptor *chan_to_pl_d = NULL;
	struct dma_async_tx_descriptor *chan_to_ps_d = NULL;
	unsigned long chan_to_ps_tmo =	msecs_to_jiffies(300000);
	unsigned long chan_to_pl_tmo =  msecs_to_jiffies(30000);
	int test_idx;

	for (test_idx=0; test_idx<num_test; test_idx++) {
		printk("%s test_idx %d\n", side_ch_compatible_str, test_idx);
		//set number of dma symbols expected to pl and ps
		SIDE_CH_REG_NUM_DMA_SYMBOL_write((num_dma_symbol<<16)|num_dma_symbol);

		src_buf = kmalloc(test_buf_size, GFP_KERNEL);
		if (!src_buf)
			goto err_src_buf;

		dst_buf = kmalloc(test_buf_size, GFP_KERNEL);
		if (!dst_buf)
			goto err_dst_buf;

		// test buf init: known descending byte pattern, shifted per iteration
		for (i=0; i<test_buf_size; i++) {
			src_buf[i] = (test_idx+test_buf_size-i-1);
			dst_buf[i] = 0;
		}

		set_user_nice(current, 10);
		flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

		src_buf_dma = dma_map_single(chan_to_pl_dev->dev, src_buf, test_buf_size, DMA_MEM_TO_DEV);
		if (dma_mapping_error(chan_to_pl_dev->dev, src_buf_dma)) {
			printk("%s dma_loopback_test WARNING chan_to_pl_dev DMA mapping error\n", side_ch_compatible_str);
			goto err_src_buf_dma_mapping;
		}

		dst_buf_dma = dma_map_single(chan_to_ps_dev->dev, dst_buf, test_buf_size, DMA_DEV_TO_MEM);
		if (dma_mapping_error(chan_to_ps_dev->dev, dst_buf_dma)) {
			printk("%s dma_loopback_test WARNING chan_to_ps_dev DMA mapping error\n", side_ch_compatible_str);
			goto err_dst_buf_dma_mapping;
		}

		sg_init_table(chan_to_ps_sg, 1);
		sg_init_table(chan_to_pl_sg, 1);

		sg_dma_address(&chan_to_ps_sg[0]) = dst_buf_dma;
		sg_dma_address(&chan_to_pl_sg[0]) = src_buf_dma;

		sg_dma_len(&chan_to_ps_sg[0]) = test_buf_size;
		sg_dma_len(&chan_to_pl_sg[0]) = test_buf_size;

		chan_to_ps_d = chan_to_ps_dev->device_prep_slave_sg(chan_to_ps, chan_to_ps_sg, 1, DMA_DEV_TO_MEM, flags, NULL);
		chan_to_pl_d = chan_to_pl_dev->device_prep_slave_sg(chan_to_pl, chan_to_pl_sg, 1, DMA_MEM_TO_DEV, flags, NULL);

		if (!chan_to_ps_d || !chan_to_pl_d) {
			printk("%s dma_loopback_test WARNING !chan_to_ps_d || !chan_to_pl_d\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		}

		init_completion(&chan_to_pl_cmp);
		chan_to_pl_d->callback = chan_to_pl_callback;
		chan_to_pl_d->callback_param = &chan_to_pl_cmp;
		chan_to_pl_cookie = chan_to_pl_d->tx_submit(chan_to_pl_d);

		init_completion(&chan_to_ps_cmp);
		chan_to_ps_d->callback = chan_to_ps_callback;
		chan_to_ps_d->callback_param = &chan_to_ps_cmp;
		chan_to_ps_cookie = chan_to_ps_d->tx_submit(chan_to_ps_d);

		if (dma_submit_error(chan_to_pl_cookie) ||	dma_submit_error(chan_to_ps_cookie)) {
			printk("%s dma_loopback_test WARNING dma_submit_error\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		}

		dma_async_issue_pending(chan_to_pl);
		dma_async_issue_pending(chan_to_ps);

		chan_to_pl_tmo = wait_for_completion_timeout(&chan_to_pl_cmp, chan_to_pl_tmo);

		status = dma_async_is_tx_complete(chan_to_pl, chan_to_pl_cookie, NULL, NULL);
		if (chan_to_pl_tmo == 0) {
			printk("%s dma_loopback_test chan_to_pl_tmo == 0\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		} else if (status != DMA_COMPLETE) {
			printk("%s dma_loopback_test chan_to_pl status != DMA_COMPLETE\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		}

		chan_to_ps_tmo = wait_for_completion_timeout(&chan_to_ps_cmp, chan_to_ps_tmo);
		status = dma_async_is_tx_complete(chan_to_ps, chan_to_ps_cookie, NULL, NULL);
		if (chan_to_ps_tmo == 0) {
			printk("%s dma_loopback_test chan_to_ps_tmo == 0\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		} else if (status != DMA_COMPLETE) {
			printk("%s dma_loopback_test chan_to_ps status != DMA_COMPLETE\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		}

		dma_unmap_single(chan_to_pl_dev->dev, src_buf_dma, test_buf_size, DMA_MEM_TO_DEV);
		dma_unmap_single(chan_to_ps_dev->dev, dst_buf_dma, test_buf_size, DMA_DEV_TO_MEM);

		// test buf verification: i stops at the first mismatching byte
		for (i=0; i<test_buf_size; i++) {
			//printk("%d ", dst_buf[i]);
			if ( dst_buf[i] != ((test_idx+test_buf_size-i-1)%256) )
				break;
		}
		printk("\n");
		printk("%s dma_loopback_test buf verification end idx %d (test_buf_size %d)\n", side_ch_compatible_str, i, test_buf_size);

		kfree(src_buf);
		kfree(dst_buf);
	}

	printk("%s dma_loopback_test err %d\n", side_ch_compatible_str, err);
	return(err);

err_dst_buf_with_unmap:
	dma_unmap_single(chan_to_ps_dev->dev, dst_buf_dma, test_buf_size, DMA_DEV_TO_MEM);

err_dst_buf_dma_mapping:
	dma_unmap_single(chan_to_pl_dev->dev, src_buf_dma, test_buf_size, DMA_MEM_TO_DEV);

err_src_buf_dma_mapping:

err_dst_buf:
	err = -4;
	kfree((void*)dst_buf);

err_src_buf:
	err = -3;
	kfree(src_buf);

	return(err);
}
#endif
337 
338 static int init_side_channel(void) {
339 	side_info_buf = kmalloc(max_side_info_buf_size, GFP_KERNEL);
340 	if (!side_info_buf)
341 		return(-1);
342 
343 	return(0);
344 }
345 
// Fetch pending side info (CSI/equalizer output, or raw IQ samples) from the
// FPGA into side_info_buf via one PL->PS slave DMA transfer.
//
// num_eq: number of equalizer outputs appended per record (CSI mode).
// iq_len: if >0, IQ capture mode; record length is 1+iq_len symbols.
// Returns the number of bytes received (>0), or a negative value:
//   -1 buffer missing / previous transfer still in flight,
//   -2 no complete record available in the FPGA FIFO,
//   -3 DMA mapping failed, -100 prep/submit/wait failure.
static int get_side_info(int num_eq, int iq_len) {
	// int err = 0;//, i;
	struct scatterlist chan_to_ps_sg[1];
	enum dma_status status;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	int num_dma_symbol, num_dma_symbol_per_trans, side_info_buf_size;
	dma_addr_t side_info_buf_dma;
	struct dma_device *chan_to_ps_dev = chan_to_ps->device;
	struct completion chan_to_ps_cmp;
	struct dma_async_tx_descriptor *chan_to_ps_d = NULL;
	unsigned long chan_to_ps_tmo =	msecs_to_jiffies(100);

	if (side_info_buf==NULL) {
		printk("%s get_side_info WARNING side_info_buf==NULL\n", side_ch_compatible_str);
		return(-1);
	}

	// refuse to start a new transfer while the previous one is still running
	status = dma_async_is_tx_complete(chan_to_ps, chan_to_ps_cookie, NULL, NULL);
	if (status!=DMA_COMPLETE) {
		printk("%s get_side_info WARNING status!=DMA_COMPLETE\n", side_ch_compatible_str);
		return(-1);
	}

	set_user_nice(current, 10);

	// one record = 1 header symbol + iq_len IQ symbols (IQ mode), or
	// header + CSI + num_eq equalizer outputs (CSI mode)
	if (iq_len>0)
		num_dma_symbol_per_trans = 1+iq_len;
	else
		num_dma_symbol_per_trans = HEADER_LEN + CSI_LEN + num_eq*EQUALIZER_LEN;
	//set number of dma symbols expected to ps
	num_dma_symbol = SIDE_CH_REG_M_AXIS_DATA_COUNT_read();
	printk("%s get_side_info m axis data count %d per trans %d\n", side_ch_compatible_str, num_dma_symbol, num_dma_symbol_per_trans);
	// round down to a whole number of records
	num_dma_symbol = num_dma_symbol_per_trans*(num_dma_symbol/num_dma_symbol_per_trans);
	printk("%s get_side_info actual num dma symbol %d\n", side_ch_compatible_str, num_dma_symbol);
	if (num_dma_symbol == 0)
		return(-2);

	side_info_buf_size = num_dma_symbol*8;
	side_info_buf_dma = dma_map_single(chan_to_ps_dev->dev, side_info_buf, side_info_buf_size, DMA_DEV_TO_MEM);
	if (dma_mapping_error(chan_to_ps_dev->dev, side_info_buf_dma)) {
		printk("%s get_side_info WARNING chan_to_ps_dev DMA mapping error\n", side_ch_compatible_str);
		return(-3);
	}

	sg_init_table(chan_to_ps_sg, 1);
	sg_dma_address(&chan_to_ps_sg[0]) = side_info_buf_dma;
	sg_dma_len(&chan_to_ps_sg[0]) = side_info_buf_size;

	chan_to_ps_d = chan_to_ps_dev->device_prep_slave_sg(chan_to_ps, chan_to_ps_sg, 1, DMA_DEV_TO_MEM, flags, NULL);
	if (!chan_to_ps_d) {
		printk("%s get_side_info WARNING !chan_to_ps_d\n", side_ch_compatible_str);
		goto err_dst_buf_with_unmap;
	}

	init_completion(&chan_to_ps_cmp);
	chan_to_ps_d->callback = chan_to_ps_callback;
	chan_to_ps_d->callback_param = &chan_to_ps_cmp;

	chan_to_ps_cookie = chan_to_ps_d->tx_submit(chan_to_ps_d);
	if (dma_submit_error(chan_to_ps_cookie)) {
		printk("%s get_side_info WARNING dma_submit_error\n", side_ch_compatible_str);
		goto err_dst_buf_with_unmap;
	}

	// telling the FPGA the expected count kicks off its side of the transfer,
	// so this must happen after the descriptor is submitted
	SIDE_CH_REG_NUM_DMA_SYMBOL_write(num_dma_symbol); //dma from fpga will start automatically

	dma_async_issue_pending(chan_to_ps);

	chan_to_ps_tmo = wait_for_completion_timeout(&chan_to_ps_cmp, chan_to_ps_tmo);
	status = dma_async_is_tx_complete(chan_to_ps, chan_to_ps_cookie, NULL, NULL);
	if (chan_to_ps_tmo == 0) {
		printk("%s get_side_info WARNING chan_to_ps_tmo == 0\n", side_ch_compatible_str);
		goto err_dst_buf_with_unmap;
	} else if (status != DMA_COMPLETE) {
		printk("%s get_side_info WARNING chan_to_ps status != DMA_COMPLETE\n", side_ch_compatible_str);
		goto err_dst_buf_with_unmap;
	}

	dma_unmap_single(chan_to_ps_dev->dev, side_info_buf_dma, side_info_buf_size, DMA_DEV_TO_MEM);
	return(side_info_buf_size);

err_dst_buf_with_unmap:
	dma_unmap_single(chan_to_ps_dev->dev, side_info_buf_dma, side_info_buf_size, DMA_DEV_TO_MEM);
	return(-100);
}
431 
432 // -----------------netlink recv and send-----------------
433 // should align with side_ch_ctl.c in user_space
434 #define ACTION_INVALID       0
435 #define ACTION_REG_WRITE     1
436 #define ACTION_REG_READ      2
437 #define ACTION_SIDE_INFO_GET 3
438 
439 #define REG_TYPE_INVALID     0
440 #define REG_TYPE_HARDWARE    1
441 #define REG_TYPE_SOFTWARE    2
442 
// #define NETLINK_USER 31
struct sock *nl_sk = NULL; // netlink socket for the command/response protocol with side_ch_ctl in user space
445 static void side_ch_nl_recv_msg(struct sk_buff *skb) {
446 	struct nlmsghdr *nlh;
447 	int pid;
448 	struct sk_buff *skb_out;
449 	int msg_size;
450 	int *msg=(int*)side_info_buf;
451 	int action_flag, reg_type, reg_idx;
452 	u32 reg_val, *cmd_buf;
453 	int res;
454 
455 	// printk(KERN_INFO "Entering: %s\n", __FUNCTION__);
456 
457 	// msg_size=strlen(msg);
458 
459 	nlh=(struct nlmsghdr*)skb->data;
460 	cmd_buf = (u32*)nlmsg_data(nlh);
461 	// printk(KERN_INFO "Netlink received msg payload:%s\n",(char*)nlmsg_data(nlh));
462 	action_flag = cmd_buf[0];
463     reg_type = cmd_buf[1];
464     reg_idx = cmd_buf[2];
465     reg_val = cmd_buf[3];
466 	printk("%s recv msg: len %d action_flag %d reg_type %d reg_idx %d reg_val %u\n", side_ch_compatible_str, nlmsg_len(nlh), action_flag, reg_type, reg_idx, reg_val);
467 
468 	pid = nlh->nlmsg_pid; /*pid of sending process */
469 
470 	if (action_flag==ACTION_SIDE_INFO_GET) {
471 		res = get_side_info(num_eq_init, iq_len_init);
472 		printk(KERN_INFO "%s recv msg: get_side_info(%d,%d) res %d\n", side_ch_compatible_str, num_eq_init, iq_len_init, res);
473 		if (res>0) {
474 			msg_size = res;
475 			// printk("%s recv msg: %d %d %d %d %d %d %d %d\n", side_ch_compatible_str, msg[0], msg[1], msg[2], msg[3], msg[4], msg[5], msg[6], msg[7]);
476 		} else {
477 			msg_size = 4;
478 			msg[0] = -2;
479 		}
480 	} else if (action_flag==ACTION_REG_READ) {
481 		msg_size = 4;
482 		// if (reg_idx<0 || reg_idx>31) {
483 		// 	msg[0] = -3;
484 		// 	printk("%s recv msg: invalid reg_idx\n", side_ch_compatible_str);
485 		// } else {
486 			msg[0] = reg_read(reg_idx*4);
487 		// }
488 	} else if (action_flag==ACTION_REG_WRITE) {
489 		msg_size = 4;
490 		// if (reg_idx<0 || reg_idx>31) {
491 		// 	msg[0] = -4;
492 		// 	printk("%s recv msg: invalid reg_idx\n", side_ch_compatible_str);
493 		// } else {
494 			msg[0] = 0;
495 			reg_write(reg_idx*4, reg_val);
496 		// }
497 	} else {
498 		msg_size = 4;
499 		msg[0] = -1;
500 		printk("%s recv msg: invalid action_flag\n", side_ch_compatible_str);
501 	}
502 
503 	skb_out = nlmsg_new(msg_size,0);
504 	if(!skb_out)
505 	{
506 		printk(KERN_ERR "Failed to allocate new skb\n");
507 		return;
508 	}
509 	nlh=nlmsg_put(skb_out,0,0,NLMSG_DONE,msg_size,0);
510 	NETLINK_CB(skb_out).dst_group = 0; /* not in mcast group */
511 
512 	memcpy(nlmsg_data(nlh),msg,msg_size);
513 
514 	res=nlmsg_unicast(nl_sk,skb_out,pid);
515 
516 	if(res<0)
517 		printk(KERN_INFO "Error while sending bak to user\n");
518 }
519 
520 static int dev_probe(struct platform_device *pdev) {
521 	struct netlink_kernel_cfg cfg = {
522 		.input = side_ch_nl_recv_msg,
523 	};
524 
525 	struct device_node *np = pdev->dev.of_node;
526 	struct resource *io;
527 	int err=1, i;
528 
529 	printk("\n");
530 
531 	if (np) {
532 		const struct of_device_id *match;
533 
534 		match = of_match_node(dev_of_ids, np);
535 		if (match) {
536 			printk("%s dev_probe: match!\n", side_ch_compatible_str);
537 			err = 0;
538 		}
539 	}
540 
541 	if (err)
542 		return err;
543 
544 	/* Request and map I/O memory */
545 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
546 	base_addr = devm_ioremap_resource(&pdev->dev, io);
547 	if (IS_ERR(base_addr))
548 		return PTR_ERR(base_addr);
549 
550 	printk("%s dev_probe: io start 0x%p end 0x%p name %s flags 0x%08x desc %s\n", side_ch_compatible_str, (void*)io->start, (void*)io->end, io->name, (u32)io->flags, (char*)io->desc);
551 	printk("%s dev_probe: base_addr 0x%p\n", side_ch_compatible_str, base_addr);
552 
553 	printk("%s dev_probe: succeed!\n", side_ch_compatible_str);
554 
555 	// --------------initialize netlink--------------
556 	//nl_sk = netlink_kernel_create(&init_net, NETLINK_USER, &cfg);
557 	nl_sk = netlink_kernel_create(&init_net, NETLINK_USERSOCK, &cfg);
558 	if(!nl_sk) {
559 		printk(KERN_ALERT "%s dev_probe: Error creating socket.\n", side_ch_compatible_str);
560 		return -10;
561 	}
562 
563 	//-----------------initialize fpga----------------
564 	printk("%s dev_probe: num_eq_init %d iq_len_init %d\n",side_ch_compatible_str, num_eq_init, iq_len_init);
565 
566 	// disable potential any action from side channel
567 	SIDE_CH_REG_MULTI_RST_write(4);
568 	// SIDE_CH_REG_CONFIG_write(0X6001); // match addr1 and addr2; bit12 FC; bit13 addr1; bit14 addr2
569 	SIDE_CH_REG_CONFIG_write(0x7001); // the most strict condition to prevent side channel action
570 	SIDE_CH_REG_IQ_TRIGGER_write(10); // set iq trigger to rssi, which will never happen when rssi_th is 0
571 	SIDE_CH_REG_NUM_EQ_write(num_eq_init);      // capture CSI + 8*equalizer by default
572 	if (iq_len_init>0) {//initialize the side channel into iq capture mode
573 		//Max UDP 65507 bytes; (65507/8)-1 = 8187
574 		if (iq_len_init>8187) {
575 			iq_len_init = 8187;
576 			printk("%s dev_probe: limit iq_len_init to 8187!\n",side_ch_compatible_str);
577 		}
578 		SIDE_CH_REG_IQ_CAPTURE_write(1);
579 		SIDE_CH_REG_PRE_TRIGGER_LEN_write(8190);
580 		SIDE_CH_REG_IQ_LEN_write(iq_len_init);
581 		SIDE_CH_REG_IQ_TRIGGER_write(0); // trigger is set to fcs ok/nok (both)
582 	}
583 
584 	SIDE_CH_REG_CONFIG_write(0x0001); // allow all packets by default; bit12 FC; bit13 addr1; bit14 addr2
585 
586 	//rst
587 	for (i=0;i<8;i++)
588 		SIDE_CH_REG_MULTI_RST_write(0);
589 	for (i=0;i<32;i++)
590 		SIDE_CH_REG_MULTI_RST_write(0xFFFFFFFF);
591 	for (i=0;i<8;i++)
592 		SIDE_CH_REG_MULTI_RST_write(0);
593 
594 	// chan_to_pl = dma_request_slave_channel(&(pdev->dev), "rx_dma_mm2s");
595 	// if (IS_ERR(chan_to_pl)) {
596 	// 	err = PTR_ERR(chan_to_pl);
597 	// 	pr_err("%s dev_probe: No channel to PL. %d\n",side_ch_compatible_str,err);
598 	// 	goto free_chan_to_pl;
599 	// }
600 
601 	chan_to_ps = dma_request_slave_channel(&(pdev->dev), "tx_dma_s2mm");
602 	if (IS_ERR(chan_to_ps)) {
603 		err = PTR_ERR(chan_to_ps);
604 		pr_err("%s dev_probe: No channel to PS. %d\n",side_ch_compatible_str,err);
605 		goto free_chan_to_ps;
606 	}
607 
608 	printk("%s dev_probe: DMA channel setup successfully. chan_to_pl 0x%p chan_to_ps 0x%p\n",side_ch_compatible_str, chan_to_pl, chan_to_ps);
609 
610 	// res = dma_loopback_test(3, 512);
611 	// printk(KERN_INFO "dma_loopback_test(3, 512) res %d\n", res);
612 
613 	err = init_side_channel();
614 	printk("%s dev_probe: init_side_channel() err %d\n",side_ch_compatible_str, err);
615 
616 	return(err);
617 
618 	// err = dma_loopback_test(7, 512);
619 	// if (err == 0)
620 	// 	return(err);
621 	// else
622 	// 	dma_release_channel(chan_to_ps);
623 
624 free_chan_to_ps:
625 	err = -2;
626 	dma_release_channel(chan_to_ps);
627 	return err;
628 
629 // free_chan_to_pl:
630 // 	err = -1;
631 // 	dma_release_channel(chan_to_pl);
632 // 	return err;
633 }
634 
635 static int dev_remove(struct platform_device *pdev)
636 {
637 	printk("\n");
638 
639 	printk("%s dev_remove: release nl_sk\n", side_ch_compatible_str);
640 	netlink_kernel_release(nl_sk);
641 
642 	pr_info("%s dev_remove: dropped chan_to_pl 0x%p\n", side_ch_compatible_str, chan_to_pl);
643 	if (chan_to_pl != NULL) {
644 		pr_info("%s dev_remove: dropped channel %s\n", side_ch_compatible_str, dma_chan_name(chan_to_pl));
645 		// dmaengine_terminate_all(chan_to_pl); //this also terminate sdr.ko. do not use
646 		dma_release_channel(chan_to_pl);
647 	}
648 
649 	pr_info("%s dev_remove: dropped chan_to_ps 0x%p\n", side_ch_compatible_str, chan_to_ps);
650 	if (chan_to_pl != NULL) {
651 		pr_info("%s dev_remove: dropped channel %s\n", side_ch_compatible_str, dma_chan_name(chan_to_ps));
652 		// dmaengine_terminate_all(chan_to_ps); //this also terminate sdr.ko. do not use
653 		dma_release_channel(chan_to_ps);
654 	}
655 
656 	if (side_info_buf != NULL)
657 		kfree(side_info_buf);
658 
659 	printk("%s dev_remove: base_addr 0x%p\n", side_ch_compatible_str, base_addr);
660 	printk("%s dev_remove: succeed!\n", side_ch_compatible_str);
661 	return 0;
662 }
663 
// Platform driver registration; module init/exit are generated by
// module_platform_driver().
static struct platform_driver dev_driver = {
	.driver = {
		.name = "sdr,side_ch",
		.owner = THIS_MODULE,
		.of_match_table = dev_of_ids,
	},
	.probe = dev_probe,
	.remove = dev_remove,
};

module_platform_driver(dev_driver);

MODULE_AUTHOR("Xianjun Jiao");
MODULE_DESCRIPTION("sdr,side_ch");
MODULE_LICENSE("GPL v2");
679