xref: /openwifi/driver/side_ch/side_ch.c (revision 855b59fd6a93401097234a1948a6fe61b591cced)
1 /*
2  * openwifi side channel driver
3  * Xianjun jiao. [email protected]; [email protected]
4  */
5 
6 #include <linux/bitops.h>
7 #include <linux/dmapool.h>
8 #include <linux/dma/xilinx_dma.h>
9 #include <linux/init.h>
10 #include <linux/interrupt.h>
11 #include <linux/io.h>
12 #include <linux/iopoll.h>
13 #include <linux/module.h>
14 #include <linux/of_address.h>
15 #include <linux/of_dma.h>
16 #include <linux/of_platform.h>
17 #include <linux/of_irq.h>
18 #include <linux/slab.h>
19 #include <linux/clk.h>
20 #include <linux/io-64-nonatomic-lo-hi.h>
21 #include <linux/delay.h>
22 #include <linux/dmaengine.h>
23 
24 #include <net/sock.h>
25 #include <linux/netlink.h>
26 #include <linux/skbuff.h>
27 
28 #include "side_ch.h"
29 
static int num_eq_init = 8; // should be 0~8
static int iq_len_init = 0; //if iq_len>0, iq capture enabled, csi disabled

module_param(num_eq_init, int, 0);
MODULE_PARM_DESC(num_eq_init, "num_eq_init. 0~8. number of equalizer output (52 each) appended to CSI");

module_param(iq_len_init, int, 0);
MODULE_PARM_DESC(iq_len_init, "iq_len_init. if iq_len_init>0, iq capture enabled, csi disabled");

static void __iomem *base_addr; // to store driver specific base address needed for mmu to translate virtual address to physical address in our FPGA design

// DMA channels towards the PL (FPGA) and PS (ARM); requested in dev_probe, released in dev_remove.
// NOTE(review): chan_to_pl is never requested in the current code (see dev_probe), it stays NULL.
struct dma_chan *chan_to_pl = NULL;
struct dma_chan *chan_to_ps = NULL;
u8 *side_info_buf = NULL;       // kernel buffer that receives side info via DMA; allocated in init_side_channel, freed in dev_remove
dma_cookie_t chan_to_ps_cookie; // cookie of the most recent PS-bound transfer; checked before issuing a new one
const int max_side_info_buf_size = MAX_NUM_DMA_SYMBOL*8; // each DMA symbol is 8 bytes (64 bit)
46 
47 /* IO accessors */
48 static inline u32 reg_read(u32 reg)
49 {
50 	return ioread32(base_addr + reg);
51 }
52 
53 static inline void reg_write(u32 reg, u32 value)
54 {
55 	iowrite32(value, base_addr + reg);
56 }
57 
58 static inline void SIDE_CH_REG_MULTI_RST_write(u32 Data) {
59 	reg_write(SIDE_CH_REG_MULTI_RST_ADDR, Data);
60 }
61 
62 static inline u32 SIDE_CH_REG_CONFIG_read(void){
63 	return reg_read(SIDE_CH_REG_CONFIG_ADDR);
64 }
65 
66 static inline void SIDE_CH_REG_CONFIG_write(u32 value){
67 	reg_write(SIDE_CH_REG_CONFIG_ADDR, value);
68 }
69 
70 static inline u32 SIDE_CH_REG_NUM_DMA_SYMBOL_read(void){
71 	return reg_read(SIDE_CH_REG_NUM_DMA_SYMBOL_ADDR);
72 }
73 
74 static inline void SIDE_CH_REG_NUM_DMA_SYMBOL_write(u32 value){
75 	reg_write(SIDE_CH_REG_NUM_DMA_SYMBOL_ADDR, value);
76 }
77 
78 static inline u32 SIDE_CH_REG_IQ_CAPTURE_read(void){
79 	return reg_read(SIDE_CH_REG_IQ_CAPTURE_ADDR);
80 }
81 
82 static inline void SIDE_CH_REG_IQ_CAPTURE_write(u32 value){
83 	reg_write(SIDE_CH_REG_IQ_CAPTURE_ADDR, value);
84 }
85 
86 static inline u32 SIDE_CH_REG_NUM_EQ_read(void){
87 	return reg_read(SIDE_CH_REG_NUM_EQ_ADDR);
88 }
89 
90 static inline void SIDE_CH_REG_NUM_EQ_write(u32 value){
91 	reg_write(SIDE_CH_REG_NUM_EQ_ADDR, value);
92 }
93 
94 static inline u32 SIDE_CH_REG_FC_TARGET_read(void){
95 	return reg_read(SIDE_CH_REG_FC_TARGET_ADDR);
96 }
97 
98 static inline void SIDE_CH_REG_FC_TARGET_write(u32 value){
99 	reg_write(SIDE_CH_REG_FC_TARGET_ADDR, value);
100 }
101 
102 static inline u32 SIDE_CH_REG_ADDR1_TARGET_read(void){
103 	return reg_read(SIDE_CH_REG_ADDR1_TARGET_ADDR);
104 }
105 
106 static inline void SIDE_CH_REG_ADDR1_TARGET_write(u32 value){
107 	reg_write(SIDE_CH_REG_ADDR1_TARGET_ADDR, value);
108 }
109 
110 static inline u32 SIDE_CH_REG_ADDR2_TARGET_read(void){
111 	return reg_read(SIDE_CH_REG_ADDR2_TARGET_ADDR);
112 }
113 
114 static inline void SIDE_CH_REG_ADDR2_TARGET_write(u32 value){
115 	reg_write(SIDE_CH_REG_ADDR2_TARGET_ADDR, value);
116 }
117 
118 static inline u32 SIDE_CH_REG_IQ_TRIGGER_read(void){
119 	return reg_read(SIDE_CH_REG_IQ_TRIGGER_ADDR);
120 }
121 
122 static inline void SIDE_CH_REG_IQ_TRIGGER_write(u32 value){
123 	reg_write(SIDE_CH_REG_IQ_TRIGGER_ADDR, value);
124 }
125 
126 static inline u32 SIDE_CH_REG_RSSI_TH_read(void){
127 	return reg_read(SIDE_CH_REG_RSSI_TH_ADDR);
128 }
129 
130 static inline void SIDE_CH_REG_RSSI_TH_write(u32 value){
131 	reg_write(SIDE_CH_REG_RSSI_TH_ADDR, value);
132 }
133 
134 static inline u32 SIDE_CH_REG_GAIN_TH_read(void){
135 	return reg_read(SIDE_CH_REG_GAIN_TH_ADDR);
136 }
137 
138 static inline void SIDE_CH_REG_GAIN_TH_write(u32 value){
139 	reg_write(SIDE_CH_REG_GAIN_TH_ADDR, value);
140 }
141 
142 static inline u32 SIDE_CH_REG_PRE_TRIGGER_LEN_read(void){
143 	return reg_read(SIDE_CH_REG_PRE_TRIGGER_LEN_ADDR);
144 }
145 
146 static inline void SIDE_CH_REG_PRE_TRIGGER_LEN_write(u32 value){
147 	reg_write(SIDE_CH_REG_PRE_TRIGGER_LEN_ADDR, value);
148 }
149 
150 static inline u32 SIDE_CH_REG_IQ_LEN_read(void){
151 	return reg_read(SIDE_CH_REG_IQ_LEN_ADDR);
152 }
153 
154 static inline void SIDE_CH_REG_IQ_LEN_write(u32 value){
155 	reg_write(SIDE_CH_REG_IQ_LEN_ADDR, value);
156 }
157 
158 static inline u32 SIDE_CH_REG_M_AXIS_DATA_COUNT_read(void){
159 	return reg_read(SIDE_CH_REG_M_AXIS_DATA_COUNT_ADDR);
160 }
161 
162 static inline void SIDE_CH_REG_M_AXIS_DATA_COUNT_write(u32 value){
163 	reg_write(SIDE_CH_REG_M_AXIS_DATA_COUNT_ADDR, value);
164 }
165 
// Device tree match table: binds this driver to nodes declaring compatible = "sdr,side_ch"
static const struct of_device_id dev_of_ids[] = {
	{ .compatible = "sdr,side_ch", },
	{}
};
MODULE_DEVICE_TABLE(of, dev_of_ids);
171 
/* DMA completion callback for the PS-bound channel: wake up the waiting thread. */
static void chan_to_ps_callback(void *completion)
{
	struct completion *cmp = completion;

	complete(cmp);
}
176 
#if 0
/* Disabled bring-up/test code: PS->PL->PS DMA loopback. Kept for reference. */
static void chan_to_pl_callback(void *completion)
{
	complete(completion);
}

// Run num_test iterations of a DMA loopback with num_dma_symbol 64-bit symbols
// per transfer, then verify the received bytes against the transmitted pattern.
// NOTE(review): the error labels at the bottom fall through into one another,
// so any failure after both buffers are allocated ends up with err == -3 and
// kfree() called on both buffers regardless of which step failed. Harmless
// while this stays behind #if 0, but fix before re-enabling.
static int dma_loopback_test(int num_test, int num_dma_symbol) {
	int i, err = 0;

	// -----------dma loop back test-------------------------
	enum dma_status status;
	enum dma_ctrl_flags flags;
	u8 *src_buf, *dst_buf;
	// int num_dma_symbol = 16;
	int test_buf_size = num_dma_symbol*8;
	dma_addr_t src_buf_dma;
	dma_addr_t dst_buf_dma;
	struct dma_device *chan_to_pl_dev = chan_to_pl->device;
	struct dma_device *chan_to_ps_dev = chan_to_ps->device;
	struct scatterlist chan_to_pl_sg[1];
	struct scatterlist chan_to_ps_sg[1];
	dma_cookie_t chan_to_pl_cookie;
	dma_cookie_t chan_to_ps_cookie;
	struct completion chan_to_pl_cmp;
	struct completion chan_to_ps_cmp;
	struct dma_async_tx_descriptor *chan_to_pl_d = NULL;
	struct dma_async_tx_descriptor *chan_to_ps_d = NULL;
	unsigned long chan_to_ps_tmo =	msecs_to_jiffies(300000);
	unsigned long chan_to_pl_tmo =  msecs_to_jiffies(30000);
	int test_idx;

	for (test_idx=0; test_idx<num_test; test_idx++) {
		printk("%s test_idx %d\n", side_ch_compatible_str, test_idx);
		//set number of dma symbols expected to pl and ps
		SIDE_CH_REG_NUM_DMA_SYMBOL_write((num_dma_symbol<<16)|num_dma_symbol);

		src_buf = kmalloc(test_buf_size, GFP_KERNEL);
		if (!src_buf)
			goto err_src_buf;

		dst_buf = kmalloc(test_buf_size, GFP_KERNEL);
		if (!dst_buf)
			goto err_dst_buf;

		// test buf init: known descending pattern in src, zeros in dst
		for (i=0; i<test_buf_size; i++) {
			src_buf[i] = (test_idx+test_buf_size-i-1);
			dst_buf[i] = 0;
		}

		set_user_nice(current, 10);
		flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

		src_buf_dma = dma_map_single(chan_to_pl_dev->dev, src_buf, test_buf_size, DMA_MEM_TO_DEV);
		if (dma_mapping_error(chan_to_pl_dev->dev, src_buf_dma)) {
			printk("%s dma_loopback_test WARNING chan_to_pl_dev DMA mapping error\n", side_ch_compatible_str);
			goto err_src_buf_dma_mapping;
		}

		dst_buf_dma = dma_map_single(chan_to_ps_dev->dev, dst_buf, test_buf_size, DMA_DEV_TO_MEM);
		if (dma_mapping_error(chan_to_ps_dev->dev, dst_buf_dma)) {
			printk("%s dma_loopback_test WARNING chan_to_ps_dev DMA mapping error\n", side_ch_compatible_str);
			goto err_dst_buf_dma_mapping;
		}

		// one-entry scatterlists: a single contiguous buffer per direction
		sg_init_table(chan_to_ps_sg, 1);
		sg_init_table(chan_to_pl_sg, 1);

		sg_dma_address(&chan_to_ps_sg[0]) = dst_buf_dma;
		sg_dma_address(&chan_to_pl_sg[0]) = src_buf_dma;

		sg_dma_len(&chan_to_ps_sg[0]) = test_buf_size;
		sg_dma_len(&chan_to_pl_sg[0]) = test_buf_size;

		chan_to_ps_d = chan_to_ps_dev->device_prep_slave_sg(chan_to_ps, chan_to_ps_sg, 1, DMA_DEV_TO_MEM, flags, NULL);
		chan_to_pl_d = chan_to_pl_dev->device_prep_slave_sg(chan_to_pl, chan_to_pl_sg, 1, DMA_MEM_TO_DEV, flags, NULL);

		if (!chan_to_ps_d || !chan_to_pl_d) {
			printk("%s dma_loopback_test WARNING !chan_to_ps_d || !chan_to_pl_d\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		}

		// submit both directions, then wait: PL first (outbound), PS second (inbound)
		init_completion(&chan_to_pl_cmp);
		chan_to_pl_d->callback = chan_to_pl_callback;
		chan_to_pl_d->callback_param = &chan_to_pl_cmp;
		chan_to_pl_cookie = chan_to_pl_d->tx_submit(chan_to_pl_d);

		init_completion(&chan_to_ps_cmp);
		chan_to_ps_d->callback = chan_to_ps_callback;
		chan_to_ps_d->callback_param = &chan_to_ps_cmp;
		chan_to_ps_cookie = chan_to_ps_d->tx_submit(chan_to_ps_d);

		if (dma_submit_error(chan_to_pl_cookie) ||	dma_submit_error(chan_to_ps_cookie)) {
			printk("%s dma_loopback_test WARNING dma_submit_error\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		}

		dma_async_issue_pending(chan_to_pl);
		dma_async_issue_pending(chan_to_ps);

		chan_to_pl_tmo = wait_for_completion_timeout(&chan_to_pl_cmp, chan_to_pl_tmo);

		status = dma_async_is_tx_complete(chan_to_pl, chan_to_pl_cookie, NULL, NULL);
		if (chan_to_pl_tmo == 0) {
			printk("%s dma_loopback_test chan_to_pl_tmo == 0\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		} else if (status != DMA_COMPLETE) {
			printk("%s dma_loopback_test chan_to_pl status != DMA_COMPLETE\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		}

		chan_to_ps_tmo = wait_for_completion_timeout(&chan_to_ps_cmp, chan_to_ps_tmo);
		status = dma_async_is_tx_complete(chan_to_ps, chan_to_ps_cookie, NULL, NULL);
		if (chan_to_ps_tmo == 0) {
			printk("%s dma_loopback_test chan_to_ps_tmo == 0\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		} else if (status != DMA_COMPLETE) {
			printk("%s dma_loopback_test chan_to_ps status != DMA_COMPLETE\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		}

		dma_unmap_single(chan_to_pl_dev->dev, src_buf_dma, test_buf_size, DMA_MEM_TO_DEV);
		dma_unmap_single(chan_to_ps_dev->dev, dst_buf_dma, test_buf_size, DMA_DEV_TO_MEM);

		// test buf verification: i stops at the first mismatching byte
		// (pattern wraps modulo 256 since src_buf elements are u8)
		for (i=0; i<test_buf_size; i++) {
			//printk("%d ", dst_buf[i]);
			if ( dst_buf[i] != ((test_idx+test_buf_size-i-1)%256) )
				break;
		}
		printk("\n");
		printk("%s dma_loopback_test buf verification end idx %d (test_buf_size %d)\n", side_ch_compatible_str, i, test_buf_size);

		kfree(src_buf);
		kfree(dst_buf);
	}

	printk("%s dma_loopback_test err %d\n", side_ch_compatible_str, err);
	return(err);

err_dst_buf_with_unmap:
	dma_unmap_single(chan_to_ps_dev->dev, dst_buf_dma, test_buf_size, DMA_DEV_TO_MEM);

err_dst_buf_dma_mapping:
	dma_unmap_single(chan_to_pl_dev->dev, src_buf_dma, test_buf_size, DMA_MEM_TO_DEV);

err_src_buf_dma_mapping:

err_dst_buf:
	err = -4;
	kfree((void*)dst_buf);

err_src_buf:
	err = -3;
	kfree(src_buf);

	return(err);
}
#endif
336 
337 static int init_side_channel(void) {
338 	side_info_buf = kmalloc(max_side_info_buf_size, GFP_KERNEL);
339 	if (!side_info_buf)
340 		return(-1);
341 
342 	return(0);
343 }
344 
// Pull one batch of side information (CSI records, or IQ captures when
// iq_len > 0) from the FPGA into side_info_buf through the PS-bound DMA channel.
// num_eq: number of equalizer outputs appended to each CSI record (CSI mode).
// iq_len: >0 selects IQ capture mode; gives IQ symbols per capture.
// Returns the number of bytes transferred on success, negative on error.
static int get_side_info(int num_eq, int iq_len) {
	// int err = 0;//, i;
	struct scatterlist chan_to_ps_sg[1];
	enum dma_status status;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	int num_dma_symbol, num_dma_symbol_per_trans, side_info_buf_size;
	dma_addr_t side_info_buf_dma;
	struct dma_device *chan_to_ps_dev = chan_to_ps->device;
	struct completion chan_to_ps_cmp;
	struct dma_async_tx_descriptor *chan_to_ps_d = NULL;
	unsigned long chan_to_ps_tmo =	msecs_to_jiffies(100);

	if (side_info_buf==NULL) {
		printk("%s get_side_info WARNING side_info_buf==NULL\n", side_ch_compatible_str);
		return(-1);
	}

	// refuse to start a new transfer while the previous one is still in flight
	// (chan_to_ps_cookie is the cookie of the last submitted transfer)
	status = dma_async_is_tx_complete(chan_to_ps, chan_to_ps_cookie, NULL, NULL);
	if (status!=DMA_COMPLETE) {
		printk("%s get_side_info WARNING status!=DMA_COMPLETE\n", side_ch_compatible_str);
		return(-1);
	}

	set_user_nice(current, 10);

	// size of one record in 64-bit DMA symbols: 1 header + iq_len in IQ mode,
	// header + CSI + num_eq equalizer outputs in CSI mode
	if (iq_len>0)
		num_dma_symbol_per_trans = 1+iq_len;
	else
		num_dma_symbol_per_trans = HEADER_LEN + CSI_LEN + num_eq*EQUALIZER_LEN;
	//set number of dma symbols expected to ps
	num_dma_symbol = SIDE_CH_REG_M_AXIS_DATA_COUNT_read();
	printk("%s get_side_info m axis data count %d per trans %d\n", side_ch_compatible_str, num_dma_symbol, num_dma_symbol_per_trans);
	// round down to a whole number of records so partial records are never DMA'ed
	num_dma_symbol = num_dma_symbol_per_trans*(num_dma_symbol/num_dma_symbol_per_trans);
	printk("%s get_side_info actual num dma symbol %d\n", side_ch_compatible_str, num_dma_symbol);
	if (num_dma_symbol == 0)
		return(-2); // nothing (or only a partial record) available yet

	side_info_buf_size = num_dma_symbol*8; // 8 bytes per DMA symbol
	side_info_buf_dma = dma_map_single(chan_to_ps_dev->dev, side_info_buf, side_info_buf_size, DMA_DEV_TO_MEM);
	if (dma_mapping_error(chan_to_ps_dev->dev, side_info_buf_dma)) {
		printk("%s get_side_info WARNING chan_to_ps_dev DMA mapping error\n", side_ch_compatible_str);
		return(-3);
	}

	// single-entry scatterlist: one contiguous destination buffer
	sg_init_table(chan_to_ps_sg, 1);
	sg_dma_address(&chan_to_ps_sg[0]) = side_info_buf_dma;
	sg_dma_len(&chan_to_ps_sg[0]) = side_info_buf_size;

	chan_to_ps_d = chan_to_ps_dev->device_prep_slave_sg(chan_to_ps, chan_to_ps_sg, 1, DMA_DEV_TO_MEM, flags, NULL);
	if (!chan_to_ps_d) {
		printk("%s get_side_info WARNING !chan_to_ps_d\n", side_ch_compatible_str);
		goto err_dst_buf_with_unmap;
	}

	init_completion(&chan_to_ps_cmp);
	chan_to_ps_d->callback = chan_to_ps_callback;
	chan_to_ps_d->callback_param = &chan_to_ps_cmp;

	chan_to_ps_cookie = chan_to_ps_d->tx_submit(chan_to_ps_d);
	if (dma_submit_error(chan_to_ps_cookie)) {
		printk("%s get_side_info WARNING dma_submit_error\n", side_ch_compatible_str);
		goto err_dst_buf_with_unmap;
	}

	SIDE_CH_REG_NUM_DMA_SYMBOL_write(num_dma_symbol); //dma from fpga will start automatically

	dma_async_issue_pending(chan_to_ps);

	// wait up to 100 ms, then double-check the channel actually completed
	chan_to_ps_tmo = wait_for_completion_timeout(&chan_to_ps_cmp, chan_to_ps_tmo);
	status = dma_async_is_tx_complete(chan_to_ps, chan_to_ps_cookie, NULL, NULL);
	if (chan_to_ps_tmo == 0) {
		printk("%s get_side_info WARNING chan_to_ps_tmo == 0\n", side_ch_compatible_str);
		goto err_dst_buf_with_unmap;
	} else if (status != DMA_COMPLETE) {
		printk("%s get_side_info WARNING chan_to_ps status != DMA_COMPLETE\n", side_ch_compatible_str);
		goto err_dst_buf_with_unmap;
	}

	dma_unmap_single(chan_to_ps_dev->dev, side_info_buf_dma, side_info_buf_size, DMA_DEV_TO_MEM);
	return(side_info_buf_size);

err_dst_buf_with_unmap:
	dma_unmap_single(chan_to_ps_dev->dev, side_info_buf_dma, side_info_buf_size, DMA_DEV_TO_MEM);
	return(-100);
}
430 
// -----------------netlink recv and send-----------------
// Command word layout (4 x u32): action, reg type, reg idx, reg value.
// should align with side_ch_ctl.c in user_space
#define ACTION_INVALID       0
#define ACTION_REG_WRITE     1
#define ACTION_REG_READ      2
#define ACTION_SIDE_INFO_GET 3

#define REG_TYPE_INVALID     0
#define REG_TYPE_HARDWARE    1
#define REG_TYPE_SOFTWARE    2

// #define NETLINK_USER 31
struct sock *nl_sk = NULL; // netlink socket created in dev_probe, released in dev_remove
444 static void side_ch_nl_recv_msg(struct sk_buff *skb) {
445 	struct nlmsghdr *nlh;
446 	int pid;
447 	struct sk_buff *skb_out;
448 	int msg_size;
449 	int *msg=(int*)side_info_buf;
450 	int action_flag, reg_type, reg_idx;
451 	u32 reg_val, *cmd_buf;
452 	int res;
453 
454 	// printk(KERN_INFO "Entering: %s\n", __FUNCTION__);
455 
456 	// msg_size=strlen(msg);
457 
458 	nlh=(struct nlmsghdr*)skb->data;
459 	cmd_buf = (u32*)nlmsg_data(nlh);
460 	// printk(KERN_INFO "Netlink received msg payload:%s\n",(char*)nlmsg_data(nlh));
461 	action_flag = cmd_buf[0];
462     reg_type = cmd_buf[1];
463     reg_idx = cmd_buf[2];
464     reg_val = cmd_buf[3];
465 	printk("%s recv msg: len %d action_flag %d reg_type %d reg_idx %d reg_val %u\n", side_ch_compatible_str, nlmsg_len(nlh), action_flag, reg_type, reg_idx, reg_val);
466 
467 	pid = nlh->nlmsg_pid; /*pid of sending process */
468 
469 	if (action_flag==ACTION_SIDE_INFO_GET) {
470 		res = get_side_info(num_eq_init, iq_len_init);
471 		printk(KERN_INFO "%s recv msg: get_side_info(%d,%d) res %d\n", side_ch_compatible_str, num_eq_init, iq_len_init, res);
472 		if (res>0) {
473 			msg_size = res;
474 			// printk("%s recv msg: %d %d %d %d %d %d %d %d\n", side_ch_compatible_str, msg[0], msg[1], msg[2], msg[3], msg[4], msg[5], msg[6], msg[7]);
475 		} else {
476 			msg_size = 4;
477 			msg[0] = -2;
478 		}
479 	} else if (action_flag==ACTION_REG_READ) {
480 		msg_size = 4;
481 		// if (reg_idx<0 || reg_idx>31) {
482 		// 	msg[0] = -3;
483 		// 	printk("%s recv msg: invalid reg_idx\n", side_ch_compatible_str);
484 		// } else {
485 			msg[0] = reg_read(reg_idx*4);
486 		// }
487 	} else if (action_flag==ACTION_REG_WRITE) {
488 		msg_size = 4;
489 		// if (reg_idx<0 || reg_idx>31) {
490 		// 	msg[0] = -4;
491 		// 	printk("%s recv msg: invalid reg_idx\n", side_ch_compatible_str);
492 		// } else {
493 			msg[0] = 0;
494 			reg_write(reg_idx*4, reg_val);
495 		// }
496 	} else {
497 		msg_size = 4;
498 		msg[0] = -1;
499 		printk("%s recv msg: invalid action_flag\n", side_ch_compatible_str);
500 	}
501 
502 	skb_out = nlmsg_new(msg_size,0);
503 	if(!skb_out)
504 	{
505 		printk(KERN_ERR "Failed to allocate new skb\n");
506 		return;
507 	}
508 	nlh=nlmsg_put(skb_out,0,0,NLMSG_DONE,msg_size,0);
509 	NETLINK_CB(skb_out).dst_group = 0; /* not in mcast group */
510 
511 	memcpy(nlmsg_data(nlh),msg,msg_size);
512 
513 	res=nlmsg_unicast(nl_sk,skb_out,pid);
514 
515 	if(res<0)
516 		printk(KERN_INFO "Error while sending bak to user\n");
517 }
518 
519 static int dev_probe(struct platform_device *pdev) {
520 	struct netlink_kernel_cfg cfg = {
521 		.input = side_ch_nl_recv_msg,
522 	};
523 
524 	struct device_node *np = pdev->dev.of_node;
525 	struct resource *io;
526 	int err=1, i;
527 
528 	printk("\n");
529 
530 	if (np) {
531 		const struct of_device_id *match;
532 
533 		match = of_match_node(dev_of_ids, np);
534 		if (match) {
535 			printk("%s dev_probe: match!\n", side_ch_compatible_str);
536 			err = 0;
537 		}
538 	}
539 
540 	if (err)
541 		return err;
542 
543 	/* Request and map I/O memory */
544 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
545 	base_addr = devm_ioremap_resource(&pdev->dev, io);
546 	if (IS_ERR(base_addr))
547 		return PTR_ERR(base_addr);
548 
549 	printk("%s dev_probe: io start 0x%p end 0x%p name %s flags 0x%08x desc %s\n", side_ch_compatible_str, (void*)io->start, (void*)io->end, io->name, (u32)io->flags, (char*)io->desc);
550 	printk("%s dev_probe: base_addr 0x%p\n", side_ch_compatible_str, base_addr);
551 
552 	printk("%s dev_probe: succeed!\n", side_ch_compatible_str);
553 
554 	// --------------initialize netlink--------------
555 	//nl_sk = netlink_kernel_create(&init_net, NETLINK_USER, &cfg);
556 	nl_sk = netlink_kernel_create(&init_net, NETLINK_USERSOCK, &cfg);
557 	if(!nl_sk) {
558 		printk(KERN_ALERT "%s dev_probe: Error creating socket.\n", side_ch_compatible_str);
559 		return -10;
560 	}
561 
562 	//-----------------initialize fpga----------------
563 	printk("%s dev_probe: num_eq_init %d iq_len_init %d\n",side_ch_compatible_str, num_eq_init, iq_len_init);
564 
565 	// disable potential any action from side channel
566 	SIDE_CH_REG_MULTI_RST_write(4);
567 	// SIDE_CH_REG_CONFIG_write(0X6001); // match addr1 and addr2; bit12 FC; bit13 addr1; bit14 addr2
568 	SIDE_CH_REG_CONFIG_write(0x7001); // the most strict condition to prevent side channel action
569 	SIDE_CH_REG_IQ_TRIGGER_write(10); // set iq trigger to rssi, which will never happen when rssi_th is 0
570 	SIDE_CH_REG_NUM_EQ_write(num_eq_init);      // capture CSI + 8*equalizer by default
571 	if (iq_len_init>0) {//initialize the side channel into iq capture mode
572 		//Max UDP 65507 bytes; (65507/8)-1 = 8187
573 		if (iq_len_init>8187) {
574 			iq_len_init = 8187;
575 			printk("%s dev_probe: limit iq_len_init to 8187!\n",side_ch_compatible_str);
576 		}
577 		SIDE_CH_REG_IQ_CAPTURE_write(1);
578 		SIDE_CH_REG_PRE_TRIGGER_LEN_write(8190);
579 		SIDE_CH_REG_IQ_LEN_write(iq_len_init);
580 		SIDE_CH_REG_IQ_TRIGGER_write(0); // trigger is set to fcs ok/nok (both)
581 	}
582 
583 	SIDE_CH_REG_CONFIG_write(0x0001); // allow all packets by default; bit12 FC; bit13 addr1; bit14 addr2
584 
585 	//rst
586 	for (i=0;i<8;i++)
587 		SIDE_CH_REG_MULTI_RST_write(0);
588 	for (i=0;i<32;i++)
589 		SIDE_CH_REG_MULTI_RST_write(0xFFFFFFFF);
590 	for (i=0;i<8;i++)
591 		SIDE_CH_REG_MULTI_RST_write(0);
592 
593 	// chan_to_pl = dma_request_slave_channel(&(pdev->dev), "rx_dma_mm2s");
594 	// if (IS_ERR(chan_to_pl)) {
595 	// 	err = PTR_ERR(chan_to_pl);
596 	// 	pr_err("%s dev_probe: No channel to PL. %d\n",side_ch_compatible_str,err);
597 	// 	goto free_chan_to_pl;
598 	// }
599 
600 	chan_to_ps = dma_request_slave_channel(&(pdev->dev), "tx_dma_s2mm");
601 	if (IS_ERR(chan_to_ps)) {
602 		err = PTR_ERR(chan_to_ps);
603 		pr_err("%s dev_probe: No channel to PS. %d\n",side_ch_compatible_str,err);
604 		goto free_chan_to_ps;
605 	}
606 
607 	printk("%s dev_probe: DMA channel setup successfully. chan_to_pl 0x%p chan_to_ps 0x%p\n",side_ch_compatible_str, chan_to_pl, chan_to_ps);
608 
609 	// res = dma_loopback_test(3, 512);
610 	// printk(KERN_INFO "dma_loopback_test(3, 512) res %d\n", res);
611 
612 	err = init_side_channel();
613 	printk("%s dev_probe: init_side_channel() err %d\n",side_ch_compatible_str, err);
614 
615 	return(err);
616 
617 	// err = dma_loopback_test(7, 512);
618 	// if (err == 0)
619 	// 	return(err);
620 	// else
621 	// 	dma_release_channel(chan_to_ps);
622 
623 free_chan_to_ps:
624 	err = -2;
625 	dma_release_channel(chan_to_ps);
626 	return err;
627 
628 // free_chan_to_pl:
629 // 	err = -1;
630 // 	dma_release_channel(chan_to_pl);
631 // 	return err;
632 }
633 
634 static int dev_remove(struct platform_device *pdev)
635 {
636 	printk("\n");
637 
638 	printk("%s dev_remove: release nl_sk\n", side_ch_compatible_str);
639 	netlink_kernel_release(nl_sk);
640 
641 	pr_info("%s dev_remove: dropped chan_to_pl 0x%p\n", side_ch_compatible_str, chan_to_pl);
642 	if (chan_to_pl != NULL) {
643 		pr_info("%s dev_remove: dropped channel %s\n", side_ch_compatible_str, dma_chan_name(chan_to_pl));
644 		// dmaengine_terminate_all(chan_to_pl); //this also terminate sdr.ko. do not use
645 		dma_release_channel(chan_to_pl);
646 	}
647 
648 	pr_info("%s dev_remove: dropped chan_to_ps 0x%p\n", side_ch_compatible_str, chan_to_ps);
649 	if (chan_to_pl != NULL) {
650 		pr_info("%s dev_remove: dropped channel %s\n", side_ch_compatible_str, dma_chan_name(chan_to_ps));
651 		// dmaengine_terminate_all(chan_to_ps); //this also terminate sdr.ko. do not use
652 		dma_release_channel(chan_to_ps);
653 	}
654 
655 	if (side_info_buf != NULL)
656 		kfree(side_info_buf);
657 
658 	printk("%s dev_remove: base_addr 0x%p\n", side_ch_compatible_str, base_addr);
659 	printk("%s dev_remove: succeed!\n", side_ch_compatible_str);
660 	return 0;
661 }
662 
// Platform driver registration; module init/exit are generated by
// module_platform_driver() below.
static struct platform_driver dev_driver = {
	.driver = {
		.name = "sdr,side_ch",
		.owner = THIS_MODULE,
		.of_match_table = dev_of_ids,
	},
	.probe = dev_probe,
	.remove = dev_remove,
};

module_platform_driver(dev_driver);

MODULE_AUTHOR("Xianjun Jiao");
MODULE_DESCRIPTION("sdr,side_ch");
MODULE_LICENSE("GPL v2");
678