xref: /openwifi/driver/side_ch/side_ch.c (revision 22dd0cc4861dbe973efee122229ab82ac3dd2c9a)
/*
 * openwifi side channel driver
 * Xianjun Jiao. [email protected]; [email protected]
 */

#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>

#include <net/sock.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>

#include "side_ch.h"

static int num_eq_init = 8; // valid range 0~8

module_param(num_eq_init, int, 0);
MODULE_PARM_DESC(num_eq_init, "num_eq_init. 0~8. Number of equalizer outputs (52 samples each) appended to each CSI record");
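// example (assuming the usual out-of-tree build that produces side_ch.ko):
//   insmod side_ch.ko num_eq_init=4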

static void __iomem *base_addr; // ioremapped base address of the side channel registers in our FPGA design; accesses go through this virtual address and the MMU translates them to the physical register space

struct dma_chan *chan_to_pl = NULL;
struct dma_chan *chan_to_ps = NULL;
u8 *side_info_buf = NULL;
dma_cookie_t chan_to_ps_cookie;
const int max_side_info_buf_size = MAX_NUM_DMA_SYMBOL*8;
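// each DMA symbol is 8 bytes wide, hence the *8 whenever a symbol count is converted to bytes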

/* IO accessors */
static inline u32 reg_read(u32 reg)
{
	return ioread32(base_addr + reg);
}

static inline void reg_write(u32 reg, u32 value)
{
	iowrite32(value, base_addr + reg);
}
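// the FPGA registers sit at consecutive 4-byte slots, so register index i maps to byte
// offset i*4 (the netlink handler below relies on this for REG_READ/REG_WRITE)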

static inline void SIDE_CH_REG_MULTI_RST_write(u32 Data) {
	reg_write(SIDE_CH_REG_MULTI_RST_ADDR, Data);
}

static inline u32 SIDE_CH_REG_CONFIG_read(void){
	return reg_read(SIDE_CH_REG_CONFIG_ADDR);
}

static inline void SIDE_CH_REG_CONFIG_write(u32 value){
	reg_write(SIDE_CH_REG_CONFIG_ADDR, value);
}

static inline u32 SIDE_CH_REG_NUM_DMA_SYMBOL_read(void){
	return reg_read(SIDE_CH_REG_NUM_DMA_SYMBOL_ADDR);
}

static inline void SIDE_CH_REG_NUM_DMA_SYMBOL_write(u32 value){
	reg_write(SIDE_CH_REG_NUM_DMA_SYMBOL_ADDR, value);
}

static inline u32 SIDE_CH_REG_START_DMA_TO_PS_read(void){
	return reg_read(SIDE_CH_REG_START_DMA_TO_PS_ADDR);
}

static inline void SIDE_CH_REG_START_DMA_TO_PS_write(u32 value){
	reg_write(SIDE_CH_REG_START_DMA_TO_PS_ADDR, value);
}

static inline u32 SIDE_CH_REG_NUM_EQ_read(void){
	return reg_read(SIDE_CH_REG_NUM_EQ_ADDR);
}

static inline void SIDE_CH_REG_NUM_EQ_write(u32 value){
	reg_write(SIDE_CH_REG_NUM_EQ_ADDR, value);
}

static inline u32 SIDE_CH_REG_FC_TARGET_read(void){
	return reg_read(SIDE_CH_REG_FC_TARGET_ADDR);
}

static inline void SIDE_CH_REG_FC_TARGET_write(u32 value){
	reg_write(SIDE_CH_REG_FC_TARGET_ADDR, value);
}

static inline u32 SIDE_CH_REG_ADDR1_TARGET_read(void){
	return reg_read(SIDE_CH_REG_ADDR1_TARGET_ADDR);
}

static inline void SIDE_CH_REG_ADDR1_TARGET_write(u32 value){
	reg_write(SIDE_CH_REG_ADDR1_TARGET_ADDR, value);
}

static inline u32 SIDE_CH_REG_ADDR2_TARGET_read(void){
	return reg_read(SIDE_CH_REG_ADDR2_TARGET_ADDR);
}

static inline void SIDE_CH_REG_ADDR2_TARGET_write(u32 value){
	reg_write(SIDE_CH_REG_ADDR2_TARGET_ADDR, value);
}

static inline u32 SIDE_CH_REG_M_AXIS_DATA_COUNT_read(void){
	return reg_read(SIDE_CH_REG_M_AXIS_DATA_COUNT_ADDR);
}

static inline void SIDE_CH_REG_M_AXIS_DATA_COUNT_write(u32 value){
	reg_write(SIDE_CH_REG_M_AXIS_DATA_COUNT_ADDR, value);
}

static const struct of_device_id dev_of_ids[] = {
	{ .compatible = "sdr,side_ch", },
	{}
};
MODULE_DEVICE_TABLE(of, dev_of_ids);

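// completion callback for the DMA-to-PS transfer: runs in the dmaengine callback
// context and wakes the thread blocked in wait_for_completion_timeout()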
static void chan_to_ps_callback(void *completion)
{
	complete(completion);
}

#if 0
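// disabled DMA loopback self-test, kept for hardware bring-up/debugging: it pushes a
// known byte pattern to the PL and verifies the same bytes come back to the PS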
static void chan_to_pl_callback(void *completion)
{
	complete(completion);
}

static int dma_loopback_test(int num_test, int num_dma_symbol) {
	int i, err = 0;

	// -----------dma loop back test-------------------------
	enum dma_status status;
	enum dma_ctrl_flags flags;
	u8 *src_buf, *dst_buf;
	// int num_dma_symbol = 16;
	int test_buf_size = num_dma_symbol*8;
	dma_addr_t src_buf_dma;
	dma_addr_t dst_buf_dma;
	struct dma_device *chan_to_pl_dev = chan_to_pl->device;
	struct dma_device *chan_to_ps_dev = chan_to_ps->device;
	struct scatterlist chan_to_pl_sg[1];
	struct scatterlist chan_to_ps_sg[1];
	dma_cookie_t chan_to_pl_cookie;
	dma_cookie_t chan_to_ps_cookie;
	struct completion chan_to_pl_cmp;
	struct completion chan_to_ps_cmp;
	struct dma_async_tx_descriptor *chan_to_pl_d = NULL;
	struct dma_async_tx_descriptor *chan_to_ps_d = NULL;
	unsigned long chan_to_ps_tmo = msecs_to_jiffies(300000);
	unsigned long chan_to_pl_tmo = msecs_to_jiffies(30000);
	int test_idx;

	for (test_idx=0; test_idx<num_test; test_idx++) {
		printk("%s test_idx %d\n", side_ch_compatible_str, test_idx);
		// set number of dma symbols expected by pl and ps
		SIDE_CH_REG_NUM_DMA_SYMBOL_write((num_dma_symbol<<16)|num_dma_symbol);

		src_buf = kmalloc(test_buf_size, GFP_KERNEL);
		if (!src_buf)
			goto err_src_buf;

		dst_buf = kmalloc(test_buf_size, GFP_KERNEL);
		if (!dst_buf)
			goto err_dst_buf;

		// test buf init
		for (i=0; i<test_buf_size; i++) {
			src_buf[i] = (test_idx+test_buf_size-i-1);
			dst_buf[i] = 0;
		}

		set_user_nice(current, 10);
		flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

		src_buf_dma = dma_map_single(chan_to_pl_dev->dev, src_buf, test_buf_size, DMA_MEM_TO_DEV);
		if (dma_mapping_error(chan_to_pl_dev->dev, src_buf_dma)) {
			printk("%s dma_loopback_test WARNING chan_to_pl_dev DMA mapping error\n", side_ch_compatible_str);
			goto err_src_buf_dma_mapping;
		}

		dst_buf_dma = dma_map_single(chan_to_ps_dev->dev, dst_buf, test_buf_size, DMA_DEV_TO_MEM);
		if (dma_mapping_error(chan_to_ps_dev->dev, dst_buf_dma)) {
			printk("%s dma_loopback_test WARNING chan_to_ps_dev DMA mapping error\n", side_ch_compatible_str);
			goto err_dst_buf_dma_mapping;
		}

		sg_init_table(chan_to_ps_sg, 1);
		sg_init_table(chan_to_pl_sg, 1);

		sg_dma_address(&chan_to_ps_sg[0]) = dst_buf_dma;
		sg_dma_address(&chan_to_pl_sg[0]) = src_buf_dma;

		sg_dma_len(&chan_to_ps_sg[0]) = test_buf_size;
		sg_dma_len(&chan_to_pl_sg[0]) = test_buf_size;

		chan_to_ps_d = chan_to_ps_dev->device_prep_slave_sg(chan_to_ps, chan_to_ps_sg, 1, DMA_DEV_TO_MEM, flags, NULL);
		chan_to_pl_d = chan_to_pl_dev->device_prep_slave_sg(chan_to_pl, chan_to_pl_sg, 1, DMA_MEM_TO_DEV, flags, NULL);

		if (!chan_to_ps_d || !chan_to_pl_d) {
			printk("%s dma_loopback_test WARNING !chan_to_ps_d || !chan_to_pl_d\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		}

		init_completion(&chan_to_pl_cmp);
		chan_to_pl_d->callback = chan_to_pl_callback;
		chan_to_pl_d->callback_param = &chan_to_pl_cmp;
		chan_to_pl_cookie = chan_to_pl_d->tx_submit(chan_to_pl_d);

		init_completion(&chan_to_ps_cmp);
		chan_to_ps_d->callback = chan_to_ps_callback;
		chan_to_ps_d->callback_param = &chan_to_ps_cmp;
		chan_to_ps_cookie = chan_to_ps_d->tx_submit(chan_to_ps_d);

		if (dma_submit_error(chan_to_pl_cookie) || dma_submit_error(chan_to_ps_cookie)) {
			printk("%s dma_loopback_test WARNING dma_submit_error\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		}

		dma_async_issue_pending(chan_to_pl);
		dma_async_issue_pending(chan_to_ps);

		chan_to_pl_tmo = wait_for_completion_timeout(&chan_to_pl_cmp, chan_to_pl_tmo);

		status = dma_async_is_tx_complete(chan_to_pl, chan_to_pl_cookie, NULL, NULL);
		if (chan_to_pl_tmo == 0) {
			printk("%s dma_loopback_test chan_to_pl_tmo == 0\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		} else if (status != DMA_COMPLETE) {
			printk("%s dma_loopback_test chan_to_pl status != DMA_COMPLETE\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		}

		chan_to_ps_tmo = wait_for_completion_timeout(&chan_to_ps_cmp, chan_to_ps_tmo);
		status = dma_async_is_tx_complete(chan_to_ps, chan_to_ps_cookie, NULL, NULL);
		if (chan_to_ps_tmo == 0) {
			printk("%s dma_loopback_test chan_to_ps_tmo == 0\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		} else if (status != DMA_COMPLETE) {
			printk("%s dma_loopback_test chan_to_ps status != DMA_COMPLETE\n", side_ch_compatible_str);
			goto err_dst_buf_with_unmap;
		}

		dma_unmap_single(chan_to_pl_dev->dev, src_buf_dma, test_buf_size, DMA_MEM_TO_DEV);
		dma_unmap_single(chan_to_ps_dev->dev, dst_buf_dma, test_buf_size, DMA_DEV_TO_MEM);

		// test buf verification
		for (i=0; i<test_buf_size; i++) {
			//printk("%d ", dst_buf[i]);
			if ( dst_buf[i] != ((test_idx+test_buf_size-i-1)%256) )
				break;
		}
		printk("\n");
		printk("%s dma_loopback_test buf verification end idx %d (test_buf_size %d)\n", side_ch_compatible_str, i, test_buf_size);

		kfree(src_buf);
		kfree(dst_buf);
	}

	printk("%s dma_loopback_test err %d\n", side_ch_compatible_str, err);
	return(err);

err_dst_buf_with_unmap:
	dma_unmap_single(chan_to_ps_dev->dev, dst_buf_dma, test_buf_size, DMA_DEV_TO_MEM);

err_dst_buf_dma_mapping:
	dma_unmap_single(chan_to_pl_dev->dev, src_buf_dma, test_buf_size, DMA_MEM_TO_DEV);

err_src_buf_dma_mapping:

err_dst_buf:
	err = -4;
	kfree(dst_buf);
	goto free_src_buf;

err_src_buf:
	err = -3;

free_src_buf:
	kfree(src_buf);

	return(err);
}
#endif

static int init_side_channel(void) {
	side_info_buf = kmalloc(max_side_info_buf_size, GFP_KERNEL);
	if (!side_info_buf)
		return(-1);

	return(0);
}

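// Fetch pending side info from the FPGA into side_info_buf. Flow: check the previous
// DMA transfer has completed, read the FIFO fill level, round it down to a whole number
// of records (header + CSI + num_eq equalizer symbols), map the buffer, submit a
// DEV_TO_MEM slave-sg transfer, then write the symbol count to the FPGA, which starts
// the transfer. Returns the number of bytes received, or a negative value on error.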
static int get_side_info(int num_eq) {
	// int err = 0;//, i;
	struct scatterlist chan_to_ps_sg[1];
	enum dma_status status;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	int num_dma_symbol, num_dma_symbol_per_trans, side_info_buf_size;
	dma_addr_t side_info_buf_dma;
	struct dma_device *chan_to_ps_dev = chan_to_ps->device;
	struct completion chan_to_ps_cmp;
	struct dma_async_tx_descriptor *chan_to_ps_d = NULL;
	unsigned long chan_to_ps_tmo = msecs_to_jiffies(100);

	if (side_info_buf==NULL) {
		printk("%s get_side_info WARNING side_info_buf==NULL\n", side_ch_compatible_str);
		return(-1);
	}

	status = dma_async_is_tx_complete(chan_to_ps, chan_to_ps_cookie, NULL, NULL);
	if (status!=DMA_COMPLETE) {
		printk("%s get_side_info WARNING status!=DMA_COMPLETE\n", side_ch_compatible_str);
		return(-1);
	}

	set_user_nice(current, 10);

	num_dma_symbol_per_trans = HEADER_LEN + CSI_LEN + num_eq*EQUALIZER_LEN;
	// read the FIFO fill level, then round it down to a whole number of records
	num_dma_symbol = SIDE_CH_REG_M_AXIS_DATA_COUNT_read();
	printk("%s get_side_info m axis data count %d per trans %d\n", side_ch_compatible_str, num_dma_symbol, num_dma_symbol_per_trans);
	num_dma_symbol = num_dma_symbol_per_trans*(num_dma_symbol/num_dma_symbol_per_trans);
	printk("%s get_side_info actual num dma symbol %d\n", side_ch_compatible_str, num_dma_symbol);
	if (num_dma_symbol == 0)
		return(-2);

	side_info_buf_size = num_dma_symbol*8;
	side_info_buf_dma = dma_map_single(chan_to_ps_dev->dev, side_info_buf, side_info_buf_size, DMA_DEV_TO_MEM);
	if (dma_mapping_error(chan_to_ps_dev->dev, side_info_buf_dma)) {
		printk("%s get_side_info WARNING chan_to_ps_dev DMA mapping error\n", side_ch_compatible_str);
		return(-3);
	}

	sg_init_table(chan_to_ps_sg, 1);
	sg_dma_address(&chan_to_ps_sg[0]) = side_info_buf_dma;
	sg_dma_len(&chan_to_ps_sg[0]) = side_info_buf_size;

	chan_to_ps_d = chan_to_ps_dev->device_prep_slave_sg(chan_to_ps, chan_to_ps_sg, 1, DMA_DEV_TO_MEM, flags, NULL);
	if (!chan_to_ps_d) {
		printk("%s get_side_info WARNING !chan_to_ps_d\n", side_ch_compatible_str);
		goto err_dst_buf_with_unmap;
	}

	init_completion(&chan_to_ps_cmp);
	chan_to_ps_d->callback = chan_to_ps_callback;
	chan_to_ps_d->callback_param = &chan_to_ps_cmp;

	chan_to_ps_cookie = chan_to_ps_d->tx_submit(chan_to_ps_d);
	if (dma_submit_error(chan_to_ps_cookie)) {
		printk("%s get_side_info WARNING dma_submit_error\n", side_ch_compatible_str);
		goto err_dst_buf_with_unmap;
	}

	SIDE_CH_REG_NUM_DMA_SYMBOL_write(num_dma_symbol); // DMA from the FPGA starts automatically once the symbol count is written

	dma_async_issue_pending(chan_to_ps);

	chan_to_ps_tmo = wait_for_completion_timeout(&chan_to_ps_cmp, chan_to_ps_tmo);
	status = dma_async_is_tx_complete(chan_to_ps, chan_to_ps_cookie, NULL, NULL);
	if (chan_to_ps_tmo == 0) {
		printk("%s get_side_info WARNING chan_to_ps_tmo == 0\n", side_ch_compatible_str);
		goto err_dst_buf_with_unmap;
	} else if (status != DMA_COMPLETE) {
		printk("%s get_side_info WARNING chan_to_ps status != DMA_COMPLETE\n", side_ch_compatible_str);
		goto err_dst_buf_with_unmap;
	}

	dma_unmap_single(chan_to_ps_dev->dev, side_info_buf_dma, side_info_buf_size, DMA_DEV_TO_MEM);
	return(side_info_buf_size);

err_dst_buf_with_unmap:
	dma_unmap_single(chan_to_ps_dev->dev, side_info_buf_dma, side_info_buf_size, DMA_DEV_TO_MEM);
	return(-100);
}

// -----------------netlink recv and send-----------------
// must stay in sync with side_ch_ctl.c in user space
#define ACTION_INVALID       0
#define ACTION_REG_WRITE     1
#define ACTION_REG_READ      2
#define ACTION_SIDE_INFO_GET 3

#define REG_TYPE_INVALID     0
#define REG_TYPE_HARDWARE    1
#define REG_TYPE_SOFTWARE    2

// #define NETLINK_USER 31
struct sock *nl_sk = NULL;
static void side_ch_nl_recv_msg(struct sk_buff *skb) {
	struct nlmsghdr *nlh;
	int pid;
	struct sk_buff *skb_out;
	int msg_size;
	int *msg=(int*)side_info_buf;
	int action_flag, reg_type, reg_idx;
	u32 reg_val, *cmd_buf;
	int res;

	// printk(KERN_INFO "Entering: %s\n", __FUNCTION__);

	// msg_size=strlen(msg);

	nlh=(struct nlmsghdr*)skb->data;
	cmd_buf = (u32*)nlmsg_data(nlh);
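	// command layout shared with side_ch_ctl.c: word0 action, word1 register type,
	// word2 register index, word3 register value (only meaningful for writes)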
	// printk(KERN_INFO "Netlink received msg payload:%s\n",(char*)nlmsg_data(nlh));
	action_flag = cmd_buf[0];
	reg_type = cmd_buf[1];
	reg_idx = cmd_buf[2];
	reg_val = cmd_buf[3];
	printk("%s recv msg: len %d action_flag %d reg_type %d reg_idx %d reg_val %u\n", side_ch_compatible_str, nlmsg_len(nlh), action_flag, reg_type, reg_idx, reg_val);

	pid = nlh->nlmsg_pid; /* pid of sending process */

	if (action_flag==ACTION_SIDE_INFO_GET) {
		res = get_side_info(num_eq_init);
		printk(KERN_INFO "%s recv msg: get_side_info(%d) res %d\n", side_ch_compatible_str, num_eq_init, res);
		if (res>0) {
			msg_size = res;
			// printk("%s recv msg: %d %d %d %d %d %d %d %d\n", side_ch_compatible_str, msg[0], msg[1], msg[2], msg[3], msg[4], msg[5], msg[6], msg[7]);
		} else {
			msg_size = 4;
			msg[0] = -2;
		}
	} else if (action_flag==ACTION_REG_READ) {
		msg_size = 4;
		// if (reg_idx<0 || reg_idx>31) {
		// 	msg[0] = -3;
		// 	printk("%s recv msg: invalid reg_idx\n", side_ch_compatible_str);
		// } else {
			msg[0] = reg_read(reg_idx*4);
		// }
	} else if (action_flag==ACTION_REG_WRITE) {
		msg_size = 4;
		// if (reg_idx<0 || reg_idx>31) {
		// 	msg[0] = -4;
		// 	printk("%s recv msg: invalid reg_idx\n", side_ch_compatible_str);
		// } else {
			msg[0] = 0;
			reg_write(reg_idx*4, reg_val);
		// }
	} else {
		msg_size = 4;
		msg[0] = -1;
		printk("%s recv msg: invalid action_flag\n", side_ch_compatible_str);
	}

	skb_out = nlmsg_new(msg_size, 0);
	if(!skb_out)
	{
		printk(KERN_ERR "Failed to allocate new skb\n");
		return;
	}
	nlh=nlmsg_put(skb_out,0,0,NLMSG_DONE,msg_size,0);
	NETLINK_CB(skb_out).dst_group = 0; /* not in mcast group */

	memcpy(nlmsg_data(nlh),msg,msg_size);

	res=nlmsg_unicast(nl_sk,skb_out,pid);

	if(res<0)
		printk(KERN_INFO "Error while sending back to user\n");
}

static int dev_probe(struct platform_device *pdev) {
	struct netlink_kernel_cfg cfg = {
		.input = side_ch_nl_recv_msg,
	};

	struct device_node *np = pdev->dev.of_node;
	struct resource *io;
	int err=1, i;

	printk("\n");

	if (np) {
		const struct of_device_id *match;

		match = of_match_node(dev_of_ids, np);
		if (match) {
			printk("%s dev_probe: match!\n", side_ch_compatible_str);
			err = 0;
		}
	}

	if (err)
		return err;

	/* Request and map I/O memory */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base_addr = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(base_addr))
		return PTR_ERR(base_addr);

	printk("%s dev_probe: io start 0x%p end 0x%p name %s flags 0x%08x desc %s\n", side_ch_compatible_str, (void*)io->start, (void*)io->end, io->name, (u32)io->flags, (char*)io->desc);
	printk("%s dev_probe: base_addr 0x%p\n", side_ch_compatible_str, base_addr);

	printk("%s dev_probe: succeed!\n", side_ch_compatible_str);

	// --------------initialize netlink--------------
	//nl_sk = netlink_kernel_create(&init_net, NETLINK_USER, &cfg);
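	// user space must open its netlink socket with the same protocol, i.e.
	// socket(AF_NETLINK, SOCK_RAW, NETLINK_USERSOCK), for its messages to reach this driver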
	nl_sk = netlink_kernel_create(&init_net, NETLINK_USERSOCK, &cfg);
	if(!nl_sk) {
		printk(KERN_ALERT "%s dev_probe: Error creating socket.\n", side_ch_compatible_str);
		return -10;
	}

	//-----------------initialize fpga----------------
	// reset the side channel logic: hold MULTI_RST low, pulse it high, then release
	for (i=0;i<8;i++)
		SIDE_CH_REG_MULTI_RST_write(0);
	for (i=0;i<32;i++)
		SIDE_CH_REG_MULTI_RST_write(0xFFFFFFFF);
	for (i=0;i<8;i++)
		SIDE_CH_REG_MULTI_RST_write(0);

	// chan_to_pl = dma_request_slave_channel(&(pdev->dev), "rx_dma_mm2s");
	// if (IS_ERR(chan_to_pl)) {
	// 	err = PTR_ERR(chan_to_pl);
	// 	pr_err("%s dev_probe: No channel to PL. %d\n",side_ch_compatible_str,err);
	// 	goto free_chan_to_pl;
	// }

	chan_to_ps = dma_request_slave_channel(&(pdev->dev), "tx_dma_s2mm");
	if (!chan_to_ps) {
		// dma_request_slave_channel() returns NULL (not an ERR_PTR) on failure
		err = -ENODEV;
		pr_err("%s dev_probe: No channel to PS. %d\n", side_ch_compatible_str, err);
		return err;
	}

	printk("%s dev_probe: DMA channel setup successfully. chan_to_pl 0x%p chan_to_ps 0x%p\n",side_ch_compatible_str, chan_to_pl, chan_to_ps);

	// res = dma_loopback_test(3, 512);
	// printk(KERN_INFO "dma_loopback_test(3, 512) res %d\n", res);

	err = init_side_channel();
	printk("%s dev_probe: init_side_channel() err %d\n",side_ch_compatible_str, err);

	printk("%s dev_probe: num_eq_init %d\n",side_ch_compatible_str, num_eq_init);
	// SIDE_CH_REG_CONFIG_write(0x6001); // match addr1 and addr2; bit12 FC; bit13 addr1; bit14 addr2
	SIDE_CH_REG_CONFIG_write(0x0001); // match all packets by default; bit12 FC; bit13 addr1; bit14 addr2
	SIDE_CH_REG_NUM_EQ_write(num_eq_init); // capture CSI + num_eq_init equalizer outputs by default

	return(err);

	// err = dma_loopback_test(7, 512);
	// if (err == 0)
	// 	return(err);
	// else
	// 	dma_release_channel(chan_to_ps);

// free_chan_to_pl:
// 	err = -1;
// 	dma_release_channel(chan_to_pl);
// 	return err;
}

static int dev_remove(struct platform_device *pdev)
{
	printk("\n");

	printk("%s dev_remove: release nl_sk\n", side_ch_compatible_str);
	netlink_kernel_release(nl_sk);

	pr_info("%s dev_remove: dropped chan_to_pl 0x%p\n", side_ch_compatible_str, chan_to_pl);
	if (chan_to_pl != NULL) {
		pr_info("%s dev_remove: dropped channel %s\n", side_ch_compatible_str, dma_chan_name(chan_to_pl));
		// dmaengine_terminate_all(chan_to_pl); //this also terminates sdr.ko; do not use
		dma_release_channel(chan_to_pl);
	}

	pr_info("%s dev_remove: dropped chan_to_ps 0x%p\n", side_ch_compatible_str, chan_to_ps);
	if (chan_to_ps != NULL) {
		pr_info("%s dev_remove: dropped channel %s\n", side_ch_compatible_str, dma_chan_name(chan_to_ps));
		// dmaengine_terminate_all(chan_to_ps); //this also terminates sdr.ko; do not use
		dma_release_channel(chan_to_ps);
	}

	if (side_info_buf != NULL)
		kfree(side_info_buf);

	printk("%s dev_remove: base_addr 0x%p\n", side_ch_compatible_str, base_addr);
	printk("%s dev_remove: succeed!\n", side_ch_compatible_str);
	return 0;
}

static struct platform_driver dev_driver = {
	.driver = {
		.name = "sdr,side_ch",
		.owner = THIS_MODULE,
		.of_match_table = dev_of_ids,
	},
	.probe = dev_probe,
	.remove = dev_remove,
};

module_platform_driver(dev_driver);

MODULE_AUTHOR("Xianjun Jiao");
MODULE_DESCRIPTION("sdr,side_ch");
MODULE_LICENSE("GPL v2");