1 /*
2 * Copyright (c) 2013 Corey Tabaka
3 * Copyright (c) 2015 Travis Geiselbrecht
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files
7 * (the "Software"), to deal in the Software without restriction,
8 * including without limitation the rights to use, copy, modify, merge,
9 * publish, distribute, sublicense, and/or sell copies of the Software,
10 * and to permit persons to whom the Software is furnished to do so,
11 * subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be
14 * included in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
20 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include <reg.h>
26 #include <err.h>
27 #include <pcnet.h>
28 #include <debug.h>
29 #include <trace.h>
30 #include <assert.h>
31 #include <arch/x86.h>
32 #include <platform/pc.h>
33 #include <platform/pcnet.h>
34 #include <platform/interrupts.h>
35 #include <kernel/thread.h>
36 #include <kernel/mutex.h>
37 #include <kernel/event.h>
38 #include <dev/class/netif.h>
39 #include <dev/pci.h>
40 #include <stdlib.h>
41 #include <malloc.h>
42 #include <string.h>
43 #include <lwip/pbuf.h>
44 #include <lk/init.h>
45
46 #define LOCAL_TRACE 0
47
48 #define PCNET_INIT_TIMEOUT 20000
49 #define MAX_PACKET_SIZE 1518
50
51 #define QEMU_IRQ_BUG_WORKAROUND 1
52
/* Per-device driver state, hung off struct device::state. */
struct pcnet_state {
    int irq;            /* platform interrupt vector (PCI line + INT_BASE) */
    addr_t base;        /* I/O port base from PCI BAR */

    uint8_t padr[6];    /* station (MAC) address read from the EEPROM ports */

    /* temporary init block; freed by the bottom half once IDON is seen */
    struct init_block_32 *ib;

    /* receive/transmit descriptor rings (SSIZE32 / style 3 format) */
    struct rd_style3 *rd;
    struct td_style3 *td;

    /* pbufs currently attached to each rx/tx descriptor slot */
    struct pbuf **rx_buffers;
    struct pbuf **tx_buffers;

    /* queue accounting */
    int rd_head;        /* next rx descriptor to examine */
    int td_head;        /* next free tx descriptor */
    int td_tail;        /* oldest in-flight tx descriptor */

    int rd_count;       /* rx ring size */
    int td_count;       /* tx ring size */

    int tx_pending;     /* number of tx descriptors handed to the controller */

    mutex_t tx_lock;    /* protects td ring, tx_buffers, tx accounting */

    /* bottom half state */
    event_t event;          /* signaled by the irq top half */
    event_t initialized;    /* signaled once the controller reports IDON */
    bool done;              /* set to terminate the bottom-half thread */

    struct netstack_state *netstack_state;  /* opaque handle for the net stack */
};
86
/* driver setup / PCI discovery */
static status_t pcnet_init(struct device *dev);
static status_t pcnet_read_pci_config(struct device *dev, pci_location_t *loc);

/* interrupt top half */
static enum handler_return pcnet_irq_handler(void *arg);

/* bottom-half service thread and its worker routines */
static int pcnet_thread(void *arg);
static bool pcnet_service_tx(struct device *dev);
static bool pcnet_service_rx(struct device *dev);

/* netif class hooks */
static status_t pcnet_set_state(struct device *dev, struct netstack_state *state);
static ssize_t pcnet_get_hwaddr(struct device *dev, void *buf, size_t max_len);
static ssize_t pcnet_get_mtu(struct device *dev);

static status_t pcnet_output(struct device *dev, struct pbuf *p);

/* netif class ops table exported to the device framework */
static struct netif_ops pcnet_ops = {
    .std = {
        .init = pcnet_init,
    },

    .set_state = pcnet_set_state,
    .get_hwaddr = pcnet_get_hwaddr,
    .get_mtu = pcnet_get_mtu,

    .output = pcnet_output,
};

DRIVER_EXPORT(netif, &pcnet_ops.std);
115
pcnet_read_csr(struct device * dev,uint8_t rap)116 static inline uint32_t pcnet_read_csr(struct device *dev, uint8_t rap)
117 {
118 struct pcnet_state *state = dev->state;
119
120 outpd(state->base + REG_RAP, rap);
121 return inpd(state->base + REG_RDP);
122 }
123
pcnet_write_csr(struct device * dev,uint8_t rap,uint16_t data)124 static inline void pcnet_write_csr(struct device *dev, uint8_t rap, uint16_t data)
125 {
126 struct pcnet_state *state = dev->state;
127
128 outpd(state->base + REG_RAP, rap);
129 outpd(state->base + REG_RDP, data);
130 }
131
pcnet_read_bcr(struct device * dev,uint8_t rap)132 static inline uint32_t pcnet_read_bcr(struct device *dev, uint8_t rap)
133 {
134 struct pcnet_state *state = dev->state;
135
136 outpd(state->base + REG_RAP, rap);
137 return inpd(state->base + REG_BDP);
138 }
139
pcnet_write_bcr(struct device * dev,uint8_t rap,uint16_t data)140 static inline void pcnet_write_bcr(struct device *dev, uint8_t rap, uint16_t data)
141 {
142 struct pcnet_state *state = dev->state;
143
144 outpd(state->base + REG_RAP, rap);
145 outpd(state->base + REG_BDP, data);
146 }
147
pcnet_init(struct device * dev)148 static status_t pcnet_init(struct device *dev)
149 {
150 status_t res = NO_ERROR;
151 pci_location_t loc;
152 int i;
153
154 const struct platform_pcnet_config *config = dev->config;
155
156 if (!config)
157 return ERR_NOT_CONFIGURED;
158
159 if (pci_find_pci_device(&loc, config->device_id, config->vendor_id, config->index) != _PCI_SUCCESSFUL)
160 return ERR_NOT_FOUND;
161
162 struct pcnet_state *state = calloc(1, sizeof(struct pcnet_state));
163 if (!state)
164 return ERR_NO_MEMORY;
165
166 dev->state = state;
167
168 res = pcnet_read_pci_config(dev, &loc);
169 if (res)
170 goto error;
171
172 for (i=0; i < 6; i++)
173 state->padr[i] = inp(state->base + i);
174
175 LTRACEF("MAC: %02x:%02x:%02x:%02x:%02x:%02x\n", state->padr[0], state->padr[1], state->padr[2],
176 state->padr[3], state->padr[4], state->padr[5]);
177
178 /* put the controller into 32bit wide mode by performing a 32bit write to CSR0 */
179 outpd(state->base + 0, 0);
180
181 /* stop the controller for configuration */
182 pcnet_write_csr(dev, 0, CSR0_STOP);
183
184 /* setup 32bit (style 3) structures, burst, all CSR4 bits valid, TDM1[29] is ADD_FCS */
185 pcnet_write_csr(dev, 58, 3);
186
187 /* DMA plus enable */
188 pcnet_write_csr(dev, 4, pcnet_read_csr(dev, 4) | CSR4_DMAPLUS);
189
190 /* allocate 128 tx and 128 rx descriptor rings */
191 state->td_count = 128;
192 state->rd_count = 128;
193 state->td = memalign(16, state->td_count * DESC_SIZE);
194 state->rd = memalign(16, state->rd_count * DESC_SIZE);
195
196 state->rx_buffers = calloc(state->rd_count, sizeof(struct pbuf *));
197 state->tx_buffers = calloc(state->td_count, sizeof(struct pbuf *));
198
199 state->tx_pending = 0;
200
201 if (!state->td || !state->rd || !state->tx_buffers || !state->rx_buffers) {
202 res = ERR_NO_MEMORY;
203 goto error;
204 }
205
206 memset(state->td, 0, state->td_count * DESC_SIZE);
207 memset(state->rd, 0, state->rd_count * DESC_SIZE);
208
209 /* allocate temporary init block space */
210 state->ib = memalign(4, sizeof(struct init_block_32));
211 if (!state->ib) {
212 res = ERR_NO_MEMORY;
213 goto error;
214 }
215
216 LTRACEF("Init block addr: %p\n", state->ib);
217
218 /* setup init block */
219 state->ib->tlen = 7; // 128 descriptors
220 state->ib->rlen = 7; // 128 descriptors
221 state->ib->mode = 0;
222
223 state->ib->ladr = ~0;
224 state->ib->tdra = (uint32_t) state->td;
225 state->ib->rdra = (uint32_t) state->rd;
226
227 memcpy(state->ib->padr, state->padr, 6);
228
229 /* load the init block address */
230 pcnet_write_csr(dev, 1, (uint32_t) state->ib);
231 pcnet_write_csr(dev, 2, (uint32_t) state->ib >> 16);
232
233 /* setup receive descriptors */
234 for (i=0; i < state->rd_count; i++) {
235 //LTRACEF("Allocating pbuf %d\n", i);
236 struct pbuf *p = pbuf_alloc(PBUF_RAW, MAX_PACKET_SIZE, PBUF_RAM);
237
238 state->rd[i].rbadr = (uint32_t) p->payload;
239 state->rd[i].bcnt = -p->tot_len;
240 state->rd[i].ones = 0xf;
241 state->rd[i].own = 1;
242
243 state->rx_buffers[i] = p;
244 }
245
246 mutex_init(&state->tx_lock);
247
248 state->done = false;
249 event_init(&state->event, false, EVENT_FLAG_AUTOUNSIGNAL);
250 event_init(&state->initialized, false, 0);
251
252 /* start up a thread to process packet activity */
253 thread_resume(thread_create("[pcnet bh]", pcnet_thread, dev, DEFAULT_PRIORITY,
254 DEFAULT_STACK_SIZE));
255
256 register_int_handler(state->irq, pcnet_irq_handler, dev);
257 unmask_interrupt(state->irq);
258
259 #if QEMU_IRQ_BUG_WORKAROUND
260 register_int_handler(INT_BASE + 15, pcnet_irq_handler, dev);
261 unmask_interrupt(INT_BASE + 15);
262 #endif
263
264 /* wait for initialization to complete */
265 res = event_wait_timeout(&state->initialized, PCNET_INIT_TIMEOUT);
266 if (res) {
267 /* TODO: cancel bottom half thread and tear down device instance */
268 LTRACEF("Failed to wait for IDON: %d\n", res);
269 return res;
270 }
271
272 LTRACE_EXIT;
273 return res;
274
275 error:
276 LTRACEF("Error: %d\n", res);
277
278 if (state) {
279 free(state->td);
280 free(state->rd);
281 free(state->ib);
282 free(state->tx_buffers);
283 free(state->rx_buffers);
284 }
285
286 free(state);
287
288 return res;
289 }
290
pcnet_read_pci_config(struct device * dev,pci_location_t * loc)291 static status_t pcnet_read_pci_config(struct device *dev, pci_location_t *loc)
292 {
293 status_t res = NO_ERROR;
294 pci_config_t config;
295 uint8_t *buf = (uint8_t *) &config;
296 unsigned i;
297
298 DEBUG_ASSERT(dev->state);
299
300 struct pcnet_state *state = dev->state;
301
302 for (i=0; i < sizeof(config); i++)
303 pci_read_config_byte(loc, i, buf + i);
304
305 LTRACEF("Resources:\n");
306
307 for (i=0; i < countof(config.base_addresses); i++) {
308 if (config.base_addresses[i] & 0x1) {
309 LTRACEF(" BAR %d I/O REG: %04x\n", i, config.base_addresses[i] & ~0x3);
310
311 state->base = config.base_addresses[i] & ~0x3;
312 break;
313 }
314 }
315
316 if (!state->base) {
317 res = ERR_NOT_CONFIGURED;
318 goto error;
319 }
320
321 if (config.interrupt_line != 0xff) {
322 LTRACEF(" IRQ %u\n", config.interrupt_line);
323
324 state->irq = config.interrupt_line + INT_BASE;
325 } else {
326 res = ERR_NOT_CONFIGURED;
327 goto error;
328 }
329
330 LTRACEF("Command: %04x\n", config.command);
331 LTRACEF("Status: %04x\n", config.status);
332
333 pci_write_config_half(loc, PCI_CONFIG_COMMAND,
334 (config.command | PCI_COMMAND_IO_EN | PCI_COMMAND_BUS_MASTER_EN) & ~PCI_COMMAND_MEM_EN);
335
336 error:
337 return res;
338 }
339
pcnet_irq_handler(void * arg)340 static enum handler_return pcnet_irq_handler(void *arg)
341 {
342 struct device *dev = arg;
343 struct pcnet_state *state = dev->state;
344
345 mask_interrupt(state->irq);
346
347 #if QEMU_IRQ_BUG_WORKAROUND
348 mask_interrupt(INT_BASE + 15);
349 #endif
350
351 event_signal(&state->event, false);
352
353 return INT_RESCHEDULE;
354 }
355
/*
 * Bottom-half service thread.  Kicks off controller initialization, then
 * loops forever: wait for the top half to signal, read and acknowledge
 * CSR0 status, handle init-done (IDON) and error conditions, drain the
 * tx/rx rings, and re-enable interrupts.
 */
static int pcnet_thread(void *arg)
{
    DEBUG_ASSERT(arg);

    struct device *dev = arg;
    struct pcnet_state *state = dev->state;

    /* kick off init, enable ints, and start operation */
    pcnet_write_csr(dev, 0, CSR0_INIT | CSR0_IENA | CSR0_STRT);

    while (!state->done) {
        LTRACEF("Waiting for event.\n");
        //event_wait_timeout(&state->event, 5000);
        event_wait(&state->event);

        int csr0 = pcnet_read_csr(dev, 0);

        /* disable interrupts at the controller */
        pcnet_write_csr(dev, 0, csr0 & ~CSR0_IENA);

        LTRACEF("CSR0 = %04x\n", csr0);

#if LOCAL_TRACE
        if (csr0 & CSR0_RINT) TRACEF("RINT\n");
        if (csr0 & CSR0_TINT) TRACEF("TINT\n");
#endif

        if (csr0 & CSR0_IDON) {
            LTRACEF("IDON\n");

            /* free the init block that we no longer need */
            free(state->ib);
            state->ib = NULL;

            /* unblock pcnet_init(), which is waiting on this event */
            event_signal(&state->initialized, true);
        }

        if (csr0 & CSR0_ERR) {
            LTRACEF("ERR\n");

            /* TODO: handle errors, though not many need it */

            /* clear flags, preserve necessary enables */
            pcnet_write_csr(dev, 0, csr0 & (CSR0_TXON | CSR0_RXON | CSR0_IENA));
        }

        /* drain both rings until neither has work left.  NOTE: the single
         * '|' (not '||') is deliberate so both sides are serviced every
         * pass rather than short-circuiting after tx */
        bool again = !!(csr0 & (CSR0_RINT | CSR0_TINT));
        while (again) {
            again = pcnet_service_tx(dev) | pcnet_service_rx(dev);
        }

        /* enable interrupts at the controller */
        pcnet_write_csr(dev, 0, CSR0_IENA);
        unmask_interrupt(state->irq);

#if QEMU_IRQ_BUG_WORKAROUND
        unmask_interrupt(INT_BASE + 15);
#endif
    }

    return 0;
}
418
pcnet_service_tx(struct device * dev)419 static bool pcnet_service_tx(struct device *dev)
420 {
421 LTRACE_ENTRY;
422
423 struct pcnet_state *state = dev->state;
424
425 mutex_acquire(&state->tx_lock);
426
427 struct td_style3 *td = &state->td[state->td_tail];
428
429 if (state->tx_pending && td->own == 0) {
430 struct pbuf *p = state->tx_buffers[state->td_tail];
431 DEBUG_ASSERT(p);
432
433 state->tx_buffers[state->td_tail] = NULL;
434
435 LTRACEF("Retiring packet: td_tail=%d p=%p tot_len=%u\n", state->td_tail, p, p->tot_len);
436
437 state->tx_pending--;
438 state->td_tail = (state->td_tail + 1) % state->td_count;
439
440 if (td->err) {
441 LTRACEF("Descriptor error status encountered\n");
442 hexdump8(td, sizeof(*td));
443 }
444
445 mutex_release(&state->tx_lock);
446
447 pbuf_free(p);
448
449 LTRACE_EXIT;
450 return true;
451 } else {
452 mutex_release(&state->tx_lock);
453
454 #if 0
455 LTRACEF("Nothing to do for TX.\n");
456 for (int i=0; i < state->td_count; i++)
457 printf("%d ", state->td[i].own);
458 printf("\n");
459 #endif
460
461 LTRACE_EXIT;
462 return false;
463 }
464 }
465
pcnet_service_rx(struct device * dev)466 static bool pcnet_service_rx(struct device *dev)
467 {
468 LTRACE_ENTRY;
469
470 struct pcnet_state *state = dev->state;
471
472 struct rd_style3 *rd = &state->rd[state->rd_head];
473
474 if (rd->own == 0) {
475 struct pbuf *p = state->rx_buffers[state->rd_head];
476 DEBUG_ASSERT(p);
477
478 LTRACEF("Processing RX descriptor %d\n", state->rd_head);
479
480 if (rd->err) {
481 LTRACEF("Descriptor error status encountered\n");
482 hexdump8(rd, sizeof(*rd));
483 } else {
484 if (rd->mcnt <= p->tot_len) {
485
486 pbuf_realloc(p, rd->mcnt);
487
488 #if LOCAL_TRACE
489 LTRACEF("payload=%p len=%u\n", p->payload, p->tot_len);
490 hexdump8(p->payload, p->tot_len);
491 #endif
492
493 class_netstack_input(dev, state->netstack_state, p);
494
495 p = state->rx_buffers[state->rd_head] = pbuf_alloc(PBUF_RAW, MAX_PACKET_SIZE, PBUF_RAM);
496 } else {
497 LTRACEF("RX packet size error: mcnt = %u, buf len = %u\n", rd->mcnt, p->tot_len);
498 }
499 }
500
501 memset(rd, 0, sizeof(*rd));
502 memset(p->payload, 0, p->tot_len);
503
504 rd->rbadr = (uint32_t) p->payload;
505 rd->bcnt = -p->tot_len;
506 rd->ones = 0xf;
507 rd->own = 1;
508
509 state->rd_head = (state->rd_head + 1) % state->rd_count;
510
511 LTRACE_EXIT;
512 return true;
513 } else {
514 #if 0
515 LTRACEF("Nothing to do for RX: rd_head=%d.\n", state->rd_head);
516 for (int i=0; i < state->rd_count; i++)
517 printf("%d ", state->rd[i].own);
518 printf("\n");
519 #endif
520 }
521
522 LTRACE_EXIT;
523 return false;
524 }
525
pcnet_set_state(struct device * dev,struct netstack_state * netstack_state)526 static status_t pcnet_set_state(struct device *dev, struct netstack_state *netstack_state)
527 {
528 if (!dev)
529 return ERR_INVALID_ARGS;
530
531 if (!dev->state)
532 return ERR_NOT_CONFIGURED;
533
534 struct pcnet_state *state = dev->state;
535
536 state->netstack_state = netstack_state;
537
538 return NO_ERROR;
539 }
540
pcnet_get_hwaddr(struct device * dev,void * buf,size_t max_len)541 static ssize_t pcnet_get_hwaddr(struct device *dev, void *buf, size_t max_len)
542 {
543 if (!dev || !buf)
544 return ERR_INVALID_ARGS;
545
546 if (!dev->state)
547 return ERR_NOT_CONFIGURED;
548
549 struct pcnet_state *state = dev->state;
550
551 memcpy(buf, state->padr, MIN(sizeof(state->padr), max_len));
552
553 return sizeof(state->padr);
554 }
555
pcnet_get_mtu(struct device * dev)556 static ssize_t pcnet_get_mtu(struct device *dev)
557 {
558 if (!dev)
559 return ERR_INVALID_ARGS;
560
561 return 1500;
562 }
563
/*
 * netif output hook: queue pbuf p for transmission on the next free tx
 * descriptor and trigger transmit demand.
 *
 * Takes a reference on p (the caller retains its own); the reference is
 * released by pcnet_service_tx() once the controller has sent the frame.
 * Returns NO_ERROR, ERR_NOT_READY when the tx ring is full, or an argument/
 * configuration error.
 */
static status_t pcnet_output(struct device *dev, struct pbuf *p)
{
    LTRACE_ENTRY;

    if (!dev || !p)
        return ERR_INVALID_ARGS;

    if (!dev->state)
        return ERR_NOT_CONFIGURED;

    status_t res = NO_ERROR;
    struct pcnet_state *state = dev->state;

    mutex_acquire(&state->tx_lock);

    struct td_style3 *td = &state->td[state->td_head];

    /* head descriptor still owned by the controller: no free slot */
    if (td->own) {
        LTRACEF("TX descriptor ring full\n");
        res = ERR_NOT_READY; // maybe this should be ERR_NOT_ENOUGH_BUFFER?
        goto done;
    }

    /* keep the pbuf alive until tx completion; coalesce into one contiguous
     * buffer since each descriptor DMAs from a single address */
    pbuf_ref(p);
    p = pbuf_coalesce(p, PBUF_RAW);

#if LOCAL_TRACE
    LTRACEF("Queuing packet: td_head=%d p=%p tot_len=%u\n", state->td_head, p, p->tot_len);
    hexdump8(p->payload, p->tot_len);
#endif

    /* clear flags */
    memset(td, 0, sizeof(*td));

    td->tbadr = (uint32_t) p->payload;
    td->bcnt = -p->tot_len;   /* two's-complement buffer length */
    td->stp = 1;              /* start of packet */
    td->enp = 1;              /* end of packet (single-descriptor frame) */
    td->add_no_fcs = 1;
    td->ones = 0xf;

    state->tx_buffers[state->td_head] = p;
    state->tx_pending++;

    state->td_head = (state->td_head + 1) % state->td_count;

    /* own=1 is set last so the controller only sees a fully-built descriptor */
    td->own = 1;

    /* trigger tx */
    pcnet_write_csr(dev, 0, CSR0_TDMD);

done:
    mutex_release(&state->tx_lock);
    LTRACE_EXIT;
    return res;
}
620
/* First AMD PCnet-PCI function found on the bus: vendor 0x1022 (AMD),
 * device 0x2000 (PCnet-PCI family). */
static const struct platform_pcnet_config pcnet0_config = {
    .vendor_id = 0x1022,
    .device_id = 0x2000,
    .index = 0,
};

DEVICE_INSTANCE(netif, pcnet0, &pcnet0_config);
628
/* LK init hook: initialize the pcnet0 instance and register it with the
 * netif class at platform init level. */
static void pcnet_init_hook(uint level)
{
    device_init(device_get_by_name(netif, pcnet0));
    class_netif_add(device_get_by_name(netif, pcnet0));
}

LK_INIT_HOOK(pcnet, &pcnet_init_hook, LK_INIT_LEVEL_PLATFORM);
636
637