Lines matching full:net_device in drivers/net/hyperv/netvsc.c (identifier search; each hit shows the source line number, the matching line, the enclosing function, and an "argument"/"local" usage tag)
35 int netvsc_switch_datapath(struct net_device *ndev, bool vf) in netvsc_switch_datapath()
130 struct netvsc_device *net_device; in alloc_net_device() local
132 net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL); in alloc_net_device()
133 if (!net_device) in alloc_net_device()
136 init_waitqueue_head(&net_device->wait_drain); in alloc_net_device()
137 net_device->destroy = false; in alloc_net_device()
138 net_device->tx_disable = true; in alloc_net_device()
140 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; in alloc_net_device()
141 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; in alloc_net_device()
143 init_completion(&net_device->channel_init_wait); in alloc_net_device()
144 init_waitqueue_head(&net_device->subchan_open); in alloc_net_device()
145 INIT_WORK(&net_device->subchan_work, netvsc_subchan_work); in alloc_net_device()
147 return net_device; in alloc_net_device()
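
The alloc_net_device() matches above reconstruct nearly the whole constructor. A hedged sketch, assuming only that the !net_device guard returns NULL:

static struct netvsc_device *alloc_net_device(void)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;
	net_device->tx_disable = true;	/* tx stays off until the handshake completes */

	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

	init_completion(&net_device->channel_init_wait);
	init_waitqueue_head(&net_device->subchan_open);
	INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);

	return net_device;
}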
179 struct netvsc_device *net_device, in netvsc_revoke_recv_buf() argument
180 struct net_device *ndev) in netvsc_revoke_recv_buf()
191 if (net_device->recv_section_cnt) { in netvsc_revoke_recv_buf()
193 revoke_packet = &net_device->revoke_packet; in netvsc_revoke_recv_buf()
224 net_device->recv_section_cnt = 0; in netvsc_revoke_recv_buf()
229 struct netvsc_device *net_device, in netvsc_revoke_send_buf() argument
230 struct net_device *ndev) in netvsc_revoke_send_buf()
241 if (net_device->send_section_cnt) { in netvsc_revoke_send_buf()
243 revoke_packet = &net_device->revoke_packet; in netvsc_revoke_send_buf()
275 net_device->send_section_cnt = 0; in netvsc_revoke_send_buf()
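
netvsc_revoke_recv_buf() and netvsc_revoke_send_buf() share one shape: do nothing unless the host was actually granted a buffer (non-zero section count), reuse the embedded revoke_packet, and clear the count afterwards. A sketch of the receive side; the NVSP message fields and the vmbus_sendpacket() call sit between the matches and are assumptions:

static void netvsc_revoke_recv_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;

	/* Nothing to revoke if the host never acknowledged a buffer. */
	if (net_device->recv_section_cnt) {
		revoke_packet = &net_device->revoke_packet;

		/* ... fill in the NVSP revoke message and send it on
		 * device->channel; error handling elided ... */

		net_device->recv_section_cnt = 0;
	}
}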
280 struct netvsc_device *net_device, in netvsc_teardown_recv_gpadl() argument
281 struct net_device *ndev) in netvsc_teardown_recv_gpadl()
285 if (net_device->recv_buf_gpadl_handle.gpadl_handle) { in netvsc_teardown_recv_gpadl()
287 &net_device->recv_buf_gpadl_handle); in netvsc_teardown_recv_gpadl()
301 struct netvsc_device *net_device, in netvsc_teardown_send_gpadl() argument
302 struct net_device *ndev) in netvsc_teardown_send_gpadl()
306 if (net_device->send_buf_gpadl_handle.gpadl_handle) { in netvsc_teardown_send_gpadl()
308 &net_device->send_buf_gpadl_handle); in netvsc_teardown_send_gpadl()
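
The two GPADL teardown helpers are likewise symmetric: tear down only a handle that was actually established. A sketch of the receive side (the driver's handling of a failed teardown is not visible in the matches and is elided):

static void netvsc_teardown_recv_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	if (net_device->recv_buf_gpadl_handle.gpadl_handle) {
		/* Release the guest-physical address descriptor list that
		 * let the host address recv_buf directly. */
		vmbus_teardown_gpadl(device->channel,
				     &net_device->recv_buf_gpadl_handle);
	}
}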
321 int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) in netvsc_alloc_recv_comp_ring() argument
323 struct netvsc_channel *nvchan = &net_device->chan_table[q_idx]; in netvsc_alloc_recv_comp_ring()
327 size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data); in netvsc_alloc_recv_comp_ring()
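
Line 327 gives the sizing rule for the per-queue receive-completion ring: one recv_comp_data slot per completion entry, where recv_completion_cnt is recv_section_cnt + 1 (line 451). A hedged fragment; the mrc.slots destination and the vzalloc() call are assumptions:

	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
	size_t size;

	size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
	nvchan->mrc.slots = vzalloc(size);	/* assumed allocator and field */
	if (!nvchan->mrc.slots)
		return -ENOMEM;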
336 struct netvsc_device *net_device, in netvsc_init_buf() argument
340 struct net_device *ndev = hv_get_drvdata(device); in netvsc_init_buf()
350 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2) in netvsc_init_buf()
354 net_device->recv_buf = vzalloc(buf_size); in netvsc_init_buf()
355 if (!net_device->recv_buf) { in netvsc_init_buf()
363 net_device->recv_buf_size = buf_size; in netvsc_init_buf()
370 ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf, in netvsc_init_buf()
372 &net_device->recv_buf_gpadl_handle); in netvsc_init_buf()
380 init_packet = &net_device->channel_init_pkt; in netvsc_init_buf()
384 gpadl_handle = net_device->recv_buf_gpadl_handle.gpadl_handle; in netvsc_init_buf()
402 wait_for_completion(&net_device->channel_init_wait); in netvsc_init_buf()
425 net_device->recv_section_size = resp->sections[0].sub_alloc_size; in netvsc_init_buf()
426 net_device->recv_section_cnt = resp->sections[0].num_sub_allocs; in netvsc_init_buf()
429 if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size * in netvsc_init_buf()
430 (u64)net_device->recv_section_cnt > (u64)buf_size) { in netvsc_init_buf()
432 net_device->recv_section_size); in netvsc_init_buf()
438 struct netvsc_channel *nvchan = &net_device->chan_table[i]; in netvsc_init_buf()
440 nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL); in netvsc_init_buf()
451 net_device->recv_completion_cnt = net_device->recv_section_cnt + 1; in netvsc_init_buf()
452 ret = netvsc_alloc_recv_comp_ring(net_device, 0); in netvsc_init_buf()
460 net_device->send_buf = vzalloc(buf_size); in netvsc_init_buf()
461 if (!net_device->send_buf) { in netvsc_init_buf()
467 net_device->send_buf_size = buf_size; in netvsc_init_buf()
473 ret = vmbus_establish_gpadl(device->channel, net_device->send_buf, in netvsc_init_buf()
475 &net_device->send_buf_gpadl_handle); in netvsc_init_buf()
483 init_packet = &net_device->channel_init_pkt; in netvsc_init_buf()
487 net_device->send_buf_gpadl_handle.gpadl_handle; in netvsc_init_buf()
504 wait_for_completion(&net_device->channel_init_wait); in netvsc_init_buf()
518 net_device->send_section_size = init_packet->msg. in netvsc_init_buf()
520 if (net_device->send_section_size < NETVSC_MTU_MIN) { in netvsc_init_buf()
522 net_device->send_section_size); in netvsc_init_buf()
528 net_device->send_section_cnt = buf_size / net_device->send_section_size; in netvsc_init_buf()
531 net_device->send_section_size, net_device->send_section_cnt); in netvsc_init_buf()
534 net_device->send_section_map = bitmap_zalloc(net_device->send_section_cnt, in netvsc_init_buf()
536 if (!net_device->send_section_map) { in netvsc_init_buf()
544 netvsc_revoke_recv_buf(device, net_device, ndev); in netvsc_init_buf()
545 netvsc_revoke_send_buf(device, net_device, ndev); in netvsc_init_buf()
546 netvsc_teardown_recv_gpadl(device, net_device, ndev); in netvsc_init_buf()
547 netvsc_teardown_send_gpadl(device, net_device, ndev); in netvsc_init_buf()
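
netvsc_init_buf() validates the host's reply before trusting it: lines 429-430 reject receive sections that are smaller than the minimum MTU or whose total would overrun the offered buffer, and line 520 applies the size floor to the send side. A standalone restatement of the receive-side check; the helper name is hypothetical, NETVSC_MTU_MIN is the driver's own macro:

/* Reject a reply whose sections are under the MTU floor or whose
 * size * count total (computed in 64 bits to dodge u32 overflow)
 * spills past the buffer the guest offered. */
static bool recv_sections_ok(u32 section_size, u32 section_cnt, u32 buf_size)
{
	if (section_size < NETVSC_MTU_MIN)
		return false;
	return (u64)section_size * (u64)section_cnt <= (u64)buf_size;
}

Note the cleanup label at lines 544-547: each buffer is revoked from the host before its GPADL is torn down, the same ordering netvsc_device_remove() uses.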
555 struct netvsc_device *net_device, in negotiate_nvsp_ver() argument
559 struct net_device *ndev = hv_get_drvdata(device); in negotiate_nvsp_ver()
578 wait_for_completion(&net_device->channel_init_wait); in negotiate_nvsp_ver()
617 struct netvsc_device *net_device, in netvsc_connect_vsp() argument
620 struct net_device *ndev = hv_get_drvdata(device); in netvsc_connect_vsp()
629 init_packet = &net_device->channel_init_pkt; in netvsc_connect_vsp()
633 if (negotiate_nvsp_ver(device, net_device, init_packet, in netvsc_connect_vsp()
635 net_device->nvsp_version = ver_list[i]; in netvsc_connect_vsp()
644 if (hv_is_isolation_supported() && net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) { in netvsc_connect_vsp()
646 net_device->nvsp_version, NVSP_PROTOCOL_VERSION_61); in netvsc_connect_vsp()
651 pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version); in netvsc_connect_vsp()
656 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4) in netvsc_connect_vsp()
680 ret = netvsc_init_buf(device, net_device, device_info); in netvsc_connect_vsp()
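
netvsc_connect_vsp() walks a protocol-version list from newest to oldest until negotiate_nvsp_ver() succeeds, then enforces a floor for isolated guests (line 644). A hedged sketch of the loop; the exact contents of ver_list and the -EPROTONOSUPPORT error code are assumptions:

	static const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
		NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
	};
	int i;

	init_packet = &net_device->channel_init_pkt;

	/* Try the newest version first; step down until the host agrees. */
	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	/* Isolated VMs cannot run below NVSP 6.1 (line 644). */
	if (hv_is_isolation_supported() &&
	    net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61)
		return -EPROTONOSUPPORT;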
691 struct net_device *ndev = hv_get_drvdata(device); in netvsc_device_remove()
693 struct netvsc_device *net_device in netvsc_device_remove() local
701 netvsc_revoke_recv_buf(device, net_device, ndev); in netvsc_device_remove()
703 netvsc_teardown_recv_gpadl(device, net_device, ndev); in netvsc_device_remove()
705 netvsc_revoke_send_buf(device, net_device, ndev); in netvsc_device_remove()
707 netvsc_teardown_send_gpadl(device, net_device, ndev); in netvsc_device_remove()
712 for (i = 0; i < net_device->num_chn; i++) { in netvsc_device_remove()
720 napi_disable(&net_device->chan_table[i].napi); in netvsc_device_remove()
723 netif_napi_del(&net_device->chan_table[i].napi); in netvsc_device_remove()
727 * At this point, no one should be accessing net_device in netvsc_device_remove()
740 netvsc_teardown_recv_gpadl(device, net_device, ndev); in netvsc_device_remove()
741 netvsc_teardown_send_gpadl(device, net_device, ndev); in netvsc_device_remove()
745 free_netvsc_device_rcu(net_device); in netvsc_device_remove()
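
The GPADL teardown helpers appear twice in netvsc_device_remove(): at lines 703/707 and again at 740/741. A hedged sketch of the ordering, where the vmbus_proto_version guards are an assumption offered to explain the duplication (older hosts want teardown before the channel closes, newer ones after):

	netvsc_revoke_recv_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_recv_gpadl(device, net_device, ndev);

	netvsc_revoke_send_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_send_gpadl(device, net_device, ndev);

	/* ... disable and delete per-channel NAPI, close the channel ... */

	if (vmbus_proto_version >= VERSION_WIN10) {
		netvsc_teardown_recv_gpadl(device, net_device, ndev);
		netvsc_teardown_send_gpadl(device, net_device, ndev);
	}

	free_netvsc_device_rcu(net_device);	/* freed after an RCU grace period */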
751 static inline void netvsc_free_send_slot(struct netvsc_device *net_device, in netvsc_free_send_slot() argument
754 sync_change_bit(index, net_device->send_section_map); in netvsc_free_send_slot()
757 static void netvsc_send_tx_complete(struct net_device *ndev, in netvsc_send_tx_complete()
758 struct netvsc_device *net_device, in netvsc_send_tx_complete() argument
785 netvsc_free_send_slot(net_device, send_index); in netvsc_send_tx_complete()
788 tx_stats = &net_device->chan_table[q_idx].tx_stats; in netvsc_send_tx_complete()
800 atomic_dec_return(&net_device->chan_table[q_idx].queue_sends); in netvsc_send_tx_complete()
802 if (unlikely(net_device->destroy)) { in netvsc_send_tx_complete()
804 wake_up(&net_device->wait_drain); in netvsc_send_tx_complete()
808 if (netif_tx_queue_stopped(txq) && !net_device->tx_disable && in netvsc_send_tx_complete()
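
netvsc_send_tx_complete() does the per-completion bookkeeping visible above: release the send-buffer slot, decrement the in-flight count, wake a waiter draining the device on destroy, and restart a stopped tx queue. A fragment-level sketch; the exact wake threshold (the ring-buffer headroom test) is elided:

	if (send_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, send_index);

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (unlikely(net_device->destroy)) {
		if (queue_sends == 0)
			wake_up(&net_device->wait_drain);	/* last packet drained */
	} else if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
		   queue_sends < 1) {
		netif_tx_wake_queue(txq);
	}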
817 static void netvsc_send_completion(struct net_device *ndev, in netvsc_send_completion()
818 struct netvsc_device *net_device, in netvsc_send_completion() argument
841 complete(&net_device->channel_init_wait); in netvsc_send_completion()
913 netvsc_send_tx_complete(ndev, net_device, incoming_channel, in netvsc_send_completion()
925 memcpy(&net_device->channel_init_pkt, nvsp_packet, in netvsc_send_completion()
927 complete(&net_device->channel_init_wait); in netvsc_send_completion()
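
Lines 841 and 925-927 are the other half of every wait_for_completion(&net_device->channel_init_wait) in netvsc_init_buf() and negotiate_nvsp_ver(): the completion handler copies the host's reply into channel_init_pkt where the waiter can read it, then signals the completion. The memcpy length is truncated in the match; sizeof(struct nvsp_message) is an assumption:

	/* Init-time response: publish the reply, then release the waiter. */
	memcpy(&net_device->channel_init_pkt, nvsp_packet,
	       sizeof(struct nvsp_message));
	complete(&net_device->channel_init_wait);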
930 static u32 netvsc_get_next_send_section(struct netvsc_device *net_device) in netvsc_get_next_send_section() argument
932 unsigned long *map_addr = net_device->send_section_map; in netvsc_get_next_send_section()
935 for_each_clear_bit(i, map_addr, net_device->send_section_cnt) { in netvsc_get_next_send_section()
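
Send-buffer slots are managed as a bitmap: netvsc_free_send_slot() clears a bit with sync_change_bit() (line 754), and netvsc_get_next_send_section() claims one by scanning for a clear bit and setting it atomically, so concurrent queues cannot grab the same section. A sketch; the NETVSC_INVALID_INDEX sentinel on exhaustion is an assumption:

static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long *map_addr = net_device->send_section_map;
	u32 i;

	for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
		/* Retry via the loop if another CPU won the race. */
		if (sync_test_and_set_bit(i, map_addr) == 0)
			return i;
	}

	return NETVSC_INVALID_INDEX;	/* all sections in flight */
}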
943 static void netvsc_copy_to_send_buf(struct netvsc_device *net_device, in netvsc_copy_to_send_buf() argument
951 char *start = net_device->send_buf; in netvsc_copy_to_send_buf()
952 char *dest = start + (section_index * net_device->send_section_size) in netvsc_copy_to_send_buf()
961 remain = packet->total_data_buflen & (net_device->pkt_align - 1); in netvsc_copy_to_send_buf()
963 padding = net_device->pkt_align - remain; in netvsc_copy_to_send_buf()
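
Lines 961-963 pad each packet in the send buffer out to pkt_align. Since pkt_align is a power of two, a mask yields the remainder directly; a standalone restatement with a hypothetical helper name:

static inline u32 netvsc_pad_len(u32 buflen, u32 pkt_align)
{
	u32 remain = buflen & (pkt_align - 1);	/* buflen % pkt_align */

	return remain ? pkt_align - remain : 0;
}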
1061 struct netvsc_device *net_device, in netvsc_send_pkt() argument
1069 &net_device->chan_table[packet->q_idx]; in netvsc_send_pkt()
1071 struct net_device *ndev = hv_get_drvdata(device); in netvsc_send_pkt()
1143 !net_device->tx_disable) { in netvsc_send_pkt()
1186 int netvsc_send(struct net_device *ndev, in netvsc_send()
1194 struct netvsc_device *net_device in netvsc_send() local
1207 if (unlikely(!net_device || net_device->destroy)) in netvsc_send()
1210 nvchan = &net_device->chan_table[packet->q_idx]; in netvsc_send()
1219 return netvsc_send_pkt(device, packet, net_device, pb, skb); in netvsc_send()
1226 try_batch = msd_len > 0 && msdp->count < net_device->max_pkt; in netvsc_send()
1227 if (try_batch && msd_len + pktlen + net_device->pkt_align < in netvsc_send()
1228 net_device->send_section_size) { in netvsc_send()
1232 net_device->send_section_size) { in netvsc_send()
1236 } else if (pktlen + net_device->pkt_align < in netvsc_send()
1237 net_device->send_section_size) { in netvsc_send()
1238 section_index = netvsc_get_next_send_section(net_device); in netvsc_send()
1255 netvsc_copy_to_send_buf(net_device, in netvsc_send()
1293 int m_ret = netvsc_send_pkt(device, msd_send, net_device, in netvsc_send()
1297 netvsc_free_send_slot(net_device, in netvsc_send()
1304 ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb); in netvsc_send()
1307 netvsc_free_send_slot(net_device, section_index); in netvsc_send()
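
The core of netvsc_send() is the three-way batching decision at lines 1226-1238: append to the pending multi-send section when the whole aligned packet fits, copy only the RNDIS header when just that fits (sending the payload by GPA), or claim a fresh section. A hedged sketch; cp_partial and the reuse of msdp->pkt->send_buf_index are assumptions consistent with the branch conditions shown:

	/* Batch only if a send is pending and we are under the per-section
	 * packet limit. */
	try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;

	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		/* Whole packet fits: append to the pending section. */
		section_index = msdp->pkt->send_buf_index;
	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		/* Only the RNDIS header fits: copy it, send data by GPA. */
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;
	} else if (pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		/* Start a fresh section for this packet. */
		section_index = netvsc_get_next_send_section(net_device);
	}

On failure after a slot was claimed, line 1307 returns it to the bitmap with netvsc_free_send_slot().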
1313 static int send_recv_completions(struct net_device *ndev, in send_recv_completions()
1368 static void enq_receive_complete(struct net_device *ndev, in enq_receive_complete()
1398 static int netvsc_receive(struct net_device *ndev, in netvsc_receive()
1399 struct netvsc_device *net_device, in netvsc_receive() argument
1410 char *recv_buf = net_device->recv_buf; in netvsc_receive()
1464 if (unlikely(offset > net_device->recv_buf_size || in netvsc_receive()
1465 buflen > net_device->recv_buf_size - offset)) { in netvsc_receive()
1478 if (unlikely(buflen > net_device->recv_section_size)) { in netvsc_receive()
1483 buflen, net_device->recv_section_size); in netvsc_receive()
1495 ret = rndis_filter_receive(ndev, net_device, in netvsc_receive()
1505 enq_receive_complete(ndev, net_device, q_idx, in netvsc_receive()
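
netvsc_receive() treats the host's (offset, buflen) pairs as untrusted: lines 1464-1465 keep them inside recv_buf (with the subtraction ordered to avoid u32 overflow), and line 1478 caps each sub-allocation at the negotiated section size. A restatement with a hypothetical helper:

static bool netvsc_recv_range_ok(const struct netvsc_device *net_device,
				 u32 offset, u32 buflen)
{
	/* offset checked first so recv_buf_size - offset cannot wrap. */
	if (offset > net_device->recv_buf_size ||
	    buflen > net_device->recv_buf_size - offset)
		return false;

	return buflen <= net_device->recv_section_size;
}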
1511 static void netvsc_send_table(struct net_device *ndev, in netvsc_send_table()
1557 static void netvsc_send_vf(struct net_device *ndev, in netvsc_send_vf()
1581 static void netvsc_receive_inband(struct net_device *ndev, in netvsc_receive_inband()
1610 struct netvsc_device *net_device, in netvsc_process_raw_pkt() argument
1611 struct net_device *ndev, in netvsc_process_raw_pkt()
1622 netvsc_send_completion(ndev, net_device, channel, desc, budget); in netvsc_process_raw_pkt()
1626 return netvsc_receive(ndev, net_device, nvchan, desc); in netvsc_process_raw_pkt()
1629 netvsc_receive_inband(ndev, net_device, desc); in netvsc_process_raw_pkt()
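
netvsc_process_raw_pkt() fans out on the VMBus packet type; the three handlers at lines 1622-1629 imply a switch like the following (the enum values are the standard VMBus packet types, but their pairing here is an assumption):

	switch (desc->type) {
	case VM_PKT_COMP:
		netvsc_send_completion(ndev, net_device, channel, desc, budget);
		break;
	case VM_PKT_DATA_USING_XFER_PAGES:
		return netvsc_receive(ndev, net_device, nvchan, desc);
	case VM_PKT_DATA_INBAND:
		netvsc_receive_inband(ndev, net_device, desc);
		break;
	default:
		/* Unknown type: log and drop (handling assumed). */
		break;
	}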
1656 struct netvsc_device *net_device = nvchan->net_device; in netvsc_poll() local
1659 struct net_device *ndev = hv_get_drvdata(device); in netvsc_poll()
1670 work_done += netvsc_process_raw_pkt(device, nvchan, net_device, in netvsc_poll()
1679 ret = send_recv_completions(ndev, net_device, nvchan); in netvsc_poll()
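
netvsc_poll() is a standard NAPI loop over the channel ring: process descriptors until the budget runs out, then flush the receive completions batched along the way (line 1679). A hedged sketch using the generic VMBus packet iterator; carrying the cursor in nvchan->desc across poll invocations is an assumption:

	if (!nvchan->desc)
		nvchan->desc = hv_pkt_iter_first(channel);

	while (nvchan->desc && work_done < budget) {
		work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
						    budget, nvchan->desc);
		nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
	}

	ret = send_recv_completions(ndev, net_device, nvchan);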
1727 struct netvsc_device *net_device; in netvsc_device_add() local
1728 struct net_device *ndev = hv_get_drvdata(device); in netvsc_device_add()
1731 net_device = alloc_net_device(); in netvsc_device_add()
1732 if (!net_device) in netvsc_device_add()
1751 struct netvsc_channel *nvchan = &net_device->chan_table[i]; in netvsc_device_add()
1754 nvchan->net_device = net_device; in netvsc_device_add()
1775 netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll); in netvsc_device_add()
1785 netvsc_channel_cb, net_device->chan_table); in netvsc_device_add()
1795 napi_enable(&net_device->chan_table[0].napi); in netvsc_device_add()
1797 &net_device->chan_table[0].napi); in netvsc_device_add()
1799 &net_device->chan_table[0].napi); in netvsc_device_add()
1802 ret = netvsc_connect_vsp(device, net_device, device_info); in netvsc_device_add()
1812 rcu_assign_pointer(net_device_ctx->nvdev, net_device); in netvsc_device_add()
1814 return net_device; in netvsc_device_add()
1820 napi_disable(&net_device->chan_table[0].napi); in netvsc_device_add()
1826 netif_napi_del(&net_device->chan_table[0].napi); in netvsc_device_add()
1829 free_netvsc_device(&net_device->rcu); in netvsc_device_add()
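
netvsc_device_add() wires each channel slot back to its owning device before opening the channel, so netvsc_channel_cb() can recover its context from the chan_table pointer passed at line 1785. A sketch of the loop at lines 1751-1754 (the VRSS_CHANNEL_MAX bound and the channel back-pointer are assumptions):

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		struct netvsc_channel *nvchan = &net_device->chan_table[i];

		nvchan->channel = device->channel;	/* assumed back-pointer */
		nvchan->net_device = net_device;
	}

The ordering in the matches is also the contract: register NAPI (1775), open the channel with netvsc_channel_cb (1785), enable NAPI (1795), negotiate with the VSP (1802), and only then publish the device via rcu_assign_pointer (1812); the error path unwinds in reverse and frees through RCU (1829).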