diff --git a/src/receive.c b/src/receive.c
index 92a47c8..9ea601f 100644
--- a/src/receive.c
+++ b/src/receive.c
@@ -342,7 +342,7 @@ static void packet_consume_data_done(struct sk_buff *skb, struct endpoint *endpo
 	if (unlikely(routed_peer != peer))
 		goto dishonest_packet_peer;
 
-	if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) {
+	if (unlikely(netif_rx(skb) == NET_RX_DROP)) {
 		++dev->stats.rx_dropped;
 		net_dbg_ratelimited("%s: Failed to give packet to userspace from peer %llu (%pISpfsc)\n", dev->name, peer->internal_id, &peer->endpoint.addr);
 	} else
@@ -378,7 +378,6 @@ void packet_rx_worker(struct work_struct *work)
 	enum packet_state state;
 	bool free;
 
-	local_bh_disable();
 	while ((skb = __ptr_ring_peek(&queue->ring)) != NULL && (state = atomic_read(&PACKET_CB(skb)->state)) != PACKET_STATE_UNCRYPTED) {
 		__ptr_ring_discard_one(&queue->ring);
 		peer = PACKET_PEER(skb);
@@ -405,14 +404,7 @@ next:
 		peer_put(peer);
 		if (unlikely(free))
 			dev_kfree_skb(skb);
-
-		/* Don't totally kill scheduling latency by keeping preemption disabled forever. */
-		if (need_resched()) {
-			local_bh_enable();
-			local_bh_disable();
-		}
 	}
-	local_bh_enable();
 }
 
 void packet_decrypt_worker(struct work_struct *work)