在虚拟化场景中,中断的开销更大,所以要尽可能地少用中断。我们从中断处理函数开始看。
skb_recv_done函数
/*
 * RX virtqueue interrupt callback: fires when the host places used
 * buffers on the receive ring.  It hands further processing to NAPI
 * so packets are pulled in polling mode instead of per-packet IRQs.
 */
static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *info = rvq->vdev->priv;
	struct receive_queue *rxq = &info->rq[vq2rxq(rvq)];

	/*
	 * If NAPI is not already scheduled, suppress further host
	 * notifications on this queue and kick off the softirq poll.
	 */
	if (napi_schedule_prep(&rxq->napi)) {
		virtqueue_disable_cb(rvq);
		__napi_schedule(&rxq->napi);
	}
}
触发软中断前,先关闭 vhost 的中断通知,此后改由 poll 机制收包。收包软中断最终会调用 NAPI 注册的 poll 函数,virtio_net 注册的是 virtnet_poll 函数。
virtnet_poll函数
/*
 * NAPI poll handler for a virtio-net receive queue.
 *
 * Receives up to @budget packets.  If the ring drained before the
 * budget was exhausted, callbacks are re-armed and NAPI completed;
 * a race where new buffers arrived in the meantime is detected with
 * virtqueue_poll() and handled by rescheduling NAPI with callbacks
 * disabled again.
 */
static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rxq =
		container_of(napi, struct receive_queue, napi);
	unsigned int opaque, done;

	done = virtnet_receive(rxq, budget);

	/* Budget not exhausted: the ring is empty, stop polling. */
	if (done < budget) {
		/* Re-enable host notifications before leaving poll mode. */
		opaque = virtqueue_enable_cb_prepare(rxq->vq);
		napi_complete(napi);
		/*
		 * Close the race: if buffers were added between the last
		 * receive and re-enabling callbacks, go back to polling.
		 */
		if (unlikely(virtqueue_poll(rxq->vq, opaque)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(rxq->vq);
			__napi_schedule(napi);
		}
	}

	return done;
}
如果报文接收完成,会重新开启中断;否则处理完budget数量的报文后,让出CPU,等待下次处理软中断时再次接收报文。
关闭和开启中断的函数是通过设置共享环中的flag实现的:
virtqueue_disable_cb函数
/*
 * Ask the host to stop sending used-buffer interrupts for this
 * virtqueue by setting VRING_AVAIL_F_NO_INTERRUPT in the shared
 * avail ring flags.  This is only a hint; spurious callbacks may
 * still occur.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vvq = to_vvq(_vq);

	vvq->vring.avail->flags |=
		cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT);
}
virtqueue_enable_cb_prepare函数
/*
 * Re-arm used-buffer callbacks for this virtqueue.
 *
 * Clears VRING_AVAIL_F_NO_INTERRUPT in the shared avail flags and
 * publishes the current last_used_idx as the used-event index, so the
 * host knows from which point interrupts are wanted again.  Returns an
 * opaque snapshot of last_used_idx for a later virtqueue_poll() check
 * of whether new buffers raced in while callbacks were off.
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vvq = to_vvq(_vq);
	u16 idx_snapshot;

	START_USE(vvq);
	vvq->vring.avail->flags &=
		cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
	/* Publish the index we have consumed up to, and snapshot it. */
	vring_used_event(&vvq->vring) =
		cpu_to_virtio16(_vq->vdev, idx_snapshot = vvq->last_used_idx);
	END_USE(vvq);
	return idx_snapshot;
}
原文链接:https://blog.csdn.net/one_clouder/article/details/53327362