Diffstat (limited to 'kernel/linux/kni/kni_net.c')
-rw-r--r--  kernel/linux/kni/kni_net.c  62
1 file changed, 49 insertions(+), 13 deletions(-)
diff --git a/kernel/linux/kni/kni_net.c b/kernel/linux/kni/kni_net.c
index f25b1277bd..1ba9b1b99f 100644
--- a/kernel/linux/kni/kni_net.c
+++ b/kernel/linux/kni/kni_net.c
@@ -36,6 +36,22 @@ static void kni_net_rx_normal(struct kni_dev *kni);
/* kni rx function pointer, with default to normal rx */
static kni_net_rx_t kni_net_rx_func = kni_net_rx_normal;
+#ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
+/* iova to kernel virtual address */
+static inline void *
+iova2kva(struct kni_dev *kni, void *iova)
+{
+ return phys_to_virt(iova_to_phys(kni->usr_tsk, (unsigned long)iova));
+}
+
+static inline void *
+iova2data_kva(struct kni_dev *kni, struct rte_kni_mbuf *m)
+{
+ return phys_to_virt(iova_to_phys(kni->usr_tsk, m->buf_physaddr) +
+ m->data_off);
+}
+#endif
+
/* physical address to kernel virtual address */
static void *
pa2kva(void *pa)
@@ -62,6 +78,26 @@ kva2data_kva(struct rte_kni_mbuf *m)
return phys_to_virt(m->buf_physaddr + m->data_off);
}
+static inline void *
+get_kva(struct kni_dev *kni, void *pa)
+{
+#ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
+ if (kni->iova_mode == 1)
+ return iova2kva(kni, pa);
+#endif
+ return pa2kva(pa);
+}
+
+static inline void *
+get_data_kva(struct kni_dev *kni, void *pkt_kva)
+{
+#ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
+ if (kni->iova_mode == 1)
+ return iova2data_kva(kni, pkt_kva);
+#endif
+ return kva2data_kva(pkt_kva);
+}
+
/*
* It can be called to process the request.
*/
@@ -178,7 +214,7 @@ kni_fifo_trans_pa2va(struct kni_dev *kni,
return;
for (i = 0; i < num_rx; i++) {
- kva = pa2kva(kni->pa[i]);
+ kva = get_kva(kni, kni->pa[i]);
kni->va[i] = pa2va(kni->pa[i], kva);
kva_nb_segs = kva->nb_segs;
@@ -266,8 +302,8 @@ kni_net_tx(struct sk_buff *skb, struct net_device *dev)
if (likely(ret == 1)) {
void *data_kva;
- pkt_kva = pa2kva(pkt_pa);
- data_kva = kva2data_kva(pkt_kva);
+ pkt_kva = get_kva(kni, pkt_pa);
+ data_kva = get_data_kva(kni, pkt_kva);
pkt_va = pa2va(pkt_pa, pkt_kva);
len = skb->len;
@@ -338,9 +374,9 @@ kni_net_rx_normal(struct kni_dev *kni)
/* Transfer received packets to netif */
for (i = 0; i < num_rx; i++) {
- kva = pa2kva(kni->pa[i]);
+ kva = get_kva(kni, kni->pa[i]);
len = kva->pkt_len;
- data_kva = kva2data_kva(kva);
+ data_kva = get_data_kva(kni, kva);
kni->va[i] = pa2va(kni->pa[i], kva);
skb = netdev_alloc_skb(dev, len);
@@ -437,9 +473,9 @@ kni_net_rx_lo_fifo(struct kni_dev *kni)
num = ret;
/* Copy mbufs */
for (i = 0; i < num; i++) {
- kva = pa2kva(kni->pa[i]);
+ kva = get_kva(kni, kni->pa[i]);
len = kva->data_len;
- data_kva = kva2data_kva(kva);
+ data_kva = get_data_kva(kni, kva);
kni->va[i] = pa2va(kni->pa[i], kva);
while (kva->next) {
@@ -449,8 +485,8 @@ kni_net_rx_lo_fifo(struct kni_dev *kni)
kva = next_kva;
}
- alloc_kva = pa2kva(kni->alloc_pa[i]);
- alloc_data_kva = kva2data_kva(alloc_kva);
+ alloc_kva = get_kva(kni, kni->alloc_pa[i]);
+ alloc_data_kva = get_data_kva(kni, alloc_kva);
kni->alloc_va[i] = pa2va(kni->alloc_pa[i], alloc_kva);
memcpy(alloc_data_kva, data_kva, len);
@@ -517,9 +553,9 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
/* Copy mbufs to sk buffer and then call tx interface */
for (i = 0; i < num; i++) {
- kva = pa2kva(kni->pa[i]);
+ kva = get_kva(kni, kni->pa[i]);
len = kva->pkt_len;
- data_kva = kva2data_kva(kva);
+ data_kva = get_data_kva(kni, kva);
kni->va[i] = pa2va(kni->pa[i], kva);
skb = netdev_alloc_skb(dev, len);
@@ -550,8 +586,8 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
break;
prev_kva = kva;
- kva = pa2kva(kva->next);
- data_kva = kva2data_kva(kva);
+ kva = get_kva(kni, kva->next);
+ data_kva = get_data_kva(kni, kva);
/* Convert physical address to virtual address */
prev_kva->next = pa2va(prev_kva->next, kva);
}
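
Note: the get_kva()/get_data_kva() helpers added by this patch implement a simple runtime dispatch guarded at compile time: when the kernel has iova-to-kva mapping support and the KNI device runs with iova_mode == 1, addresses handed over from userspace are translated through the user task's mapping (iova2kva/iova2data_kva); otherwise the existing physical-address path (pa2kva/kva2data_kva) is used. The standalone C sketch below only illustrates that dispatch shape in userspace; kni_dev_stub, get_kva_stub, iova2kva_stub and pa2kva_stub are illustrative names, not part of the DPDK sources, and the real helpers go through iova_to_phys()/phys_to_virt() in the kernel module.

/* Minimal sketch of the get_kva() dispatch pattern, with stubbed
 * translation helpers standing in for the kernel-only routines. */
#include <stdio.h>

struct kni_dev_stub {
	int iova_mode;	/* 1: addresses received from userspace are IOVAs */
};

/* stand-in for iova2kva(): IOVA -> kernel virtual address */
static void *iova2kva_stub(struct kni_dev_stub *kni, void *iova)
{
	(void)kni;
	printf("translating %p via the IOVA path\n", iova);
	return iova;
}

/* stand-in for pa2kva(): physical address -> kernel virtual address */
static void *pa2kva_stub(void *pa)
{
	printf("translating %p via the PA path\n", pa);
	return pa;
}

/* same shape as the patched get_kva(): choose the translation per device,
 * based on its iova_mode flag */
static void *get_kva_stub(struct kni_dev_stub *kni, void *addr)
{
	if (kni->iova_mode == 1)
		return iova2kva_stub(kni, addr);
	return pa2kva_stub(addr);
}

int main(void)
{
	struct kni_dev_stub pa_dev = { .iova_mode = 0 };
	struct kni_dev_stub va_dev = { .iova_mode = 1 };
	int dummy;

	get_kva_stub(&pa_dev, &dummy);	/* takes the PA branch */
	get_kva_stub(&va_dev, &dummy);	/* takes the IOVA branch */
	return 0;
}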