Diffstat (limited to 'kernel/drivers/net/xen-netback/netback.c')
-rw-r--r--  kernel/drivers/net/xen-netback/netback.c | 413
1 file changed, 281 insertions, 132 deletions
diff --git a/kernel/drivers/net/xen-netback/netback.c b/kernel/drivers/net/xen-netback/netback.c
index 0866c5dfd..1049c34e7 100644
--- a/kernel/drivers/net/xen-netback/netback.c
+++ b/kernel/drivers/net/xen-netback/netback.c
@@ -44,15 +44,15 @@
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>
+#include <xen/page.h>
#include <asm/xen/hypercall.h>
-#include <asm/xen/page.h>
/* Provide an option to disable split event channels at load time as
* event channels are limited resource. Split event channels are
* enabled by default.
*/
-bool separate_tx_rx_irq = 1;
+bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);
/* The time that packets can stay on the guest Rx internal queue
@@ -149,9 +149,20 @@ static inline pending_ring_idx_t pending_index(unsigned i)
return i & (MAX_PENDING_REQS-1);
}
-bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
+static int xenvif_rx_ring_slots_needed(struct xenvif *vif)
+{
+ if (vif->gso_mask)
+ return DIV_ROUND_UP(vif->dev->gso_max_size, XEN_PAGE_SIZE) + 1;
+ else
+ return DIV_ROUND_UP(vif->dev->mtu, XEN_PAGE_SIZE);
+}
+
+static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
RING_IDX prod, cons;
+ int needed;
+
+ needed = xenvif_rx_ring_slots_needed(queue->vif);
do {
prod = queue->rx.sring->req_prod;
@@ -247,22 +258,96 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
struct netrx_pending_operations *npo)
{
struct xenvif_rx_meta *meta;
- struct xen_netif_rx_request *req;
+ struct xen_netif_rx_request req;
- req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
+ RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
meta = npo->meta + npo->meta_prod++;
meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
meta->gso_size = 0;
meta->size = 0;
- meta->id = req->id;
+ meta->id = req.id;
npo->copy_off = 0;
- npo->copy_gref = req->gref;
+ npo->copy_gref = req.gref;
return meta;
}
+struct gop_frag_copy {
+ struct xenvif_queue *queue;
+ struct netrx_pending_operations *npo;
+ struct xenvif_rx_meta *meta;
+ int head;
+ int gso_type;
+
+ struct page *page;
+};
+
+static void xenvif_setup_copy_gop(unsigned long gfn,
+ unsigned int offset,
+ unsigned int *len,
+ struct gop_frag_copy *info)
+{
+ struct gnttab_copy *copy_gop;
+ struct xen_page_foreign *foreign;
+ /* Convenient aliases */
+ struct xenvif_queue *queue = info->queue;
+ struct netrx_pending_operations *npo = info->npo;
+ struct page *page = info->page;
+
+ BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
+
+ if (npo->copy_off == MAX_BUFFER_OFFSET)
+ info->meta = get_next_rx_buffer(queue, npo);
+
+ if (npo->copy_off + *len > MAX_BUFFER_OFFSET)
+ *len = MAX_BUFFER_OFFSET - npo->copy_off;
+
+ copy_gop = npo->copy + npo->copy_prod++;
+ copy_gop->flags = GNTCOPY_dest_gref;
+ copy_gop->len = *len;
+
+ foreign = xen_page_foreign(page);
+ if (foreign) {
+ copy_gop->source.domid = foreign->domid;
+ copy_gop->source.u.ref = foreign->gref;
+ copy_gop->flags |= GNTCOPY_source_gref;
+ } else {
+ copy_gop->source.domid = DOMID_SELF;
+ copy_gop->source.u.gmfn = gfn;
+ }
+ copy_gop->source.offset = offset;
+
+ copy_gop->dest.domid = queue->vif->domid;
+ copy_gop->dest.offset = npo->copy_off;
+ copy_gop->dest.u.ref = npo->copy_gref;
+
+ npo->copy_off += *len;
+ info->meta->size += *len;
+
+ /* Leave a gap for the GSO descriptor. */
+ if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask))
+ queue->rx.req_cons++;
+
+ info->head = 0; /* There must be something in this buffer now */
+}
+
+static void xenvif_gop_frag_copy_grant(unsigned long gfn,
+ unsigned offset,
+ unsigned int len,
+ void *data)
+{
+ unsigned int bytes;
+
+ while (len) {
+ bytes = len;
+ xenvif_setup_copy_gop(gfn, offset, &bytes, data);
+ offset += bytes;
+ len -= bytes;
+ }
+}
+
/*
* Set up the grant operations for this fragment. If it's a flipping
* interface, we also set up the unmap request from here.
@@ -272,83 +357,52 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
struct page *page, unsigned long size,
unsigned long offset, int *head)
{
- struct gnttab_copy *copy_gop;
- struct xenvif_rx_meta *meta;
+ struct gop_frag_copy info = {
+ .queue = queue,
+ .npo = npo,
+ .head = *head,
+ .gso_type = XEN_NETIF_GSO_TYPE_NONE,
+ };
unsigned long bytes;
- int gso_type = XEN_NETIF_GSO_TYPE_NONE;
+
+ if (skb_is_gso(skb)) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+ info.gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ info.gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+ }
/* Data must not cross a page boundary. */
BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
- meta = npo->meta + npo->meta_prod - 1;
+ info.meta = npo->meta + npo->meta_prod - 1;
/* Skip unused frames from start of page */
page += offset >> PAGE_SHIFT;
offset &= ~PAGE_MASK;
while (size > 0) {
- struct xen_page_foreign *foreign;
-
BUG_ON(offset >= PAGE_SIZE);
- BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
-
- if (npo->copy_off == MAX_BUFFER_OFFSET)
- meta = get_next_rx_buffer(queue, npo);
bytes = PAGE_SIZE - offset;
if (bytes > size)
bytes = size;
- if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
- bytes = MAX_BUFFER_OFFSET - npo->copy_off;
-
- copy_gop = npo->copy + npo->copy_prod++;
- copy_gop->flags = GNTCOPY_dest_gref;
- copy_gop->len = bytes;
-
- foreign = xen_page_foreign(page);
- if (foreign) {
- copy_gop->source.domid = foreign->domid;
- copy_gop->source.u.ref = foreign->gref;
- copy_gop->flags |= GNTCOPY_source_gref;
- } else {
- copy_gop->source.domid = DOMID_SELF;
- copy_gop->source.u.gmfn =
- virt_to_mfn(page_address(page));
- }
- copy_gop->source.offset = offset;
-
- copy_gop->dest.domid = queue->vif->domid;
- copy_gop->dest.offset = npo->copy_off;
- copy_gop->dest.u.ref = npo->copy_gref;
-
- npo->copy_off += bytes;
- meta->size += bytes;
-
- offset += bytes;
+ info.page = page;
+ gnttab_foreach_grant_in_range(page, offset, bytes,
+ xenvif_gop_frag_copy_grant,
+ &info);
size -= bytes;
+ offset = 0;
- /* Next frame */
- if (offset == PAGE_SIZE && size) {
+ /* Next page */
+ if (size) {
BUG_ON(!PageCompound(page));
page++;
- offset = 0;
- }
-
- /* Leave a gap for the GSO descriptor. */
- if (skb_is_gso(skb)) {
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
- gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
- else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
- gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
}
-
- if (*head && ((1 << gso_type) & queue->vif->gso_mask))
- queue->rx.req_cons++;
-
- *head = 0; /* There must be something in this buffer now. */
-
}
+
+ *head = info.head;
}
/*
@@ -370,7 +424,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
struct xenvif *vif = netdev_priv(skb->dev);
int nr_frags = skb_shinfo(skb)->nr_frags;
int i;
- struct xen_netif_rx_request *req;
+ struct xen_netif_rx_request req;
struct xenvif_rx_meta *meta;
unsigned char *data;
int head = 1;
@@ -389,15 +443,15 @@ static int xenvif_gop_skb(struct sk_buff *skb,
/* Set up a GSO prefix descriptor, if necessary */
if ((1 << gso_type) & vif->gso_prefix_mask) {
- req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
+ RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
meta = npo->meta + npo->meta_prod++;
meta->gso_type = gso_type;
meta->gso_size = skb_shinfo(skb)->gso_size;
meta->size = 0;
- meta->id = req->id;
+ meta->id = req.id;
}
- req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
+ RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
meta = npo->meta + npo->meta_prod++;
if ((1 << gso_type) & vif->gso_mask) {
@@ -409,9 +463,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
}
meta->size = 0;
- meta->id = req->id;
+ meta->id = req.id;
npo->copy_off = 0;
- npo->copy_gref = req->gref;
+ npo->copy_gref = req.gref;
data = skb->data;
while (data < skb_tail_pointer(skb)) {
@@ -513,16 +567,11 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
skb_queue_head_init(&rxq);
- while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
+ while (xenvif_rx_ring_slots_available(queue)
&& (skb = xenvif_rx_dequeue(queue)) != NULL) {
- RING_IDX old_req_cons;
- RING_IDX ring_slots_used;
-
queue->last_rx_time = jiffies;
- old_req_cons = queue->rx.req_cons;
XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
- ring_slots_used = queue->rx.req_cons - old_req_cons;
__skb_queue_tail(&rxq, skb);
}
@@ -630,9 +679,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
* Allow a burst big enough to transmit a jumbo packet of up to 128kB.
* Otherwise the interface can seize up due to insufficient credit.
*/
- max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
- max_burst = min(max_burst, 131072UL);
- max_burst = max(max_burst, queue->credit_bytes);
+ max_burst = max(131072UL, queue->credit_bytes);
/* Take care that adding a new chunk of credit doesn't wrap to zero. */
max_credit = queue->remaining_credit + queue->credit_bytes;
@@ -662,7 +709,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
spin_unlock_irqrestore(&queue->response_lock, flags);
if (cons == end)
break;
- txp = RING_GET_REQUEST(&queue->tx, cons++);
+ RING_COPY_REQUEST(&queue->tx, cons++, txp);
} while (1);
queue->tx.req_cons = cons;
}
@@ -729,8 +776,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
if (drop_err)
txp = &dropped_tx;
- memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
- sizeof(*txp));
+ RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
/* If the guest submitted a frame >= 64 KiB then
* first->size overflowed and following slots will
@@ -752,8 +798,8 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
first->size -= txp->size;
slots++;
- if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
- netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
+ if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
+ netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
txp->offset, txp->size);
xenvif_fatal_tx_err(queue->vif);
return -EINVAL;
@@ -815,23 +861,17 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
struct sk_buff *skb,
struct xen_netif_tx_request *txp,
- struct gnttab_map_grant_ref *gop)
+ struct gnttab_map_grant_ref *gop,
+ unsigned int frag_overflow,
+ struct sk_buff *nskb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
int start;
pending_ring_idx_t index;
- unsigned int nr_slots, frag_overflow = 0;
+ unsigned int nr_slots;
- /* At this point shinfo->nr_frags is in fact the number of
- * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
- */
- if (shinfo->nr_frags > MAX_SKB_FRAGS) {
- frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
- BUG_ON(frag_overflow > MAX_SKB_FRAGS);
- shinfo->nr_frags = MAX_SKB_FRAGS;
- }
nr_slots = shinfo->nr_frags;
/* Skip first skb fragment if it is on same page as header fragment. */
@@ -846,13 +886,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
}
if (frag_overflow) {
- struct sk_buff *nskb = xenvif_alloc_skb(0);
- if (unlikely(nskb == NULL)) {
- if (net_ratelimit())
- netdev_err(queue->vif->dev,
- "Can't allocate the frag_list skb.\n");
- return NULL;
- }
shinfo = skb_shinfo(nskb);
frags = shinfo->frags;
@@ -879,7 +912,7 @@ static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
if (unlikely(queue->grant_tx_handle[pending_idx] !=
NETBACK_INVALID_HANDLE)) {
netdev_err(queue->vif->dev,
- "Trying to overwrite active handle! pending_idx: %x\n",
+ "Trying to overwrite active handle! pending_idx: 0x%x\n",
pending_idx);
BUG();
}
@@ -892,7 +925,7 @@ static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
if (unlikely(queue->grant_tx_handle[pending_idx] ==
NETBACK_INVALID_HANDLE)) {
netdev_err(queue->vif->dev,
- "Trying to unmap invalid handle! pending_idx: %x\n",
+ "Trying to unmap invalid handle! pending_idx: 0x%x\n",
pending_idx);
BUG();
}
@@ -1076,8 +1109,7 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
return -EBADR;
}
- memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
- sizeof(extra));
+ RING_COPY_REQUEST(&queue->tx, cons, &extra);
if (unlikely(!extra.type ||
extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
queue->tx.req_cons = ++cons;
@@ -1175,14 +1207,89 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
return false;
}
+/* No locking is required in xenvif_mcast_add/del() as they are
+ * only ever invoked from NAPI poll. An RCU list is used because
+ * xenvif_mcast_match() is called asynchronously, during start_xmit.
+ */
+
+static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
+{
+ struct xenvif_mcast_addr *mcast;
+
+ if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
+ if (net_ratelimit())
+ netdev_err(vif->dev,
+ "Too many multicast addresses\n");
+ return -ENOSPC;
+ }
+
+ mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
+ if (!mcast)
+ return -ENOMEM;
+
+ ether_addr_copy(mcast->addr, addr);
+ list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
+ vif->fe_mcast_count++;
+
+ return 0;
+}
+
+static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
+{
+ struct xenvif_mcast_addr *mcast;
+
+ list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
+ if (ether_addr_equal(addr, mcast->addr)) {
+ --vif->fe_mcast_count;
+ list_del_rcu(&mcast->entry);
+ kfree_rcu(mcast, rcu);
+ break;
+ }
+ }
+}
+
+bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
+{
+ struct xenvif_mcast_addr *mcast;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
+ if (ether_addr_equal(addr, mcast->addr)) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ rcu_read_unlock();
+
+ return false;
+}
+
+void xenvif_mcast_addr_list_free(struct xenvif *vif)
+{
+ /* No need for locking or RCU here. NAPI poll and TX queue
+ * are stopped.
+ */
+ while (!list_empty(&vif->fe_mcast_addr)) {
+ struct xenvif_mcast_addr *mcast;
+
+ mcast = list_first_entry(&vif->fe_mcast_addr,
+ struct xenvif_mcast_addr,
+ entry);
+ --vif->fe_mcast_count;
+ list_del(&mcast->entry);
+ kfree(mcast);
+ }
+}
+
static void xenvif_tx_build_gops(struct xenvif_queue *queue,
int budget,
unsigned *copy_ops,
unsigned *map_ops)
{
- struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
- struct sk_buff *skb;
+ struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
+ struct sk_buff *skb, *nskb;
int ret;
+ unsigned int frag_overflow;
while (skb_queue_len(&queue->tx_queue) < budget) {
struct xen_netif_tx_request txreq;
@@ -1211,7 +1318,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
idx = queue->tx.req_cons;
rmb(); /* Ensure that we see the request before we copy it. */
- memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));
+ RING_COPY_REQUEST(&queue->tx, idx, &txreq);
/* Credit-based scheduling. */
if (txreq.size > queue->remaining_credit &&
@@ -1232,6 +1339,31 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
break;
}
+ if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
+ struct xen_netif_extra_info *extra;
+
+ extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
+ ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
+
+ make_tx_response(queue, &txreq,
+ (ret == 0) ?
+ XEN_NETIF_RSP_OKAY :
+ XEN_NETIF_RSP_ERROR);
+ push_tx_responses(queue);
+ continue;
+ }
+
+ if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
+ struct xen_netif_extra_info *extra;
+
+ extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
+ xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
+
+ make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
+ push_tx_responses(queue);
+ continue;
+ }
+
ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
if (unlikely(ret < 0))
break;
@@ -1246,11 +1378,11 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
}
/* No crossing a page as the payload mustn't fragment. */
- if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
+ if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
netdev_err(queue->vif->dev,
- "txreq.offset: %x, size: %u, end: %lu\n",
+ "txreq.offset: %u, size: %u, end: %lu\n",
txreq.offset, txreq.size,
- (unsigned long)(txreq.offset&~PAGE_MASK) + txreq.size);
+ (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
xenvif_fatal_tx_err(queue->vif);
break;
}
@@ -1270,6 +1402,29 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
break;
}
+ skb_shinfo(skb)->nr_frags = ret;
+ if (data_len < txreq.size)
+ skb_shinfo(skb)->nr_frags++;
+ /* At this point shinfo->nr_frags is in fact the number of
+ * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
+ */
+ frag_overflow = 0;
+ nskb = NULL;
+ if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
+ frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
+ BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+ skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
+ nskb = xenvif_alloc_skb(0);
+ if (unlikely(nskb == NULL)) {
+ kfree_skb(skb);
+ xenvif_tx_err(queue, &txreq, idx);
+ if (net_ratelimit())
+ netdev_err(queue->vif->dev,
+ "Can't allocate the frag_list skb.\n");
+ break;
+ }
+ }
+
if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1277,6 +1432,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
/* Failure in xenvif_set_skb_gso is fatal. */
kfree_skb(skb);
+ kfree_skb(nskb);
break;
}
}
@@ -1289,19 +1445,17 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
- virt_to_mfn(skb->data);
+ virt_to_gfn(skb->data);
queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
queue->tx_copy_ops[*copy_ops].dest.offset =
- offset_in_page(skb->data);
+ offset_in_page(skb->data) & ~XEN_PAGE_MASK;
queue->tx_copy_ops[*copy_ops].len = data_len;
queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
(*copy_ops)++;
- skb_shinfo(skb)->nr_frags = ret;
if (data_len < txreq.size) {
- skb_shinfo(skb)->nr_frags++;
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
pending_idx);
xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
@@ -1315,13 +1469,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
queue->pending_cons++;
- request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
- if (request_gop == NULL) {
- kfree_skb(skb);
- xenvif_tx_err(queue, &txreq, idx);
- break;
- }
- gop = request_gop;
+ gop = xenvif_get_requests(queue, skb, txfrags, gop,
+ frag_overflow, nskb);
__skb_queue_tail(&queue->tx_queue, skb);
@@ -1541,7 +1690,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
smp_wmb();
queue->dealloc_prod++;
} while (ubuf);
- wake_up(&queue->dealloc_wq);
spin_unlock_irqrestore(&queue->callback_lock, flags);
if (likely(zerocopy_success))
@@ -1598,12 +1746,12 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
queue->pages_to_unmap,
gop - queue->tx_unmap_ops);
if (ret) {
- netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
+ netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
gop - queue->tx_unmap_ops, ret);
for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
if (gop[i].status != GNTST_okay)
netdev_err(queue->vif->dev,
- " host_addr: %llx handle: %x status: %d\n",
+ " host_addr: 0x%llx handle: 0x%x status: %d\n",
gop[i].host_addr,
gop[i].handle,
gop[i].status);
@@ -1736,7 +1884,7 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
&queue->mmap_pages[pending_idx], 1);
if (ret) {
netdev_err(queue->vif->dev,
- "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
+ "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
ret,
pending_idx,
tx_unmap_op.host_addr,
@@ -1785,7 +1933,7 @@ int xenvif_map_frontend_rings(struct xenvif_queue *queue,
goto err;
txs = (struct xen_netif_tx_sring *)addr;
- BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE);
+ BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
&rx_ring_ref, 1, &addr);
@@ -1793,7 +1941,7 @@ int xenvif_map_frontend_rings(struct xenvif_queue *queue,
goto err;
rxs = (struct xen_netif_rx_sring *)addr;
- BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
+ BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
return 0;
@@ -1840,8 +1988,7 @@ static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
prod = queue->rx.sring->req_prod;
cons = queue->rx.req_cons;
- return !queue->stalled
- && prod - cons < XEN_NETBK_RX_SLOTS_MAX
+ return !queue->stalled && prod - cons < 1
&& time_after(jiffies,
queue->last_rx_time + queue->vif->stall_timeout);
}
@@ -1853,14 +2000,13 @@ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
prod = queue->rx.sring->req_prod;
cons = queue->rx.req_cons;
- return queue->stalled
- && prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
+ return queue->stalled && prod - cons >= 1;
}
static bool xenvif_have_rx_work(struct xenvif_queue *queue)
{
return (!skb_queue_empty(&queue->rx_queue)
- && xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
+ && xenvif_rx_ring_slots_available(queue))
|| (queue->vif->stall_timeout &&
(xenvif_rx_queue_stalled(queue)
|| xenvif_rx_queue_ready(queue)))
@@ -2007,8 +2153,11 @@ static int __init netback_init(void)
if (!xen_domain())
return -ENODEV;
- /* Allow as many queues as there are CPUs, by default */
- xenvif_max_queues = num_online_cpus();
+ /* Allow as many queues as there are CPUs if user has not
+ * specified a value.
+ */
+ if (xenvif_max_queues == 0)
+ xenvif_max_queues = num_online_cpus();
if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",