path: root/kernel/drivers/net/ethernet/sfc/tx.c
author     José Pekkarinen <jose.pekkarinen@nokia.com>    2016-04-11 10:41:07 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>    2016-04-13 08:17:18 +0300
commit     e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree       d10dc367189862e7ca5c592f033dc3726e1df4e3    /kernel/drivers/net/ethernet/sfc/tx.c
parent     f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download page. During the rebase, the following patch collided: Force tick interrupt and get rid of softirq magic (I70131fb85). The collisions have been dropped because their logic was already present in the source. Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769 Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/drivers/net/ethernet/sfc/tx.c')
-rw-r--r--  kernel/drivers/net/ethernet/sfc/tx.c  33
1 file changed, 30 insertions(+), 3 deletions(-)
diff --git a/kernel/drivers/net/ethernet/sfc/tx.c b/kernel/drivers/net/ethernet/sfc/tx.c
index aaf298751..67f6afaa0 100644
--- a/kernel/drivers/net/ethernet/sfc/tx.c
+++ b/kernel/drivers/net/ethernet/sfc/tx.c
@@ -431,8 +431,20 @@ finish_packet:
efx_tx_maybe_stop_queue(tx_queue);
/* Pass off to hardware */
- if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
+ if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+ struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
+
+ /* There could be packets left on the partner queue if those
+ * SKBs had skb->xmit_more set. If we do not push those they
+ * could be left for a long time and cause a netdev watchdog.
+ */
+ if (txq2->xmit_more_available)
+ efx_nic_push_buffers(txq2);
+
efx_nic_push_buffers(tx_queue);
+ } else {
+ tx_queue->xmit_more_available = skb->xmit_more;
+ }
tx_queue->tx_packets++;
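The hunk above is the usual xmit_more doorbell-coalescing pattern: the hardware doorbell write (efx_nic_push_buffers()) happens only when the stack signals the end of a batch or the queue has been stopped, and any descriptors deferred on the partner queue are flushed first so they cannot be stranded. A minimal sketch of that pattern follows; struct my_tx_queue, my_push_doorbell() and the xmit_more_pending field are illustrative stand-ins, not the sfc driver's actual symbols.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical queue pair carrying only the fields the pattern needs. */
struct my_tx_queue {
        struct netdev_queue *core_txq;  /* paired stack queue */
        struct my_tx_queue *partner;    /* e.g. the checksum-offload twin */
        bool xmit_more_pending;         /* doorbell deferred by xmit_more */
};

/* Assumed helper: writes all queued descriptors to the NIC doorbell. */
void my_push_doorbell(struct my_tx_queue *txq);

static void my_finish_enqueue(struct my_tx_queue *txq, struct sk_buff *skb)
{
        if (!skb->xmit_more || netif_xmit_stopped(txq->core_txq)) {
                /* Flush the partner first so descriptors deferred there by
                 * earlier xmit_more SKBs cannot sit long enough to trigger
                 * the netdev watchdog.
                 */
                if (txq->partner->xmit_more_pending) {
                        my_push_doorbell(txq->partner);
                        txq->partner->xmit_more_pending = false;
                }
                my_push_doorbell(txq);
                txq->xmit_more_pending = false;
        } else {
                /* The stack promised more packets; defer the doorbell write. */
                txq->xmit_more_pending = true;
        }
}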
@@ -617,7 +629,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
- netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
+ tx_queue->pkts_compl += pkts_compl;
+ tx_queue->bytes_compl += bytes_compl;
if (pkts_compl > 1)
++tx_queue->merge_events;
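The second hunk changes the completion path from calling netdev_tx_completed_queue() per event to accumulating the counts in per-queue fields; the point that finally reports the batched totals to BQL is not part of this hunk. A minimal sketch of that accumulate-then-report pattern, assuming a per-poll flush helper (my_flush_completions() is a hypothetical name):

#include <linux/netdevice.h>

/* Hypothetical per-queue completion counters mirroring the hunk above. */
struct my_tx_queue {
        struct netdev_queue *core_txq;
        unsigned int pkts_compl;
        unsigned int bytes_compl;
};

/* Per completion event: only accumulate, do not touch BQL yet. */
static void my_xmit_done(struct my_tx_queue *txq,
                         unsigned int pkts, unsigned int bytes)
{
        txq->pkts_compl += pkts;
        txq->bytes_compl += bytes;
}

/* Assumed to run once per NAPI poll: report the batched totals to the
 * byte queue limits machinery in a single netdev_tx_completed_queue() call.
 */
static void my_flush_completions(struct my_tx_queue *txq)
{
        if (txq->pkts_compl) {
                netdev_tx_completed_queue(txq->core_txq,
                                          txq->pkts_compl, txq->bytes_compl);
                txq->pkts_compl = 0;
                txq->bytes_compl = 0;
        }
}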
@@ -721,6 +734,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->read_count = 0;
tx_queue->old_read_count = 0;
tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
+ tx_queue->xmit_more_available = false;
/* Set up TX descriptor ring */
efx_nic_init_tx(tx_queue);
@@ -746,6 +760,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
++tx_queue->read_count;
}
+ tx_queue->xmit_more_available = false;
netdev_tx_reset_queue(tx_queue->core_txq);
}
@@ -1301,8 +1316,20 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
efx_tx_maybe_stop_queue(tx_queue);
/* Pass off to hardware */
- if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
+ if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+ struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
+
+ /* There could be packets left on the partner queue if those
+ * SKBs had skb->xmit_more set. If we do not push those they
+ * could be left for a long time and cause a netdev watchdog.
+ */
+ if (txq2->xmit_more_available)
+ efx_nic_push_buffers(txq2);
+
efx_nic_push_buffers(tx_queue);
+ } else {
+ tx_queue->xmit_more_available = skb->xmit_more;
+ }
tx_queue->tso_bursts++;
return NETDEV_TX_OK;
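The TSO path above repeats the same doorbell deferral as the first hunk. For completeness, a hedged sketch of the producer side of the protocol: a batch sender marks every packet except the last with skb->xmit_more, which is what allows the driver to postpone its doorbell until the final packet. my_xmit_batch() and my_ndo_start_xmit() are illustrative names; in the real kernel the core transmit path (qdisc bulk dequeue) sets this hint.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Stand-in for the driver's ndo_start_xmit handler. */
netdev_tx_t my_ndo_start_xmit(struct sk_buff *skb, struct net_device *dev);

/* Hypothetical batch transmit loop: only the last packet clears
 * xmit_more, letting the driver coalesce its doorbell writes.
 */
static void my_xmit_batch(struct sk_buff **pkts, int n, struct net_device *dev)
{
        int i;

        for (i = 0; i < n; i++) {
                pkts[i]->xmit_more = (i < n - 1);
                my_ndo_start_xmit(pkts[i], dev);
        }
}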