Diffstat (limited to 'kernel/drivers/net/ethernet/cisco/enic')
-rw-r--r--   kernel/drivers/net/ethernet/cisco/enic/enic.h           |  49
-rw-r--r--   kernel/drivers/net/ethernet/cisco/enic/enic_clsf.c      |  31
-rw-r--r--   kernel/drivers/net/ethernet/cisco/enic/enic_ethtool.c   | 123
-rw-r--r--   kernel/drivers/net/ethernet/cisco/enic/enic_main.c      | 270
-rw-r--r--   kernel/drivers/net/ethernet/cisco/enic/vnic_cq.c        |   3
-rw-r--r--   kernel/drivers/net/ethernet/cisco/enic/vnic_dev.c       | 286
-rw-r--r--   kernel/drivers/net/ethernet/cisco/enic/vnic_dev.h       |  46
-rw-r--r--   kernel/drivers/net/ethernet/cisco/enic/vnic_devcmd.h    |  28
-rw-r--r--   kernel/drivers/net/ethernet/cisco/enic/vnic_intr.c      |   3
-rw-r--r--   kernel/drivers/net/ethernet/cisco/enic/vnic_resource.h  |   7
-rw-r--r--   kernel/drivers/net/ethernet/cisco/enic/vnic_rq.c        |   6
-rw-r--r--   kernel/drivers/net/ethernet/cisco/enic/vnic_rq.h        |  91
-rw-r--r--   kernel/drivers/net/ethernet/cisco/enic/vnic_wq.c        |  33
-rw-r--r--   kernel/drivers/net/ethernet/cisco/enic/vnic_wq.h        |  18
14 files changed, 698 insertions, 296 deletions
diff --git a/kernel/drivers/net/ethernet/cisco/enic/enic.h b/kernel/drivers/net/ethernet/cisco/enic/enic.h index 84b6a2b46..7ba6d530b 100644 --- a/kernel/drivers/net/ethernet/cisco/enic/enic.h +++ b/kernel/drivers/net/ethernet/cisco/enic/enic.h @@ -33,7 +33,7 @@ #define DRV_NAME "enic" #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" -#define DRV_VERSION "2.1.1.83" +#define DRV_VERSION "2.3.0.20" #define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc" #define ENIC_BARS_MAX 6 @@ -50,6 +50,7 @@ struct enic_msix_entry { char devname[IFNAMSIZ]; irqreturn_t (*isr)(int, void *); void *devid; + cpumask_var_t affinity_mask; }; /* Store only the lower range. Higher range is given by fw. */ @@ -143,6 +144,7 @@ struct enic { struct vnic_dev *vdev; struct timer_list notify_timer; struct work_struct reset; + struct work_struct tx_hang_reset; struct work_struct change_mtu_work; struct msix_entry msix_entry[ENIC_INTR_MAX]; struct enic_msix_entry msix[ENIC_INTR_MAX]; @@ -191,6 +193,25 @@ struct enic { struct vnic_gen_stats gen_stats; }; +static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev) +{ + struct enic *enic = vdev->priv; + + return enic->netdev; +} + +/* wrappers function for kernel log + * Make sure variable vdev of struct vnic_dev is available in the block where + * these macros are used + */ +#define vdev_info(args...) dev_info(&vdev->pdev->dev, args) +#define vdev_warn(args...) dev_warn(&vdev->pdev->dev, args) +#define vdev_err(args...) dev_err(&vdev->pdev->dev, args) + +#define vdev_netinfo(args...) netdev_info(vnic_get_netdev(vdev), args) +#define vdev_netwarn(args...) netdev_warn(vnic_get_netdev(vdev), args) +#define vdev_neterr(args...) netdev_err(vnic_get_netdev(vdev), args) + static inline struct device *enic_get_dev(struct enic *enic) { return &(enic->pdev->dev); @@ -243,6 +264,32 @@ static inline unsigned int enic_msix_notify_intr(struct enic *enic) return enic->rq_count + enic->wq_count + 1; } +static inline bool enic_is_err_intr(struct enic *enic, int intr) +{ + switch (vnic_dev_get_intr_mode(enic->vdev)) { + case VNIC_DEV_INTR_MODE_INTX: + return intr == enic_legacy_err_intr(); + case VNIC_DEV_INTR_MODE_MSIX: + return intr == enic_msix_err_intr(enic); + case VNIC_DEV_INTR_MODE_MSI: + default: + return false; + } +} + +static inline bool enic_is_notify_intr(struct enic *enic, int intr) +{ + switch (vnic_dev_get_intr_mode(enic->vdev)) { + case VNIC_DEV_INTR_MODE_INTX: + return intr == enic_legacy_notify_intr(); + case VNIC_DEV_INTR_MODE_MSIX: + return intr == enic_msix_notify_intr(enic); + case VNIC_DEV_INTR_MODE_MSI: + default: + return false; + } +} + static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr) { if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) { diff --git a/kernel/drivers/net/ethernet/cisco/enic/enic_clsf.c b/kernel/drivers/net/ethernet/cisco/enic/enic_clsf.c index 0be6850be..3c677ed3c 100644 --- a/kernel/drivers/net/ethernet/cisco/enic/enic_clsf.c +++ b/kernel/drivers/net/ethernet/cisco/enic/enic_clsf.c @@ -5,7 +5,7 @@ #include <linux/in.h> #include <linux/types.h> #include <linux/skbuff.h> -#include <net/flow_keys.h> +#include <net/flow_dissector.h> #include "enic_res.h" #include "enic_clsf.h" @@ -15,14 +15,14 @@ * @rq: rq number to steer to * * This function returns filter_id(hardware_id) of the filter - * added. In case of error it returns an negative number. + * added. In case of error it returns a negative number. 
*/ int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq) { int res; struct filter data; - switch (keys->ip_proto) { + switch (keys->basic.ip_proto) { case IPPROTO_TCP: data.u.ipv4.protocol = PROTO_TCP; break; @@ -33,10 +33,10 @@ int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq) return -EPROTONOSUPPORT; }; data.type = FILTER_IPV4_5TUPLE; - data.u.ipv4.src_addr = ntohl(keys->src); - data.u.ipv4.dst_addr = ntohl(keys->dst); - data.u.ipv4.src_port = ntohs(keys->port16[0]); - data.u.ipv4.dst_port = ntohs(keys->port16[1]); + data.u.ipv4.src_addr = ntohl(keys->addrs.v4addrs.src); + data.u.ipv4.dst_addr = ntohl(keys->addrs.v4addrs.dst); + data.u.ipv4.src_port = ntohs(keys->ports.src); + data.u.ipv4.dst_port = ntohs(keys->ports.dst); data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE; spin_lock_bh(&enic->devcmd_lock); @@ -158,11 +158,11 @@ static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h, struct enic_rfs_fltr_node *tpos; hlist_for_each_entry(tpos, h, node) - if (tpos->keys.src == k->src && - tpos->keys.dst == k->dst && - tpos->keys.ports == k->ports && - tpos->keys.ip_proto == k->ip_proto && - tpos->keys.n_proto == k->n_proto) + if (tpos->keys.addrs.v4addrs.src == k->addrs.v4addrs.src && + tpos->keys.addrs.v4addrs.dst == k->addrs.v4addrs.dst && + tpos->keys.ports.ports == k->ports.ports && + tpos->keys.basic.ip_proto == k->basic.ip_proto && + tpos->keys.basic.n_proto == k->basic.n_proto) return tpos; return NULL; } @@ -177,9 +177,10 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, int res, i; enic = netdev_priv(dev); - res = skb_flow_dissect(skb, &keys); - if (!res || keys.n_proto != htons(ETH_P_IP) || - (keys.ip_proto != IPPROTO_TCP && keys.ip_proto != IPPROTO_UDP)) + res = skb_flow_dissect_flow_keys(skb, &keys, 0); + if (!res || keys.basic.n_proto != htons(ETH_P_IP) || + (keys.basic.ip_proto != IPPROTO_TCP && + keys.basic.ip_proto != IPPROTO_UDP)) return -EPROTONOSUPPORT; tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK; diff --git a/kernel/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/kernel/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 68d47b196..f44a39c40 100644 --- a/kernel/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/kernel/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -224,7 +224,8 @@ static int enic_get_coalesce(struct net_device *netdev, struct enic *enic = netdev_priv(netdev); struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting; - ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs; + if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) + ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs; ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs; if (rxcoal->use_adaptive_rx_coalesce) ecmd->use_adaptive_rx_coalesce = 1; @@ -234,6 +235,53 @@ static int enic_get_coalesce(struct net_device *netdev, return 0; } +static int enic_coalesce_valid(struct enic *enic, + struct ethtool_coalesce *ec) +{ + u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev); + u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max, + ec->rx_coalesce_usecs_high); + u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max, + ec->rx_coalesce_usecs_low); + + if (ec->rx_max_coalesced_frames || + ec->rx_coalesce_usecs_irq || + ec->rx_max_coalesced_frames_irq || + ec->tx_max_coalesced_frames || + ec->tx_coalesce_usecs_irq || + ec->tx_max_coalesced_frames_irq || + ec->stats_block_coalesce_usecs || + ec->use_adaptive_tx_coalesce || + ec->pkt_rate_low || + ec->rx_max_coalesced_frames_low || 
+ ec->tx_coalesce_usecs_low || + ec->tx_max_coalesced_frames_low || + ec->pkt_rate_high || + ec->rx_max_coalesced_frames_high || + ec->tx_coalesce_usecs_high || + ec->tx_max_coalesced_frames_high || + ec->rate_sample_interval) + return -EINVAL; + + if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) && + ec->tx_coalesce_usecs) + return -EINVAL; + + if ((ec->tx_coalesce_usecs > coalesce_usecs_max) || + (ec->rx_coalesce_usecs > coalesce_usecs_max) || + (ec->rx_coalesce_usecs_low > coalesce_usecs_max) || + (ec->rx_coalesce_usecs_high > coalesce_usecs_max)) + netdev_info(enic->netdev, "ethtool_set_coalesce: adaptor supports max coalesce value of %d. Setting max value.\n", + coalesce_usecs_max); + + if (ec->rx_coalesce_usecs_high && + (rx_coalesce_usecs_high < + rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF)) + return -EINVAL; + + return 0; +} + static int enic_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) { @@ -244,8 +292,12 @@ static int enic_set_coalesce(struct net_device *netdev, u32 rx_coalesce_usecs_high; u32 coalesce_usecs_max; unsigned int i, intr; + int ret; struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting; + ret = enic_coalesce_valid(enic, ecmd); + if (ret) + return ret; coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev); tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs, coalesce_usecs_max); @@ -257,59 +309,24 @@ static int enic_set_coalesce(struct net_device *netdev, rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high, coalesce_usecs_max); - switch (vnic_dev_get_intr_mode(enic->vdev)) { - case VNIC_DEV_INTR_MODE_INTX: - if (tx_coalesce_usecs != rx_coalesce_usecs) - return -EINVAL; - if (ecmd->use_adaptive_rx_coalesce || - ecmd->rx_coalesce_usecs_low || - ecmd->rx_coalesce_usecs_high) - return -EINVAL; - - intr = enic_legacy_io_intr(); - vnic_intr_coalescing_timer_set(&enic->intr[intr], - tx_coalesce_usecs); - break; - case VNIC_DEV_INTR_MODE_MSI: - if (tx_coalesce_usecs != rx_coalesce_usecs) - return -EINVAL; - if (ecmd->use_adaptive_rx_coalesce || - ecmd->rx_coalesce_usecs_low || - ecmd->rx_coalesce_usecs_high) - return -EINVAL; - - vnic_intr_coalescing_timer_set(&enic->intr[0], - tx_coalesce_usecs); - break; - case VNIC_DEV_INTR_MODE_MSIX: - if (ecmd->rx_coalesce_usecs_high && - (rx_coalesce_usecs_high < - rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF)) - return -EINVAL; - + if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) { for (i = 0; i < enic->wq_count; i++) { intr = enic_msix_wq_intr(enic, i); vnic_intr_coalescing_timer_set(&enic->intr[intr], - tx_coalesce_usecs); - } - - rxcoal->use_adaptive_rx_coalesce = - !!ecmd->use_adaptive_rx_coalesce; - if (!rxcoal->use_adaptive_rx_coalesce) - enic_intr_coal_set_rx(enic, rx_coalesce_usecs); - - if (ecmd->rx_coalesce_usecs_high) { - rxcoal->range_end = rx_coalesce_usecs_high; - rxcoal->small_pkt_range_start = rx_coalesce_usecs_low; - rxcoal->large_pkt_range_start = rx_coalesce_usecs_low + - ENIC_AIC_LARGE_PKT_DIFF; + tx_coalesce_usecs); } - break; - default: - break; + enic->tx_coalesce_usecs = tx_coalesce_usecs; + } + rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce; + if (!rxcoal->use_adaptive_rx_coalesce) + enic_intr_coal_set_rx(enic, rx_coalesce_usecs); + if (ecmd->rx_coalesce_usecs_high) { + rxcoal->range_end = rx_coalesce_usecs_high; + rxcoal->small_pkt_range_start = rx_coalesce_usecs_low; + rxcoal->large_pkt_range_start = rx_coalesce_usecs_low + + ENIC_AIC_LARGE_PKT_DIFF; } - enic->tx_coalesce_usecs = 
tx_coalesce_usecs; enic->rx_coalesce_usecs = rx_coalesce_usecs; return 0; @@ -348,7 +365,7 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd) n = htbl_fltr_search(enic, (u16)fsp->location); if (!n) return -EINVAL; - switch (n->keys.ip_proto) { + switch (n->keys.basic.ip_proto) { case IPPROTO_TCP: fsp->flow_type = TCP_V4_FLOW; break; @@ -360,16 +377,16 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd) break; } - fsp->h_u.tcp_ip4_spec.ip4src = n->keys.src; + fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys); fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0; - fsp->h_u.tcp_ip4_spec.ip4dst = n->keys.dst; + fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys); fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0; - fsp->h_u.tcp_ip4_spec.psrc = n->keys.port16[0]; + fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src; fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0; - fsp->h_u.tcp_ip4_spec.pdst = n->keys.port16[1]; + fsp->h_u.tcp_ip4_spec.pdst = n->keys.ports.dst; fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0; fsp->ring_cookie = n->rq_id; diff --git a/kernel/drivers/net/ethernet/cisco/enic/enic_main.c b/kernel/drivers/net/ethernet/cisco/enic/enic_main.c index eadae1b41..b36643ef0 100644 --- a/kernel/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/kernel/drivers/net/ethernet/cisco/enic/enic_main.c @@ -39,6 +39,7 @@ #include <linux/prefetch.h> #include <net/ip6_checksum.h> #include <linux/ktime.h> +#include <linux/numa.h> #ifdef CONFIG_RFS_ACCEL #include <linux/cpu_rmap.h> #endif @@ -112,6 +113,71 @@ static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = { {3, 6}, /* 10 - 40 Gbps */ }; +static void enic_init_affinity_hint(struct enic *enic) +{ + int numa_node = dev_to_node(&enic->pdev->dev); + int i; + + for (i = 0; i < enic->intr_count; i++) { + if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) || + (enic->msix[i].affinity_mask && + !cpumask_empty(enic->msix[i].affinity_mask))) + continue; + if (zalloc_cpumask_var(&enic->msix[i].affinity_mask, + GFP_KERNEL)) + cpumask_set_cpu(cpumask_local_spread(i, numa_node), + enic->msix[i].affinity_mask); + } +} + +static void enic_free_affinity_hint(struct enic *enic) +{ + int i; + + for (i = 0; i < enic->intr_count; i++) { + if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i)) + continue; + free_cpumask_var(enic->msix[i].affinity_mask); + } +} + +static void enic_set_affinity_hint(struct enic *enic) +{ + int i; + int err; + + for (i = 0; i < enic->intr_count; i++) { + if (enic_is_err_intr(enic, i) || + enic_is_notify_intr(enic, i) || + !enic->msix[i].affinity_mask || + cpumask_empty(enic->msix[i].affinity_mask)) + continue; + err = irq_set_affinity_hint(enic->msix_entry[i].vector, + enic->msix[i].affinity_mask); + if (err) + netdev_warn(enic->netdev, "irq_set_affinity_hint failed, err %d\n", + err); + } + + for (i = 0; i < enic->wq_count; i++) { + int wq_intr = enic_msix_wq_intr(enic, i); + + if (enic->msix[wq_intr].affinity_mask && + !cpumask_empty(enic->msix[wq_intr].affinity_mask)) + netif_set_xps_queue(enic->netdev, + enic->msix[wq_intr].affinity_mask, + i); + } +} + +static void enic_unset_affinity_hint(struct enic *enic) +{ + int i; + + for (i = 0; i < enic->intr_count; i++) + irq_set_affinity_hint(enic->msix_entry[i].vector, NULL); +} + int enic_is_dynamic(struct enic *enic) { return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN; @@ -178,13 +244,15 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, return 0; } -static void enic_log_q_error(struct enic 
*enic) +static bool enic_log_q_error(struct enic *enic) { unsigned int i; u32 error_status; + bool err = false; for (i = 0; i < enic->wq_count; i++) { error_status = vnic_wq_error_status(&enic->wq[i]); + err |= error_status; if (error_status) netdev_err(enic->netdev, "WQ[%d] error_status %d\n", i, error_status); @@ -192,10 +260,13 @@ static void enic_log_q_error(struct enic *enic) for (i = 0; i < enic->rq_count; i++) { error_status = vnic_rq_error_status(&enic->rq[i]); + err |= error_status; if (error_status) netdev_err(enic->netdev, "RQ[%d] error_status %d\n", i, error_status); } + + return err; } static void enic_msglvl_check(struct enic *enic) @@ -333,10 +404,9 @@ static irqreturn_t enic_isr_msix_err(int irq, void *data) vnic_intr_return_all_credits(&enic->intr[intr]); - enic_log_q_error(enic); - - /* schedule recovery from WQ/RQ error */ - schedule_work(&enic->reset); + if (enic_log_q_error(enic)) + /* schedule recovery from WQ/RQ error */ + schedule_work(&enic->reset); return IRQ_HANDLED; } @@ -804,7 +874,7 @@ static void enic_set_rx_mode(struct net_device *netdev) static void enic_tx_timeout(struct net_device *netdev) { struct enic *enic = netdev_priv(netdev); - schedule_work(&enic->reset); + schedule_work(&enic->tx_hang_reset); } static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) @@ -1149,6 +1219,64 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, return 0; } +static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq) +{ + unsigned int intr = enic_msix_rq_intr(enic, rq->index); + struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; + u32 timer = cq->tobe_rx_coal_timeval; + + if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) { + vnic_intr_coalescing_timer_set(&enic->intr[intr], timer); + cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval; + } +} + +static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq) +{ + struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; + struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; + struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter; + int index; + u32 timer; + u32 range_start; + u32 traffic; + u64 delta; + ktime_t now = ktime_get(); + + delta = ktime_us_delta(now, cq->prev_ts); + if (delta < ENIC_AIC_TS_BREAK) + return; + cq->prev_ts = now; + + traffic = pkt_size_counter->large_pkt_bytes_cnt + + pkt_size_counter->small_pkt_bytes_cnt; + /* The table takes Mbps + * traffic *= 8 => bits + * traffic *= (10^6 / delta) => bps + * traffic /= 10^6 => Mbps + * + * Combining, traffic *= (8 / delta) + */ + + traffic <<= 3; + traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta; + + for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++) + if (traffic < mod_table[index].rx_rate) + break; + range_start = (pkt_size_counter->small_pkt_bytes_cnt > + pkt_size_counter->large_pkt_bytes_cnt << 1) ? 
+ rx_coal->small_pkt_range_start : + rx_coal->large_pkt_range_start; + timer = range_start + ((rx_coal->range_end - range_start) * + mod_table[index].range_percent / 100); + /* Damping */ + cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1; + + pkt_size_counter->large_pkt_bytes_cnt = 0; + pkt_size_counter->small_pkt_bytes_cnt = 0; +} + static int enic_poll(struct napi_struct *napi, int budget) { struct net_device *netdev = napi->dev; @@ -1170,7 +1298,7 @@ static int enic_poll(struct napi_struct *napi, int budget) wq_work_done, 0 /* dont unmask intr */, 0 /* dont reset intr timer */); - return rq_work_done; + return budget; } if (budget > 0) @@ -1191,6 +1319,7 @@ static int enic_poll(struct napi_struct *napi, int budget) 0 /* don't reset intr timer */); err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); + enic_poll_unlock_napi(&enic->rq[cq_rq], napi); /* Buffer allocation failed. Stay in polling * mode so we can try to fill the ring again. @@ -1198,6 +1327,11 @@ static int enic_poll(struct napi_struct *napi, int budget) if (err) rq_work_done = rq_work_to_do; + if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) + /* Call the function which refreshes the intr coalescing timer + * value based on the traffic. + */ + enic_calc_int_moderation(enic, &enic->rq[0]); if (rq_work_done < rq_work_to_do) { @@ -1206,71 +1340,14 @@ static int enic_poll(struct napi_struct *napi, int budget) */ napi_complete(napi); + if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) + enic_set_int_moderation(enic, &enic->rq[0]); vnic_intr_unmask(&enic->intr[intr]); } - enic_poll_unlock_napi(&enic->rq[cq_rq]); return rq_work_done; } -static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq) -{ - unsigned int intr = enic_msix_rq_intr(enic, rq->index); - struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; - u32 timer = cq->tobe_rx_coal_timeval; - - if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) { - vnic_intr_coalescing_timer_set(&enic->intr[intr], timer); - cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval; - } -} - -static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq) -{ - struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; - struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; - struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter; - int index; - u32 timer; - u32 range_start; - u32 traffic; - u64 delta; - ktime_t now = ktime_get(); - - delta = ktime_us_delta(now, cq->prev_ts); - if (delta < ENIC_AIC_TS_BREAK) - return; - cq->prev_ts = now; - - traffic = pkt_size_counter->large_pkt_bytes_cnt + - pkt_size_counter->small_pkt_bytes_cnt; - /* The table takes Mbps - * traffic *= 8 => bits - * traffic *= (10^6 / delta) => bps - * traffic /= 10^6 => Mbps - * - * Combining, traffic *= (8 / delta) - */ - - traffic <<= 3; - traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta; - - for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++) - if (traffic < mod_table[index].rx_rate) - break; - range_start = (pkt_size_counter->small_pkt_bytes_cnt > - pkt_size_counter->large_pkt_bytes_cnt << 1) ? 
- rx_coal->small_pkt_range_start : - rx_coal->large_pkt_range_start; - timer = range_start + ((rx_coal->range_end - range_start) * - mod_table[index].range_percent / 100); - /* Damping */ - cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1; - - pkt_size_counter->large_pkt_bytes_cnt = 0; - pkt_size_counter->small_pkt_bytes_cnt = 0; -} - #ifdef CONFIG_RFS_ACCEL static void enic_free_rx_cpu_rmap(struct enic *enic) { @@ -1407,14 +1484,12 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) if (err) work_done = work_to_do; if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) - /* Call the function which refreshes - * the intr coalescing timer value based on - * the traffic. This is supported only in - * the case of MSI-x mode + /* Call the function which refreshes the intr coalescing timer + * value based on the traffic. */ enic_calc_int_moderation(enic, &enic->rq[rq]); - enic_poll_unlock_napi(&enic->rq[rq]); + enic_poll_unlock_napi(&enic->rq[rq], napi); if (work_done < work_to_do) { /* Some work done, but not enough to stay in polling, @@ -1569,12 +1644,6 @@ static void enic_set_rx_coal_setting(struct enic *enic) int index = -1; struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; - /* If intr mode is not MSIX, do not do adaptive coalescing */ - if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) { - netdev_info(enic->netdev, "INTR mode is not MSIX, Not initializing adaptive coalescing"); - return; - } - /* 1. Read the link speed from fw * 2. Pick the default range for the speed * 3. Update it in enic->rx_coalesce_setting @@ -1646,6 +1715,8 @@ static int enic_open(struct net_device *netdev) netdev_err(netdev, "Unable to request irq.\n"); return err; } + enic_init_affinity_hint(enic); + enic_set_affinity_hint(enic); err = enic_dev_notify_set(enic); if (err) { @@ -1698,6 +1769,7 @@ err_out_free_rq: vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); enic_dev_notify_unset(enic); err_out_free_intr: + enic_unset_affinity_hint(enic); enic_free_intr(enic); return err; @@ -1751,6 +1823,7 @@ static int enic_stop(struct net_device *netdev) } enic_dev_notify_unset(enic); + enic_unset_affinity_hint(enic); enic_free_intr(enic); for (i = 0; i < enic->wq_count; i++) @@ -1925,6 +1998,19 @@ static int enic_dev_open(struct enic *enic) return err; } +static int enic_dev_soft_reset(struct enic *enic) +{ + int err; + + err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset, + vnic_dev_soft_reset_done, 0); + if (err) + netdev_err(enic->netdev, "vNIC soft reset failed, err %d\n", + err); + + return err; +} + static int enic_dev_hang_reset(struct enic *enic) { int err; @@ -2061,6 +2147,26 @@ static void enic_reset(struct work_struct *work) rtnl_lock(); spin_lock(&enic->enic_api_lock); + enic_stop(enic->netdev); + enic_dev_soft_reset(enic); + enic_reset_addr_lists(enic); + enic_init_vnic_resources(enic); + enic_set_rss_nic_cfg(enic); + enic_dev_set_ig_vlan_rewrite_mode(enic); + enic_open(enic->netdev); + spin_unlock(&enic->enic_api_lock); + call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); + + rtnl_unlock(); +} + +static void enic_tx_hang_reset(struct work_struct *work) +{ + struct enic *enic = container_of(work, struct enic, tx_hang_reset); + + rtnl_lock(); + + spin_lock(&enic->enic_api_lock); enic_dev_hang_notify(enic); enic_stop(enic->netdev); enic_dev_hang_reset(enic); @@ -2273,6 +2379,7 @@ static void enic_dev_deinit(struct enic *enic) enic_free_vnic_resources(enic); enic_clear_intr_mode(enic); + enic_free_affinity_hint(enic); } static void 
enic_kdump_kernel_config(struct enic *enic) @@ -2368,6 +2475,7 @@ static int enic_dev_init(struct enic *enic) return 0; err_out_free_vnic_resources: + enic_free_affinity_hint(enic); enic_clear_intr_mode(enic); enic_free_vnic_resources(enic); @@ -2485,6 +2593,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_iounmap; } + err = vnic_devcmd_init(enic->vdev); + + if (err) + goto err_out_vnic_unregister; + #ifdef CONFIG_PCI_IOV /* Get number of subvnics */ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); @@ -2579,6 +2692,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) enic_set_rx_coal_setting(enic); INIT_WORK(&enic->reset, enic_reset); + INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset); INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work); for (i = 0; i < enic->wq_count; i++) @@ -2659,8 +2773,8 @@ err_out_disable_sriov_pp: pci_disable_sriov(pdev); enic->priv_flags &= ~ENIC_SRIOV_ENABLED; } -err_out_vnic_unregister: #endif +err_out_vnic_unregister: vnic_dev_unregister(enic->vdev); err_out_iounmap: enic_iounmap(enic); diff --git a/kernel/drivers/net/ethernet/cisco/enic/vnic_cq.c b/kernel/drivers/net/ethernet/cisco/enic/vnic_cq.c index 0daa1c707..abeda2a9e 100644 --- a/kernel/drivers/net/ethernet/cisco/enic/vnic_cq.c +++ b/kernel/drivers/net/ethernet/cisco/enic/vnic_cq.c @@ -24,6 +24,7 @@ #include "vnic_dev.h" #include "vnic_cq.h" +#include "enic.h" void vnic_cq_free(struct vnic_cq *cq) { @@ -42,7 +43,7 @@ int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index); if (!cq->ctrl) { - pr_err("Failed to hook CQ[%d] resource\n", index); + vdev_err("Failed to hook CQ[%d] resource\n", index); return -EINVAL; } diff --git a/kernel/drivers/net/ethernet/cisco/enic/vnic_dev.c b/kernel/drivers/net/ethernet/cisco/enic/vnic_dev.c index 62f7b7baf..1fdf5fe12 100644 --- a/kernel/drivers/net/ethernet/cisco/enic/vnic_dev.c +++ b/kernel/drivers/net/ethernet/cisco/enic/vnic_dev.c @@ -27,46 +27,9 @@ #include "vnic_resource.h" #include "vnic_devcmd.h" #include "vnic_dev.h" +#include "vnic_wq.h" #include "vnic_stats.h" - -enum vnic_proxy_type { - PROXY_NONE, - PROXY_BY_BDF, - PROXY_BY_INDEX, -}; - -struct vnic_res { - void __iomem *vaddr; - dma_addr_t bus_addr; - unsigned int count; -}; - -struct vnic_intr_coal_timer_info { - u32 mul; - u32 div; - u32 max_usec; -}; - -struct vnic_dev { - void *priv; - struct pci_dev *pdev; - struct vnic_res res[RES_TYPE_MAX]; - enum vnic_dev_intr_mode intr_mode; - struct vnic_devcmd __iomem *devcmd; - struct vnic_devcmd_notify *notify; - struct vnic_devcmd_notify notify_copy; - dma_addr_t notify_pa; - u32 notify_sz; - dma_addr_t linkstatus_pa; - struct vnic_stats *stats; - dma_addr_t stats_pa; - struct vnic_devcmd_fw_info *fw_info; - dma_addr_t fw_info_pa; - enum vnic_proxy_type proxy; - u32 proxy_index; - u64 args[VNIC_DEVCMD_NARGS]; - struct vnic_intr_coal_timer_info intr_coal_timer_info; -}; +#include "enic.h" #define VNIC_MAX_RES_HDR_SIZE \ (sizeof(struct vnic_resource_header) + \ @@ -90,14 +53,14 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev, return -EINVAL; if (bar->len < VNIC_MAX_RES_HDR_SIZE) { - pr_err("vNIC BAR0 res hdr length error\n"); + vdev_err("vNIC BAR0 res hdr length error\n"); return -EINVAL; } rh = bar->vaddr; mrh = bar->vaddr; if (!rh) { - pr_err("vNIC BAR0 res hdr not mem-mapped\n"); + vdev_err("vNIC BAR0 res hdr not mem-mapped\n"); return -EINVAL; } @@ -106,11 +69,10 @@ static int 
vnic_dev_discover_res(struct vnic_dev *vdev, (ioread32(&rh->version) != VNIC_RES_VERSION)) { if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) || (ioread32(&mrh->version) != MGMTVNIC_VERSION)) { - pr_err("vNIC BAR0 res magic/version error " - "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n", - VNIC_RES_MAGIC, VNIC_RES_VERSION, - MGMTVNIC_MAGIC, MGMTVNIC_VERSION, - ioread32(&rh->magic), ioread32(&rh->version)); + vdev_err("vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n", + VNIC_RES_MAGIC, VNIC_RES_VERSION, + MGMTVNIC_MAGIC, MGMTVNIC_VERSION, + ioread32(&rh->magic), ioread32(&rh->version)); return -EINVAL; } } @@ -144,17 +106,15 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev, /* each count is stride bytes long */ len = count * VNIC_RES_STRIDE; if (len + bar_offset > bar[bar_num].len) { - pr_err("vNIC BAR0 resource %d " - "out-of-bounds, offset 0x%x + " - "size 0x%x > bar len 0x%lx\n", - type, bar_offset, - len, - bar[bar_num].len); + vdev_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n", + type, bar_offset, len, + bar[bar_num].len); return -EINVAL; } break; case RES_TYPE_INTR_PBA_LEGACY: case RES_TYPE_DEVCMD: + case RES_TYPE_DEVCMD2: len = count; break; default: @@ -238,8 +198,8 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, &ring->base_addr_unaligned); if (!ring->descs_unaligned) { - pr_err("Failed to allocate ring (size=%d), aborting\n", - (int)ring->size); + vdev_err("Failed to allocate ring (size=%d), aborting\n", + (int)ring->size); return -ENOMEM; } @@ -281,7 +241,7 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, return -ENODEV; } if (status & STAT_BUSY) { - pr_err("Busy devcmd %d\n", _CMD_N(cmd)); + vdev_neterr("Busy devcmd %d\n", _CMD_N(cmd)); return -EBUSY; } @@ -315,8 +275,8 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, return -err; if (err != ERR_ECMDUNKNOWN || cmd != CMD_CAPABILITY) - pr_err("Error %d devcmd %d\n", - err, _CMD_N(cmd)); + vdev_neterr("Error %d devcmd %d\n", + err, _CMD_N(cmd)); return -err; } @@ -330,10 +290,167 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, } } - pr_err("Timedout devcmd %d\n", _CMD_N(cmd)); + vdev_neterr("Timedout devcmd %d\n", _CMD_N(cmd)); + return -ETIMEDOUT; +} + +static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + int wait) +{ + struct devcmd2_controller *dc2c = vdev->devcmd2; + struct devcmd2_result *result; + u8 color; + unsigned int i; + int delay, err; + u32 fetch_index, new_posted; + u32 posted = dc2c->posted; + + fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index); + + if (fetch_index == 0xFFFFFFFF) + return -ENODEV; + + new_posted = (posted + 1) % DEVCMD2_RING_SIZE; + + if (new_posted == fetch_index) { + vdev_neterr("devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n", + _CMD_N(cmd), fetch_index, posted); + return -EBUSY; + } + dc2c->cmd_ring[posted].cmd = cmd; + dc2c->cmd_ring[posted].flags = 0; + + if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) + dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT; + if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) + for (i = 0; i < VNIC_DEVCMD_NARGS; i++) + dc2c->cmd_ring[posted].args[i] = vdev->args[i]; + + /* Adding write memory barrier prevents compiler and/or CPU reordering, + * thus avoiding descriptor posting before descriptor is initialized. + * Otherwise, hardware can read stale descriptor fields. 
+ */ + wmb(); + iowrite32(new_posted, &dc2c->wq_ctrl->posted_index); + dc2c->posted = new_posted; + + if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) + return 0; + + result = dc2c->result + dc2c->next_result; + color = dc2c->color; + + dc2c->next_result++; + if (dc2c->next_result == dc2c->result_size) { + dc2c->next_result = 0; + dc2c->color = dc2c->color ? 0 : 1; + } + + for (delay = 0; delay < wait; delay++) { + if (result->color == color) { + if (result->error) { + err = result->error; + if (err != ERR_ECMDUNKNOWN || + cmd != CMD_CAPABILITY) + vdev_neterr("Error %d devcmd %d\n", + err, _CMD_N(cmd)); + return -err; + } + if (_CMD_DIR(cmd) & _CMD_DIR_READ) + for (i = 0; i < VNIC_DEVCMD2_NARGS; i++) + vdev->args[i] = result->results[i]; + + return 0; + } + udelay(100); + } + + vdev_neterr("devcmd %d timed out\n", _CMD_N(cmd)); + return -ETIMEDOUT; } +static int vnic_dev_init_devcmd1(struct vnic_dev *vdev) +{ + vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); + if (!vdev->devcmd) + return -ENODEV; + vdev->devcmd_rtn = _vnic_dev_cmd; + + return 0; +} + +static int vnic_dev_init_devcmd2(struct vnic_dev *vdev) +{ + int err; + unsigned int fetch_index; + + if (vdev->devcmd2) + return 0; + + vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_KERNEL); + if (!vdev->devcmd2) + return -ENOMEM; + + vdev->devcmd2->color = 1; + vdev->devcmd2->result_size = DEVCMD2_RING_SIZE; + err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE, + DEVCMD2_DESC_SIZE); + if (err) + goto err_free_devcmd2; + + fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index); + if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ + vdev_err("Fatal error in devcmd2 init - hardware surprise removal"); + + return -ENODEV; + } + + enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0, + 0); + vdev->devcmd2->posted = fetch_index; + vnic_wq_enable(&vdev->devcmd2->wq); + + err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring, + DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE); + if (err) + goto err_free_wq; + + vdev->devcmd2->result = vdev->devcmd2->results_ring.descs; + vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs; + vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl; + vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr | + VNIC_PADDR_TARGET; + vdev->args[1] = DEVCMD2_RING_SIZE; + + err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000); + if (err) + goto err_free_desc_ring; + + vdev->devcmd_rtn = _vnic_dev_cmd2; + + return 0; + +err_free_desc_ring: + vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring); +err_free_wq: + vnic_wq_disable(&vdev->devcmd2->wq); + vnic_wq_free(&vdev->devcmd2->wq); +err_free_devcmd2: + kfree(vdev->devcmd2); + vdev->devcmd2 = NULL; + + return err; +} + +static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev) +{ + vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring); + vnic_wq_disable(&vdev->devcmd2->wq); + vnic_wq_free(&vdev->devcmd2->wq); + kfree(vdev->devcmd2); +} + static int vnic_dev_cmd_proxy(struct vnic_dev *vdev, enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait) @@ -348,7 +465,7 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev, vdev->args[2] = *a0; vdev->args[3] = *a1; - err = _vnic_dev_cmd(vdev, proxy_cmd, wait); + err = vdev->devcmd_rtn(vdev, proxy_cmd, wait); if (err) return err; @@ -357,7 +474,8 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev, err = (int)vdev->args[1]; if (err != ERR_ECMDUNKNOWN || cmd != CMD_CAPABILITY) - pr_err("Error %d proxy devcmd 
%d\n", err, _CMD_N(cmd)); + vdev_neterr("Error %d proxy devcmd %d\n", err, + _CMD_N(cmd)); return err; } @@ -375,7 +493,7 @@ static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev, vdev->args[0] = *a0; vdev->args[1] = *a1; - err = _vnic_dev_cmd(vdev, cmd, wait); + err = vdev->devcmd_rtn(vdev, cmd, wait); *a0 = vdev->args[0]; *a1 = vdev->args[1]; @@ -546,14 +664,14 @@ int vnic_dev_open_done(struct vnic_dev *vdev, int *done) return 0; } -static int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg) +int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg) { u64 a0 = (u32)arg, a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait); } -static int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done) +int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done) { u64 a0 = 0, a1 = 0; int wait = 1000; @@ -650,7 +768,7 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait); if (err) - pr_err("Can't set packet filter\n"); + vdev_neterr("Can't set packet filter\n"); return err; } @@ -667,7 +785,7 @@ int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr) err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); if (err) - pr_err("Can't add addr [%pM], %d\n", addr, err); + vdev_neterr("Can't add addr [%pM], %d\n", addr, err); return err; } @@ -684,7 +802,7 @@ int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr) err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); if (err) - pr_err("Can't del addr [%pM], %d\n", addr, err); + vdev_neterr("Can't del addr [%pM], %d\n", addr, err); return err; } @@ -728,7 +846,7 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) dma_addr_t notify_pa; if (vdev->notify || vdev->notify_pa) { - pr_err("notify block %p still allocated", vdev->notify); + vdev_neterr("notify block %p still allocated", vdev->notify); return -EINVAL; } @@ -838,7 +956,7 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev) memset(vdev->args, 0, sizeof(vdev->args)); if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT)) - err = _vnic_dev_cmd(vdev, CMD_INTR_COAL_CONVERT, wait); + err = vdev->devcmd_rtn(vdev, CMD_INTR_COAL_CONVERT, wait); else err = ERR_ECMDUNKNOWN; @@ -847,7 +965,7 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev) */ if ((err == ERR_ECMDUNKNOWN) || (!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) { - pr_warn("Using default conversion factor for interrupt coalesce timer\n"); + vdev_netwarn("Using default conversion factor for interrupt coalesce timer\n"); vnic_dev_intr_coal_timer_info_default(vdev); return 0; } @@ -938,6 +1056,9 @@ void vnic_dev_unregister(struct vnic_dev *vdev) pci_free_consistent(vdev->pdev, sizeof(struct vnic_devcmd_fw_info), vdev->fw_info, vdev->fw_info_pa); + if (vdev->devcmd2) + vnic_dev_deinit_devcmd2(vdev); + kfree(vdev); } } @@ -959,10 +1080,6 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, if (vnic_dev_discover_res(vdev, bar, num_bars)) goto err_out; - vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); - if (!vdev->devcmd) - goto err_out; - return vdev; err_out: @@ -977,6 +1094,29 @@ struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev) } EXPORT_SYMBOL(vnic_dev_get_pdev); +int vnic_devcmd_init(struct vnic_dev *vdev) +{ + void __iomem *res; + int err; + + res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0); + if (res) { + err = vnic_dev_init_devcmd2(vdev); + if (err) + vdev_warn("DEVCMD2 init failed: %d, Using DEVCMD1", + err); + else + return 0; + } else { + 
vdev_warn("DEVCMD2 resource not found (old firmware?) Using DEVCMD1\n"); + } + err = vnic_dev_init_devcmd1(vdev); + if (err) + vdev_err("DEVCMD1 initialization failed: %d", err); + + return err; +} + int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len) { u64 a0, a1 = len; diff --git a/kernel/drivers/net/ethernet/cisco/enic/vnic_dev.h b/kernel/drivers/net/ethernet/cisco/enic/vnic_dev.h index 1fb214efc..54156c484 100644 --- a/kernel/drivers/net/ethernet/cisco/enic/vnic_dev.h +++ b/kernel/drivers/net/ethernet/cisco/enic/vnic_dev.h @@ -70,7 +70,48 @@ struct vnic_dev_ring { unsigned int desc_avail; }; -struct vnic_dev; +enum vnic_proxy_type { + PROXY_NONE, + PROXY_BY_BDF, + PROXY_BY_INDEX, +}; + +struct vnic_res { + void __iomem *vaddr; + dma_addr_t bus_addr; + unsigned int count; +}; + +struct vnic_intr_coal_timer_info { + u32 mul; + u32 div; + u32 max_usec; +}; + +struct vnic_dev { + void *priv; + struct pci_dev *pdev; + struct vnic_res res[RES_TYPE_MAX]; + enum vnic_dev_intr_mode intr_mode; + struct vnic_devcmd __iomem *devcmd; + struct vnic_devcmd_notify *notify; + struct vnic_devcmd_notify notify_copy; + dma_addr_t notify_pa; + u32 notify_sz; + dma_addr_t linkstatus_pa; + struct vnic_stats *stats; + dma_addr_t stats_pa; + struct vnic_devcmd_fw_info *fw_info; + dma_addr_t fw_info_pa; + enum vnic_proxy_type proxy; + u32 proxy_index; + u64 args[VNIC_DEVCMD_NARGS]; + struct vnic_intr_coal_timer_info intr_coal_timer_info; + struct devcmd2_controller *devcmd2; + int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + int wait); +}; + struct vnic_stats; void *vnic_dev_priv(struct vnic_dev *vdev); @@ -114,7 +155,9 @@ int vnic_dev_deinit(struct vnic_dev *vdev); void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev); int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev); int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg); +int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg); int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done); +int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done); void vnic_dev_set_intr_mode(struct vnic_dev *vdev, enum vnic_dev_intr_mode intr_mode); enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev); @@ -135,5 +178,6 @@ int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status); int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, struct filter *data); +int vnic_devcmd_init(struct vnic_dev *vdev); #endif /* _VNIC_DEV_H_ */ diff --git a/kernel/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/kernel/drivers/net/ethernet/cisco/enic/vnic_devcmd.h index 435d0cd96..2a812880b 100644 --- a/kernel/drivers/net/ethernet/cisco/enic/vnic_devcmd.h +++ b/kernel/drivers/net/ethernet/cisco/enic/vnic_devcmd.h @@ -365,6 +365,12 @@ enum vnic_devcmd_cmd { */ CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56), + /* Initialization for the devcmd2 interface. + * in: (u64) a0 = host result buffer physical address + * in: (u16) a1 = number of entries in result buffer + */ + CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57), + /* Add a filter. 
* in: (u64) a0= filter address * (u32) a1= size of filter @@ -629,4 +635,26 @@ struct vnic_devcmd { u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */ }; +#define DEVCMD2_FNORESULT 0x1 /* Don't copy result to host */ + +#define VNIC_DEVCMD2_NARGS VNIC_DEVCMD_NARGS +struct vnic_devcmd2 { + u16 pad; + u16 flags; + u32 cmd; + u64 args[VNIC_DEVCMD2_NARGS]; +}; + +#define VNIC_DEVCMD2_NRESULTS VNIC_DEVCMD_NARGS +struct devcmd2_result { + u64 results[VNIC_DEVCMD2_NRESULTS]; + u32 pad; + u16 completed_index; + u8 error; + u8 color; +}; + +#define DEVCMD2_RING_SIZE 32 +#define DEVCMD2_DESC_SIZE 128 + #endif /* _VNIC_DEVCMD_H_ */ diff --git a/kernel/drivers/net/ethernet/cisco/enic/vnic_intr.c b/kernel/drivers/net/ethernet/cisco/enic/vnic_intr.c index 0ca107f7b..942759d9c 100644 --- a/kernel/drivers/net/ethernet/cisco/enic/vnic_intr.c +++ b/kernel/drivers/net/ethernet/cisco/enic/vnic_intr.c @@ -25,6 +25,7 @@ #include "vnic_dev.h" #include "vnic_intr.h" +#include "enic.h" void vnic_intr_free(struct vnic_intr *intr) { @@ -39,7 +40,7 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index); if (!intr->ctrl) { - pr_err("Failed to hook INTR[%d].ctrl resource\n", index); + vdev_err("Failed to hook INTR[%d].ctrl resource\n", index); return -EINVAL; } diff --git a/kernel/drivers/net/ethernet/cisco/enic/vnic_resource.h b/kernel/drivers/net/ethernet/cisco/enic/vnic_resource.h index e0a73f1ca..4e45f88ac 100644 --- a/kernel/drivers/net/ethernet/cisco/enic/vnic_resource.h +++ b/kernel/drivers/net/ethernet/cisco/enic/vnic_resource.h @@ -48,6 +48,13 @@ enum vnic_res_type { RES_TYPE_RSVD7, RES_TYPE_DEVCMD, /* Device command region */ RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */ + RES_TYPE_SUBVNIC, /* subvnic resource type */ + RES_TYPE_MQ_WQ, /* MQ Work queues */ + RES_TYPE_MQ_RQ, /* MQ Receive queues */ + RES_TYPE_MQ_CQ, /* MQ Completion queues */ + RES_TYPE_DEPRECATED1, /* Old version of devcmd 2 */ + RES_TYPE_DEPRECATED2, /* Old version of devcmd 2 */ + RES_TYPE_DEVCMD2, /* Device control region */ RES_TYPE_MAX, /* Count of resource types */ }; diff --git a/kernel/drivers/net/ethernet/cisco/enic/vnic_rq.c b/kernel/drivers/net/ethernet/cisco/enic/vnic_rq.c index c4b2183bf..cce2777df 100644 --- a/kernel/drivers/net/ethernet/cisco/enic/vnic_rq.c +++ b/kernel/drivers/net/ethernet/cisco/enic/vnic_rq.c @@ -26,6 +26,7 @@ #include "vnic_dev.h" #include "vnic_rq.h" +#include "enic.h" static int vnic_rq_alloc_bufs(struct vnic_rq *rq) { @@ -91,7 +92,7 @@ int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index); if (!rq->ctrl) { - pr_err("Failed to hook RQ[%d] resource\n", index); + vdev_err("Failed to hook RQ[%d] resource\n", index); return -EINVAL; } @@ -167,6 +168,7 @@ void vnic_rq_enable(struct vnic_rq *rq) int vnic_rq_disable(struct vnic_rq *rq) { unsigned int wait; + struct vnic_dev *vdev = rq->vdev; iowrite32(0, &rq->ctrl->enable); @@ -177,7 +179,7 @@ int vnic_rq_disable(struct vnic_rq *rq) udelay(10); } - pr_err("Failed to disable RQ[%d]\n", rq->index); + vdev_neterr("Failed to disable RQ[%d]\n", rq->index); return -ETIMEDOUT; } diff --git a/kernel/drivers/net/ethernet/cisco/enic/vnic_rq.h b/kernel/drivers/net/ethernet/cisco/enic/vnic_rq.h index 8111d5202..b9c82f143 100644 --- a/kernel/drivers/net/ethernet/cisco/enic/vnic_rq.h +++ b/kernel/drivers/net/ethernet/cisco/enic/vnic_rq.h @@ -21,6 +21,7 @@ #define _VNIC_RQ_H_ #include <linux/pci.h> +#include 
<linux/netdevice.h> #include "vnic_dev.h" #include "vnic_cq.h" @@ -75,6 +76,12 @@ struct vnic_rq_buf { uint64_t wr_id; }; +enum enic_poll_state { + ENIC_POLL_STATE_IDLE, + ENIC_POLL_STATE_NAPI, + ENIC_POLL_STATE_POLL +}; + struct vnic_rq { unsigned int index; struct vnic_dev *vdev; @@ -86,19 +93,7 @@ struct vnic_rq { void *os_buf_head; unsigned int pkts_outstanding; #ifdef CONFIG_NET_RX_BUSY_POLL -#define ENIC_POLL_STATE_IDLE 0 -#define ENIC_POLL_STATE_NAPI (1 << 0) /* NAPI owns this poll */ -#define ENIC_POLL_STATE_POLL (1 << 1) /* poll owns this poll */ -#define ENIC_POLL_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this poll */ -#define ENIC_POLL_STATE_POLL_YIELD (1 << 3) /* poll yielded this poll */ -#define ENIC_POLL_YIELD (ENIC_POLL_STATE_NAPI_YIELD | \ - ENIC_POLL_STATE_POLL_YIELD) -#define ENIC_POLL_LOCKED (ENIC_POLL_STATE_NAPI | \ - ENIC_POLL_STATE_POLL) -#define ENIC_POLL_USER_PEND (ENIC_POLL_STATE_POLL | \ - ENIC_POLL_STATE_POLL_YIELD) - unsigned int bpoll_state; - spinlock_t bpoll_lock; + atomic_t bpoll_state; #endif /* CONFIG_NET_RX_BUSY_POLL */ }; @@ -215,76 +210,43 @@ static inline int vnic_rq_fill(struct vnic_rq *rq, #ifdef CONFIG_NET_RX_BUSY_POLL static inline void enic_busy_poll_init_lock(struct vnic_rq *rq) { - spin_lock_init(&rq->bpoll_lock); - rq->bpoll_state = ENIC_POLL_STATE_IDLE; + atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE); } static inline bool enic_poll_lock_napi(struct vnic_rq *rq) { - bool rc = true; - - spin_lock(&rq->bpoll_lock); - if (rq->bpoll_state & ENIC_POLL_LOCKED) { - WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI); - rq->bpoll_state |= ENIC_POLL_STATE_NAPI_YIELD; - rc = false; - } else { - rq->bpoll_state = ENIC_POLL_STATE_NAPI; - } - spin_unlock(&rq->bpoll_lock); + int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE, + ENIC_POLL_STATE_NAPI); - return rc; + return (rc == ENIC_POLL_STATE_IDLE); } -static inline bool enic_poll_unlock_napi(struct vnic_rq *rq) +static inline void enic_poll_unlock_napi(struct vnic_rq *rq, + struct napi_struct *napi) { - bool rc = false; - - spin_lock(&rq->bpoll_lock); - WARN_ON(rq->bpoll_state & - (ENIC_POLL_STATE_POLL | ENIC_POLL_STATE_NAPI_YIELD)); - if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD) - rc = true; - rq->bpoll_state = ENIC_POLL_STATE_IDLE; - spin_unlock(&rq->bpoll_lock); - - return rc; + WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_NAPI); + napi_gro_flush(napi, false); + atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE); } static inline bool enic_poll_lock_poll(struct vnic_rq *rq) { - bool rc = true; - - spin_lock_bh(&rq->bpoll_lock); - if (rq->bpoll_state & ENIC_POLL_LOCKED) { - rq->bpoll_state |= ENIC_POLL_STATE_POLL_YIELD; - rc = false; - } else { - rq->bpoll_state |= ENIC_POLL_STATE_POLL; - } - spin_unlock_bh(&rq->bpoll_lock); + int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE, + ENIC_POLL_STATE_POLL); - return rc; + return (rc == ENIC_POLL_STATE_IDLE); } -static inline bool enic_poll_unlock_poll(struct vnic_rq *rq) -{ - bool rc = false; - spin_lock_bh(&rq->bpoll_lock); - WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI); - if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD) - rc = true; - rq->bpoll_state = ENIC_POLL_STATE_IDLE; - spin_unlock_bh(&rq->bpoll_lock); - - return rc; +static inline void enic_poll_unlock_poll(struct vnic_rq *rq) +{ + WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_POLL); + atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE); } static inline bool enic_poll_busy_polling(struct vnic_rq *rq) { - WARN_ON(!(rq->bpoll_state & 
ENIC_POLL_LOCKED)); - return rq->bpoll_state & ENIC_POLL_USER_PEND; + return atomic_read(&rq->bpoll_state) & ENIC_POLL_STATE_POLL; } #else @@ -298,7 +260,8 @@ static inline bool enic_poll_lock_napi(struct vnic_rq *rq) return true; } -static inline bool enic_poll_unlock_napi(struct vnic_rq *rq) +static inline bool enic_poll_unlock_napi(struct vnic_rq *rq, + struct napi_struct *napi) { return false; } diff --git a/kernel/drivers/net/ethernet/cisco/enic/vnic_wq.c b/kernel/drivers/net/ethernet/cisco/enic/vnic_wq.c index b5a1c937f..05ad16a7e 100644 --- a/kernel/drivers/net/ethernet/cisco/enic/vnic_wq.c +++ b/kernel/drivers/net/ethernet/cisco/enic/vnic_wq.c @@ -26,6 +26,7 @@ #include "vnic_dev.h" #include "vnic_wq.h" +#include "enic.h" static int vnic_wq_alloc_bufs(struct vnic_wq *wq) { @@ -94,7 +95,7 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index); if (!wq->ctrl) { - pr_err("Failed to hook WQ[%d] resource\n", index); + vdev_err("Failed to hook WQ[%d] resource\n", index); return -EINVAL; } @@ -113,10 +114,27 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, return 0; } -static void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, - unsigned int fetch_index, unsigned int posted_index, - unsigned int error_interrupt_enable, - unsigned int error_interrupt_offset) +int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int desc_count, unsigned int desc_size) +{ + int err; + + wq->index = 0; + wq->vdev = vdev; + + wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0); + if (!wq->ctrl) + return -EINVAL; + vnic_wq_disable(wq); + err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); + + return err; +} + +void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) { u64 paddr; unsigned int count = wq->ring.desc_count; @@ -140,7 +158,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) { - vnic_wq_init_start(wq, cq_index, 0, 0, + enic_wq_init_start(wq, cq_index, 0, 0, error_interrupt_enable, error_interrupt_offset); } @@ -158,6 +176,7 @@ void vnic_wq_enable(struct vnic_wq *wq) int vnic_wq_disable(struct vnic_wq *wq) { unsigned int wait; + struct vnic_dev *vdev = wq->vdev; iowrite32(0, &wq->ctrl->enable); @@ -168,7 +187,7 @@ int vnic_wq_disable(struct vnic_wq *wq) udelay(10); } - pr_err("Failed to disable WQ[%d]\n", wq->index); + vdev_neterr("Failed to disable WQ[%d]\n", wq->index); return -ETIMEDOUT; } diff --git a/kernel/drivers/net/ethernet/cisco/enic/vnic_wq.h b/kernel/drivers/net/ethernet/cisco/enic/vnic_wq.h index 296154351..01209613d 100644 --- a/kernel/drivers/net/ethernet/cisco/enic/vnic_wq.h +++ b/kernel/drivers/net/ethernet/cisco/enic/vnic_wq.h @@ -88,6 +88,18 @@ struct vnic_wq { unsigned int pkts_outstanding; }; +struct devcmd2_controller { + struct vnic_wq_ctrl __iomem *wq_ctrl; + struct vnic_devcmd2 *cmd_ring; + struct devcmd2_result *result; + u16 next_result; + u16 result_size; + int color; + struct vnic_dev_ring results_ring; + struct vnic_wq wq; + u32 posted; +}; + static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) { /* how many does SW own? 
*/ @@ -174,5 +186,11 @@ void vnic_wq_enable(struct vnic_wq *wq); int vnic_wq_disable(struct vnic_wq *wq); void vnic_wq_clean(struct vnic_wq *wq, void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)); +int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int desc_count, unsigned int desc_size); +void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); #endif /* _VNIC_WQ_H_ */ |
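One of the techniques in this patch (vnic_rq.h) is replacing the spinlock-protected bpoll_state bitmask with a single atomic_t whose owner is claimed by atomic_cmpxchg() and released with a plain store. Below is a minimal userspace C11 model of that IDLE -> NAPI/POLL ownership handoff; the names and three-state enum mirror the patch, but the stdatomic.h wrapper and main() driver are illustrative assumptions, not the kernel API.

/* Minimal userspace sketch of the lock-free poll-ownership scheme the patch
 * introduces: IDLE -> NAPI or IDLE -> POLL via compare-and-swap, and a plain
 * store back to IDLE on unlock.  The kernel code uses atomic_t and
 * atomic_cmpxchg(); stdatomic.h here is purely for the illustration.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum poll_state { POLL_STATE_IDLE, POLL_STATE_NAPI, POLL_STATE_POLL };

struct rq_model { _Atomic int bpoll_state; };

static bool poll_lock_napi(struct rq_model *rq)
{
	int expected = POLL_STATE_IDLE;

	/* Succeeds only if nobody (NAPI or busy-poll) currently owns the RQ. */
	return atomic_compare_exchange_strong(&rq->bpoll_state, &expected,
					      POLL_STATE_NAPI);
}

static void poll_unlock_napi(struct rq_model *rq)
{
	/* The owner releases with a simple store; no spinlock is needed. */
	atomic_store(&rq->bpoll_state, POLL_STATE_IDLE);
}

int main(void)
{
	struct rq_model rq = { .bpoll_state = POLL_STATE_IDLE };

	printf("napi lock: %d\n", poll_lock_napi(&rq));	/* 1: acquired  */
	printf("napi lock: %d\n", poll_lock_napi(&rq));	/* 0: contended */
	poll_unlock_napi(&rq);
	return 0;
}

The design point is the same one the patch makes: ownership is a single word with three mutually exclusive values, so a cmpxchg from IDLE is sufficient and the previous yield/pending bookkeeping bits disappear.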
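The adaptive-coalescing comment in enic_calc_int_moderation() (moved, not changed, by this patch) reduces "bytes observed over delta microseconds" to Mbps as traffic * 8 / delta. A standalone check of that arithmetic follows; the sample numbers are made up for the example.

/* Standalone check of the Mbps reduction used in enic_calc_int_moderation():
 *   bits = bytes * 8
 *   bps  = bits * (10^6 / delta_us)
 *   Mbps = bps / 10^6
 * which collapses to bytes * 8 / delta_us.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t traffic_mbps(uint64_t bytes, uint64_t delta_us)
{
	uint64_t traffic = bytes << 3;		/* bytes -> bits */

	/* Mirror the driver's guard: an oversized delta just reports 0. */
	return delta_us > UINT32_MAX ? 0 :
	       (uint32_t)(traffic / (uint32_t)delta_us);
}

int main(void)
{
	/* 2,500,000 bytes seen in 4000 us -> 20,000,000 bits / 4000 us = 5000 Mbps */
	printf("%u Mbps\n", traffic_mbps(2500000, 4000));
	return 0;
}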