Diffstat (limited to 'kernel/drivers/net/wireless/iwlwifi/pcie')
-rw-r--r--  kernel/drivers/net/wireless/iwlwifi/pcie/drv.c      |  46
-rw-r--r--  kernel/drivers/net/wireless/iwlwifi/pcie/internal.h |  64
-rw-r--r--  kernel/drivers/net/wireless/iwlwifi/pcie/rx.c       | 497
-rw-r--r--  kernel/drivers/net/wireless/iwlwifi/pcie/trans.c    | 590
-rw-r--r--  kernel/drivers/net/wireless/iwlwifi/pcie/tx.c       | 152
5 files changed, 1015 insertions, 334 deletions
diff --git a/kernel/drivers/net/wireless/iwlwifi/pcie/drv.c b/kernel/drivers/net/wireless/iwlwifi/pcie/drv.c index b18569734..d58c094f2 100644 --- a/kernel/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/kernel/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -368,12 +368,14 @@ static const struct pci_device_id iwl_hw_card_ids[] = { /* 3165 Series */ {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)}, /* 7265 Series */ {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, @@ -382,6 +384,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, @@ -399,10 +402,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)}, @@ -412,38 +415,59 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)}, /* 8000 Series */ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 
0x1132, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8260_2n_cfg)}, {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)}, #endif /* CONFIG_IWLMVM */ {0} @@ -613,6 +637,7 @@ static int iwl_pci_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct iwl_trans *trans = pci_get_drvdata(pdev); + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill; /* Before you put code here, think about WoWLAN. You cannot check here @@ -629,10 +654,17 @@ static int iwl_pci_resume(struct device *device) if (!trans->op_mode) return 0; + /* + * Enable rfkill interrupt (in order to keep track of + * the rfkill status) + */ iwl_enable_rfkill_int(trans); hw_rfkill = iwl_is_rfkill_set(trans); + + mutex_lock(&trans_pcie->mutex); iwl_trans_pcie_rf_kill(trans, hw_rfkill); + mutex_unlock(&trans_pcie->mutex); return 0; } diff --git a/kernel/drivers/net/wireless/iwlwifi/pcie/internal.h b/kernel/drivers/net/wireless/iwlwifi/pcie/internal.h index 376b84e54..feb2f7e81 100644 --- a/kernel/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/kernel/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -44,6 +44,21 @@ #include "iwl-io.h" #include "iwl-op-mode.h" +/* We need 2 entries for the TX command and header, and another one might + * be needed for potential data in the SKB's head. The remaining ones can + * be used for frags. 
+ */ +#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3) + +/* + * RX related structures and functions + */ +#define RX_NUM_QUEUES 1 +#define RX_POST_REQ_ALLOC 2 +#define RX_CLAIM_REQ_ALLOC 8 +#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES) +#define RX_LOW_WATERMARK 8 + struct iwl_host_cmd; /*This file includes the declaration that are internal to the @@ -77,29 +92,29 @@ struct isr_statistics { * struct iwl_rxq - Rx queue * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) - * @pool: - * @queue: * @read: Shared index to newest available Rx buffer * @write: Shared index to oldest written Rx packet * @free_count: Number of pre-allocated buffers in rx_free + * @used_count: Number of RBDs handled to allocator to use for allocation * @write_actual: - * @rx_free: list of free SKBs for use - * @rx_used: List of Rx buffers with no SKB + * @rx_free: list of RBDs with allocated RB ready for use + * @rx_used: list of RBDs with no RB attached * @need_update: flag to indicate we need to update read/write index * @rb_stts: driver's pointer to receive buffer status * @rb_stts_dma: bus address of receive buffer status * @lock: + * @pool: initial pool of iwl_rx_mem_buffer for the queue + * @queue: actual rx queue * * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers */ struct iwl_rxq { __le32 *bd; dma_addr_t bd_dma; - struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; - struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; u32 read; u32 write; u32 free_count; + u32 used_count; u32 write_actual; struct list_head rx_free; struct list_head rx_used; @@ -107,6 +122,32 @@ struct iwl_rxq { struct iwl_rb_status *rb_stts; dma_addr_t rb_stts_dma; spinlock_t lock; + struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE]; + struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; +}; + +/** + * struct iwl_rb_allocator - Rx allocator + * @pool: initial pool of allocator + * @req_pending: number of requests the allcator had not processed yet + * @req_ready: number of requests honored and ready for claiming + * @rbd_allocated: RBDs with pages allocated and ready to be handled to + * the queue. This is a list of &struct iwl_rx_mem_buffer + * @rbd_empty: RBDs with no page attached for allocator use. 
This is a list + * of &struct iwl_rx_mem_buffer + * @lock: protects the rbd_allocated and rbd_empty lists + * @alloc_wq: work queue for background calls + * @rx_alloc: work struct for background calls + */ +struct iwl_rb_allocator { + struct iwl_rx_mem_buffer pool[RX_POOL_SIZE]; + atomic_t req_pending; + atomic_t req_ready; + struct list_head rbd_allocated; + struct list_head rbd_empty; + spinlock_t lock; + struct workqueue_struct *alloc_wq; + struct work_struct rx_alloc; }; struct iwl_dma_ptr { @@ -250,7 +291,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) /** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data - * @rx_replenish: work that will be called when buffers need to be allocated + * @rba: allocator for RX replenishing * @drv - pointer to iwl_drv * @trans: pointer to the generic transport area * @scd_base_addr: scheduler sram base address in SRAM @@ -264,8 +305,10 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) * @rx_buf_size_8k: 8 kB RX buffer size * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) * @scd_set_active: should the transport configure the SCD for HCMD queue + * @wide_cmd_header: true when ucode supports wide command header format * @rx_page_order: page order for receive buffer size * @reg_lock: protect hw register access + * @mutex: to protect stop_device / start_fw / start_hw * @cmd_in_flight: true when we have a host command in flight * @fw_mon_phys: physical address of the buffer for the firmware monitor * @fw_mon_page: points to the first page of the buffer for the firmware monitor @@ -273,7 +316,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) */ struct iwl_trans_pcie { struct iwl_rxq rxq; - struct work_struct rx_replenish; + struct iwl_rb_allocator rba; struct iwl_trans *trans; struct iwl_drv *drv; @@ -285,9 +328,11 @@ struct iwl_trans_pcie { dma_addr_t ict_tbl_dma; int ict_index; bool use_ict; + bool is_down; struct isr_statistics isr_stats; spinlock_t irq_lock; + struct mutex mutex; u32 inta_mask; u32 scd_base_addr; struct iwl_dma_ptr scd_bc_tbls; @@ -314,6 +359,7 @@ struct iwl_trans_pcie { bool rx_buf_size_8k; bool bc_table_dword; bool scd_set_active; + bool wide_cmd_header; u32 rx_page_order; const char *const *command_names; @@ -385,7 +431,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans); int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); void iwl_pcie_hcmd_complete(struct iwl_trans *trans, - struct iwl_rx_cmd_buffer *rxb, int handler_status); + struct iwl_rx_cmd_buffer *rxb); void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head *skbs); void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); diff --git a/kernel/drivers/net/wireless/iwlwifi/pcie/rx.c b/kernel/drivers/net/wireless/iwlwifi/pcie/rx.c index 7ff69c642..e06591f62 100644 --- a/kernel/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/kernel/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -1,7 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -74,16 +74,29 @@ * resets the Rx queue buffers with new memory. 
* * The management in the driver is as follows: - * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When - * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled - * to replenish the iwl->rxq->rx_free. - * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the - * iwl->rxq is replenished and the READ INDEX is updated (updating the - * 'processed' and 'read' driver indexes as well) + * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. + * When the interrupt handler is called, the request is processed. + * The page is either stolen - transferred to the upper layer + * or reused - added immediately to the iwl->rxq->rx_free list. + * + When the page is stolen - the driver updates the matching queue's used + * count, detaches the RBD and transfers it to the queue used list. + * When there are two used RBDs - they are transferred to the allocator empty + * list. Work is then scheduled for the allocator to start allocating + * eight buffers. + * When there are another 6 used RBDs - they are transferred to the allocator + * empty list and the driver tries to claim the pre-allocated buffers and + * add them to iwl->rxq->rx_free. If it fails - it continues to claim them + * until ready. + * When there are 8+ buffers in the free list - either from allocation or from + * 8 reused unstolen pages - restock is called to update the FW and indexes. + * + In order to make sure the allocator always has RBDs to use for allocation + * the allocator has initial pool in the size of num_queues*(8-2) - the + * maximum missing RBDs per allocation request (request posted with 2 + * empty RBDs, there is no guarantee when the other 6 RBDs are supplied). + * The queues supplies the recycle of the rest of the RBDs. * + A received packet is processed and handed to the kernel network stack, * detached from the iwl->rxq. The driver 'processed' index is updated. - * + The Host/Firmware iwl->rxq is replenished at irq thread time from the - * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free, + * + If there are no allocated buffers in iwl->rxq->rx_free, * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. * If there were enough free buffers and RX_STALLED is set it is cleared. * @@ -92,18 +105,32 @@ * * iwl_rxq_alloc() Allocates rx_free * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls - * iwl_pcie_rxq_restock + * iwl_pcie_rxq_restock. + * Used only during initialization. * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx * queue, updates firmware pointers, and updates - * the WRITE index. If insufficient rx_free buffers - * are available, schedules iwl_pcie_rx_replenish + * the WRITE index. + * iwl_pcie_rx_allocator() Background work for allocating pages. * * -- enable interrupts -- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the * READ INDEX, detaching the SKB from the pool. * Moves the packet buffer from queue to rx_used. + * Posts and claims requests to the allocator. * Calls iwl_pcie_rxq_restock to refill any empty * slots. + * + * RBD life-cycle: + * + * Init: + * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue + * + * Regular Receive interrupt: + * Page Stolen: + * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> + * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue + * Page not Stolen: + * rxq.queue -> rxq.rx_free -> rxq.queue * ... 
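/*
 * Illustrative userspace model (not part of this patch) of the RBD
 * bookkeeping described above: the queue hands used RBDs to the allocator,
 * posts a request once RX_POST_REQ_ALLOC of them accumulate, and claims
 * RX_CLAIM_REQ_ALLOC buffers back once a request has been served.  The two
 * constants mirror internal.h; every other name here is made up for the
 * sketch, and each "request" stands for RX_CLAIM_REQ_ALLOC pages.
 */
#include <stdatomic.h>
#include <stdio.h>

#define RX_POST_REQ_ALLOC	2
#define RX_CLAIM_REQ_ALLOC	8

static atomic_int req_pending;		/* requests posted, not yet served */
static atomic_int req_ready;		/* requests served, not yet claimed */
static unsigned int used_count;		/* RBDs currently without a page */

static void queue_reuse_rbd(void)
{
	used_count++;
	/* post a request once 2 of every 8 used RBDs have accumulated */
	if ((used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC)
		atomic_fetch_add(&req_pending, 1);
}

static void allocator_work(void)
{
	int pending = atomic_exchange(&req_pending, 0);

	/* serve each pending request with RX_CLAIM_REQ_ALLOC fresh pages */
	while (pending--)
		atomic_fetch_add(&req_ready, 1);
}

static int queue_claim(void)
{
	if (atomic_load(&req_ready) <= 0)
		return -1;			/* nothing ready yet */
	atomic_fetch_sub(&req_ready, 1);
	used_count -= RX_CLAIM_REQ_ALLOC;	/* buffers go back to rx_free */
	return RX_CLAIM_REQ_ALLOC;
}

int main(void)
{
	/* recycle 16 RBDs: two requests get posted, served and claimed */
	for (int i = 0; i < 16; i++)
		queue_reuse_rbd();
	allocator_work();
	while (queue_claim() > 0)
		printf("claimed %d buffers, %u still used\n",
		       RX_CLAIM_REQ_ALLOC, used_count);
	return 0;
}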
* */ @@ -240,10 +267,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) rxq->free_count--; } spin_unlock(&rxq->lock); - /* If the pre-allocated buffer pool is dropping low, schedule to - * refill it */ - if (rxq->free_count <= RX_LOW_WATERMARK) - schedule_work(&trans_pcie->rx_replenish); /* If we've added more space for the firmware to place data, tell it. * Increment device's write pointer in multiples of 8. */ @@ -255,6 +278,45 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) } /* + * iwl_pcie_rx_alloc_page - allocates and returns a page. + * + */ +static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, + gfp_t priority) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rxq *rxq = &trans_pcie->rxq; + struct page *page; + gfp_t gfp_mask = priority; + + if (rxq->free_count > RX_LOW_WATERMARK) + gfp_mask |= __GFP_NOWARN; + + if (trans_pcie->rx_page_order > 0) + gfp_mask |= __GFP_COMP; + + /* Alloc a new receive buffer */ + page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); + if (!page) { + if (net_ratelimit()) + IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", + trans_pcie->rx_page_order); + /* Issue an error if the hardware has consumed more than half + * of its free buffer list and we don't have enough + * pre-allocated buffers. +` */ + if (rxq->free_count <= RX_LOW_WATERMARK && + iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) && + net_ratelimit()) + IWL_CRIT(trans, + "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n", + rxq->free_count); + return NULL; + } + return page; +} + +/* * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD * * A used RBD is an Rx buffer that has been given to the stack. To use it again @@ -269,7 +331,6 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_rx_mem_buffer *rxb; struct page *page; - gfp_t gfp_mask = priority; while (1) { spin_lock(&rxq->lock); @@ -279,32 +340,10 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) } spin_unlock(&rxq->lock); - if (rxq->free_count > RX_LOW_WATERMARK) - gfp_mask |= __GFP_NOWARN; - - if (trans_pcie->rx_page_order > 0) - gfp_mask |= __GFP_COMP; - /* Alloc a new receive buffer */ - page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); - if (!page) { - if (net_ratelimit()) - IWL_DEBUG_INFO(trans, "alloc_pages failed, " - "order: %d\n", - trans_pcie->rx_page_order); - - if ((rxq->free_count <= RX_LOW_WATERMARK) && - net_ratelimit()) - IWL_CRIT(trans, "Failed to alloc_pages with %s." - "Only %u free buffers remaining.\n", - priority == GFP_ATOMIC ? - "GFP_ATOMIC" : "GFP_KERNEL", - rxq->free_count); - /* We don't reschedule replenish work here -- we will - * call the restock method and if it still needs - * more buffers it will schedule replenish */ + page = iwl_pcie_rx_alloc_page(trans, priority); + if (!page) return; - } spin_lock(&rxq->lock); @@ -355,7 +394,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) lockdep_assert_held(&rxq->lock); - for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { + for (i = 0; i < RX_QUEUE_SIZE; i++) { if (!rxq->pool[i].page) continue; dma_unmap_page(trans->dev, rxq->pool[i].page_dma, @@ -372,32 +411,164 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) * When moving to rx_free an page is allocated for the slot. * * Also restock the Rx queue via iwl_pcie_rxq_restock. 
- * This is called as a scheduled work item (except for during initialization) + * This is called only during initialization */ -static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp) +static void iwl_pcie_rx_replenish(struct iwl_trans *trans) { - iwl_pcie_rxq_alloc_rbs(trans, gfp); + iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL); iwl_pcie_rxq_restock(trans); } -static void iwl_pcie_rx_replenish_work(struct work_struct *data) +/* + * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues + * + * Allocates for each received request 8 pages + * Called as a scheduled work item. + */ +static void iwl_pcie_rx_allocator(struct iwl_trans *trans) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + struct list_head local_empty; + int pending = atomic_xchg(&rba->req_pending, 0); + + IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending); + + /* If we were scheduled - there is at least one request */ + spin_lock(&rba->lock); + /* swap out the rba->rbd_empty to a local list */ + list_replace_init(&rba->rbd_empty, &local_empty); + spin_unlock(&rba->lock); + + while (pending) { + int i; + struct list_head local_allocated; + + INIT_LIST_HEAD(&local_allocated); + + for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { + struct iwl_rx_mem_buffer *rxb; + struct page *page; + + /* List should never be empty - each reused RBD is + * returned to the list, and initial pool covers any + * possible gap between the time the page is allocated + * to the time the RBD is added. + */ + BUG_ON(list_empty(&local_empty)); + /* Get the first rxb from the rbd list */ + rxb = list_first_entry(&local_empty, + struct iwl_rx_mem_buffer, list); + BUG_ON(rxb->page); + + /* Alloc a new receive buffer */ + page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL); + if (!page) + continue; + rxb->page = page; + + /* Get physical address of the RB */ + rxb->page_dma = dma_map_page(trans->dev, page, 0, + PAGE_SIZE << trans_pcie->rx_page_order, + DMA_FROM_DEVICE); + if (dma_mapping_error(trans->dev, rxb->page_dma)) { + rxb->page = NULL; + __free_pages(page, trans_pcie->rx_page_order); + continue; + } + /* dma address must be no more than 36 bits */ + BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); + /* and also 256 byte aligned! */ + BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); + + /* move the allocated entry to the out list */ + list_move(&rxb->list, &local_allocated); + i++; + } + + pending--; + if (!pending) { + pending = atomic_xchg(&rba->req_pending, 0); + IWL_DEBUG_RX(trans, + "Pending allocation requests = %d\n", + pending); + } + + spin_lock(&rba->lock); + /* add the allocated rbds to the allocator allocated list */ + list_splice_tail(&local_allocated, &rba->rbd_allocated); + /* get more empty RBDs for current pending requests */ + list_splice_tail_init(&rba->rbd_empty, &local_empty); + spin_unlock(&rba->lock); + + atomic_inc(&rba->req_ready); + } + + spin_lock(&rba->lock); + /* return unused rbds to the allocator empty list */ + list_splice_tail(&local_empty, &rba->rbd_empty); + spin_unlock(&rba->lock); +} + +/* + * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages +.* +.* Called by queue when the queue posted allocation request and + * has freed 8 RBDs in order to restock itself. 
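/*
 * Userspace sketch (not from this patch) of the atomic_dec_if_positive()
 * gate that iwl_pcie_rx_allocator_get() relies on: the counter is only
 * decremented when it is positive, and the old value minus one is returned,
 * so a claimer seeing a negative result simply backs off with -ENOMEM.
 */
#include <stdatomic.h>
#include <stdbool.h>

static int dec_if_positive(atomic_int *v)
{
	int cur = atomic_load(v);

	/* retry until the value is non-positive or our CAS wins */
	while (cur > 0 && !atomic_compare_exchange_weak(v, &cur, cur - 1))
		;
	return cur - 1;		/* old value minus one, like the kernel helper */
}

static bool try_claim(atomic_int *req_ready)
{
	/* a negative result means no ready request to hand out */
	return dec_if_positive(req_ready) >= 0;
}

int main(void)
{
	atomic_int req_ready = 2;
	int claims = 0;

	while (try_claim(&req_ready))
		claims++;
	return claims == 2 ? 0 : 1;	/* exactly two requests were ready */
}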
+ */ +static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans, + struct iwl_rx_mem_buffer + *out[RX_CLAIM_REQ_ALLOC]) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + int i; + + /* + * atomic_dec_if_positive returns req_ready - 1 for any scenario. + * If req_ready is 0 atomic_dec_if_positive will return -1 and this + * function will return -ENOMEM, as there are no ready requests. + * atomic_dec_if_positive will perofrm the *actual* decrement only if + * req_ready > 0, i.e. - there are ready requests and the function + * hands one request to the caller. + */ + if (atomic_dec_if_positive(&rba->req_ready) < 0) + return -ENOMEM; + + spin_lock(&rba->lock); + for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { + /* Get next free Rx buffer, remove it from free list */ + out[i] = list_first_entry(&rba->rbd_allocated, + struct iwl_rx_mem_buffer, list); + list_del(&out[i]->list); + } + spin_unlock(&rba->lock); + + return 0; +} + +static void iwl_pcie_rx_allocator_work(struct work_struct *data) +{ + struct iwl_rb_allocator *rba_p = + container_of(data, struct iwl_rb_allocator, rx_alloc); struct iwl_trans_pcie *trans_pcie = - container_of(data, struct iwl_trans_pcie, rx_replenish); + container_of(rba_p, struct iwl_trans_pcie, rba); - iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL); + iwl_pcie_rx_allocator(trans_pcie->trans); } static int iwl_pcie_rx_alloc(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rb_allocator *rba = &trans_pcie->rba; struct device *dev = trans->dev; memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); spin_lock_init(&rxq->lock); + spin_lock_init(&rba->lock); if (WARN_ON(rxq->bd || rxq->rb_stts)) return -EINVAL; @@ -487,15 +658,49 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) INIT_LIST_HEAD(&rxq->rx_free); INIT_LIST_HEAD(&rxq->rx_used); rxq->free_count = 0; + rxq->used_count = 0; - for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) + for (i = 0; i < RX_QUEUE_SIZE; i++) list_add(&rxq->pool[i].list, &rxq->rx_used); } +static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba) +{ + int i; + + lockdep_assert_held(&rba->lock); + + INIT_LIST_HEAD(&rba->rbd_allocated); + INIT_LIST_HEAD(&rba->rbd_empty); + + for (i = 0; i < RX_POOL_SIZE; i++) + list_add(&rba->pool[i].list, &rba->rbd_empty); +} + +static void iwl_pcie_rx_free_rba(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + int i; + + lockdep_assert_held(&rba->lock); + + for (i = 0; i < RX_POOL_SIZE; i++) { + if (!rba->pool[i].page) + continue; + dma_unmap_page(trans->dev, rba->pool[i].page_dma, + PAGE_SIZE << trans_pcie->rx_page_order, + DMA_FROM_DEVICE); + __free_pages(rba->pool[i].page, trans_pcie->rx_page_order); + rba->pool[i].page = NULL; + } +} + int iwl_pcie_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rb_allocator *rba = &trans_pcie->rba; int i, err; if (!rxq->bd) { @@ -503,11 +708,21 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) if (err) return err; } + if (!rba->alloc_wq) + rba->alloc_wq = alloc_workqueue("rb_allocator", + WQ_HIGHPRI | WQ_UNBOUND, 1); + INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); + + spin_lock(&rba->lock); + atomic_set(&rba->req_pending, 0); + atomic_set(&rba->req_ready, 0); + /* 
free all first - we might be reconfigured for a different size */ + iwl_pcie_rx_free_rba(trans); + iwl_pcie_rx_init_rba(rba); + spin_unlock(&rba->lock); spin_lock(&rxq->lock); - INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work); - /* free all first - we might be reconfigured for a different size */ iwl_pcie_rxq_free_rbs(trans); iwl_pcie_rx_init_rxb_lists(rxq); @@ -522,7 +737,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); spin_unlock(&rxq->lock); - iwl_pcie_rx_replenish(trans, GFP_KERNEL); + iwl_pcie_rx_replenish(trans); iwl_pcie_rx_hw_init(trans, rxq); @@ -537,6 +752,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rb_allocator *rba = &trans_pcie->rba; /*if rxq->bd is NULL, it means that nothing has been allocated, * exit now */ @@ -545,7 +761,15 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) return; } - cancel_work_sync(&trans_pcie->rx_replenish); + cancel_work_sync(&rba->rx_alloc); + if (rba->alloc_wq) { + destroy_workqueue(rba->alloc_wq); + rba->alloc_wq = NULL; + } + + spin_lock(&rba->lock); + iwl_pcie_rx_free_rba(trans); + spin_unlock(&rba->lock); spin_lock(&rxq->lock); iwl_pcie_rxq_free_rbs(trans); @@ -566,8 +790,49 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) rxq->rb_stts = NULL; } +/* + * iwl_pcie_rx_reuse_rbd - Recycle used RBDs + * + * Called when a RBD can be reused. The RBD is transferred to the allocator. + * When there are 2 empty RBDs - a request for allocation is posted + */ +static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, + struct iwl_rx_mem_buffer *rxb, + struct iwl_rxq *rxq, bool emergency) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + + /* Move the RBD to the used list, will be moved to allocator in batches + * before claiming or posting a request*/ + list_add_tail(&rxb->list, &rxq->rx_used); + + if (unlikely(emergency)) + return; + + /* Count the allocator owned RBDs */ + rxq->used_count++; + + /* If we have RX_POST_REQ_ALLOC new released rx buffers - + * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is + * used for the case we failed to claim RX_CLAIM_REQ_ALLOC, + * after but we still need to post another request. + */ + if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { + /* Move the 2 RBDs to the allocator ownership. 
+ Allocator has another 6 from pool for the request completion*/ + spin_lock(&rba->lock); + list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); + spin_unlock(&rba->lock); + + atomic_inc(&rba->req_pending); + queue_work(rba->alloc_wq, &rba->rx_alloc); + } +} + static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, - struct iwl_rx_mem_buffer *rxb) + struct iwl_rx_mem_buffer *rxb, + bool emergency) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; @@ -583,10 +848,9 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) { struct iwl_rx_packet *pkt; - struct iwl_device_cmd *cmd; u16 sequence; bool reclaim; - int index, cmd_index, err, len; + int index, cmd_index, len; struct iwl_rx_cmd_buffer rxcb = { ._offset = offset, ._rx_page_order = trans_pcie->rx_page_order, @@ -634,12 +898,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, index = SEQ_TO_INDEX(sequence); cmd_index = get_cmd_index(&txq->q, index); - if (reclaim) - cmd = txq->entries[cmd_index].cmd; - else - cmd = NULL; - - err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd); + iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb); if (reclaim) { kzfree(txq->entries[cmd_index].free_buf); @@ -657,7 +916,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, * iwl_trans_send_cmd() * as we reclaim the driver command queue */ if (!rxcb._page_stolen) - iwl_pcie_hcmd_complete(trans, &rxcb, err); + iwl_pcie_hcmd_complete(trans, &rxcb); else IWL_WARN(trans, "Claim null rxb?\n"); } @@ -688,13 +947,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, */ __free_pages(rxb->page, trans_pcie->rx_page_order); rxb->page = NULL; - list_add_tail(&rxb->list, &rxq->rx_used); + iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); } else { list_add_tail(&rxb->list, &rxq->rx_free); rxq->free_count++; } } else - list_add_tail(&rxb->list, &rxq->rx_used); + iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); } /* @@ -704,10 +963,8 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; - u32 r, i; - u8 fill_rx = 0; - u32 count = 8; - int total_empty; + u32 r, i, j, count = 0; + bool emergency = false; restart: spin_lock(&rxq->lock); @@ -720,47 +977,95 @@ restart: if (i == r) IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); - /* calculate total frames need to be restock after handling RX */ - total_empty = r - rxq->write_actual; - if (total_empty < 0) - total_empty += RX_QUEUE_SIZE; - - if (total_empty > (RX_QUEUE_SIZE / 2)) - fill_rx = 1; - while (i != r) { struct iwl_rx_mem_buffer *rxb; + if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2)) + emergency = true; + rxb = rxq->queue[i]; rxq->queue[i] = NULL; IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n", r, i, rxb); - iwl_pcie_rx_handle_rb(trans, rxb); + iwl_pcie_rx_handle_rb(trans, rxb, emergency); i = (i + 1) & RX_QUEUE_MASK; - /* If there are a lot of unused frames, - * restock the Rx queue so ucode wont assert. 
*/ - if (fill_rx) { + + /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - + * try to claim the pre-allocated buffers from the allocator */ + if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) { + struct iwl_rb_allocator *rba = &trans_pcie->rba; + struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC]; + + if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && + !emergency) { + /* Add the remaining 6 empty RBDs + * for allocator use + */ + spin_lock(&rba->lock); + list_splice_tail_init(&rxq->rx_used, + &rba->rbd_empty); + spin_unlock(&rba->lock); + } + + /* If not ready - continue, will try to reclaim later. + * No need to reschedule work - allocator exits only on + * success */ + if (!iwl_pcie_rx_allocator_get(trans, out)) { + /* If success - then RX_CLAIM_REQ_ALLOC + * buffers were retrieved and should be added + * to free list */ + rxq->used_count -= RX_CLAIM_REQ_ALLOC; + for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) { + list_add_tail(&out[j]->list, + &rxq->rx_free); + rxq->free_count++; + } + } + } + if (emergency) { count++; - if (count >= 8) { - rxq->read = i; - spin_unlock(&rxq->lock); - iwl_pcie_rx_replenish(trans, GFP_ATOMIC); + if (count == 8) { count = 0; - goto restart; + if (rxq->used_count < RX_QUEUE_SIZE / 3) + emergency = false; + spin_unlock(&rxq->lock); + iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC); + spin_lock(&rxq->lock); } } + /* handle restock for three cases, can be all of them at once: + * - we just pulled buffers from the allocator + * - we have 8+ unstolen pages accumulated + * - we are in emergency and allocated buffers + */ + if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) { + rxq->read = i; + spin_unlock(&rxq->lock); + iwl_pcie_rxq_restock(trans); + goto restart; + } } /* Backtrack one entry */ rxq->read = i; spin_unlock(&rxq->lock); - if (fill_rx) - iwl_pcie_rx_replenish(trans, GFP_ATOMIC); - else - iwl_pcie_rxq_restock(trans); + /* + * handle a case where in emergency there are some unallocated RBDs. + * those RBDs are in the used list, but are not tracked by the queue's + * used_count which counts allocator owned RBDs. + * unallocated emergency RBDs must be allocated on exit, otherwise + * when called again the function may not be in emergency mode and + * they will be handed to the allocator with no tracking in the RBD + * allocator counters, which will lead to them never being claimed back + * by the queue. + * by allocating them here, they are now in the queue free list, and + * will be restocked by the next call of iwl_pcie_rxq_restock. 
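/*
 * Hedged sketch of the emergency hysteresis used by iwl_pcie_rx_handle():
 * emergency mode is entered once half of the RX ring consists of RBDs
 * without pages and left only after that count drops below a third.  The
 * 256-entry ring size is an assumption for illustration; the driver checks
 * the thresholds as it walks the ring rather than through a helper.
 */
#include <stdbool.h>

#define RX_QUEUE_SIZE	256	/* assumed ring size */

static bool update_emergency(bool emergency, unsigned int used_count)
{
	if (used_count >= RX_QUEUE_SIZE / 2)
		return true;	/* too many RBDs without pages attached */
	if (emergency && used_count < RX_QUEUE_SIZE / 3)
		return false;	/* enough buffers recovered, back to normal */
	return emergency;
}

int main(void)
{
	bool emergency = false;

	emergency = update_emergency(emergency, 128);	/* enters emergency */
	emergency = update_emergency(emergency, 100);	/* still >= 1/3, stays */
	emergency = update_emergency(emergency, 80);	/* below 1/3, leaves */
	return emergency ? 1 : 0;
}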
+ */ + if (unlikely(emergency && count)) + iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC); if (trans_pcie->napi.poll) napi_gro_flush(&trans_pcie->napi, false); @@ -772,9 +1077,11 @@ restart: static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i; /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ if (trans->cfg->internal_wimax_coex && + !trans->cfg->apmg_not_supported && (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & APMS_CLK_VAL_MRB_FUNC_MODE) || (iwl_read_prph(trans, APMG_PS_CTRL_REG) & @@ -794,6 +1101,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) iwl_trans_fw_error(trans); local_bh_enable(); + for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) + del_timer(&trans_pcie->txq[i].stuck_timer); + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); wake_up(&trans_pcie->wait_command_queue); } @@ -1002,7 +1312,9 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) isr_stats->rfkill++; + mutex_lock(&trans_pcie->mutex); iwl_trans_pcie_rf_kill(trans, hw_rfkill); + mutex_unlock(&trans_pcie->mutex); if (hw_rfkill) { set_bit(STATUS_RFKILL, &trans->status); if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, @@ -1194,8 +1506,9 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans) val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; - val |= CSR_DRAM_INT_TBL_ENABLE; - val |= CSR_DRAM_INIT_TBL_WRAP_CHECK; + val |= CSR_DRAM_INT_TBL_ENABLE | + CSR_DRAM_INIT_TBL_WRAP_CHECK | + CSR_DRAM_INIT_TBL_WRITE_POINTER; IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val); diff --git a/kernel/drivers/net/wireless/iwlwifi/pcie/trans.c b/kernel/drivers/net/wireless/iwlwifi/pcie/trans.c index 699a48028..8c7204738 100644 --- a/kernel/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/kernel/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -7,6 +7,7 @@ * * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -101,14 +103,26 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans) trans_pcie->fw_mon_size = 0; } -static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans) +static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct page *page = NULL; dma_addr_t phys; - u32 size; + u32 size = 0; u8 power; + if (!max_power) { + /* default max_power is maximum */ + max_power = 26; + } else { + max_power += 11; + } + + if (WARN(max_power > 26, + "External buffer size for monitor is too big %d, check the FW TLV\n", + max_power)) + return; + if (trans_pcie->fw_mon_page) { dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys, trans_pcie->fw_mon_size, @@ -117,7 +131,7 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans) } phys = 0; - for (power = 26; power >= 11; power--) { + for (power = max_power; power >= 11; power--) { int order; size = BIT(power); @@ -143,6 +157,12 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans) if (WARN_ON_ONCE(!page)) return; + if (power != max_power) + IWL_ERR(trans, + "Sorry - debug buffer is only %luK while you requested %luK\n", + (unsigned long)BIT(power - 10), + (unsigned long)BIT(max_power - 10)); + trans_pcie->fw_mon_page = page; trans_pcie->fw_mon_phys = phys; trans_pcie->fw_mon_size = size; @@ -164,6 +184,9 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val) static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) { + if (trans->cfg->apmg_not_supported) + return; + if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_PWR_SRC_VAUX, @@ -297,7 +320,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans) * bits do not disable clocks. This preserves any hardware * bits already set by default in "CLK_CTRL_REG" after reset. */ - if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { + if (!trans->cfg->apmg_not_supported) { iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT); udelay(20); @@ -503,8 +526,7 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans) spin_unlock(&trans_pcie->irq_lock); - if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) - iwl_pcie_set_pwr(trans, false); + iwl_pcie_set_pwr(trans, false); iwl_op_mode_nic_config(trans->op_mode); @@ -572,10 +594,8 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) do { ret = iwl_pcie_set_hw_ready(trans); - if (ret >= 0) { - ret = 0; - goto out; - } + if (ret >= 0) + return 0; usleep_range(200, 1000); t += 200; @@ -585,10 +605,6 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) IWL_ERR(trans, "Couldn't prepare the card\n"); -out: - iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, - CSR_RESET_LINK_PWR_MGMT_DISABLED); - return ret; } @@ -760,8 +776,15 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) { last_read_idx = i; + /* + * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between + * CPU1 to CPU2. + * PAGING_SEPARATOR_SECTION delimiter - separate between + * CPU2 non paged to CPU2 paging sec. 
+ */ if (!image->sec[i].data || - image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) { + image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || + image->sec[i].offset == PAGING_SEPARATOR_SECTION) { IWL_DEBUG_FW(trans, "Break since Data not valid or Empty section, sec = %d\n", i); @@ -809,8 +832,15 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) { last_read_idx = i; + /* + * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between + * CPU1 to CPU2. + * PAGING_SEPARATOR_SECTION delimiter - separate between + * CPU2 non paged to CPU2 paging sec. + */ if (!image->sec[i].data || - image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) { + image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || + image->sec[i].offset == PAGING_SEPARATOR_SECTION) { IWL_DEBUG_FW(trans, "Break since Data not valid or Empty section, sec = %d\n", i); @@ -850,7 +880,7 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans) get_fw_dbg_mode_string(dest->monitor_mode)); if (dest->monitor_mode == EXTERNAL_MODE) - iwl_pcie_alloc_fw_monitor(trans); + iwl_pcie_alloc_fw_monitor(trans, dest->size_power); else IWL_WARN(trans, "PCI should have external buffer debug\n"); @@ -877,6 +907,14 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans) case PRPH_CLEARBIT: iwl_clear_bits_prph(trans, addr, BIT(val)); break; + case PRPH_BLOCKBIT: + if (iwl_read_prph(trans, addr) & BIT(val)) { + IWL_ERR(trans, + "BIT(%u) in address 0x%x is 1, stopping FW configuration\n", + val, addr); + goto monitor; + } + break; default: IWL_ERR(trans, "FW debug - unknown OP %d\n", dest->reg_ops[i].op); @@ -884,12 +922,20 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans) } } +monitor: if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) { iwl_write_prph(trans, le32_to_cpu(dest->base_reg), trans_pcie->fw_mon_phys >> dest->base_shift); - iwl_write_prph(trans, le32_to_cpu(dest->end_reg), - (trans_pcie->fw_mon_phys + - trans_pcie->fw_mon_size) >> dest->end_shift); + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) + iwl_write_prph(trans, le32_to_cpu(dest->end_reg), + (trans_pcie->fw_mon_phys + + trans_pcie->fw_mon_size - 256) >> + dest->end_shift); + else + iwl_write_prph(trans, le32_to_cpu(dest->end_reg), + (trans_pcie->fw_mon_phys + + trans_pcie->fw_mon_size) >> + dest->end_shift); } } @@ -924,7 +970,7 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, /* supported for 7000 only for the moment */ if (iwlwifi_mod_params.fw_monitor && trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { - iwl_pcie_alloc_fw_monitor(trans); + iwl_pcie_alloc_fw_monitor(trans, 0); if (trans_pcie->fw_mon_size) { iwl_write_prph(trans, MON_BUFF_BASE_ADDR, @@ -971,24 +1017,32 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans, return ret; /* load to FW the binary sections of CPU2 */ - ret = iwl_pcie_load_cpu_sections_8000(trans, image, 2, - &first_ucode_section); - if (ret) - return ret; - - return 0; + return iwl_pcie_load_cpu_sections_8000(trans, image, 2, + &first_ucode_section); } static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill) { - int ret; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill; + int ret; + + mutex_lock(&trans_pcie->mutex); + + /* Someone called stop_device, don't try to start_fw */ + if (trans_pcie->is_down) { + IWL_WARN(trans, + "Can't start_fw since the HW hasn't been started\n"); + ret = EIO; + goto 
out; + } /* This may fail if AMT took ownership of the device */ if (iwl_pcie_prepare_card_hw(trans)) { IWL_WARN(trans, "Exit HW not ready\n"); - return -EIO; + ret = -EIO; + goto out; } iwl_enable_rfkill_int(trans); @@ -1000,15 +1054,17 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, else clear_bit(STATUS_RFKILL, &trans->status); iwl_trans_pcie_rf_kill(trans, hw_rfkill); - if (hw_rfkill && !run_in_rfkill) - return -ERFKILL; + if (hw_rfkill && !run_in_rfkill) { + ret = -ERFKILL; + goto out; + } iwl_write32(trans, CSR_INT, 0xFFFFFFFF); ret = iwl_pcie_nic_init(trans); if (ret) { IWL_ERR(trans, "Unable to init nic\n"); - return ret; + goto out; } /* make sure rfkill handshake bits are cleared */ @@ -1026,9 +1082,13 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, /* Load the given image to the HW */ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) - return iwl_pcie_load_given_ucode_8000(trans, fw); + ret = iwl_pcie_load_given_ucode_8000(trans, fw); else - return iwl_pcie_load_given_ucode(trans, fw); + ret = iwl_pcie_load_given_ucode(trans, fw); + +out: + mutex_unlock(&trans_pcie->mutex); + return ret; } static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) @@ -1037,11 +1097,18 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) iwl_pcie_tx_start(trans, scd_addr); } -static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) +static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill, was_hw_rfkill; + lockdep_assert_held(&trans_pcie->mutex); + + if (trans_pcie->is_down) + return; + + trans_pcie->is_down = true; + was_hw_rfkill = iwl_is_rfkill_set(trans); /* tell the device to stop sending interrupts */ @@ -1065,7 +1132,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) iwl_pcie_rx_stop(trans); /* Power-down device's busmaster DMA clocks */ - if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { + if (!trans->cfg->apmg_not_supported) { iwl_write_prph(trans, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); udelay(5); @@ -1131,14 +1198,36 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) iwl_pcie_prepare_card_hw(trans); } +static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + mutex_lock(&trans_pcie->mutex); + _iwl_trans_pcie_stop_device(trans, low_power); + mutex_unlock(&trans_pcie->mutex); +} + void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) { + struct iwl_trans_pcie __maybe_unused *trans_pcie = + IWL_TRANS_GET_PCIE_TRANS(trans); + + lockdep_assert_held(&trans_pcie->mutex); + if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) - iwl_trans_pcie_stop_device(trans, true); + _iwl_trans_pcie_stop_device(trans, true); } static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (trans->wowlan_d0i3) { + /* Enable persistence mode to avoid reset */ + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PERSIST_MODE); + } + iwl_disable_interrupts(trans); /* @@ -1150,17 +1239,21 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) iwl_pcie_disable_ict(trans); + synchronize_irq(trans_pcie->pci_dev->irq); + iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 
iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); - /* - * reset TX queues -- some of their registers reset during S3 - * so if we don't reset everything here the D3 image would try - * to execute some invalid memory upon resume - */ - iwl_trans_pcie_tx_reset(trans); + if (!trans->wowlan_d0i3) { + /* + * reset TX queues -- some of their registers reset during S3 + * so if we don't reset everything here the D3 image would try + * to execute some invalid memory upon resume + */ + iwl_trans_pcie_tx_reset(trans); + } iwl_pcie_set_pwr(trans, true); } @@ -1202,12 +1295,18 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, iwl_pcie_set_pwr(trans, false); - iwl_trans_pcie_tx_reset(trans); + if (trans->wowlan_d0i3) { + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + } else { + iwl_trans_pcie_tx_reset(trans); - ret = iwl_pcie_rx_init(trans); - if (ret) { - IWL_ERR(trans, "Failed to resume the device (RX reset)\n"); - return ret; + ret = iwl_pcie_rx_init(trans); + if (ret) { + IWL_ERR(trans, + "Failed to resume the device (RX reset)\n"); + return ret; + } } val = iwl_read32(trans, CSR_RESET); @@ -1219,11 +1318,14 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, return 0; } -static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) +static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill; int err; + lockdep_assert_held(&trans_pcie->mutex); + err = iwl_pcie_prepare_card_hw(trans); if (err) { IWL_ERR(trans, "Error while preparing HW: %d\n", err); @@ -1240,20 +1342,38 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) /* From now on, the op_mode will be kept updated about RF kill state */ iwl_enable_rfkill_int(trans); + /* Set is_down to false here so that...*/ + trans_pcie->is_down = false; + hw_rfkill = iwl_is_rfkill_set(trans); if (hw_rfkill) set_bit(STATUS_RFKILL, &trans->status); else clear_bit(STATUS_RFKILL, &trans->status); + /* ... 
rfkill can call stop_device and set it false if needed */ iwl_trans_pcie_rf_kill(trans, hw_rfkill); return 0; } +static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + + mutex_lock(&trans_pcie->mutex); + ret = _iwl_trans_pcie_start_hw(trans, low_power); + mutex_unlock(&trans_pcie->mutex); + + return ret; +} + static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + mutex_lock(&trans_pcie->mutex); + /* disable interrupts - don't enable HW RF kill interrupt */ spin_lock(&trans_pcie->irq_lock); iwl_disable_interrupts(trans); @@ -1266,6 +1386,10 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) spin_unlock(&trans_pcie->irq_lock); iwl_pcie_disable_ict(trans); + + mutex_unlock(&trans_pcie->mutex); + + synchronize_irq(trans_pcie->pci_dev->irq); } static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) @@ -1326,6 +1450,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, else trans_pcie->rx_page_order = get_order(4 * 1024); + trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header; trans_pcie->command_names = trans_cfg->command_names; trans_pcie->bc_table_dword = trans_cfg->bc_table_dword; trans_pcie->scd_set_active = trans_cfg->scd_set_active; @@ -1338,11 +1463,10 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, * As this function may be called again in some corner cases don't * do anything if NAPI was already initialized. */ - if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) { + if (!trans_pcie->napi.poll) { init_dummy_netdev(&trans_pcie->napi_dev); - iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi, - &trans_pcie->napi_dev, - iwl_pcie_dummy_napi_poll, 64); + netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi, + iwl_pcie_dummy_napi_poll, 64); } } @@ -1362,14 +1486,13 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) iounmap(trans_pcie->hw_base); pci_release_regions(trans_pcie->pci_dev); pci_disable_device(trans_pcie->pci_dev); - kmem_cache_destroy(trans->dev_cmd_pool); if (trans_pcie->napi.poll) netif_napi_del(&trans_pcie->napi); iwl_pcie_free_fw_monitor(trans); - kfree(trans); + iwl_trans_free(trans); } static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) @@ -2170,6 +2293,47 @@ static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans, return prph_len; } +static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data, + int allocated_rb_nums) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int max_len = PAGE_SIZE << trans_pcie->rx_page_order; + struct iwl_rxq *rxq = &trans_pcie->rxq; + u32 i, r, j, rb_len = 0; + + spin_lock(&rxq->lock); + + r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; + + for (i = rxq->read, j = 0; + i != r && j < allocated_rb_nums; + i = (i + 1) & RX_QUEUE_MASK, j++) { + struct iwl_rx_mem_buffer *rxb = rxq->queue[i]; + struct iwl_fw_error_dump_rb *rb; + + dma_unmap_page(trans->dev, rxb->page_dma, max_len, + DMA_FROM_DEVICE); + + rb_len += sizeof(**data) + sizeof(*rb) + max_len; + + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB); + (*data)->len = cpu_to_le32(sizeof(*rb) + max_len); + rb = (void *)(*data)->data; + rb->index = cpu_to_le32(i); + memcpy(rb->data, page_address(rxb->page), max_len); + /* remap the page for the free benefit */ + rxb->page_dma = dma_map_page(trans->dev, 
rxb->page, 0, + max_len, + DMA_FROM_DEVICE); + + *data = iwl_fw_error_next_data(*data); + } + + spin_unlock(&rxq->lock); + + return rb_len; +} #define IWL_CSR_TO_DUMP (0x250) static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans, @@ -2216,17 +2380,120 @@ static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans, return sizeof(**data) + fh_regs_len; } -static -struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans) +static u32 +iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans, + struct iwl_fw_error_dump_fw_mon *fw_mon_data, + u32 monitor_len) +{ + u32 buf_size_in_dwords = (monitor_len >> 2); + u32 *buffer = (u32 *)fw_mon_data->data; + unsigned long flags; + u32 i; + + if (!iwl_trans_grab_nic_access(trans, false, &flags)) + return 0; + + __iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x1); + for (i = 0; i < buf_size_in_dwords; i++) + buffer[i] = __iwl_read_prph(trans, MON_DMARB_RD_DATA_ADDR); + __iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x0); + + iwl_trans_release_nic_access(trans, &flags); + + return monitor_len; +} + +static u32 +iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data, + u32 monitor_len) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 len = 0; + + if ((trans_pcie->fw_mon_page && + trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) || + trans->dbg_dest_tlv) { + struct iwl_fw_error_dump_fw_mon *fw_mon_data; + u32 base, write_ptr, wrap_cnt; + + /* If there was a dest TLV - use the values from there */ + if (trans->dbg_dest_tlv) { + write_ptr = + le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg); + wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count); + base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); + } else { + base = MON_BUFF_BASE_ADDR; + write_ptr = MON_BUFF_WRPTR; + wrap_cnt = MON_BUFF_CYCLE_CNT; + } + + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); + fw_mon_data = (void *)(*data)->data; + fw_mon_data->fw_mon_wr_ptr = + cpu_to_le32(iwl_read_prph(trans, write_ptr)); + fw_mon_data->fw_mon_cycle_cnt = + cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); + fw_mon_data->fw_mon_base_ptr = + cpu_to_le32(iwl_read_prph(trans, base)); + + len += sizeof(**data) + sizeof(*fw_mon_data); + if (trans_pcie->fw_mon_page) { + /* + * The firmware is now asserted, it won't write anything + * to the buffer. CPU can take ownership to fetch the + * data. The buffer will be handed back to the device + * before the firmware will be restarted. 
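/*
 * Small sketch (not from this patch) of the ring arithmetic used when
 * dumping receive buffers: the hardware's closed_rb_num is masked to twelve
 * bits and its distance from the driver's read pointer wraps with
 * RX_QUEUE_MASK.  The 256-entry ring size is assumed for illustration.
 */
#include <stdint.h>

#define RX_QUEUE_SIZE	256			/* assumed ring size */
#define RX_QUEUE_MASK	(RX_QUEUE_SIZE - 1)

static uint32_t rbs_to_dump(uint16_t closed_rb_num, uint32_t read)
{
	uint32_t r = closed_rb_num & 0x0FFF;	/* hardware write pointer */

	return (r - read) & RX_QUEUE_MASK;	/* entries between read and r */
}

int main(void)
{
	/* hardware pointer at 10, driver read at 250 -> 16 buffers to dump */
	return rbs_to_dump(10, 250) == 16 ? 0 : 1;
}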
+ */ + dma_sync_single_for_cpu(trans->dev, + trans_pcie->fw_mon_phys, + trans_pcie->fw_mon_size, + DMA_FROM_DEVICE); + memcpy(fw_mon_data->data, + page_address(trans_pcie->fw_mon_page), + trans_pcie->fw_mon_size); + + monitor_len = trans_pcie->fw_mon_size; + } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) { + /* + * Update pointers to reflect actual values after + * shifting + */ + base = iwl_read_prph(trans, base) << + trans->dbg_dest_tlv->base_shift; + iwl_trans_read_mem(trans, base, fw_mon_data->data, + monitor_len / sizeof(u32)); + } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) { + monitor_len = + iwl_trans_pci_dump_marbh_monitor(trans, + fw_mon_data, + monitor_len); + } else { + /* Didn't match anything - output no monitor data */ + monitor_len = 0; + } + + len += monitor_len; + (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data)); + } + + return len; +} + +static struct iwl_trans_dump_data +*iwl_trans_pcie_dump_data(struct iwl_trans *trans, + struct iwl_fw_dbg_trigger_tlv *trigger) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_fw_error_dump_data *data; struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_fw_error_dump_txcmd *txcmd; struct iwl_trans_dump_data *dump_data; - u32 len; + u32 len, num_rbs; u32 monitor_len; int i, ptr; + bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status); /* transport dump header */ len = sizeof(*dump_data); @@ -2235,22 +2502,6 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans) len += sizeof(*data) + cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE); - /* CSR registers */ - len += sizeof(*data) + IWL_CSR_TO_DUMP; - - /* PRPH registers */ - for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) { - /* The range includes both boundaries */ - int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - - iwl_prph_dump_addr[i].start + 4; - - len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) + - num_bytes_in_chunk; - } - - /* FH registers */ - len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); - /* FW monitor */ if (trans_pcie->fw_mon_page) { len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) + @@ -2268,7 +2519,8 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans) trans->dbg_dest_tlv->end_shift; /* Make "end" point to the actual end */ - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 || + trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) end += (1 << trans->dbg_dest_tlv->end_shift); monitor_len = end - base; len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) + @@ -2277,6 +2529,45 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans) monitor_len = 0; } + if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) { + dump_data = vzalloc(len); + if (!dump_data) + return NULL; + + data = (void *)dump_data->data; + len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); + dump_data->len = len; + + return dump_data; + } + + /* CSR registers */ + len += sizeof(*data) + IWL_CSR_TO_DUMP; + + /* PRPH registers */ + for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) { + /* The range includes both boundaries */ + int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - + iwl_prph_dump_addr[i].start + 4; + + len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) + + num_bytes_in_chunk; + } + + /* FH registers */ + len += sizeof(*data) + (FH_MEM_UPPER_BOUND - 
FH_MEM_LOWER_BOUND); + + if (dump_rbs) { + /* RBs */ + num_rbs = le16_to_cpu(ACCESS_ONCE( + trans_pcie->rxq.rb_stts->closed_rb_num)) + & 0x0FFF; + num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK; + len += num_rbs * (sizeof(*data) + + sizeof(struct iwl_fw_error_dump_rb) + + (PAGE_SIZE << trans_pcie->rx_page_order)); + } + dump_data = vzalloc(len); if (!dump_data) return NULL; @@ -2313,71 +2604,10 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans) len += iwl_trans_pcie_dump_prph(trans, &data); len += iwl_trans_pcie_dump_csr(trans, &data); len += iwl_trans_pcie_fh_regs_dump(trans, &data); - /* data is already pointing to the next section */ - - if ((trans_pcie->fw_mon_page && - trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) || - trans->dbg_dest_tlv) { - struct iwl_fw_error_dump_fw_mon *fw_mon_data; - u32 base, write_ptr, wrap_cnt; - - /* If there was a dest TLV - use the values from there */ - if (trans->dbg_dest_tlv) { - write_ptr = - le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg); - wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count); - base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); - } else { - base = MON_BUFF_BASE_ADDR; - write_ptr = MON_BUFF_WRPTR; - wrap_cnt = MON_BUFF_CYCLE_CNT; - } - - data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); - fw_mon_data = (void *)data->data; - fw_mon_data->fw_mon_wr_ptr = - cpu_to_le32(iwl_read_prph(trans, write_ptr)); - fw_mon_data->fw_mon_cycle_cnt = - cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); - fw_mon_data->fw_mon_base_ptr = - cpu_to_le32(iwl_read_prph(trans, base)); - - len += sizeof(*data) + sizeof(*fw_mon_data); - if (trans_pcie->fw_mon_page) { - data->len = cpu_to_le32(trans_pcie->fw_mon_size + - sizeof(*fw_mon_data)); - - /* - * The firmware is now asserted, it won't write anything - * to the buffer. CPU can take ownership to fetch the - * data. The buffer will be handed back to the device - * before the firmware will be restarted. 
- */ - dma_sync_single_for_cpu(trans->dev, - trans_pcie->fw_mon_phys, - trans_pcie->fw_mon_size, - DMA_FROM_DEVICE); - memcpy(fw_mon_data->data, - page_address(trans_pcie->fw_mon_page), - trans_pcie->fw_mon_size); + if (dump_rbs) + len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); - len += trans_pcie->fw_mon_size; - } else { - /* If we are here then the buffer is internal */ - - /* - * Update pointers to reflect actual values after - * shifting - */ - base = iwl_read_prph(trans, base) << - trans->dbg_dest_tlv->base_shift; - iwl_trans_read_mem(trans, base, fw_mon_data->data, - monitor_len / sizeof(u32)); - data->len = cpu_to_le32(sizeof(*fw_mon_data) + - monitor_len); - len += monitor_len; - } - } + len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); dump_data->len = len; @@ -2433,28 +2663,26 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, struct iwl_trans_pcie *trans_pcie; struct iwl_trans *trans; u16 pci_cmd; - int err; + int ret; - trans = kzalloc(sizeof(struct iwl_trans) + - sizeof(struct iwl_trans_pcie), GFP_KERNEL); - if (!trans) { - err = -ENOMEM; - goto out; - } + trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), + &pdev->dev, cfg, &trans_ops_pcie, 0); + if (!trans) + return ERR_PTR(-ENOMEM); + + trans->max_skb_frags = IWL_PCIE_MAX_FRAGS; trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - trans->ops = &trans_ops_pcie; - trans->cfg = cfg; - trans_lockdep_init(trans); trans_pcie->trans = trans; spin_lock_init(&trans_pcie->irq_lock); spin_lock_init(&trans_pcie->reg_lock); spin_lock_init(&trans_pcie->ref_lock); + mutex_init(&trans_pcie->mutex); init_waitqueue_head(&trans_pcie->ucode_write_waitq); - err = pci_enable_device(pdev); - if (err) + ret = pci_enable_device(pdev); + if (ret) goto out_no_pci; if (!cfg->base_params->pcie_l1_allowed) { @@ -2470,23 +2698,23 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, pci_set_master(pdev); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); - if (err) { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); + if (!ret) + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); + if (ret) { + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (!ret) + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); /* both attempts failed: */ - if (err) { + if (ret) { dev_err(&pdev->dev, "No suitable DMA available\n"); goto out_pci_disable_device; } } - err = pci_request_regions(pdev, DRV_NAME); - if (err) { + ret = pci_request_regions(pdev, DRV_NAME); + if (ret) { dev_err(&pdev->dev, "pci_request_regions failed\n"); goto out_pci_disable_device; } @@ -2494,7 +2722,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->hw_base = pci_ioremap_bar(pdev, 0); if (!trans_pcie->hw_base) { dev_err(&pdev->dev, "pci_ioremap_bar failed\n"); - err = -ENODEV; + ret = -ENODEV; goto out_pci_release_regions; } @@ -2506,9 +2734,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->pci_dev = pdev; iwl_disable_interrupts(trans); - err = pci_enable_msi(pdev); - if (err) { - dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err); + ret = pci_enable_msi(pdev); + if (ret) { + dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret); /* enable rfkill interrupt: hw bug w/a */ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { @@ -2526,7 +2754,6 @@ struct iwl_trans 
*iwl_trans_pcie_alloc(struct pci_dev *pdev, */ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { unsigned long flags; - int ret; trans->hw_rev = (trans->hw_rev & 0xfff0) | (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); @@ -2576,30 +2803,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, /* Initialize the wait queue for commands */ init_waitqueue_head(&trans_pcie->wait_command_queue); - snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name), - "iwl_cmd_pool:%s", dev_name(trans->dev)); - - trans->dev_cmd_headroom = 0; - trans->dev_cmd_pool = - kmem_cache_create(trans->dev_cmd_pool_name, - sizeof(struct iwl_device_cmd) - + trans->dev_cmd_headroom, - sizeof(void *), - SLAB_HWCACHE_ALIGN, - NULL); - - if (!trans->dev_cmd_pool) { - err = -ENOMEM; + ret = iwl_pcie_alloc_ict(trans); + if (ret) goto out_pci_disable_msi; - } - if (iwl_pcie_alloc_ict(trans)) - goto out_free_cmd_pool; - - err = request_threaded_irq(pdev->irq, iwl_pcie_isr, + ret = request_threaded_irq(pdev->irq, iwl_pcie_isr, iwl_pcie_irq_handler, IRQF_SHARED, DRV_NAME, trans); - if (err) { + if (ret) { IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); goto out_free_ict; } @@ -2611,8 +2822,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, out_free_ict: iwl_pcie_free_ict(trans); -out_free_cmd_pool: - kmem_cache_destroy(trans->dev_cmd_pool); out_pci_disable_msi: pci_disable_msi(pdev); out_pci_release_regions: @@ -2620,7 +2829,6 @@ out_pci_release_regions: out_pci_disable_device: pci_disable_device(pdev); out_no_pci: - kfree(trans); -out: - return ERR_PTR(err); + iwl_trans_free(trans); + return ERR_PTR(ret); } diff --git a/kernel/drivers/net/wireless/iwlwifi/pcie/tx.c b/kernel/drivers/net/wireless/iwlwifi/pcie/tx.c index 5ef8044c2..a8c8a4a74 100644 --- a/kernel/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/kernel/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -219,8 +219,6 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; - WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); - sta_id = tx_cmd->sta_id; sec_ctl = tx_cmd->sec_ctl; @@ -239,6 +237,9 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, if (trans_pcie->bc_table_dword) len = DIV_ROUND_UP(len, 4); + if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) + return; + bc_ent = cpu_to_le16(len | (sta_id << 12)); scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; @@ -387,11 +388,18 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, /* first TB is never freed - it's the scratchbuf data */ - for (i = 1; i < num_tbs; i++) - dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i), - iwl_pcie_tfd_tb_get_len(tfd, i), - DMA_TO_DEVICE); - + for (i = 1; i < num_tbs; i++) { + if (meta->flags & BIT(i + CMD_TB_BITMAP_POS)) + dma_unmap_page(trans->dev, + iwl_pcie_tfd_tb_get_addr(tfd, i), + iwl_pcie_tfd_tb_get_len(tfd, i), + DMA_TO_DEVICE); + else + dma_unmap_single(trans->dev, + iwl_pcie_tfd_tb_get_addr(tfd, i), + iwl_pcie_tfd_tb_get_len(tfd, i), + DMA_TO_DEVICE); + } tfd->num_tbs = 0; } @@ -467,7 +475,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len); - return 0; + return num_tbs; } static int iwl_pcie_txq_alloc(struct iwl_trans *trans, @@ -915,6 +923,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) } } + iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); if (trans->cfg->base_params->num_of_queues > 20) iwl_set_bits_prph(trans, 
SCD_GP_CTRL, SCD_GP_CTRL_ENABLE_31_QUEUES); @@ -1049,8 +1058,6 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, !trans_pcie->cmd_hold_nic_awake) { __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) - udelay(2); ret = iwl_poll_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, @@ -1322,13 +1329,24 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, int idx; u16 copy_size, cmd_size, scratch_size; bool had_nocopy = false; + u8 group_id = iwl_cmd_groupid(cmd->id); int i, ret; u32 cmd_pos; const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; - copy_size = sizeof(out_cmd->hdr); - cmd_size = sizeof(out_cmd->hdr); + if (WARN(!trans_pcie->wide_cmd_header && + group_id > IWL_ALWAYS_LONG_GROUP, + "unsupported wide command %#x\n", cmd->id)) + return -EINVAL; + + if (group_id != 0) { + copy_size = sizeof(struct iwl_cmd_header_wide); + cmd_size = sizeof(struct iwl_cmd_header_wide); + } else { + copy_size = sizeof(struct iwl_cmd_header); + cmd_size = sizeof(struct iwl_cmd_header); + } /* need one for the header if the first is NOCOPY */ BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); @@ -1418,16 +1436,32 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, out_meta->source = cmd; /* set up the header */ - - out_cmd->hdr.cmd = cmd->id; - out_cmd->hdr.flags = 0; - out_cmd->hdr.sequence = - cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | - INDEX_TO_SEQ(q->write_ptr)); + if (group_id != 0) { + out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); + out_cmd->hdr_wide.group_id = group_id; + out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); + out_cmd->hdr_wide.length = + cpu_to_le16(cmd_size - + sizeof(struct iwl_cmd_header_wide)); + out_cmd->hdr_wide.reserved = 0; + out_cmd->hdr_wide.sequence = + cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | + INDEX_TO_SEQ(q->write_ptr)); + + cmd_pos = sizeof(struct iwl_cmd_header_wide); + copy_size = sizeof(struct iwl_cmd_header_wide); + } else { + out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); + out_cmd->hdr.sequence = + cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | + INDEX_TO_SEQ(q->write_ptr)); + out_cmd->hdr.group_id = 0; + + cmd_pos = sizeof(struct iwl_cmd_header); + copy_size = sizeof(struct iwl_cmd_header); + } /* and copy the data that needs to be copied */ - cmd_pos = offsetof(struct iwl_device_cmd, payload); - copy_size = sizeof(out_cmd->hdr); for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { int copy; @@ -1466,9 +1500,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, } IWL_DEBUG_HC(trans, - "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", + "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", get_cmd_string(trans_pcie, out_cmd->hdr.cmd), - out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), + group_id, out_cmd->hdr.cmd, + le16_to_cpu(out_cmd->hdr.sequence), cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); /* start the TFD with the scratchbuf */ @@ -1518,12 +1553,14 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false); } + BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS > + sizeof(out_meta->flags) * BITS_PER_BYTE); out_meta->flags = cmd->flags; if (WARN_ON_ONCE(txq->entries[idx].free_buf)) kzfree(txq->entries[idx].free_buf); txq->entries[idx].free_buf = dup_buf; - trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr); + 
trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); /* start timer if queue currently empty */ if (q->read_ptr == q->write_ptr && txq->wd_timeout) @@ -1554,15 +1591,13 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, /* * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them * @rxb: Rx buffer to reclaim - * @handler_status: return value of the handler of the command - * (put in setup_rx_handlers) * * If an Rx buffer has an async callback associated with it the callback * will be executed. The attached skb (if present) will only be freed * if the callback returns 1 */ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, - struct iwl_rx_cmd_buffer *rxb, int handler_status) + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); u16 sequence = le16_to_cpu(pkt->hdr.sequence); @@ -1601,7 +1636,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, meta->source->resp_pkt = pkt; meta->source->_rx_page_addr = (unsigned long)page_address(p); meta->source->_rx_page_order = trans_pcie->rx_page_order; - meta->source->handler_status = handler_status; } iwl_pcie_cmdq_reclaim(trans, txq_id, index); @@ -1764,7 +1798,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_hdr *hdr; struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; struct iwl_cmd_meta *out_meta; struct iwl_txq *txq; @@ -1773,9 +1807,10 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, void *tb1_addr; u16 len, tb1_len, tb2_len; bool wait_write_ptr; - __le16 fc = hdr->frame_control; - u8 hdr_len = ieee80211_hdrlen(fc); + __le16 fc; + u8 hdr_len; u16 wifi_seq; + int i; txq = &trans_pcie->txq[txq_id]; q = &txq->q; @@ -1784,6 +1819,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, "TX on unused queue %d\n", txq_id)) return -EINVAL; + if (skb_is_nonlinear(skb) && + skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS && + __skb_linearize(skb)) + return -ENOMEM; + + /* mac80211 always puts the full header into the SKB's head, + * so there's no need to check if it's readable there + */ + hdr = (struct ieee80211_hdr *)skb->data; + fc = hdr->frame_control; + hdr_len = ieee80211_hdrlen(fc); + spin_lock(&txq->lock); /* In AGG mode, the index in the ring must correspond to the WiFi @@ -1814,6 +1861,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, /* Set up first empty entry in queue's array of Tx/cmd buffers */ out_meta = &txq->entries[q->write_ptr].meta; + out_meta->flags = 0; /* * The second TB (tb1) points to the remainder of the TX command @@ -1847,9 +1895,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, /* * Set up TFD's third entry to point directly to remainder - * of skb, if any (802.11 null frames have no payload). 
+	 * of skb's head, if any
 	 */
-	tb2_len = skb->len - hdr_len;
+	tb2_len = skb_headlen(skb) - hdr_len;
 	if (tb2_len > 0) {
 		dma_addr_t tb2_phys = dma_map_single(trans->dev,
 						     skb->data + hdr_len,
@@ -1862,6 +1910,29 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
 	}
 
+	/* set up the remaining entries to point to the data */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		dma_addr_t tb_phys;
+		int tb_idx;
+
+		if (!skb_frag_size(frag))
+			continue;
+
+		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+					   skb_frag_size(frag), DMA_TO_DEVICE);
+
+		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+			iwl_pcie_tfd_unmap(trans, out_meta,
+					   &txq->tfds[q->write_ptr]);
+			goto out_err;
+		}
+		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
+						skb_frag_size(frag), false);
+
+		out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
+	}
+
 	/* Set up entry for this TFD in Tx byte-count array */
 	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
@@ -1871,14 +1942,25 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 			     &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
 			     skb->data + hdr_len, tb2_len);
 	trace_iwlwifi_dev_tx_data(trans->dev, skb,
-				  skb->data + hdr_len, tb2_len);
+				  hdr_len, skb->len - hdr_len);
 
 	wait_write_ptr = ieee80211_has_morefrags(fc);
 
 	/* start timer if queue currently empty */
 	if (q->read_ptr == q->write_ptr) {
-		if (txq->wd_timeout)
-			mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+		if (txq->wd_timeout) {
+			/*
+			 * If the TXQ is active, then set the timer, if not,
+			 * set the timer in remainder so that the timer will
+			 * be armed with the right value when the station will
+			 * wake up.
+			 */
+			if (!txq->frozen)
+				mod_timer(&txq->stuck_timer,
+					  jiffies + txq->wd_timeout);
+			else
+				txq->frozen_expiry_remainder = txq->wd_timeout;
+		}
 		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
 		iwl_trans_pcie_ref(trans);
 	}
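
Note on the iwl_trans_pci_dump_marbh_monitor() hunk in trans.c: when the firmware monitor sits behind the MARBH, the driver drains it one 32-bit word at a time through a control/data register pair while it holds NIC access. The stand-alone sketch below models only that read-out pattern; read_reg/write_reg, MON_RD_CTL and MON_RD_DATA are invented stand-ins for the real periphery accessors and the MON_DMARB_RD_* addresses.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the periphery register accessors and addresses. */
typedef uint32_t (*reg_read_fn)(uint32_t addr);
typedef void (*reg_write_fn)(uint32_t addr, uint32_t val);

#define MON_RD_CTL  0x1000u   /* hypothetical control register */
#define MON_RD_DATA 0x1004u   /* hypothetical data register */

/*
 * Drain a register-backed monitor buffer: enable the read-out, pull one
 * dword per data-register read, then disable it again.
 * Returns the number of bytes copied into buf.
 */
static size_t dump_marbh_like_monitor(reg_read_fn rd, reg_write_fn wr,
				      uint32_t *buf, size_t monitor_len)
{
	size_t dwords = monitor_len / 4;
	size_t i;

	wr(MON_RD_CTL, 0x1);              /* start the read-out */
	for (i = 0; i < dwords; i++)
		buf[i] = rd(MON_RD_DATA); /* each read returns the next word */
	wr(MON_RD_CTL, 0x0);              /* stop the read-out */

	return dwords * 4;
}

/* Tiny fake register file so the model runs stand-alone. */
static uint32_t next_word;
static uint32_t fake_read(uint32_t addr)  { return addr == MON_RD_DATA ? next_word++ : 0; }
static void     fake_write(uint32_t addr, uint32_t val) { (void)addr; (void)val; }

int main(void)
{
	uint32_t buf[8];
	size_t n = dump_marbh_like_monitor(fake_read, fake_write, buf, sizeof(buf));

	printf("copied %zu bytes, last word %u\n", n, (unsigned)buf[7]);
	return 0;
}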
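
Note on the RB dump sizing added to iwl_trans_pcie_dump_data(): when the firmware is in error, the dump now also carries receive buffers, and their count comes from the 12-bit closed_rb_num field minus the driver's read pointer, taken modulo the ring size; each RB then adds a section header plus one full receive page to the dump length. A minimal model of that arithmetic, assuming a 256-entry ring, 4 KiB pages and 8-byte headers (assumptions standing in for RX_QUEUE_MASK, rx_page_order and the dump structs):

#include <stdint.h>
#include <stdio.h>

#define RX_RING_SIZE   256u              /* assumed ring size */
#define RX_RING_MASK   (RX_RING_SIZE - 1)
#define RB_PAGE_BYTES  4096u             /* assumed PAGE_SIZE << rx_page_order */
#define SECTION_HDR    8u                /* assumed per-section header bytes */
#define RB_HDR         8u                /* assumed per-RB header bytes */

/* Number of buffers the device has filled but the driver has not read yet. */
static uint32_t count_filled_rbs(uint16_t closed_rb_num, uint32_t read_ptr)
{
	uint32_t num = closed_rb_num & 0x0FFF;	/* hardware field is 12 bits */

	return (num - read_ptr) & RX_RING_MASK;	/* wrap-safe distance */
}

/* Bytes those RBs add to the error-dump buffer. */
static uint32_t rb_dump_bytes(uint32_t num_rbs)
{
	return num_rbs * (SECTION_HDR + RB_HDR + RB_PAGE_BYTES);
}

int main(void)
{
	uint32_t n = count_filled_rbs(/* closed_rb_num */ 0x0103, /* read */ 250);

	/* (0x103 - 250) & 255 == (259 - 250) & 255 == 9 */
	printf("%u RBs, %u bytes\n", n, rb_dump_bytes(n));
	return 0;
}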
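
Note on the reworked iwl_trans_pcie_alloc() error paths: the transport is now allocated with iwl_trans_alloc() and released with iwl_trans_free() from every failure label, and the out_* labels tear resources down in reverse order of acquisition. The sketch below shows only that goto-unwind shape, with made-up resources (enable_device, map_bar, request_irq_stub); it is not the driver's probe code.

#include <stdio.h>
#include <stdlib.h>

struct transport {
	void *bar;	/* stands in for the ioremapped BAR, IRQ, ICT table, ... */
	int irq_set;
};

static int  enable_device(void)            { return 0; }   /* 0 on success */
static void disable_device(void)           { puts("disable device"); }
static int  map_bar(struct transport *t)   { t->bar = malloc(16); return t->bar ? 0 : -1; }
static void unmap_bar(struct transport *t) { free(t->bar); t->bar = NULL; puts("unmap bar"); }
static int  request_irq_stub(struct transport *t) { t->irq_set = 1; return 0; }

static struct transport *transport_alloc(void)
{
	struct transport *t = calloc(1, sizeof(*t));
	int ret;

	if (!t)
		return NULL;

	ret = enable_device();
	if (ret)
		goto out_free;

	ret = map_bar(t);
	if (ret)
		goto out_disable;

	ret = request_irq_stub(t);
	if (ret)
		goto out_unmap;

	return t;

out_unmap:
	unmap_bar(t);
out_disable:
	disable_device();
out_free:
	free(t);		/* mirrors iwl_trans_free() on the error path */
	return NULL;
}

int main(void)
{
	struct transport *t = transport_alloc();

	printf("alloc %s\n", t ? "succeeded" : "failed");
	if (t)
		free(t->bar);
	free(t);
	return 0;
}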
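
Note on the DMA mask negotiation kept in the probe path: a 36-bit streaming and coherent mask is tried first, the code falls back to 32 bits, and the probe aborts only when both widths fail. A stand-alone model of that fallback, where set_mask() is a stub for pci_set_dma_mask()/pci_set_consistent_dma_mask() and pretends the platform only accepts 32-bit addressing:

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Stub: pretend the platform only supports 32-bit DMA addressing. */
static int set_mask(uint64_t mask)
{
	return mask <= DMA_BIT_MASK(32) ? 0 : -1;	/* 0 means accepted */
}

static int negotiate_dma_mask(void)
{
	if (set_mask(DMA_BIT_MASK(36)) == 0)
		return 36;
	if (set_mask(DMA_BIT_MASK(32)) == 0)
		return 32;
	return -1;	/* both attempts failed: no suitable DMA */
}

int main(void)
{
	printf("using %d-bit DMA mask\n", negotiate_dma_mask());
	return 0;
}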
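
Note on the iwl_pcie_txq_update_byte_cnt_tbl() change: the sanity check now runs after the optional DIV_ROUND_UP(len, 4) and bails out instead of only warning, because the scheduler entry packs the length into the low 12 bits and the station id into the top 4 bits of a 16-bit word. A small model of that packing and the limit it protects (the sta_id check here is an extra illustration, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/*
 * Pack one scheduler byte-count entry: length in bits 0-11, station id in
 * bits 12-15. Returns -1 if the (possibly dword-scaled) length cannot fit.
 */
static int pack_bc_entry(uint32_t len, uint8_t sta_id, int bc_table_dword,
			 uint16_t *out)
{
	if (bc_table_dword)
		len = DIV_ROUND_UP(len, 4);	/* table counts dwords */

	if (len > 0xFFF || sta_id > 0xF)
		return -1;			/* would corrupt neighbouring bits */

	*out = (uint16_t)(len | ((uint16_t)sta_id << 12));
	return 0;
}

int main(void)
{
	uint16_t ent;

	if (pack_bc_entry(5000, 3, 1, &ent) == 0)	/* 5000 bytes -> 1250 dwords */
		printf("entry 0x%04x\n", ent);
	return 0;
}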
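
Note on the iwl_pcie_tfd_unmap() change: TBs that carry skb fragments are mapped with dma_map_page()/skb_frag_dma_map() while the head remainder still uses dma_map_single(), so a bitmap stored above CMD_TB_BITMAP_POS in the entry's meta flags records which unmap call each TB needs. The model below uses an assumed TB_BITMAP_POS of 2 and prints the call it would make instead of touching any DMA API:

#include <stdint.h>
#include <stdio.h>

#define MAX_TBS        20
#define TB_BITMAP_POS  2	/* assumed: low bits keep other flags */
#define BIT(n)         (1u << (n))

struct tb_meta {
	uint32_t flags;		/* bit (i + TB_BITMAP_POS) set => TB i is a page */
};

static void record_tb(struct tb_meta *m, int idx, int is_page)
{
	if (is_page)
		m->flags |= BIT(idx + TB_BITMAP_POS);
}

static void unmap_all(const struct tb_meta *m, int num_tbs)
{
	/* TB 0 is the scratch buffer and is never unmapped here. */
	for (int i = 1; i < num_tbs; i++) {
		if (m->flags & BIT(i + TB_BITMAP_POS))
			printf("TB %d: dma_unmap_page()\n", i);
		else
			printf("TB %d: dma_unmap_single()\n", i);
	}
}

int main(void)
{
	struct tb_meta m = { .flags = 0 };

	record_tb(&m, 2, 0);	/* head remainder: single mapping */
	record_tb(&m, 3, 1);	/* skb fragment:   page mapping   */
	unmap_all(&m, 4);
	return 0;
}

Recording the mapping kind at map time avoids guessing at unmap time, which matters because dma_unmap_page() and dma_unmap_single() must match how the address was created.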
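
Note on the wide host-command header support in iwl_pcie_enqueue_hcmd(): commands whose group id is non-zero get a wide header carrying opcode, group, version and a 16-bit length, and those three bytes are unpacked from the 32-bit command id the caller passes. The sketch assumes one common packing (opcode in the low byte, group next, version above that) and an illustrative wide_hdr layout; treat both as assumptions about the format, not the exact definitions in iwl-trans.h:

#include <stdint.h>
#include <stdio.h>

/* Assumed packing of the 32-bit command id. */
static inline uint32_t cmd_id(uint8_t opcode, uint8_t group, uint8_t version)
{
	return (uint32_t)opcode | ((uint32_t)group << 8) | ((uint32_t)version << 16);
}
static inline uint8_t cmd_opcode(uint32_t id)  { return id & 0xFF; }
static inline uint8_t cmd_groupid(uint32_t id) { return (id >> 8) & 0xFF; }
static inline uint8_t cmd_version(uint32_t id) { return (id >> 16) & 0xFF; }

/* Illustrative wide header: the fields the patch fills before the payload. */
struct wide_hdr {
	uint8_t  cmd;
	uint8_t  group_id;
	uint16_t sequence;	/* queue/index, little-endian on the device */
	uint16_t length;	/* bytes following this header */
	uint8_t  reserved;
	uint8_t  version;
};

int main(void)
{
	uint32_t id = cmd_id(0x1c, 0x02, 1);
	struct wide_hdr h = {
		.cmd      = cmd_opcode(id),
		.group_id = cmd_groupid(id),
		.version  = cmd_version(id),
	};

	/* Group 0 keeps the legacy short header; anything else goes wide. */
	printf("group %u -> %s header, opcode %#x\n", h.group_id,
	       h.group_id ? "wide" : "legacy", h.cmd);
	return 0;
}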
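
Note on the non-linear TX support in iwl_trans_pcie_tx(): the head of the frame (past the 802.11 header) keeps its own TB, every paged fragment gets a TB, zero-length fragments are skipped, and an skb with more fragments than the TFD can take (IWL_PCIE_MAX_FRAGS) is linearized first. The stand-alone model below only plans the TB budget; MAX_FRAG_TBS and fake_skb are invented for the example:

#include <stdio.h>

#define MAX_FRAG_TBS 17	/* assumed: TBs left after scratch, cmd and head TBs */

struct fake_skb {
	int headlen;		/* linear bytes, including the 802.11 header */
	int hdr_len;		/* 802.11 header bytes */
	int nr_frags;
	int frag_size[32];
};

/* Returns the number of data TBs needed, or -1 if the skb must be linearized. */
static int plan_tx_tbs(const struct fake_skb *skb)
{
	int tbs = 0;

	if (skb->nr_frags > MAX_FRAG_TBS)
		return -1;			/* caller would __skb_linearize() */

	if (skb->headlen - skb->hdr_len > 0)	/* remainder of the head */
		tbs++;

	for (int i = 0; i < skb->nr_frags; i++)
		if (skb->frag_size[i] > 0)	/* zero-length frags are skipped */
			tbs++;

	return tbs;
}

int main(void)
{
	struct fake_skb skb = {
		.headlen = 80, .hdr_len = 26,
		.nr_frags = 2, .frag_size = { 1200, 300 },
	};

	printf("data TBs needed: %d\n", plan_tx_tbs(&skb));
	return 0;
}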
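
Note on the watchdog change at the end of iwl_trans_pcie_tx(): the stuck-queue timer is armed only when the queue is not frozen; for a frozen queue the full timeout is parked in frozen_expiry_remainder so it can be applied once the queue thaws. A small model of that arm-or-park decision, using a fake millisecond clock in place of jiffies:

#include <stdint.h>
#include <stdio.h>

struct txq_model {
	int      frozen;
	uint64_t timer_expiry;		/* 0 = not armed */
	uint64_t frozen_remainder;	/* timeout to apply on unfreeze */
};

static void first_tx(struct txq_model *q, uint64_t now_ms, uint64_t wd_timeout_ms)
{
	if (!wd_timeout_ms)
		return;

	if (!q->frozen)
		q->timer_expiry = now_ms + wd_timeout_ms;	/* mod_timer() */
	else
		q->frozen_remainder = wd_timeout_ms;		/* arm later */
}

static void unfreeze(struct txq_model *q, uint64_t now_ms)
{
	q->frozen = 0;
	if (q->frozen_remainder) {
		q->timer_expiry = now_ms + q->frozen_remainder;
		q->frozen_remainder = 0;
	}
}

int main(void)
{
	struct txq_model q = { .frozen = 1 };

	first_tx(&q, 1000, 2500);	/* queue frozen: only the remainder is kept */
	unfreeze(&q, 4000);		/* timer armed now, for the full timeout */
	printf("expiry at %llu ms\n", (unsigned long long)q.timer_expiry);
	return 0;
}

Parking the remainder rather than arming a long timer keeps the watchdog from firing while the peer is legitimately asleep.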