/* This file includes the declarations that are internal to the
 * trans_pcie layer */
/**
 * struct iwl_rx_mem_buffer - driver-side RX buffer descriptor
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @list: list entry for the membuffer
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @offset: indicates which offset of the page (in bytes)
 *	this buffer uses (if multiple RBs fit into one page)
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	struct list_head list;
	u32 offset;
	u16 vid;
	bool invalid;
};
/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 * @addr: ptr to free buffer start address
 */
struct iwl_rx_transfer_desc {
	__le16 rbid;
	__le16 reserved[3];
	__le64 addr;
} __packed;
#define IWL_RX_CD_FLAGS_FRAGMENTED	BIT(0)

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	__le32 reserved1;
	__le16 rbid;
	u8 flags;
	u8 reserved2[25];
} __packed;
/**
 * struct iwl_rx_completion_desc_bz - Bz completion descriptor
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved: reserved
 */
struct iwl_rx_completion_desc_bz {
	__le16 rbid;
	u8 flags;
	u8 reserved[1];
} __packed;
/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer
 *	descriptors (rbd)
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @write_actual: actual write pointer written to device, since we update in
 *	blocks of 8 only
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handled to allocator to use for allocation
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: per-queue lock
 * @queue: actual rx queue. Not used for multi-rx queue.
 * @next_rb_is_fragment: indicates that the previous RB that we handled set
 *	the fragmented flag, so the next one is still another fragment
 * @napi: NAPI struct for this queue
 * @queue_size: size of this queue
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	void *used_bd;
	dma_addr_t used_bd_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update, next_rb_is_fragment;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};
/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator had not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handled to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};
/** * iwl_get_closed_rb_stts - get closed rb stts from different structs * @trans: transport pointer (for configuration) * @rxq: the rxq to get the rb stts from * Return: last closed RB index
*/ staticinline u16 iwl_get_closed_rb_stts(struct iwl_trans *trans, struct iwl_rxq *rxq)
{ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
__le16 *rb_stts = rxq->rb_stts;
#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 * debugfs file
 *
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
 *	set the file can no longer be used.
 */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif
/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX = BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
};
/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF = 0,
	IWL_IMAGE_RESP_SUCCESS = 1,
	IWL_IMAGE_RESP_FAIL = 2,
};
#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * struct cont_rec: continuous recording data structure
 * @prev_wr_ptr: the last address that was read in monitor_data
 *	debugfs file
 * @prev_wrap_cnt: the wrap count that was used during the last read in
 *	monitor_data debugfs file
 * @state: the state of monitor_data debugfs file as described
 *	in &iwl_fw_mon_dbgfs_state enum
 * @mutex: locked while reading from monitor_data debugfs file
 */
struct cont_rec {
	u32 prev_wr_ptr;
	u32 prev_wrap_cnt;
	u8 state;
	/* Used to sync monitor_data debugfs file with driver unload flow */
	struct mutex mutex;
};
#endif
/**
 * enum iwl_pcie_imr_status - imr dma transfer state
 * @IMR_D2S_IDLE: default value of the dma transfer
 * @IMR_D2S_REQUESTED: dma transfer requested
 * @IMR_D2S_COMPLETED: dma transfer completed
 * @IMR_D2S_ERROR: dma transfer error
 */
enum iwl_pcie_imr_status {
	IMR_D2S_IDLE,
	IMR_D2S_REQUESTED,
	IMR_D2S_COMPLETED,
	IMR_D2S_ERROR,
};
/** * struct iwl_pcie_txqs - TX queues data * * @queue_used: bit mask of used queues * @queue_stopped: bit mask of stopped queues * @txq: array of TXQ data structures representing the TXQs * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler * @bc_pool: bytecount DMA allocations pool * @bc_tbl_size: bytecount table size * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO * (and similar usage) * @tfd: TFD data * @tfd.max_tbs: max number of buffers per TFD * @tfd.size: TFD size * @tfd.addr_size: TFD/TB address size
*/ struct iwl_pcie_txqs { unsignedlong queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; unsignedlong queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES]; struct dma_pool *bc_pool;
size_t bc_tbl_size; struct iwl_tso_hdr_page __percpu *tso_hdr_page;
/** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues * @global_table: table mapping received VID from hw to rxb * @rba: allocator for RX replenishing * @ctxt_info: context information for FW self init * @ctxt_info_v2: context information for v1 devices * @prph_info: prph info for self init * @prph_scratch: prph scratch for self init * @ctxt_info_dma_addr: dma addr of context information * @prph_info_dma_addr: dma addr of prph info * @prph_scratch_dma_addr: dma addr of prph scratch * @ctxt_info_dma_addr: dma addr of context information * @iml: image loader image virtual address * @iml_len: image loader image size * @iml_dma_addr: image loader image DMA address * @trans: pointer to the generic transport area * @scd_base_addr: scheduler sram base address in SRAM * @kw: keep warm address * @pnvm_data: holds info about pnvm payloads allocated in DRAM * @reduced_tables_data: holds info about power reduced tablse * payloads allocated in DRAM * @pci_dev: basic pci-network driver stuff * @hw_base: pci hardware address support * @ucode_write_complete: indicates that the ucode has been copied. * @ucode_write_waitq: wait queue for uCode load * @rx_page_order: page order for receive buffer size * @rx_buf_bytes: RX buffer (RB) size in bytes * @reg_lock: protect hw register access * @mutex: to protect stop_device / start_fw / start_hw * @fw_mon_data: fw continuous recording data * @cmd_hold_nic_awake: indicates NIC is held awake for APMG workaround * during commands in flight * @msix_entries: array of MSI-X entries * @msix_enabled: true if managed to enable MSI-X * @shared_vec_mask: the type of causes the shared vector handles * (see iwl_shared_irq_flags). 
* @alloc_vecs: the number of interrupt vectors allocated by the OS * @def_irq: default irq for non rx causes * @fh_init_mask: initial unmasked fh causes * @hw_init_mask: initial unmasked hw causes * @fh_mask: current unmasked fh causes * @hw_mask: current unmasked hw causes * @in_rescan: true if we have triggered a device rescan * @base_rb_stts: base virtual address of receive buffer status for all queues * @base_rb_stts_dma: base physical address of receive buffer status * @supported_dma_mask: DMA mask to validate the actual address against, * will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device * @alloc_page_lock: spinlock for the page allocator * @alloc_page: allocated page to still use parts of * @alloc_page_used: how much of the allocated page was already used (bytes) * @imr_status: imr dma state machine * @imr_waitq: imr wait queue for dma completion * @rf_name: name/version of the CRF, if any * @use_ict: whether or not ICT (interrupt table) is used * @ict_index: current ICT read index * @ict_tbl: ICT table pointer * @ict_tbl_dma: ICT table DMA address * @inta_mask: interrupt (INT-A) mask * @irq_lock: lock to synchronize IRQ handling * @txq_memory: TXQ allocation array * @sx_waitq: waitqueue for Sx transitions * @sx_state: state tracking Sx transitions * @opmode_down: indicates opmode went away * @num_rx_bufs: number of RX buffers to allocate/use * @affinity_mask: IRQ affinity mask for each RX queue * @debug_rfkill: RF-kill debugging state, -1 for unset, 0/1 for radio * enable/disable * @fw_reset_state: state of FW reset handshake * @fw_reset_waitq: waitqueue for FW reset handshake * @is_down: indicates the NIC is down * @isr_stats: interrupt statistics * @napi_dev: (fake) netdev for NAPI registration * @txqs: transport tx queues data. 
* @me_present: WiAMT/CSME is detected as present (1), not present (0) * or unknown (-1, so can still use it as a boolean safely) * @me_recheck_wk: worker to recheck WiAMT/CSME presence * @invalid_tx_cmd: invalid TX command buffer * @wait_command_queue: wait queue for sync commands
*/ struct iwl_trans_pcie { struct iwl_rxq *rxq; struct iwl_rx_mem_buffer *rx_pool; struct iwl_rx_mem_buffer **global_table; struct iwl_rb_allocator rba; union { struct iwl_context_info *ctxt_info; struct iwl_context_info_v2 *ctxt_info_v2;
}; struct iwl_prph_info *prph_info; struct iwl_prph_scratch *prph_scratch; void *iml;
size_t iml_len;
dma_addr_t ctxt_info_dma_addr;
dma_addr_t prph_info_dma_addr;
dma_addr_t prph_scratch_dma_addr;
dma_addr_t iml_dma_addr; struct iwl_trans *trans;
/* Re-enable (unmask) the auto-masked MSI-X interrupt for @queue */
static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, int queue)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C) register, meaning that it's being clear
	 * by writing 1 to the bit.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(queue));
}
/*****************************************************
 * TX / HCMD
 ******************************************************/

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) ((trans_pcie)->txqs.tfd.max_tbs - 3)
/*
 * Note that we put this struct *last* in the page. By doing that, we ensure
 * that no TB referencing this page can trigger the 32-bit boundary hardware
 * bug.
 */
struct iwl_tso_page_info {
	/* DMA address (bus address) associated with this page */
	dma_addr_t dma_addr;
	/* next page in the chain - NOTE(review): presumably links TSO header
	 * pages together; confirm against the allocator using this struct
	 */
	struct page *next;
	/* reference count tracking users of this page */
	refcount_t use_count;
};
/* * We need this inline in case dma_addr_t is only 32-bits - since the * hardware is always 64-bit, the issue can still occur in that case, * so use u64 for 'phys' here to force the addition in 64-bit.
*/ staticinlinebool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
{ return upper_32_bits(phys) != upper_32_bits(phys + len);
}
/* Return the number of slots currently available in TX queue @q */
int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);
/**
 * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
 * @trans: the transport (for configuration data)
 * @index: current index
 * Return: the queue index incremented, subject to wrapping
 */
static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
{
	/* max_tfd_queue_size is a power of two, so masking wraps the index */
	return ++index &
		(trans->mac_cfg->base->max_tfd_queue_size - 1);
}
/**
 * iwl_txq_dec_wrap - decrement queue index, wrap back to end
 * @trans: the transport (for configuration data)
 * @index: current index
 * Return: the queue index decremented, subject to wrapping
 */
static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
{
	/* max_tfd_queue_size is a power of two, so masking wraps the index */
	return --index &
		(trans->mac_cfg->base->max_tfd_queue_size - 1);
}
/* Dynamically allocate a TX queue (gen2/TVQM); returns queue id or -errno */
int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
		      u32 sta_mask, u8 tid, int size, unsigned int timeout);
/* Transmit an skb with its device command on the given gen2 TX queue */
int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_cmd, int txq_id);
/* Free a dynamically allocated TX queue */
void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
/* Free all gen2 TX queue resources */
void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
/* Initialize an already-allocated TX queue */
int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		 int slots_num, bool cmd_queue);
/* Allocate and initialize a gen2 TX queue */
int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size);
if (!trans_pcie->msix_enabled) { /* * When we'll receive the ALIVE interrupt, the ISR will call * iwl_enable_fw_load_int_ctx_info again to set the ALIVE * interrupt (which is not really needed anymore) but also the * RX interrupt which will allow us to receive the ALIVE * notification (which is Rx) and continue the flow.
*/ if (top_reset)
trans_pcie->inta_mask = CSR_INT_BIT_RESET_DONE; else
trans_pcie->inta_mask = CSR_INT_BIT_ALIVE |
CSR_INT_BIT_FH_RX;
iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
} else {
u32 val = top_reset ? MSIX_HW_INT_CAUSES_REG_RESET_DONE
: MSIX_HW_INT_CAUSES_REG_ALIVE;
iwl_enable_hw_int_msk_msix(trans, val);
if (top_reset) return; /* * Leave all the FH causes enabled to get the ALIVE * notification.
*/
iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
}
}
staticinlineconstchar *queue_name(struct device *dev, struct iwl_trans_pcie *trans_p, int i)
{ if (trans_p->shared_vec_mask) { int vec = trans_p->shared_vec_mask &
IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
if (i == 0) return DRV_NAME ":shared_IRQ";
return devm_kasprintf(dev, GFP_KERNEL,
DRV_NAME ":queue_%d", i + vec);
} if (i == 0) return DRV_NAME ":default_queue";
if (i == trans_p->alloc_vecs - 1) return DRV_NAME ":exception";
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000) { /* * On 9000-series devices this bit isn't enabled by default, so * when we power down the device we need set the bit to allow it * to wake up the PCI-E bus for RF-kill interrupts.
*/
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.