#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
 
 #define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie)      \
-       (((1<<cfg(trans)->base_params->num_of_queues) - 1) &\
+       (((1<<trans->cfg->base_params->num_of_queues) - 1) &\
        (~(1<<(trans_pcie)->cmd_queue)))
 
 static int iwl_trans_rx_alloc(struct iwl_trans *trans)
        /* Tx queues */
        if (trans_pcie->txq) {
                for (txq_id = 0;
-                    txq_id < cfg(trans)->base_params->num_of_queues; txq_id++)
+                    txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
                        iwl_tx_queue_free(trans, txq_id);
        }
 
        int txq_id, slots_num;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-       u16 scd_bc_tbls_size = cfg(trans)->base_params->num_of_queues *
+       u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
                        sizeof(struct iwlagn_scd_bc_tbl);
 
        /*It is not allowed to alloc twice, so warn when this happens.
                goto error;
        }
 
-       trans_pcie->txq = kcalloc(cfg(trans)->base_params->num_of_queues,
+       trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
                                  sizeof(struct iwl_tx_queue), GFP_KERNEL);
        if (!trans_pcie->txq) {
                IWL_ERR(trans, "Not enough memory for txq\n");
        }
 
        /* Alloc and init all Tx queues, including the command queue (#4/#9) */
-       for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+       for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
             txq_id++) {
                slots_num = (txq_id == trans_pcie->cmd_queue) ?
                                        TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 
        /* Alloc and init all Tx queues, including the command queue (#4/#9) */
-       for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+       for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
             txq_id++) {
                slots_num = (txq_id == trans_pcie->cmd_queue) ?
                                        TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
        iwl_apm_config(trans);
 
        /* Configure analog phase-lock-loop before activating to D0A */
-       if (cfg(trans)->base_params->pll_cfg_val)
+       if (trans->cfg->base_params->pll_cfg_val)
                iwl_set_bit(trans, CSR_ANA_PLL_CFG,
-                           cfg(trans)->base_params->pll_cfg_val);
+                           trans->cfg->base_params->pll_cfg_val);
 
        /*
         * Set "initialization complete" bit to move adapter from
        if (iwl_tx_init(trans))
                return -ENOMEM;
 
-       if (cfg(trans)->base_params->shadow_reg_enable) {
+       if (trans->cfg->base_params->shadow_reg_enable) {
                /* enable shadow regs in HW */
                iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
                        0x800FFFFF);
                iwl_write_targ_mem(trans, a, 0);
        for (; a < trans_pcie->scd_base_addr +
               SCD_TRANS_TBL_OFFSET_QUEUE(
-                               cfg(trans)->base_params->num_of_queues);
+                               trans->cfg->base_params->num_of_queues);
               a += 4)
                iwl_write_targ_mem(trans, a, 0);
 
        iwl_write_prph(trans, SCD_AGGR_SEL, 0);
 
        /* initiate the queues */
-       for (i = 0; i < cfg(trans)->base_params->num_of_queues; i++) {
+       for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
                iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
                iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
                iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
        }
 
        iwl_write_prph(trans, SCD_INTERRUPT_MASK,
-                       IWL_MASK(0, cfg(trans)->base_params->num_of_queues));
+                       IWL_MASK(0, trans->cfg->base_params->num_of_queues));
 
        /* Activate all Tx DMA/FIFO channels */
        iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
        }
 
        /* Unmap DMA from host system and free skb's */
-       for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+       for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
             txq_id++)
                iwl_tx_queue_unmap(trans, txq_id);
 
        int ret = 0;
 
        /* waiting for all the tx frames complete might take a while */
-       for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) {
+       for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
                if (cnt == trans_pcie->cmd_queue)
                        continue;
                txq = &trans_pcie->txq[cnt];
        int ret;
        size_t bufsz;
 
-       bufsz = sizeof(char) * 64 * cfg(trans)->base_params->num_of_queues;
+       bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;
 
        if (!trans_pcie->txq) {
                IWL_ERR(trans, "txq not ready\n");
        if (!buf)
                return -ENOMEM;
 
-       for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) {
+       for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
                txq = &trans_pcie->txq[cnt];
                q = &txq->q;
                pos += scnprintf(buf + pos, bufsz - pos,
 
 struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
                                       struct pci_dev *pdev,
-                                      const struct pci_device_id *ent)
+                                      const struct pci_device_id *ent,
+                                      const struct iwl_cfg *cfg)
 {
        struct iwl_trans_pcie *trans_pcie;
        struct iwl_trans *trans;
 
        trans->ops = &trans_ops_pcie;
        trans->shrd = shrd;
+       trans->cfg = cfg;
        trans_pcie->trans = trans;
        spin_lock_init(&trans_pcie->irq_lock);
        init_waitqueue_head(&trans_pcie->ucode_write_waitq);
 
  * @ops - pointer to iwl_trans_ops
  * @op_mode - pointer to the op_mode
  * @shrd - pointer to iwl_shared which holds shared data from the upper layer
+ * @cfg - pointer to the device-specific configuration (const struct iwl_cfg)
  * @reg_lock - protect hw register access
  * @dev - pointer to struct device * that represents the device
  * @hw_id: a u32 with the ID of the device / subdevice.
        const struct iwl_trans_ops *ops;
        struct iwl_op_mode *op_mode;
        struct iwl_shared *shrd;
+       const struct iwl_cfg *cfg;
        enum iwl_trans_state state;
        spinlock_t reg_lock;
 
 extern const struct iwl_trans_ops trans_ops_pcie;
 struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
                                       struct pci_dev *pdev,
-                                      const struct pci_device_id *ent);
+                                      const struct pci_device_id *ent,
+                                      const struct iwl_cfg *cfg);
 int __must_check iwl_pci_register_driver(void);
 void iwl_pci_unregister_driver(void);
 
 extern const struct iwl_trans_ops trans_ops_idi;
 struct iwl_trans *iwl_trans_idi_alloc(struct iwl_shared *shrd,
                                      void *pdev_void,
-                                     const void *ent_void);
+                                     const void *ent_void,
+                                     const struct iwl_cfg *cfg);
 #endif /* __iwl_trans_h__ */