        enum iwl_ucode_type cur_ucode;
 }; /*iwl_priv */
 
-extern struct kmem_cache *iwl_tx_cmd_pool;
-
 static inline struct iwl_rxon_context *
 iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
 {
 
 
 static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
 {
+       struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
        struct ieee80211_tx_info *info;
 
        info = IEEE80211_SKB_CB(skb);
-       kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1]));
+       iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
        dev_kfree_skb_any(skb);
 }
 
  * driver and module entry point
  *
  *****************************************************************************/
-
-struct kmem_cache *iwl_tx_cmd_pool;
-
 static int __init iwl_init(void)
 {
 
        pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
        pr_info(DRV_COPYRIGHT "\n");
 
-       iwl_tx_cmd_pool = kmem_cache_create("iwl_dev_cmd",
-                                           sizeof(struct iwl_device_cmd),
-                                           sizeof(void *), 0, NULL);
-       if (!iwl_tx_cmd_pool)
-               return -ENOMEM;
-
        ret = iwlagn_rate_control_register();
        if (ret) {
                pr_err("Unable to register rate control algorithm: %d\n", ret);
-               goto error_rc_register;
+               return ret;
        }
 
        ret = iwl_opmode_register("iwldvm", &iwl_dvm_ops);
        if (ret) {
                pr_err("Unable to register op_mode: %d\n", ret);
-               goto error_opmode_register;
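+               /* undo rate control registration before returning the error */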
+               iwlagn_rate_control_unregister();
        }
-       return ret;
 
-error_opmode_register:
-       iwlagn_rate_control_unregister();
-error_rc_register:
-       kmem_cache_destroy(iwl_tx_cmd_pool);
        return ret;
 }
 module_init(iwl_init);
 {
        iwl_opmode_deregister("iwldvm");
        iwlagn_rate_control_unregister();
-       kmem_cache_destroy(iwl_tx_cmd_pool);
 }
 module_exit(iwl_exit);
 
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct iwl_station_priv *sta_priv = NULL;
        struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       struct iwl_device_cmd *dev_cmd = NULL;
+       struct iwl_device_cmd *dev_cmd;
        struct iwl_tx_cmd *tx_cmd;
        __le16 fc;
        u8 hdr_len;
        if (info->flags & IEEE80211_TX_CTL_AMPDU)
                is_agg = true;
 
-       dev_cmd = kmem_cache_alloc(iwl_tx_cmd_pool, GFP_ATOMIC);
+       dev_cmd = iwl_trans_alloc_tx_cmd(priv->trans);
 
        if (unlikely(!dev_cmd))
                goto drop_unlock_priv;
 
 drop_unlock_sta:
        if (dev_cmd)
-               kmem_cache_free(iwl_tx_cmd_pool, dev_cmd);
+               iwl_trans_free_tx_cmd(priv->trans, dev_cmd);
        spin_unlock(&priv->sta_lock);
 drop_unlock_priv:
        return -1;
 
                        info = IEEE80211_SKB_CB(skb);
                        ctx = info->driver_data[0];
-                       kmem_cache_free(iwl_tx_cmd_pool,
-                                       (info->driver_data[1]));
+                       iwl_trans_free_tx_cmd(priv->trans,
+                                             info->driver_data[1]);
 
                        memset(&info->status, 0, sizeof(info->status));
 
                        WARN_ON_ONCE(1);
 
                info = IEEE80211_SKB_CB(skb);
-               kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1]));
+               iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
 
                if (freed == 1) {
                        /* this is the first skb we deliver in this batch */
 
  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
  * @pm_support: set to true in start_hw if link pm is supported
  * @wait_command_queue: the wait_queue for SYNC host commands
+ * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
+ *     The user should use iwl_trans_{alloc,free}_tx_cmd.
+ * @dev_cmd_headroom: room needed for the transport's private use before the
+ *     device_cmd for Tx - for internal use only.
+ *     The user should use iwl_trans_{alloc,free}_tx_cmd.
  */
 struct iwl_trans {
        const struct iwl_trans_ops *ops;
 
        wait_queue_head_t wait_command_queue;
 
+       /* The following fields are internal only */
+       struct kmem_cache *dev_cmd_pool;
+       size_t dev_cmd_headroom;
+
        /* pointer to trans specific struct */
        /*Ensure that this pointer will always be aligned to sizeof pointer */
        char trans_specific[0] __aligned(sizeof(void *));
        return trans->ops->send_cmd(trans, cmd);
 }
 
+/* pool entries reserve dev_cmd_headroom bytes of transport-private
+ * data in front of the iwl_device_cmd returned to the caller */
+static inline struct iwl_device_cmd *
+iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
+{
+       u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
+
+       if (unlikely(dev_cmd_ptr == NULL))
+               return NULL;
+
+       return (struct iwl_device_cmd *)
+                       (dev_cmd_ptr + trans->dev_cmd_headroom);
+}
+
+static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
+                                        struct iwl_device_cmd *dev_cmd)
+{
+       /* step back over the headroom to get the original pool pointer */
+       u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom;
+
+       kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr);
+}
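
Outside the transport, users of this API are expected to pair every successful iwl_trans_alloc_tx_cmd() with either a handoff to iwl_trans_tx() or an explicit iwl_trans_free_tx_cmd(), never touching dev_cmd_pool directly. A minimal sketch of that pattern (the queue number txq_id, the error codes and the command contents are illustrative only, not part of this patch):

	struct iwl_device_cmd *dev_cmd = iwl_trans_alloc_tx_cmd(trans);

	if (unlikely(!dev_cmd))
		return -ENOMEM;
	/* ... fill dev_cmd->hdr and dev_cmd->payload with the Tx command ... */
	if (iwl_trans_tx(trans, skb, dev_cmd, txq_id)) {
		/* the transport did not take ownership, release the entry */
		iwl_trans_free_tx_cmd(trans, dev_cmd);
		return -EIO;
	}
	return 0;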
+
 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
                               struct iwl_device_cmd *dev_cmd, int queue)
 {
 
        iounmap(trans_pcie->hw_base);
        pci_release_regions(trans_pcie->pci_dev);
        pci_disable_device(trans_pcie->pci_dev);
+       kmem_cache_destroy(trans->dev_cmd_pool);
 
        kfree(trans);
 }
 {
        struct iwl_trans_pcie *trans_pcie;
        struct iwl_trans *trans;
+       char cmd_pool_name[100];
        u16 pci_cmd;
        int err;
 
        init_waitqueue_head(&trans->wait_command_queue);
        spin_lock_init(&trans->reg_lock);
 
+       snprintf(cmd_pool_name, sizeof(cmd_pool_name), "iwl_cmd_pool:%s",
+                dev_name(trans->dev));
+
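+       /* PCIe keeps no transport-private headroom in front of the command */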
+       trans->dev_cmd_headroom = 0;
+       trans->dev_cmd_pool =
+               kmem_cache_create(cmd_pool_name,
+                                 sizeof(struct iwl_device_cmd)
+                                 + trans->dev_cmd_headroom,
+                                 sizeof(void *),
+                                 SLAB_HWCACHE_ALIGN,
+                                 NULL);
+
+       if (!trans->dev_cmd_pool)
+               goto out_pci_disable_msi;
+
        return trans;
 
+out_pci_disable_msi:
+       pci_disable_msi(pdev);
 out_pci_release_regions:
        pci_release_regions(pdev);
 out_pci_disable_device: