MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
                  "(0/1, default 0)");
 
+static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
+
 int mlx4_check_port_params(struct mlx4_dev *dev,
                           enum mlx4_port_type *port_type)
 {
        dev->caps.max_cqes           = dev_cap->max_cq_sz - 1;
        dev->caps.reserved_cqs       = dev_cap->reserved_cqs;
        dev->caps.reserved_eqs       = dev_cap->reserved_eqs;
+       dev->caps.mtts_per_seg       = 1 << log_mtts_per_seg;
        dev->caps.reserved_mtts      = DIV_ROUND_UP(dev_cap->reserved_mtts,
-                                                   MLX4_MTT_ENTRY_PER_SEG);
+                                                   dev->caps.mtts_per_seg);
        dev->caps.reserved_mrws      = dev_cap->reserved_mrws;
        dev->caps.reserved_uars      = dev_cap->reserved_uars;
        dev->caps.reserved_pds       = dev_cap->reserved_pds;
-       dev->caps.mtt_entry_sz       = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+       dev->caps.mtt_entry_sz       = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
        dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
        dev->caps.page_size_cap      = ~(u32) (dev_cap->min_page_sz - 1);
        dev->caps.flags              = dev_cap->flags;
                return -1;
        }
 
+       if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
+               printk(KERN_WARNING "mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
+               return -1;
+       }
+
        return 0;
 }
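
For reference, a minimal standalone sketch of the mapping this parameter introduces: the bounds mirror the check above, so a segment holds between 2 and 32 MTT entries, and the default (ilog2(MLX4_MTT_ENTRY_PER_SEG)) keeps the previous fixed segment size. The helper name below is illustrative, not part of the driver.

/* Illustrative sketch only -- not driver code. */
#include <stdio.h>

static int mtts_per_seg_from_param(int log_mtts_per_seg)
{
	/* Same bounds as the check in the module-init verification above. */
	if (log_mtts_per_seg < 1 || log_mtts_per_seg > 5)
		return -1;
	return 1 << log_mtts_per_seg;	/* becomes dev->caps.mtts_per_seg */
}

int main(void)
{
	int log;

	for (log = 0; log <= 6; log++)
		printf("log_mtts_per_seg=%d -> mtts_per_seg=%d\n",
		       log, mtts_per_seg_from_param(log));
	return 0;
}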
 
 
        } else
                mtt->page_shift = page_shift;
 
-       for (mtt->order = 0, i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1)
+       for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
                ++mtt->order;
 
        mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
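
A hedged standalone sketch of the rounding the loop above performs: mtt->order ends up as the log2 of the number of segments needed to cover npages. The function name here is made up for illustration.

/* Illustrative sketch only -- mirrors the mtt->order loop above. */
#include <stdio.h>

static int mtt_order_for(int npages, int mtts_per_seg)
{
	int order = 0;
	int i;

	/* Double the covered MTT entries until they reach npages. */
	for (i = mtts_per_seg; i < npages; i <<= 1)
		++order;
	return order;	/* allocation spans (mtts_per_seg << order) entries */
}

int main(void)
{
	/* e.g. 100 pages with 8 MTTs per segment -> order 4 (16 segments, 128 entries) */
	printf("order = %d\n", mtt_order_for(100, 8));
	return 0;
}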
                mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
                                                   MLX4_MPT_PD_FLAG_RAE);
                mpt_entry->mtt_sz    = cpu_to_be32((1 << mr->mtt.order) *
-                                                  MLX4_MTT_ENTRY_PER_SEG);
+                                                  dev->caps.mtts_per_seg);
        } else {
                mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
        }
            (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
                return -EINVAL;
 
-       if (start_index & (MLX4_MTT_ENTRY_PER_SEG - 1))
+       if (start_index & (dev->caps.mtts_per_seg - 1))
                return -EINVAL;
 
        mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
 
        profile[MLX4_RES_EQ].size     = dev_cap->eqc_entry_sz;
        profile[MLX4_RES_DMPT].size   = dev_cap->dmpt_entry_sz;
        profile[MLX4_RES_CMPT].size   = dev_cap->cmpt_entry_sz;
-       profile[MLX4_RES_MTT].size    = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+       profile[MLX4_RES_MTT].size    = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
        profile[MLX4_RES_MCG].size    = MLX4_MGM_ENTRY_SIZE;
 
        profile[MLX4_RES_QP].num      = request->num_qp;
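
For the ICM sizing above, the bytes reserved per MTT segment scale directly with the new parameter; a small hedged example, assuming the usual 8-byte MTT entry reported in dev_cap->mtt_entry_sz. Larger segments let the same number of reserved segments map more memory, at the cost of coarser granularity for small registrations.

/* Illustrative sketch only -- how profile[MLX4_RES_MTT].size scales. */
#include <stdio.h>

int main(void)
{
	int mtt_entry_sz = 8;	/* assumed hardware MTT entry size in bytes */
	int log;

	for (log = 1; log <= 5; log++)
		printf("log_mtts_per_seg=%d -> %d bytes of MTT table per segment\n",
		       log, (1 << log) * mtt_entry_sz);
	return 0;
}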