 }
 EXPORT_SYMBOL(blk_queue_segment_boundary);
-/**
- * blk_queue_virt_boundary - set boundary rules for bio merging
- * @q: the request queue for the device
- * @mask: the memory boundary mask
- **/
-void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
-{
-	q->limits.virt_boundary_mask = mask;
-}
-EXPORT_SYMBOL(blk_queue_virt_boundary);
-
 /**
  * blk_queue_dma_alignment - set dma length and memory alignment
  * @q: the request queue for the device
 	}
 	if (ctrl->stripe_size)
 		blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
-	/*
-	 * Hybrid implementation of blk_queue_virt_boundary and
-	 * QUEUE_FLAG_SG_GAPS in UEK4 to pick the upstream patches based on
-	 * blk_queue_virt_boundary().
-	 */
-	blk_queue_virt_boundary(q, ctrl->page_size - 1);
 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
 		vwc = true;
 	blk_queue_write_cache(q, vwc, vwc);
 	 * allow extending the structure while preserving ABI.
 	 */
 	UEK_KABI_USE2(1, unsigned int max_dev_sectors, unsigned int unuse)
-	UEK_KABI_USE(2, unsigned long virt_boundary_mask)
+	UEK_KABI_RESERVED(2)
 };
 struct request_queue {
 #define BLKDEV_DISCARD_SECURE 0x01 /* secure discard */
-extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
 	return q->limits.seg_boundary_mask;
 }
-static inline unsigned long queue_virt_boundary(struct request_queue *q)
-{
-	return q->limits.virt_boundary_mask;
-}
-
 static inline unsigned int queue_max_sectors(struct request_queue *q)
 {
 	return q->limits.max_sectors;