        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q = bdev_get_queue(bdev);
        int type = REQ_WRITE | REQ_DISCARD;
-       unsigned int max_discard_sectors;
-       unsigned int granularity, alignment, mask;
+       sector_t max_discard_sectors;
+       sector_t granularity, alignment;
        struct bio_batch bb;
        struct bio *bio;
        int ret = 0;
 
        /* Zero-sector (unknown) and one-sector granularities are the same.  */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
-       mask = granularity - 1;
-       alignment = (bdev_discard_alignment(bdev) >> 9) & mask;
+       alignment = bdev_discard_alignment(bdev) >> 9;
+       alignment = sector_div(alignment, granularity);
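sector_div() divides its sector_t argument in place and returns the remainder, so the assignment above stores the returned remainder back into alignment and discards the quotient, matching what the old `& mask` computed when the granularity was a power of two, but valid for any granularity. A toy illustration with made-up numbers (a granularity of 7 sectors and a reported device alignment of 10 sectors):

        sector_t a = 10;
        u32 rem = sector_div(a, 7);     /* a is now 1, rem is 3 */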
 
        /*
         * Ensure that max_discard_sectors is of the proper
         * granularity, so that requests stay aligned after a split.
         */
        max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-       max_discard_sectors = round_down(max_discard_sectors, granularity);
+       sector_div(max_discard_sectors, granularity);
+       max_discard_sectors *= granularity;
        if (unlikely(!max_discard_sectors)) {
                /* Avoid infinite loop below. Being cautious never hurts. */
                return -EOPNOTSUPP;
        }
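round_down() only works for power-of-two alignments, so the rounding is open-coded here as a divide followed by a multiply. A sketch with a hypothetical granularity of 15 sectors:

        sector_t max = 65528;
        sector_div(max, 15);    /* max == 4368 */
        max *= 15;              /* max == 65520, the largest multiple of 15 <= 65528 */

Keeping max_discard_sectors a multiple of the granularity means every full-sized chunk still ends on a granule boundary.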
 
        while (nr_sects) {
                unsigned int req_sects;
-               sector_t end_sect;
+               sector_t end_sect, tmp;
 
                bio = bio_alloc(gfp_mask, 1);
                if (!bio) {
                        ret = -ENOMEM;
                        break;
                }

                req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

                /*
                 * If splitting a request, and the next starting sector would be
                 * misaligned, stop the discard at the previous aligned sector.
                 */
                end_sect = sector + req_sects;
-               if (req_sects < nr_sects && (end_sect & mask) != alignment) {
-                       end_sect =
-                               round_down(end_sect - alignment, granularity)
-                               + alignment;
+               tmp = end_sect;
+               if (req_sects < nr_sects &&
+                   sector_div(tmp, granularity) != alignment) {
+                       end_sect = end_sect - alignment;
+                       sector_div(end_sect, granularity);
+                       end_sect = end_sect * granularity + alignment;
                        req_sects = end_sect - sector;
                }
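The rewritten split logic pulls end_sect back to the previous sector congruent to alignment modulo granularity, rather than masking. A worked example with hypothetical values granularity = 7, alignment = 3, sector = 12 and max_discard_sectors = 14 (with plenty of sectors remaining, so req_sects == 14):

        end_sect = 12 + 14;                     /* 26, and 26 % 7 == 5, not 3 */
        end_sect = (26 - 3) / 7 * 7 + 3;        /* 24, and 24 % 7 == 3 */
        req_sects = 24 - 12;                    /* issue 12 sectors */

The next iteration then starts at sector 24, which the device can discard without internal rounding. The hunk below applies the same power-of-2-free arithmetic to the stacking logic in blk_stack_limits().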
 
 
                if (t->discard_granularity != 0 &&
                    t->discard_alignment != alignment) {
                        top = t->discard_granularity + t->discard_alignment;
                        bottom = b->discard_granularity + alignment;
 
                        /* Verify that top and bottom intervals line up */
-                       if (max(top, bottom) & (min(top, bottom) - 1))
+                       if ((max(top, bottom) % min(top, bottom)) != 0)
                                t->discard_misaligned = 1;
                }
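The old interval check depended on min(top, bottom) being a power of two; for arbitrary granularities it could flag stacks that actually line up. For instance, with top = 12 and bottom = 6:

        /* old: 12 & (6 - 1) == 4  -> reported as misaligned */
        /* new: 12 % 6 == 0        -> correctly aligned      */

The modulo form is valid for any pair of intervals.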
 
                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                                      b->max_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
-               t->discard_alignment = lcm(t->discard_alignment, alignment) &
-                       (t->discard_granularity - 1);
+               t->discard_alignment = lcm(t->discard_alignment, alignment) %
+                       t->discard_granularity;
        }
 
        return ret;
 }
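The stacked alignment gets the same treatment: lcm() (the helper from <linux/lcm.h>) combines the two offsets, and a true modulo folds the result back into the stacked granularity. With hypothetical component alignments of 4 and 6 and a stacked granularity of 10, lcm(4, 6) == 12, so:

        t->discard_alignment = 12 % 10;         /* == 2 */

whereas the old `12 & (10 - 1)` would have produced 8, which is not a meaningful offset within a 10-unit granule.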
 
 
 static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
 {
-       unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
+       sector_t alignment;
 
-       if (!lim->max_discard_sectors)
+       /* Check first: sector_div() below would oops on a zero granularity. */
+       if (!lim->max_discard_sectors || !lim->discard_granularity)
                return 0;
 
-       return (lim->discard_granularity + lim->discard_alignment - alignment)
-               & (lim->discard_granularity - 1);
+       alignment = sector << 9;
+       alignment = sector_div(alignment, lim->discard_granularity);
+
+       alignment = lim->discard_granularity + lim->discard_alignment - alignment;
+       return sector_div(alignment, lim->discard_granularity);
 }
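queue_limit_discard_alignment() now reports, in bytes, how far past `sector` the next discard-aligned boundary lies, for any granularity. A worked example with hypothetical limits discard_granularity = 1536 (three 512-byte sectors) and discard_alignment = 512, so sectors 1, 4, 7, ... are aligned:

        /* sector = 8: (8 << 9) % 1536 == 1024                    */
        /* (1536 + 512 - 1024) % 1536 == 1024 bytes == 2 sectors, */
        /* i.e. the next aligned boundary is at sector 10         */

For an already aligned sector such as 7, the same arithmetic returns 0.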
 
 static inline int bdev_discard_alignment(struct block_device *bdev)