q->poll_nsec == -1 means doing classic poll, not hybrid poll.
We introduce a new flag BLK_MQ_POLL_CLASSIC to replace -1, which
makes the code easier to read.
Additionally, since val is an int obtained with kstrtoint(), val can be
a negative value other than -1, so return -EINVAL for that case.
Thanks to Damien Le Moal for some good suggestions.
Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Yufen Yu <yuyufen@huawei.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
        /*
         * Default to classic polling
         */
-       q->poll_nsec = -1;
+       q->poll_nsec = BLK_MQ_POLL_CLASSIC;
 
        blk_mq_init_cpu_queues(q, set->nr_hw_queues);
        blk_mq_add_queue_tag_set(set, q);
 {
        struct request *rq;
 
-       if (q->poll_nsec == -1)
+       if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
                return false;
 
        if (!blk_qc_t_is_internal(cookie))
 
 {
        int val;
 
-       if (q->poll_nsec == -1)
-               val = -1;
+       if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
+               val = BLK_MQ_POLL_CLASSIC;
        else
                val = q->poll_nsec / 1000;
 
        if (err < 0)
                return err;
 
-       if (val == -1)
-               q->poll_nsec = -1;
-       else
+       if (val == BLK_MQ_POLL_CLASSIC)
+               q->poll_nsec = BLK_MQ_POLL_CLASSIC;
+       else if (val >= 0)
                q->poll_nsec = val * 1000;
+       else
+               return -EINVAL;
 
        return count;
 }
 
 /* Must be consistent with blk_mq_poll_stats_bkt() */
 #define BLK_MQ_POLL_STATS_BKTS 16
 
+/* Doing classic polling */
+#define BLK_MQ_POLL_CLASSIC -1
+
 /*
  * Maximum number of blkcg policies allowed to be registered concurrently.
  * Defined here to simplify include dependency.