www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
scsi: ufs: mcq: Add supporting functions for MCQ abort
author: Bao D. Nguyen <quic_nguyenb@quicinc.com>
Mon, 29 May 2023 22:12:22 +0000 (15:12 -0700)
committer: Martin K. Petersen <martin.petersen@oracle.com>
Thu, 1 Jun 2023 00:17:08 +0000 (20:17 -0400)
Add supporting functions to handle UFS abort in MCQ mode.

Signed-off-by: Bao D. Nguyen <quic_nguyenb@quicinc.com>
Link: https://lore.kernel.org/r/d452c5ad62dc863cc067ec82daa0885ec98bd508.1685396241.git.quic_nguyenb@quicinc.com
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Stanley Chu <stanley.chu@mediatek.com>
Tested-by: Stanley Chu <stanley.chu@mediatek.com>
Reviewed-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/ufs/core/ufs-mcq.c
drivers/ufs/core/ufshcd-priv.h
drivers/ufs/core/ufshcd.c
include/ufs/ufshcd.h
include/ufs/ufshci.h

index 202ff71e1b582132f474c91379a63b158074a928..655f22087ea14ac58f4412bb9cf2f07876a48229 100644 (file)
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include "ufshcd-priv.h"
+#include <linux/delay.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
 
 #define MAX_QUEUE_SUP GENMASK(7, 0)
 #define UFS_MCQ_MIN_RW_QUEUES 2
@@ -27,6 +31,9 @@
 #define MCQ_ENTRY_SIZE_IN_DWORD        8
 #define CQE_UCD_BA GENMASK_ULL(63, 7)
 
+/* Max mcq register polling time in microseconds */
+#define MCQ_POLL_US 500000
+
 static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
 {
        return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
@@ -419,6 +426,7 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
                hwq->max_entries = hba->nutrs;
                spin_lock_init(&hwq->sq_lock);
                spin_lock_init(&hwq->cq_lock);
+               mutex_init(&hwq->sq_mutex);
        }
 
        /* The very first HW queue serves device commands */
@@ -429,3 +437,162 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
        host->host_tagset = 1;
        return 0;
 }
+
+/* Stop SQ fetching: write SQ_STOP to SQRTCy, then poll SQRTSy until STS=1 */
+static int ufshcd_mcq_sq_stop(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
+{
+       void __iomem *reg;
+       u32 id = hwq->id, val;
+       int err;
+
+       writel(SQ_STOP, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
+       reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
+       /* poll every 20us, give up after MCQ_POLL_US */
+       err = read_poll_timeout(readl, val, val & SQ_STS, 20,
+                               MCQ_POLL_US, false, reg);
+       if (err)
+               dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
+                       __func__, id, err);
+       return err;
+}
+
+/* Resume SQ fetching: write SQ_START to SQRTCy, then poll SQRTSy until STS=0 */
+static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
+{
+       void __iomem *reg;
+       u32 id = hwq->id, val;
+       int err;
+
+       writel(SQ_START, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
+       reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
+       /* poll every 20us, give up after MCQ_POLL_US */
+       err = read_poll_timeout(readl, val, !(val & SQ_STS), 20,
+                               MCQ_POLL_US, false, reg);
+       if (err)
+               dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
+                       __func__, id, err);
+       return err;
+}
+
+/**
+ * ufshcd_mcq_sq_cleanup - Clean up submission queue resources
+ * associated with the pending command.
+ * @hba: per adapter instance.
+ * @task_tag: The command's task tag.
+ *
+ * Return: 0 for success; error code otherwise.
+ */
+int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
+{
+       struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+       struct scsi_cmnd *cmd = lrbp->cmd;
+       struct ufs_hw_queue *hwq;
+       void __iomem *reg, *opr_sqd_base;
+       u32 nexus, id, val;
+       int err;
+
+       if (task_tag != hba->nutrs - UFSHCD_NUM_RESERVED) {
+               if (!cmd)
+                       return -EINVAL;
+               hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+       } else {
+               /* the reserved tag is used for device management commands */
+               hwq = hba->dev_cmd_queue;
+       }
+
+       id = hwq->id;
+
+       mutex_lock(&hwq->sq_mutex);
+
+       /* stop the SQ fetching before working on it */
+       err = ufshcd_mcq_sq_stop(hba, hwq);
+       if (err)
+               goto unlock;
+
+       /* SQCTI = EXT_IID, IID, LUN, Task Tag */
+       nexus = lrbp->lun << 8 | task_tag;
+       opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id);
+       writel(nexus, opr_sqd_base + REG_SQCTI);
+
+       /* SQRTCy.ICU = 1 */
+       writel(SQ_ICU, opr_sqd_base + REG_SQRTC);
+
+       /* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */
+       reg = opr_sqd_base + REG_SQRTS;
+       err = read_poll_timeout(readl, val, val & SQ_CUS, 20,
+                               MCQ_POLL_US, false, reg);
+       if (err)
+               dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%d RTC=%ld\n",
+                       __func__, id, task_tag, err,
+                       FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)));
+
+       /* restart the SQ regardless of the cleanup outcome */
+       if (ufshcd_mcq_sq_start(hba, hwq))
+               err = -ETIMEDOUT;
+
+unlock:
+       mutex_unlock(&hwq->sq_mutex);
+       return err;
+}
+
+/**
+ * ufshcd_mcq_nullify_sqe - Nullify the submission queue entry.
+ * Write the sqe's Command Type to 0xF. The host controller will not
+ * fetch any sqe with Command Type = 0xF.
+ *
+ * @utrd: UTP Transfer Request Descriptor to be nullified.
+ */
+static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
+{
+       u32 dword_0;
+
+       /* descriptor words are little-endian; convert, patch, convert back */
+       dword_0 = le32_to_cpu(utrd->header.dword_0);
+       dword_0 &= ~UPIU_COMMAND_TYPE_MASK;
+       dword_0 |= FIELD_PREP(UPIU_COMMAND_TYPE_MASK, 0xF);
+       utrd->header.dword_0 = cpu_to_le32(dword_0);
+}
+
+/**
+ * ufshcd_mcq_sqe_search - Search for the command in the submission queue
+ * If the command is in the submission queue and not issued to the device yet,
+ * nullify the sqe so the host controller will skip fetching the sqe.
+ *
+ * @hba: per adapter instance.
+ * @hwq: Hardware Queue to be searched.
+ * @task_tag: The command's task tag.
+ *
+ * Return: true if the SQE containing the command is present in the SQ
+ * (not fetched by the controller); returns false if the SQE is not in the SQ.
+ */
+static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
+                                 struct ufs_hw_queue *hwq, int task_tag)
+{
+       struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+       struct utp_transfer_req_desc *utrd;
+       __le64  cmd_desc_base_addr;
+       bool ret = false;
+       u64 addr, match;
+       u32 sq_head_slot;
+
+       mutex_lock(&hwq->sq_mutex);
+
+       /* stop fetching so the SQ contents are stable while we scan */
+       ufshcd_mcq_sq_stop(hba, hwq);
+       sq_head_slot = ufshcd_mcq_get_sq_head_slot(hwq);
+       if (sq_head_slot == hwq->sq_tail_slot)
+               goto out;
+
+       cmd_desc_base_addr = lrbp->utr_descriptor_ptr->command_desc_base_addr;
+       addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA;
+
+       while (sq_head_slot != hwq->sq_tail_slot) {
+               utrd = hwq->sqe_base_addr +
+                               sq_head_slot * sizeof(struct utp_transfer_req_desc);
+               match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA;
+               if (addr == match) {
+                       ufshcd_mcq_nullify_sqe(utrd);
+                       ret = true;
+                       goto out;
+               }
+               /*
+                * max_entries is not guaranteed to be a power of two, so a
+                * mask-based wrap ((slot + 1) & (max_entries - 1)) would be
+                * wrong; wrap around explicitly instead.
+                */
+               sq_head_slot++;
+               if (sq_head_slot == hwq->max_entries)
+                       sq_head_slot = 0;
+       }
+
+out:
+       ufshcd_mcq_sq_start(hba, hwq);
+       mutex_unlock(&hwq->sq_mutex);
+       return ret;
+}
index d53b93c21a0c6aa8e6394e356b6b35eb95a37600..40727e89200dbc87c8e9ee65f1aafdd9d98d0003 100644 (file)
@@ -78,6 +78,8 @@ struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
 unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
                                       struct ufs_hw_queue *hwq);
 
+int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag);
+
 #define UFSHCD_MCQ_IO_QUEUE_OFFSET     1
 #define SD_ASCII_STD true
 #define SD_RAW false
@@ -404,4 +406,12 @@ static inline struct cq_entry *ufshcd_mcq_cur_cqe(struct ufs_hw_queue *q)
 
        return cqe + q->cq_head_slot;
 }
+
+/* SQ head register holds a byte offset; convert it to a slot index */
+static inline u32 ufshcd_mcq_get_sq_head_slot(struct ufs_hw_queue *q)
+{
+       u32 val = readl(q->mcq_sq_head);
+
+       return val / sizeof(struct utp_transfer_req_desc);
+}
+
 #endif /* _UFSHCD_PRIV_H_ */
index a96c464bd1b7b98bb22358f90a1e70be9616e502..d0e1570f24113e971e6210b08a5ea0554a80b844 100644 (file)
@@ -173,7 +173,6 @@ EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
 enum {
        UFSHCD_MAX_CHANNEL      = 0,
        UFSHCD_MAX_ID           = 1,
-       UFSHCD_NUM_RESERVED     = 1,
        UFSHCD_CMD_PER_LUN      = 32 - UFSHCD_NUM_RESERVED,
        UFSHCD_CAN_QUEUE        = 32 - UFSHCD_NUM_RESERVED,
 };
index f7553293ba98bd0bd6f592216826f99f26ed076a..145710e9c2a5dba4f6bd3c7ca0ffe579affa5134 100644 (file)
@@ -1087,6 +1087,7 @@ struct ufs_hba {
  * @cq_tail_slot: current slot to which CQ tail pointer is pointing
  * @cq_head_slot: current slot to which CQ head pointer is pointing
  * @cq_lock: Synchronize between multiple polling instances
+ * @sq_mutex: prevent submission queue concurrent access
  */
 struct ufs_hw_queue {
        void __iomem *mcq_sq_head;
@@ -1105,6 +1106,8 @@ struct ufs_hw_queue {
        u32 cq_tail_slot;
        u32 cq_head_slot;
        spinlock_t cq_lock;
+       /* prevent concurrent access to submission queue */
+       struct mutex sq_mutex;
 };
 
 static inline bool is_mcq_enabled(struct ufs_hba *hba)
index 7c5a76b2c70a23640cb1135fbfcea873abe71b18..9d291ca7f31d6ea0d3c2c7f7a82213a48b8c5296 100644 (file)
@@ -99,6 +99,9 @@ enum {
 enum {
        REG_SQHP                = 0x0,
        REG_SQTP                = 0x4,
+       REG_SQRTC               = 0x8,
+       REG_SQCTI               = 0xC,
+       REG_SQRTS               = 0x10,
 };
 
 enum {
@@ -111,12 +114,26 @@ enum {
        REG_CQIE                = 0x4,
 };
 
+enum {
+       SQ_START                = 0x0,
+       SQ_STOP                 = 0x1,
+       SQ_ICU                  = 0x2,
+};
+
+enum {
+       SQ_STS                  = 0x1,
+       SQ_CUS                  = 0x2,
+};
+
+#define SQ_ICU_ERR_CODE_MASK           GENMASK(7, 4)
+#define UPIU_COMMAND_TYPE_MASK         GENMASK(31, 28)
 #define UFS_MASK(mask, offset)         ((mask) << (offset))
 
 /* UFS Version 08h */
 #define MINOR_VERSION_NUM_MASK         UFS_MASK(0xFFFF, 0)
 #define MAJOR_VERSION_NUM_MASK         UFS_MASK(0xFFFF, 16)
 
+#define UFSHCD_NUM_RESERVED    1
 /*
  * Controller UFSHCI version
  * - 2.x and newer use the following scheme: