Add a blank line after local variable declarations; upstream checkpatch now requires this.
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
        if (dd->userbase) {
                /* If user regs mapped, they are after send, so set limit. */
                u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
+
                if (!dd->piovl15base)
                        snd_lim = dd->uregbase;
                krb32 = (u32 __iomem *)dd->userbase;
        snd_bottom = dd->pio2k_bufbase;
        if (snd_lim == 0) {
                u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
+
                snd_lim = snd_bottom + tot2k;
        }
        /* If 4k buffers exist, account for them by bumping
        /* not very efficient, but it works for now */
        while (reg_addr < reg_end) {
                u64 data;
+
                if (copy_from_user(&data, uaddr, sizeof(data))) {
                        ret = -EFAULT;
                        goto bail;
                op = diag_get_observer(dd, *off);
                if (op) {
                        u32 offset = *off;
+
                        ret = op->hook(dd, op, offset, &data64, 0, use_32);
                }
                /*
                if (count == 4 || count == 8) {
                        u64 data64;
                        u32 offset = *off;
+
                        ret = copy_from_user(&data64, data, count);
                        if (ret) {
                                ret = -EFAULT;
 
                qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
                if (qp_num != QIB_MULTICAST_QPN) {
                        int ruc_res;
+
                        qp = qib_lookup_qpn(ibp, qp_num);
                        if (!qp)
                                goto drop;
        rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
        if (dd->flags & QIB_NODMA_RTAIL) {
                u32 seq = qib_hdrget_seq(rhf_addr);
+
                if (seq != rcd->seq_cnt)
                        goto bail;
                hdrqtail = 0;
 int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
 {
        struct qib_devdata *dd = ppd->dd;
+
        ppd->lid = lid;
        ppd->lmc = lmc;
 
 
 
        if (t && dd0->nguid > 1 && t <= dd0->nguid) {
                u8 oguid;
+
                dd->base_guid = dd0->base_guid;
                bguid = (u8 *) &dd->base_guid;
 
 
         */
        if (weight >= qib_cpulist_count) {
                int cpu;
+
                cpu = find_first_zero_bit(qib_cpulist,
                                          qib_cpulist_count);
                if (cpu == qib_cpulist_count)
        }
        if (!ppd) {
                u32 pidx = ctxt % dd->num_pports;
+
                if (usable(dd->pport + pidx))
                        ppd = dd->pport + pidx;
                else {
 
        if (alg == QIB_PORT_ALG_ACROSS) {
                unsigned inuse = ~0U;
+
                /* find device (with ACTIVE ports) with fewest ctxts in use */
                for (ndev = 0; ndev < devmax; ndev++) {
                        struct qib_devdata *dd = qib_lookup(ndev);
                        unsigned cused = 0, cfree = 0, pusable = 0;
+
                        if (!dd)
                                continue;
                        if (port && port <= dd->num_pports &&
        } else {
                for (ndev = 0; ndev < devmax; ndev++) {
                        struct qib_devdata *dd = qib_lookup(ndev);
+
                        if (dd) {
                                ret = choose_port_ctxt(fp, dd, port, uinfo);
                                if (!ret)
        }
        for (ndev = 0; ndev < devmax; ndev++) {
                struct qib_devdata *dd = qib_lookup(ndev);
+
                if (dd) {
                        if (pcibus_to_node(dd->pcidev->bus) < 0) {
                                ret = -EINVAL;
 
                        const char *dev_name, void *data)
 {
        struct dentry *ret;
+
        ret = mount_single(fs_type, flags, data, qibfs_fill_super);
        if (!IS_ERR(ret))
                qib_super = ret->d_sb;
 
                                  enum qib_ureg regno, u64 value, int ctxt)
 {
        u64 __iomem *ubase;
+
        if (dd->userbase)
                ubase = (u64 __iomem *)
                        ((char __iomem *) dd->userbase +
                }
                if (crcs) {
                        u32 cntr = dd->cspec->lli_counter;
+
                        cntr += crcs;
                        if (cntr) {
                                if (cntr > dd->cspec->lli_thresh) {
                        "irq is 0, BIOS error?  Interrupts won't work\n");
        else {
                int ret;
+
                ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
                                  QIB_DRV_NAME, dd);
                if (ret)
 static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
 {
        int ret = 0;
+
        if (!strncmp(what, "ibc", 3)) {
                ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
                qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
 static void set_6120_baseaddrs(struct qib_devdata *dd)
 {
        u32 cregbase;
+
        cregbase = qib_read_kreg32(dd, kr_counterregbase);
        dd->cspec->cregbase = (u64 __iomem *)
                ((char __iomem *) dd->kregbase + cregbase);
 
 static void reenable_7220_chase(unsigned long opaque)
 {
        struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+
        ppd->cpspec->chase_timer.expires = 0;
        qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
                QLOGIC_IB_IBCC_LINKINITCMD_POLL);
 
                                  enum qib_ureg regno, u64 value, int ctxt)
 {
        u64 __iomem *ubase;
+
        if (dd->userbase)
                ubase = (u64 __iomem *)
                        ((char __iomem *) dd->userbase +
                if (dd->cspec->num_msix_entries) {
                        /* and same for MSIx */
                        u64 val = qib_read_kreg64(dd, kr_intgranted);
+
                        if (val)
                                qib_write_kreg(dd, kr_intgranted, val);
                }
                int err;
                unsigned long flags;
                struct qib_pportdata *ppd = dd->pport;
+
                for (; pidx < dd->num_pports; ++pidx, ppd++) {
                        err = 0;
                        if (pidx == 0 && (hwerrs &
 
        if (n->rcv) {
                struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
+
                qib_update_rhdrq_dca(rcd, cpu);
        } else {
                struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
+
                qib_update_sdma_dca(ppd, cpu);
        }
 }
 
        if (n->rcv) {
                struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
+
                dd = rcd->dd;
        } else {
                struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
+
                dd = ppd->dd;
        }
        qib_devinfo(dd->pcidev,
                struct qib_pportdata *ppd;
                struct qib_qsfp_data *qd;
                u32 mask;
+
                if (!dd->pport[pidx].link_speed_supported)
                        continue;
                mask = QSFP_GPIO_MOD_PRS_N;
                mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
                if (gpiostatus & dd->cspec->gpio_mask & mask) {
                        u64 pins;
+
                        qd = &ppd->cpspec->qsfp_data;
                        gpiostatus &= ~mask;
                        pins = qib_read_kreg64(dd, kr_extstatus);
         */
        for (i = 0; i < msix_entries; i++) {
                u64 vecaddr, vecdata;
+
                vecaddr = qib_read_kreg64(dd, 2 * i +
                                  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
                vecdata = qib_read_kreg64(dd, 1 + 2 * i +
 static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
 {
        u64 newctrlb;
+
        newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
                                    IBA7322_IBC_IBTA_1_2_MASK |
                                    IBA7322_IBC_MAX_SPEED_MASK);
 static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
 {
        u32 cregbase;
+
        cregbase = qib_read_kreg32(dd, kr_counterregbase);
 
        dd->cspec->cregbase = (u64 __iomem *)(cregbase +
        struct qib_devdata *dd;
        unsigned long val;
        char *n;
+
        if (strlen(str) >= MAX_ATTEN_LEN) {
                pr_info("txselect_values string too long\n");
                return -ENOSPC;
        val = TIDFLOW_ERRBITS; /* these are W1C */
        for (i = 0; i < dd->cfgctxts; i++) {
                int flow;
+
                for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
                        qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
        }
 
        for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
                struct qib_chippport_specific *cp = ppd->cpspec;
+
                ppd->link_speed_supported = features & PORT_SPD_CAP;
                features >>=  PORT_SPD_CAP_SHIFT;
                if (!ppd->link_speed_supported) {
 static int serdes_7322_init(struct qib_pportdata *ppd)
 {
        int ret = 0;
+
        if (ppd->dd->cspec->r1)
                ret = serdes_7322_init_old(ppd);
        else
 
 static int qib_r_grab(struct qib_devdata *dd)
 {
-       u64 val;
-       val = SJA_EN;
+       u64 val = SJA_EN;
+
        qib_write_kreg(dd, kr_r_access, val);
        qib_read_kreg32(dd, kr_scratch);
        return 0;
 {
        u64 val;
        int timeout;
+
        for (timeout = 0; timeout < 100 ; ++timeout) {
                val = qib_read_kreg32(dd, kr_r_access);
                if (val & R_RDY)
                }
                if (inp) {
                        int tdi = inp[pos >> 3] >> (pos & 7);
+
                        val |= ((tdi & 1) << R_TDI_LSB);
                }
                qib_write_kreg(dd, kr_r_access, val);
 
                        u8 hw_pidx, u8 port)
 {
        int size;
+
        ppd->dd = dd;
        ppd->hw_pidx = hw_pidx;
        ppd->port = port; /* IB port number, not index */
                ppd = dd->pport + pidx;
                if (!ppd->qib_wq) {
                        char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */
+
                        snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
                                dd->unit, pidx);
                        ppd->qib_wq =
 
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                int mtu;
+
                if (lastfail)
                        ret = lastfail;
                ppd = dd->pport + pidx;
 
        if (!qib_cpulist_count) {
                u32 count = num_online_cpus();
+
                qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
                                      sizeof(long), GFP_KERNEL);
                if (qib_cpulist)
 
 void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
 {
        int r;
+
        r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
                                   dd->pcibar0);
        if (r)
 qib_pci_resume(struct pci_dev *pdev)
 {
        struct qib_devdata *dd = pci_get_drvdata(pdev);
+
        qib_devinfo(pdev, "QIB resume function called\n");
        pci_cleanup_aer_uncorrect_error_status(pdev);
        /*
 
        while (cnt < len) {
                unsigned in_page;
                int wlen = len - cnt;
+
                in_page = addr % QSFP_PAGESIZE;
                if ((in_page + wlen) > QSFP_PAGESIZE)
                        wlen = QSFP_PAGESIZE - in_page;
        while (cnt < len) {
                unsigned in_page;
                int wlen = len - cnt;
+
                in_page = addr % QSFP_PAGESIZE;
                if ((in_page + wlen) > QSFP_PAGESIZE)
                        wlen = QSFP_PAGESIZE - in_page;
                 * set the page to zero, Even if it already appears to be zero.
                 */
                u8 poke = 0;
+
                ret = qib_qsfp_write(ppd, 127, &poke, 1);
                udelay(50);
                if (ret != 1) {
 
        while (bidx < QSFP_DEFAULT_HDR_CNT) {
                int iidx;
+
                ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
                if (ret < 0)
                        goto bail;
 
                 * it again during startup.
                 */
                u64 val;
+
                rst_val &= ~(1ULL);
                qib_write_kreg(dd, kr_hwerrmask,
                               dd->cspec->hwerrmask &
                 * Both should be clear
                 */
                u64 newval = 0;
+
                qib_write_kreg(dd, acc, newval);
                /* First read after write is not trustworthy */
                pollval = qib_read_kreg32(dd, acc);
                /* Need to claim */
                u64 pollval;
                u64 newval = EPB_ACC_REQ | oct_sel;
+
                qib_write_kreg(dd, acc, newval);
                /* First read after write is not trustworthy */
                pollval = qib_read_kreg32(dd, acc);
                        if (!sofar) {
                                /* Only set address at start of chunk */
                                int addrbyte = (addr + sofar) >> 8;
+
                                transval = csbit | EPB_MADDRH | addrbyte;
                                tries = epb_trans(dd, trans, transval,
                                                  &transval);
                dds_reg_map >>= 4;
                for (midx = 0; midx < DDS_ROWS; ++midx) {
                        u64 __iomem *daddr = taddr + ((midx << 4) + idx);
+
                        data = dds_init_vals[midx].reg_vals[idx];
                        writeq(data, daddr);
                        mmiowb();
 
                udelay(2);
        else {
                int rise_usec;
+
                for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
                        if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
                                break;
 static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
 {
        int ret = 1;
+
        if (flags & QIB_TWSI_START)
                start_seq(dd);
 
        int sub_len;
        const u8 *bp = buffer;
        int max_wait_time, i;
-       int ret;
-       ret = 1;
+       int ret = 1;
 
        while (len > 0) {
                if (dev == QIB_TWSI_NO_DEV) {
 
 
        for (i = 0; i < cnt; i++) {
                int which;
+
                if (!test_bit(i, mask))
                        continue;
                /*
 
                sdma_rb_node->refcount++;
        } else {
                int ret;
+
                sdma_rb_node = kmalloc(sizeof(
                        struct qib_user_sdma_rb_node), GFP_KERNEL);
                if (!sdma_rb_node)
 
                        if (tiddma) {
                                char *tidsm = (char *)pkt + pktsize;
+
                                cfur = copy_from_user(tidsm,
                                        iov[idx].iov_base, tidsmsize);
                                if (cfur) {
 
 done:
        if (dd->flags & QIB_USE_SPCL_TRIG) {
                u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
+
                qib_flush_wc();
                __raw_writel(0xaebecede, piobuf_orig + spcl_off);
        }
 
        if (dd->piobcnt2k && dd->piobcnt4k) {
                /* 2 sizes for chip */
                unsigned long pio2kbase, pio4kbase;
+
                pio2kbase = dd->piobufbase & 0xffffffffUL;
                pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
                if (pio2kbase < pio4kbase) {
                piolen = 1ULL << (bits + 1);
        }
        if (pioaddr & (piolen - 1)) {
-               u64 atmp;
-               atmp = pioaddr & ~(piolen - 1);
+               u64 atmp = pioaddr & ~(piolen - 1);
+
                if (atmp < addr || (atmp + piolen) > (addr + len)) {
                        qib_dev_err(dd,
                                "No way to align address/size (%llx/%llx), no WC mtrr\n",