@@ ... @@
                          u64 __percpu *cntr,
                          int vl, int mode, u64 data)
 {
-
        u64 ret = 0;
 
        if (vl != CNTR_INVALID_VL)
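Note on the hunk above: it sits in a per-CPU counter accessor (the vl/mode/data signature). These hfi1 counters are read as a running total minus a saved baseline, and a write of zero records a new baseline rather than touching the count itself. A minimal userspace sketch of that baseline idiom — rst_cntr, cntr_read and cntr_write_zero are hypothetical names, not driver API:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical resettable counter mirroring the baseline idiom. */
struct rst_cntr {
        uint64_t total; /* monotonically increasing count */
        uint64_t z_val; /* baseline saved by the last zeroing write */
};

static uint64_t cntr_read(const struct rst_cntr *c)
{
        return c->total - c->z_val; /* value since the last reset */
}

static void cntr_write_zero(struct rst_cntr *c)
{
        c->z_val = c->total; /* a write can only zero the counter */
}

int main(void)
{
        struct rst_cntr c = { .total = 100, .z_val = 0 };

        c.total += 23;
        printf("%llu\n", (unsigned long long)cntr_read(&c)); /* 123 */
        cntr_write_zero(&c);
        c.total += 7;
        printf("%llu\n", (unsigned long long)cntr_read(&c)); /* 7 */
        return 0;
}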
@@ ... @@
        u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
 
        if (reg & QSFP_HFI0_MODPRST_N) {
-
                dd_dev_info(dd, "%s: ModPresent triggered QSFP interrupt\n",
                                __func__);
 
        }
 
        if (reg & QSFP_HFI0_INT_N) {
-
                dd_dev_info(dd, "%s: IntN triggered QSFP interrupt\n",
                                __func__);
                spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
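Note on the hunk above: each QSFP interrupt source is tested with an independent if, so one interrupt that latches both ModPresent and IntN is handled completely. A standalone sketch of that dispatch — the bit positions below are made up; the real masks are the QSFP_HFI0_* constants:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit layout for the interrupt status register. */
#define MODPRST_N (1ULL << 0)
#define INT_N     (1ULL << 1)

static void handle_status(uint64_t reg)
{
        /* Independent tests: both events in one interrupt get handled. */
        if (reg & MODPRST_N)
                printf("module presence changed\n");
        if (reg & INT_N)
                printf("module asserted IntN\n");
}

int main(void)
{
        handle_status(MODPRST_N | INT_N); /* prints both lines */
        return 0;
}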
@@ ... @@
        dc_start(dd);
 
        if (qd->cache_refresh_required) {
-
                set_qsfp_int_n(ppd, 0);
 
                wait_for_qsfp_init(ppd);
@@ ... @@
                                "%s: logical state did not change to ACTIVE\n",
                                __func__);
                } else {
-
                        /* tell all engines to go running */
                        sdma_all_running(dd);
 
@@ ... @@
 static inline void init_packet(struct hfi1_ctxtdata *rcd,
                              struct hfi1_packet *packet)
 {
-
        packet->rsize = rcd->rcvhdrqentsize; /* words */
        packet->maxcnt = rcd->rcvhdrq_cnt * packet->rsize; /* words */
        packet->rcd = rcd;
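init_packet above does its sizing in 32-bit words: rsize is one receive header entry and maxcnt is the whole header queue. The same arithmetic as a tiny sketch — the two sizes are invented stand-ins for rcd->rcvhdrqentsize and rcd->rcvhdrq_cnt:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t entsize = 32;   /* one header entry, in words (assumed) */
        uint32_t entries = 2048; /* entries in the header queue (assumed) */

        uint32_t rsize = entsize;          /* words */
        uint32_t maxcnt = entries * rsize; /* words */

        printf("rsize=%u maxcnt=%u\n", rsize, maxcnt);
        return 0;
}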
@@ ... @@
 static inline void finish_packet(struct hfi1_packet *packet)
 {
-
        /*
         * Nothing we need to free for the packet.
         *
@@ ... @@
 static inline void process_rcv_qp_work(struct hfi1_packet *packet)
 {
-
        struct hfi1_ctxtdata *rcd;
        struct rvt_qp *qp, *nqp;
 
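The qp/nqp pointer pair in process_rcv_qp_work is the usual deletion-safe traversal: the successor is saved before the current entry may be unlinked, the same shape as the kernel's list_for_each_entry_safe(). A plain-C sketch of the two-pointer walk over a toy list (not the driver's rvt_qp wait list):

#include <stdio.h>
#include <stdlib.h>

struct node {
        int val;
        struct node *next;
};

/* Frees every node while walking: nqp keeps the walk valid. */
static void process_all(struct node *head)
{
        struct node *qp, *nqp;

        for (qp = head; qp; qp = nqp) {
                nqp = qp->next; /* save successor before freeing */
                printf("processing %d\n", qp->val);
                free(qp);
        }
}

int main(void)
{
        struct node *b = malloc(sizeof(*b));
        struct node *a = malloc(sizeof(*a));

        if (!a || !b)
                return 1;
        a->val = 1; a->next = b;
        b->val = 2; b->next = NULL;
        process_all(a);
        return 0;
}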
@@ ... @@
        __be32 error_info_select_mask;
        __be32 reserved1;
        struct _port_ei {
-
                u8 port_number;
                u8 reserved2[7];
 
@@ ... @@
        for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
                         8 * sizeof(vl_select_mask)) {
-
                if (counter_select & CS_PORT_XMIT_DATA)
                        write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
 
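for_each_set_bit() visits only the set bit positions of vl_select_mask, so only the VLs the request selected get their transmit counters cleared. An open-coded equivalent in plain C (the mask value is hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t vl_select_mask = 0x8005; /* VLs 0, 2 and 15 selected */
        unsigned vl;

        for (vl = 0; vl < 8 * sizeof(vl_select_mask); vl++) {
                if (!(vl_select_mask & (1u << vl)))
                        continue;
                /* the driver clears the per-VL counter here */
                printf("clearing VL %u\n", vl);
        }
        return 0;
}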
@@ ... @@
 
        spin_lock_irqsave(&qp->s_lock, flags);
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
-
                /*
                 * If we couldn't queue the DMA request, save the info
                 * and try again later rather than destroying the
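The comment in this hunk describes a deferral pattern: when the DMA request cannot be queued, the request state is saved and resubmitted later instead of being torn down. A toy save-and-retry sketch — queue_full, pending and submit() are all hypothetical, not driver symbols:

#include <stdio.h>

struct req { int id; };

static int queue_full;
static struct req *pending;

static int submit(struct req *r)
{
        if (queue_full) {
                pending = r; /* save the info, try again later */
                return -1;
        }
        printf("submitted %d\n", r->id);
        return 0;
}

static void retry_pending(void)
{
        struct req *r = pending;

        if (r && !queue_full) {
                pending = NULL;
                submit(r);
        }
}

int main(void)
{
        struct req r = { .id = 42 };

        queue_full = 1;
        submit(&r);      /* deferred */
        queue_full = 0;
        retry_pending(); /* goes through now */
        return 0;
}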
@@ ... @@
 
        /* Is paging enabled? */
        if (!(cache[2] & 4)) {
-
                /* Paging enabled, page 03 required */
                if ((cache[195] & 0xC0) == 0xC0) {
                        /* all */
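Per the comments in this hunk, bit 2 of byte 2 of the cached module EEPROM distinguishes a flat upper memory map from a paged one: when the bit is clear the module is paged and page 03h must be selected before the bytes used below are valid. A small decode sketch (qsfp_is_paged is a hypothetical helper):

#include <stdint.h>
#include <stdio.h>

static int qsfp_is_paged(const uint8_t *cache)
{
        return !(cache[2] & 4); /* bit clear => paged upper memory */
}

int main(void)
{
        uint8_t cache[256] = { 0 };

        printf("paged: %d\n", qsfp_is_paged(cache)); /* 1 */
        cache[2] = 4; /* flat-memory bit set */
        printf("paged: %d\n", qsfp_is_paged(cache)); /* 0 */
        return 0;
}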
@@ ... @@
        lenstr[1] = '\0';
 
        if (ppd->qsfp_info.cache_valid) {
-
                if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
                        sprintf(lenstr, "%dM ", cache[QSFP_MOD_LEN_OFFS]);
 
@@ ... @@
 static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
 {
        if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
-
                unsigned index;
                struct hfi1_devdata *dd = sde->dd;
 
@@ ... @@
        for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
                        ++this_idx) {
-
                sde = &dd->per_sdma[this_idx];
                if (!list_empty(&sde->dmawait))
                        dd_dev_err(dd, "sde %u: dmawait list not empty!\n",