S:    Maintained
  F:    drivers/media/dvb-frontends/hd29l2*
  
- HEWLETT-PACKARD SMART2 RAID DRIVER
- L:    iss_storagedev@hp.com
- S:    Orphan
- F:    Documentation/blockdev/cpqarray.txt
- F:    drivers/block/cpqarray.*
- 
  HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa)
 -M:    Don Brace <don.brace@pmcs.com>
 +M:    Don Brace <don.brace@microsemi.com>
  L:    iss_storagedev@hp.com
 -L:    storagedev@pmcs.com
 +L:    esc.storagedev@microsemi.com
  L:    linux-scsi@vger.kernel.org
  S:    Supported
  F:    Documentation/scsi/hpsa.txt
 
        }
  
        page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
 -      if (!page)
 +      if (!page) {
 +              bio_put(bio);
                return -ENOMEM;
 +      }
  
        while ((slot = find_first_zero_bit(rblk->invalid_pages,
-                                           nr_pgs_per_blk)) < nr_pgs_per_blk) {
+                                           nr_sec_per_blk)) < nr_sec_per_blk) {
  
                /* Lock laddr */
-               phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;
+               phys_addr = rblk->parent->id * nr_sec_per_blk + slot;
  
  try:
                spin_lock(&rrpc->rev_lock);
 
        u16 old_ms;
        unsigned short bs;
  
 +      if (test_bit(NVME_NS_DEAD, &ns->flags)) {
 +              set_capacity(disk, 0);
 +              return -ENODEV;
 +      }
        if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
-               dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n",
-                               __func__, ns->ctrl->instance, ns->ns_id);
+               dev_warn(disk_to_dev(ns->disk), "%s: Identify failure\n",
+                               __func__);
                return -ENODEV;
        }
        if (id->ncap == 0) {
  
        return ret;
  }
+ EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
  
 +static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 +              struct request_queue *q)
 +{
 +      if (ctrl->max_hw_sectors) {
 +              u32 max_segments =
 +                      (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
 +
 +              blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
 +              blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
 +      }
 +      if (ctrl->stripe_size)
 +              blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
 +      if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
 +              blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 +      blk_queue_virt_boundary(q, ctrl->page_size - 1);
 +}
 +
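A worked example of the max_segments arithmetic above, as a standalone sketch (the 4 KiB page size and 1024-sector transfer limit are hypothetical values, not taken from the patch):

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;		/* stand-in for ctrl->page_size */
	unsigned int max_hw_sectors = 1024;	/* stand-in for ctrl->max_hw_sectors, in 512-byte units */

	/* Same formula as nvme_set_queue_limits(): one controller page holds
	 * page_size >> 9 == 8 sectors, so 1024 / 8 + 1 == 129 segments. */
	unsigned int max_segments = max_hw_sectors / (page_size >> 9) + 1;

	printf("max_segments = %u\n", max_segments);
	return 0;
}

The extra "+ 1" presumably leaves room for a transfer whose data does not start on a controller page boundary.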
  /*
   * Initialize the cached copies of the Identify data and various controller
   * registers in our nvme_ctrl structure.  This should be called as soon as
  {
        struct nvme_ns *ns, *next;
  
 -      mutex_lock(&ctrl->namespaces_mutex);
        list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
                nvme_ns_remove(ns);
 -      mutex_unlock(&ctrl->namespaces_mutex);
  }
+ EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
  
  static DEFINE_IDA(nvme_instance_ida);
  
                goto out_release_instance;
        }
        get_device(ctrl->device);
-       dev_set_drvdata(ctrl->device, ctrl);
 +      ida_init(&ctrl->ns_ida);
  
        spin_lock(&dev_list_lock);
        list_add_tail(&ctrl->node, &nvme_ctrl_list);
  out:
        return ret;
  }
+ EXPORT_SYMBOL_GPL(nvme_init_ctrl);
  
 +/**
 + * nvme_kill_queues(): Ends all namespace queues
 + * @ctrl: the dead controller that needs to end
 + *
 + * Call this function when the driver determines it is unable to get the
 + * controller in a state capable of servicing IO.
 + */
 +void nvme_kill_queues(struct nvme_ctrl *ctrl)
 +{
 +      struct nvme_ns *ns;
 +
 +      mutex_lock(&ctrl->namespaces_mutex);
 +      list_for_each_entry(ns, &ctrl->namespaces, list) {
 +              if (!kref_get_unless_zero(&ns->kref))
 +                      continue;
 +
 +              /*
 +               * Revalidating a dead namespace sets capacity to 0. This will
 +               * end buffered writers dirtying pages that can't be synced.
 +               */
 +              if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags))
 +                      revalidate_disk(ns->disk);
 +
 +              blk_set_queue_dying(ns->queue);
 +              blk_mq_abort_requeue_list(ns->queue);
 +              blk_mq_start_stopped_hw_queues(ns->queue, true);
 +
 +              nvme_put_ns(ns);
 +      }
 +      mutex_unlock(&ctrl->namespaces_mutex);
 +}
++EXPORT_SYMBOL_GPL(nvme_kill_queues);
 +
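A usage sketch for nvme_kill_queues(): the PCI driver takes a controller reference and schedules dev->remove_work in nvme_remove_dead_ctrl() (further down in this diff), and the work handler could look roughly like this; the PCI teardown calls are illustrative, only the nvme_kill_queues() call and the kref hand-off follow from the diff itself.

static void nvme_remove_dead_ctrl_work(struct work_struct *work)
{
	struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	/* Fail all outstanding and future I/O so upper layers can unwind. */
	nvme_kill_queues(&dev->ctrl);

	/* Detach the dead device if it is still bound. */
	if (pci_get_drvdata(pdev))
		pci_stop_and_remove_bus_device_locked(pdev);

	/* Drop the reference taken in nvme_remove_dead_ctrl() before the
	 * work was scheduled. */
	nvme_put_ctrl(&dev->ctrl);
}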
  void nvme_stop_queues(struct nvme_ctrl *ctrl)
  {
        struct nvme_ns *ns;
 
  
        switch (result & 0xff07) {
        case NVME_AER_NOTICE_NS_CHANGED:
-               dev_info(dev->dev, "rescanning\n");
+               dev_info(dev->ctrl.device, "rescanning\n");
 -              queue_work(nvme_workq, &dev->scan_work);
 +              nvme_queue_scan(dev);
        default:
-               dev_warn(dev->dev, "async event result %08x\n", result);
+               dev_warn(dev->ctrl.device, "async event result %08x\n", result);
        }
  }
  
        if (!blk_mq_request_started(req))
                return;
  
-       dev_dbg_ratelimited(nvmeq->q_dmadev,
 -      dev_warn(nvmeq->dev->ctrl.device,
++      dev_dbg_ratelimited(nvmeq->dev->ctrl.device,
                 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
  
        status = NVME_SC_ABORT_REQ;
                if (blk_mq_alloc_tag_set(&dev->tagset))
                        return 0;
                dev->ctrl.tagset = &dev->tagset;
+       } else {
+               blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+ 
+               /* Free previously allocated queues that are no longer usable */
+               nvme_free_queues(dev, dev->online_queues);
        }
 -      queue_work(nvme_workq, &dev->scan_work);
+ 
 +      nvme_queue_scan(dev);
        return 0;
  }
  
        int i;
        u32 csts = -1;
  
-       nvme_dev_list_remove(dev);
+       del_timer_sync(&dev->watchdog_timer);
  
        mutex_lock(&dev->shutdown_lock);
 -      if (dev->bar) {
 +      if (pci_is_enabled(to_pci_dev(dev->dev))) {
                nvme_stop_queues(&dev->ctrl);
                csts = readl(dev->bar + NVME_REG_CSTS);
        }
        kfree(dev);
  }
  
-       dev_warn(dev->dev, "Removing after probe failure status: %d\n", status);
 +static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
 +{
++      dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status);
 +
 +      kref_get(&dev->ctrl.kref);
 +      nvme_dev_disable(dev, false);
 +      if (!schedule_work(&dev->remove_work))
 +              nvme_put_ctrl(&dev->ctrl);
 +}
 +
  static void nvme_reset_work(struct work_struct *work)
  {
        struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
  
        result = nvme_setup_io_queues(dev);
        if (result)
 -              goto free_tags;
 +              goto out;
  
        dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
+       queue_work(nvme_workq, &dev->async_work);
  
-       result = nvme_dev_list_add(dev);
-       if (result)
-               goto out;
+       mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
  
        /*
         * Keep the controller around but remove all namespaces if we don't have
        dev->dev = get_device(&pdev->dev);
        pci_set_drvdata(pdev, dev);
  
-       INIT_LIST_HEAD(&dev->node);
 +      result = nvme_dev_map(dev);
 +      if (result)
 +              goto free;
 +
        INIT_WORK(&dev->scan_work, nvme_dev_scan);
        INIT_WORK(&dev->reset_work, nvme_reset_work);
        INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
  {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
  
+       del_timer_sync(&dev->watchdog_timer);
+ 
 +      set_bit(NVME_CTRL_REMOVING, &dev->flags);
        pci_set_drvdata(pdev, NULL);
 -      flush_work(&dev->reset_work);
+       flush_work(&dev->async_work);
        flush_work(&dev->scan_work);
        nvme_remove_namespaces(&dev->ctrl);
        nvme_uninit_ctrl(&dev->ctrl);