spin_lock_irqsave(&vio->lock, flags);
- state = ldc_state(vio->lp);
-
err = 0;
- if (state == LDC_STATE_INIT) {
+ if (vio->lp)
+ state = ldc_state(vio->lp);
+ else {
+ state = LDC_STATE_INVALID;
+ err = -EINVAL;
+ }
+
+ if (!err && (state == LDC_STATE_INIT)) {
err = ldc_bind(vio->lp);
if (err)
printk(KERN_WARNING "%s: Port %lu bind failed, "
delay = 128;
} while (err == -EAGAIN);
- if (err == -ENOTCONN)
+ if (err == -ENOTCONN) {
+ printk(KERN_ERR PFX "vio_ldc_send() failure, err=%d.\n", err);
vdc_ldc_reset(port);
+ }
return err;
}
return comp.err;
}
-static void vdc_port_down(struct vdc_port *port)
-{
- ldc_disconnect(port->vio.lp);
- ldc_unbind(port->vio.lp);
- vdc_free_tx_ring(port);
- vio_ldc_free(&port->vio);
-}
-
static int probe_disk(struct vdc_port *port)
{
struct request_queue *q;
spin_unlock_irqrestore(&vio->lock, flags);
}
+/*
+ * Reset the connection by disconnecting and reconnecting the LDC.
+ * There is no need to free and reallocate the LDC; in fact this
+ * causes various race conditions unless the channel is freed/allocated
+ * under a mutex (see ldc.c:__ldc_channel_exists()).
+ */
static void vdc_ldc_reset(struct vdc_port *port)
{
	int err;
+	struct vio_driver_state *vio = &port->vio;
-	assert_spin_locked(&port->vio.lock);
+	assert_spin_locked(&vio->lock);
	pr_warn(PFX "%s ldc link reset\n", port->disk_name);
+
	if (!port->disk)
		return;
+	/* NOTE(review): blk_stop_queue() requires the queue lock; presumably
+	 * vio->lock is also the request queue lock for this disk -- confirm. */
	blk_stop_queue(port->disk->queue);
	vdc_requeue_inflight(port);
-	vdc_port_down(port);
+	vio_link_state_change(vio, LDC_EVENT_RESET);
-	err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
-	if (err) {
-		pr_err(PFX "%s vio_ldc_alloc:%d\n", port->disk_name, err);
-		return;
-	}
-
-	err = vdc_alloc_tx_ring(port);
-	if (err) {
-		pr_err(PFX "%s vio_alloc_tx_ring:%d\n", port->disk_name, err);
-		goto err_free_ldc;
-	}
+	/* Reconnect the existing channel instead of freeing and reallocating
+	 * it (see the function header comment for why realloc is racy). */
+	err = ldc_connect(vio->lp);
+	if (err)
+		pr_err(PFX "%s connect failed, err=%d\n",
+		    port->disk_name, err);
+	/* NOTE(review): timers are re-armed even when ldc_connect() fails --
+	 * presumably so the reset/handshake is retried later; confirm. */
	if (port->ldc_timeout)
		mod_timer(&port->ldc_reset_timer,
			round_jiffies(jiffies + HZ * port->ldc_timeout));
-	mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
-	return;
-
-err_free_ldc:
-	vio_ldc_free(&port->vio);
+	mod_timer(&vio->timer, round_jiffies(jiffies + HZ));
}
static const struct vio_device_id vdc_port_match[] = {
};
#define VDS_PORT_SEQ 0x1
+#define VDS_PORT_FINI 0x2
static inline struct vds_port *to_vds_port(struct vio_driver_state *vio)
{
* The size of the cache object accomdate the largest possible
* IO transfer initiated from either dring or descriptor mode.
*/
- max_cookies = (roundup(VDS_MAX_XFER_SIZE, PAGE_SIZE) / PAGE_SIZE) + 1;
+ max_cookies = (roundup(VDS_MAX_XFER_SIZE, PAGE_SIZE) / PAGE_SIZE) + 2;
max_cookies = max(max_cookies, VIO_MAX_RING_COOKIES);
max_entry = max_cookies * sizeof(struct ldc_trans_cookie);
vds_ioc_size = sizeof(struct vds_io) +
max(max_dring_mode, max_desc_mode);
+ BUG_ON(vds_io_cache);
vds_io_cache = kmem_cache_create(vds_ioc_name, vds_ioc_size, 0,
0, NULL);
if (!vds_io_cache) {
void vds_io_fini(void)
{
+ BUG_ON(!vds_io_cache);
kmem_cache_destroy(vds_io_cache);
+ vds_io_cache = NULL;
}
/*
vdsdbg(MEM, "size=%d ioc_size=%d\n", size, vds_ioc_size);
if (size <= vds_ioc_size) {
+ BUG_ON(!vds_io_cache);
io = kmem_cache_zalloc(vds_io_cache, GFP_ATOMIC);
if (!io)
void vds_io_free(struct vds_io *io)
{
if (io->flags & VDS_IO_CACHE) {
+ BUG_ON(!vds_io_cache);
kmem_cache_free(vds_io_cache, io);
} else {
kfree(io->msgbuf);
pkt->vdisk_size, pkt->vdisk_block_size,
pkt->max_xfer_size, pkt->operations);
- return vio_ldc_send(&port->vio, pkt, sizeof(*pkt));
+ return vio_ldc_send(vio, pkt, sizeof(*pkt));
}
static struct vio_driver_ops vds_vio_ops = {
else
BUG();
- vds_io_done(io);
+ /*
+ * If there was a reset then the IO request has been
+ * converted to a reset request queued to be executed.
+ */
+ if (!(io->flags & VDS_IO_FINI))
+ vds_io_done(io);
}
static void vds_reset(struct vds_io *io)
BUG_ON(in_interrupt());
+ vds_vio_lock(vio, flags);
+
+ /*
+	 * No point resetting the channel if the port is being shut down.
+ */
+ if (port->flags & VDS_PORT_FINI)
+ goto done;
+
io->flags |= VDS_IO_FINI;
vds_be_fini(port);
- vds_vio_lock(vio, flags);
vio_link_state_change(vio, LDC_EVENT_RESET);
vio->desc_buf_len = 0;
dev_set_drvdata(&vdev->dev, port);
err = sysfs_create_group(&vdev->dev.kobj, &vds_attribute_group);
- if (err)
- goto free_path;
+ if (!err) {
+ vio_port_up(vio);
+ return 0;
+ }
- vio_port_up(vio);
-
- return 0;
+ destroy_workqueue(port->ioq);
free_path:
kfree(port->path);
return err;
}
+/* Final teardown of a vds port: flush outstanding IO, then shut down the
+ * backend. Caller must have already disconnected the LDC (asserted below). */
+static void vds_port_fini(struct vds_port *port)
+{
+	struct vio_driver_state *vio = &port->vio;
+	struct list_head *pos, *tmp;
+	struct vds_io *io;
+
+	BUG_ON(ldc_state(vio->lp) == LDC_STATE_CONNECTED);
+
+	/*
+	 * Shutdown the backend and then issue a final flush request
+	 * to make sure all outstanding IO is complete.
+	 *
+	 * The LDC was disconnected before this call so there will
+	 * be no further incoming requests. There may still be
+	 * requests in progress which may generate some resets since
+	 * responding to the client will fail. The reset will be ignored
+	 * however since the port is in the FINI state so by the time
+	 * the flush is processed there should be no further requests
+	 * on the IO queue. Strictly speaking, we should assert that
+	 * absolutely no requests are on the queue but ignoring a potential
+	 * reset at this point should be harmless.
+	 */
+	io = vds_io_alloc(vio, NULL);
+	if (io) {
+		/* Flush result is deliberately ignored: teardown proceeds
+		 * regardless of whether the final flush succeeds. */
+		(void) vd_op_flush(io);
+		vds_io_done(io);
+		list_for_each_safe(pos, tmp, &port->io_list) {
+			io = list_entry(pos, struct vds_io, list);
+			BUG_ON(!(io->flags & VDS_IO_FINI));
+			vds_io_done(io);
+		}
+	}
+	/* NOTE(review): if vds_io_alloc() fails, any VDS_IO_FINI requests
+	 * still on io_list are not drained before vds_be_fini() -- confirm
+	 * this path cannot leak queued requests. */
+	vds_be_fini(port);
+}
+
static int vds_port_remove(struct vio_dev *vdev)
{
struct vds_port *port = dev_get_drvdata(&vdev->dev);
struct vio_driver_state *vio = &port->vio;
+ unsigned long flags;
if (!port)
return 0;
+ /*
+ * Disconnect the LDC so no further requests are received
+ * and then shutdown the port.
+ */
+ vds_vio_lock(vio, flags);
del_timer_sync(&vio->timer);
ldc_disconnect(vio->lp); /* XXX vds_port_down() */
+ port->flags |= VDS_PORT_FINI;
+ vds_vio_unlock(vio, flags);
+
+ vds_port_fini(port);
+ destroy_workqueue(port->ioq);
+
+	/*
+	 * At this point there should be no other outstanding IO
+	 * and all client responses have been completed, either
+	 * successfully or failed with a reset due to the LDC being
+	 * disconnected, so vio_ldc_send() will never be called
+	 * again and the LDC can be freed. Without this guarantee,
+	 * vio_ldc_send() could panic since it unconditionally
+	 * dereferences vio->lp.
+	 */
vio_ldc_free(vio);
sysfs_remove_group(&vdev->dev.kobj, &vds_attribute_group);
dev_set_drvdata(&vdev->dev, NULL);
struct dk_label *label;
struct vio_disk_vtoc *vtoc = port->vtoc;
+ if (!vtoc) {
+ vdsmsg(err, "NULL vtoc pointer\n");
+ return -EINVAL;
+ }
+
rv = vds_vtoc_get_label(port, &label);
if (!label)
return rv;