#include <linux/backing-dev.h>
#include <linux/compat.h>
#include <linux/log2.h>
+#include <linux/bug.h>
#include <asm/uaccess.h>
#include <linux/spinlock.h>
#include "linux/oracleasm/module_version.h"
-#include "masklog.h"
#include "transaction_file.h"
#include "request.h"
#include "integrity.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
#if PAGE_CACHE_SIZE % 1024
#error Oh no, PAGE_CACHE_SIZE is not divisible by 1k! I cannot cope.
#endif /* PAGE_CACHE_SIZE % 1024 */
static struct kmem_cache *asm_request_cachep;
static struct kmem_cache *asmfs_inode_cachep;
static struct kmem_cache *asmdisk_cachep;
-static struct proc_dir_entry *asm_proc;
static bool use_logical_block_size = false;
module_param(use_logical_block_size, bool, 0644);
return file->f_path.dentry->d_inode;
}
-/*
- * asm disk info
- */
-struct asm_disk_info {
- struct asmfs_inode_info *d_inode;
- struct block_device *d_bdev; /* Block device we I/O to */
- int d_max_sectors; /* Maximum sectors per I/O */
- int d_live; /* Is the disk alive? */
- atomic_t d_ios; /* Count of in-flight I/Os */
- struct list_head d_open; /* List of assocated asm_disk_heads */
- struct inode vfs_inode;
-};
-
/* Argument to iget5_locked()/ilookup5() to map bdev to disk_inode */
struct asmdisk_find_inode_args {
unsigned long fa_handle;
if (!d)
return NULL;
- mlog(ML_DISK, "Allocated disk 0x%p\n", d);
+ trace_disk(d, "alloc");
return &d->vfs_inode;
}
{
struct asm_disk_info *d = ASMDISK_I(inode);
- mlog_bug_on_msg(atomic_read(&d->d_ios),
- "Disk 0x%p has outstanding I/Os\n", d);
-
- mlog_bug_on_msg(!list_empty(&d->d_open),
- "Disk 0x%p has openers\n", d);
-
- mlog(ML_DISK, "Destroying disk 0x%p\n", d);
+ BUG_ON(atomic_read(&d->d_ios));
+ BUG_ON(!list_empty(&d->d_open));
+ trace_disk(d, "destroy");
kmem_cache_free(asmdisk_cachep, d);
}
{
struct asm_disk_info *d = ASMDISK_I(inode);
- mlog_entry("(0x%p)\n", inode);
-
+ trace_disk(d, "evict");
clear_inode(inode);
- mlog_bug_on_msg(atomic_read(&d->d_ios),
- "Disk 0x%p has outstanding I/Os\n", d);
-
- mlog_bug_on_msg(!list_empty(&d->d_open),
- "Disk 0x%p has openers\n", d);
-
- mlog_bug_on_msg(d->d_live,
- "Disk 0x%p is live\n", d);
-
- mlog(ML_DISK, "Clearing disk 0x%p\n", d);
+ BUG_ON(atomic_read(&d->d_ios));
+ BUG_ON(!list_empty(&d->d_open));
+ BUG_ON(d->d_live);
if (d->d_bdev) {
- mlog(ML_DISK,
- "Releasing disk 0x%p (bdev 0x%p, dev %X)\n",
- d, d->d_bdev, d->d_bdev->bd_dev);
blkdev_put(d->d_bdev, FMODE_WRITE | FMODE_READ | FMODE_EXCL);
d->d_bdev = NULL;
}
-
- mlog_exit_void();
}
static int compute_max_sectors(struct block_device *bdev)
{
int max_pages, max_sectors, pow_two_sectors;
- char b[BDEVNAME_SIZE];
struct request_queue *q;
q = bdev_get_queue(bdev);
- mlog(ML_DISK, "Computing limits for block device \%s\":\n",
- bdevname(bdev, b));
- mlog(ML_DISK,
- "\tq->max_sectors = %u, q->max_segments = %u\n",
- queue_max_sectors(q), queue_max_segments(q));
max_pages = queue_max_sectors(q) >> (PAGE_SHIFT - 9);
- mlog(ML_DISK, "\tmax_pages = %d, BIO_MAX_PAGES = %d\n",
- max_pages, BIO_MAX_PAGES);
if (max_pages > BIO_MAX_PAGES)
max_pages = BIO_MAX_PAGES;
if (max_pages > queue_max_segments(q))
/* Why is fls() 1-based???? */
pow_two_sectors = 1 << (fls(max_sectors) - 1);
- mlog(ML_DISK,
- "\tresulting max_pages = %d, max_sectors = %d, "
- "pow_two_sectors = %d\n",
- max_pages, max_sectors, pow_two_sectors);
return pow_two_sectors;
}
struct inode *disk_inode;
struct asmdisk_find_inode_args args;
- mlog_entry("(0x%p, 0x%p)\n", file, bdev);
-
ret = blkdev_get(bdev, FMODE_WRITE | FMODE_READ | FMODE_EXCL, inode->i_sb);
if (ret)
goto out;
if (!h)
goto out_get;
- mlog(ML_DISK, "Looking up disk for bdev %p (dev %X)\n", bdev,
- bdev->bd_dev);
-
args.fa_handle = (unsigned long)bdev;
args.fa_inode = ASMFS_I(inode);
disk_inode = iget5_locked(asmdisk_mnt->mnt_sb,
bdi->ra_pages = 0; /* No readahead */
bdi->capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK;
- mlog_bug_on_msg(atomic_read(&d->d_ios) != 0,
- "Supposedly new disk 0x%p (dev %X) has outstanding I/O\n",
- d, bdev->bd_dev);
- mlog_bug_on_msg(d->d_live,
- "Supposedly new disk 0x%p (dev %X) is live\n",
- d, bdev->bd_dev);
-
- mlog_bug_on_msg(d->d_bdev != bdev,
- "New disk 0x%p has set bdev 0x%p but we were opening 0x%p\n",
- d, d->d_bdev, bdev);
+ BUG_ON(atomic_read(&d->d_ios) != 0);
+ BUG_ON(d->d_live);
+ BUG_ON(d->d_bdev != bdev);
d->d_max_sectors = compute_max_sectors(bdev);
d->d_live = 1;
- mlog(ML_DISK,
- "First open of disk 0x%p (bdev 0x%p, dev %X)\n",
- d, d->d_bdev, d->d_bdev->bd_dev);
unlock_new_inode(disk_inode);
+
+ trace_disk(d, "open");
} else {
/* Already claimed on first open */
- mlog(ML_DISK,
- "Open of disk 0x%p (bdev 0x%p, dev %X)\n",
- d, d->d_bdev, d->d_bdev->bd_dev);
blkdev_put(bdev, FMODE_WRITE | FMODE_READ | FMODE_EXCL);
+ trace_disk(d, "reopen");
}
h->h_disk = d;
list_add(&h->h_dlist, &d->d_open);
spin_unlock_irq(&ASMFS_I(inode)->i_lock);
- mlog_exit(0);
return 0;
out_head:
blkdev_put(bdev, FMODE_WRITE | FMODE_READ | FMODE_EXCL);
out:
- mlog_exit(ret);
return ret;
}
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
- mlog_entry("(0x%p, %lu)\n", file, handle);
-
- mlog_bug_on_msg(!ASMFS_FILE(file) || !ASMFS_I(inode),
- "Garbage arguments\n");
+ BUG_ON(!ASMFS_FILE(file) || !ASMFS_I(inode));
args.fa_handle = handle;
args.fa_inode = ASMFS_I(inode);
disk_inode = ilookup5(asmdisk_mnt->mnt_sb, handle,
asmdisk_test, &args);
- if (!disk_inode) {
- mlog_exit(-EINVAL);
+ if (!disk_inode)
return -EINVAL;
- }
d = ASMDISK_I(disk_inode);
bdev = d->d_bdev;
- mlog(ML_DISK, "Closing disk 0x%p (bdev 0x%p, dev %X)\n",
- d, d->d_bdev, d->d_bdev->bd_dev);
+ trace_disk(d, "close");
/*
* If an additional thread raced us to close the disk, it
if (!h) {
spin_unlock_irq(&ASMFS_FILE(file)->f_lock);
iput(disk_inode);
- mlog_exit(-EINVAL);
return -EINVAL;
}
list_del(&h->h_flist);
/* Last close */
if (list_empty(&d->d_open)) {
- mlog(ML_DISK,
- "Last close of disk 0x%p (bdev 0x%p, dev %X)\n",
- d, d->d_bdev, d->d_bdev->bd_dev);
-
- /* I/O path can't look up this disk anymore */
- mlog_bug_on_msg(!d->d_live,
- "Disk 0x%p (bdev 0x%p, dev %X) isn't live at last close\n",
- d, d->d_bdev, d->d_bdev->bd_dev);
+ trace_disk(d, "last");
+ BUG_ON(!d->d_live);
d->d_live = 0;
spin_unlock_irq(&ASMFS_I(inode)->i_lock);
/* Real put */
iput(disk_inode);
- mlog_exit(0);
return 0;
} /* asm_close_disk() */
u16 tmp_status;
unsigned long flags;
- mlog_entry("(0x%p)\n", r);
-
ioc = r->r_ioc;
- mlog(ML_IOC, "User IOC is 0x%p\n", ioc);
/* Need to get the current userspace bits because ASM_CANCELLED is currently set there */
- mlog(ML_IOC, "Getting tmp_status\n");
if (get_user(tmp_status, &(ioc->status_asm_ioc))) {
ret = -EFAULT;
goto out;
/* From here on, ONLY TRUST copy */
- mlog(ML_IOC, "Putting r_status (0x%08X)\n", copy.r_status);
if (put_user(copy.r_status, &(ioc->status_asm_ioc))) {
ret = -EFAULT;
goto out;
}
if (copy.r_status & ASM_ERROR) {
- mlog(ML_IOC, "Putting r_error (0x%08X)\n", copy.r_error);
if (put_user(copy.r_error, &(ioc->error_asm_ioc))) {
ret = -EFAULT;
goto out;
goto out;
}
}
- mlog(ML_IOC,
- "r_status:0x%08X, bitmask:0x%08X, combined:0x%08X\n",
- copy.r_status,
- (ASM_SUBMITTED | ASM_COMPLETED | ASM_ERROR),
- (copy.r_status & (ASM_SUBMITTED | ASM_COMPLETED | ASM_ERROR)));
if (copy.r_status & ASM_FREE) {
u64 z = 0ULL;
if (copy_to_user(&(ioc->reserved_asm_ioc),
ret = -EFAULT;
goto out;
}
- } else if (copy.r_status &
- (ASM_SUBMITTED | ASM_ERROR)) {
+ } else if (copy.r_status & (ASM_SUBMITTED | ASM_ERROR)) {
u64 key = (u64)(unsigned long)r;
- mlog(ML_IOC, "Putting key 0x%p on asm_ioc 0x%p\n",
- r, ioc);
/* Only on first submit */
if (copy_to_user(&(ioc->reserved_asm_ioc),
&key, sizeof(ioc->reserved_asm_ioc))) {
}
out:
- mlog_exit(ret);
+ trace_ioc(ioc, ret, "update");
return ret;
} /* asm_update_user_ioc() */
if (r)
r->r_status = ASM_SUBMITTED;
+ trace_req(r, 0, 0, "alloc");
+
return r;
} /* asm_request_alloc() */
static void asm_request_free(struct asm_request *r)
{
- /* FIXME: Clean up bh and buffer stuff */
+ trace_req(r, 0, 0, "free");
kmem_cache_free(asm_request_cachep, r);
} /* asm_request_free() */
struct asmfs_file_info *afi = r->r_file;
unsigned long flags;
- mlog_bug_on_msg(!afi, "Request 0x%p has no file pointer\n", r);
+ BUG_ON(!afi);
- mlog_entry("(0x%p)\n", r);
+ trace_req(r, 0, 0, "finish");
spin_lock_irqsave(&afi->f_lock, flags);
if (r->r_bio) {
- mlog(ML_REQUEST|ML_BIO,
- "Moving bio 0x%p from request 0x%p to the free list\n",
- r->r_bio, r);
+ trace_bio(r->r_bio, 0, "freelist");
r->r_bio->bi_private = afi->f_bio_free;
afi->f_bio_free = r->r_bio;
r->r_bio = NULL;
if (d) {
atomic_dec(&d->d_ios);
if (atomic_read(&d->d_ios) < 0) {
- mlog(ML_ERROR,
- "d_ios underflow on disk 0x%p (dev %X)\n",
- d, d->d_bdev->bd_dev);
+ pr_err("d_ios underflow on disk 0x%p (dev %X)\n",
+ d, d->d_bdev->bd_dev);
atomic_set(&d->d_ios, 0);
}
}
r->r_elapsed = ((jiffies - r->r_elapsed) * 1000000) / HZ;
- mlog(ML_REQUEST, "Finished request 0x%p\n", r);
-
wake_up(&afi->f_wait);
-
- mlog_exit_void();
} /* asm_finish_io() */
static void asm_end_ioc(struct asm_request *r, unsigned int bytes_done,
int error)
{
- mlog_entry("(0x%p, %u, %d)\n", r, bytes_done, error);
-
- mlog_bug_on_msg(!r, "No request\n");
-
- mlog_bug_on_msg(!(r->r_status & ASM_SUBMITTED),
- "Request 0x%p wasn't submitted\n", r);
+ BUG_ON(!r);
+ BUG_ON(!(r->r_status & ASM_SUBMITTED));
- mlog(ML_REQUEST,
- "Ending request 0x%p, bytes_done = %u, error = %d\n",
- r, bytes_done, error);
- mlog(ML_REQUEST|ML_BIO,
- "Ending request 0x%p, bio 0x%p, len = %u\n",
- r, r->r_bio,
- bytes_done + (r->r_bio ? r->r_bio->bi_iter.bi_size : 0));
+ trace_req(r, bytes_done, error, "end");
switch (error) {
default:
- mlog(ML_REQUEST|ML_ERROR,
- "Invalid error of %d on request 0x%p!\n",
- error, r);
+ pr_err("Invalid error of %d on request 0x%p!\n",
+ error, r);
r->r_error = ASM_ERR_INVAL;
r->r_status |= ASM_LOCAL_ERROR;
break;
}
asm_finish_io(r);
-
- mlog_exit_void();
} /* asm_end_ioc() */
{
struct asm_request *r;
- mlog_entry("(0x%p, %d)\n", bio, error);
-
- mlog(ML_BIO, "bio 0x%p, bi_size is %u\n", bio, bio->bi_iter.bi_size);
+ trace_bio(bio, error, "end_bio_io");
r = bio->bi_private;
- mlog(ML_REQUEST|ML_BIO,
- "Completed bio 0x%p for request 0x%p\n", bio, r);
if (atomic_dec_and_test(&r->r_bio_count)) {
asm_end_ioc(r, r->r_count - (r->r_bio ?
r->r_bio->bi_iter.bi_size : 0),
error);
}
-
- mlog_exit_void();
} /* asm_end_bio_io() */
static int asm_submit_io(struct file *file,
asm_ioc __user *user_iocp,
asm_ioc *ioc)
{
- int ret, rw = READ;
+ int ret = 0, rw = READ;
struct inode *inode = ASMFS_F2I(file);
struct asmdisk_find_inode_args args;
struct asm_request *r;
struct iov_iter iter;
struct iovec iov;
- mlog_entry("(0x%p, 0x%p, 0x%p)\n", file, user_iocp, ioc);
-
- if (!ioc) {
- mlog_exit(-EINVAL);
+ if (!ioc || ioc->status_asm_ioc)
return -EINVAL;
- }
-
- if (ioc->status_asm_ioc) {
- mlog_exit(-EINVAL);
- return -EINVAL;
- }
r = asm_request_alloc();
if (!r) {
- u16 status = ASM_FREE | ASM_ERROR | ASM_LOCAL_ERROR |
- ASM_BUSY;
- if (put_user(status, &(user_iocp->status_asm_ioc))) {
- mlog_exit(-EFAULT);
+ u16 status = ASM_FREE | ASM_ERROR | ASM_LOCAL_ERROR | ASM_BUSY;
+
+ if (put_user(status, &(user_iocp->status_asm_ioc)))
return -EFAULT;
- }
- if (put_user(ASM_ERR_NOMEM, &(user_iocp->error_asm_ioc))) {
- mlog_exit(-EFAULT);
+
+ if (put_user(ASM_ERR_NOMEM, &(user_iocp->error_asm_ioc)))
return -EFAULT;
- }
- mlog_exit(0);
return 0;
}
- mlog(ML_REQUEST,
- "New request at 0x%p alloc()ed for user ioc at 0x%p\n",
- r, user_iocp);
-
r->r_file = ASMFS_FILE(file);
r->r_ioc = user_iocp; /* Userspace asm_ioc */
+ trace_req(r, 0, 0, "submit");
spin_lock_irq(&ASMFS_FILE(file)->f_lock);
list_add(&r->r_list, &ASMFS_FILE(file)->f_ios);
spin_unlock_irq(&ASMFS_FILE(file)->f_lock);
- ret = -ENODEV;
args.fa_handle = (unsigned long)ioc->disk_asm_ioc &
~ASM_INTEGRITY_HANDLE_MASK;
args.fa_inode = ASMFS_I(inode);
disk_inode = ilookup5(asmdisk_mnt->mnt_sb,
(unsigned long)args.fa_handle,
asmdisk_test, &args);
- if (!disk_inode)
+ if (!disk_inode) {
+ ret = -ENODEV;
goto out_error;
+ }
spin_lock_irq(&ASMFS_I(inode)->i_lock);
/* It's in the middle of closing */
spin_unlock_irq(&ASMFS_I(inode)->i_lock);
iput(disk_inode);
+ ret = -ENODEV;
goto out_error;
}
r->r_count = ioc->rcount_asm_ioc * asm_block_size(bdev);
- /* linux only supports unsigned long size sector numbers */
- mlog(ML_IOC,
- "user_iocp 0x%p: first = 0x%llX, masked = 0x%08lX status = %u, buffer_asm_ioc = 0x%08lX, count = %lu\n",
- user_iocp,
- (unsigned long long)ioc->first_asm_ioc,
- (unsigned long)ioc->first_asm_ioc,
- ioc->status_asm_ioc,
- (unsigned long)ioc->buffer_asm_ioc,
- (unsigned long)r->r_count);
- /* Note that priority is ignored for now */
- ret = -EINVAL;
if (!ioc->buffer_asm_ioc ||
(ioc->buffer_asm_ioc != (unsigned long)ioc->buffer_asm_ioc) ||
(ioc->first_asm_ioc != (unsigned long)ioc->first_asm_ioc) ||
(ioc->rcount_asm_ioc != (unsigned long)ioc->rcount_asm_ioc) ||
(ioc->priority_asm_ioc > 7) ||
(r->r_count > (queue_max_sectors(bdev_get_queue(bdev)) << 9)) ||
- (r->r_count < 0))
+ (r->r_count < 0)) {
+ ret = -EINVAL;
goto out_error;
-
- /* Test device size, when known. (massaged from ll_rw_blk.c) */
- if (bdev->bd_inode->i_size >> 9) {
- sector_t maxsector = bdev->bd_inode->i_size >> 9;
- sector_t sector = (sector_t)ioc->first_asm_ioc;
- sector_t blks = (sector_t)ioc->rcount_asm_ioc;
-
- if (maxsector < blks || maxsector - blks < sector) {
- char b[BDEVNAME_SIZE];
- mlog(ML_NOTICE|ML_IOC,
- "Attempt to access beyond end of device\n");
- mlog(ML_NOTICE|ML_IOC,
- "dev %s: want=%llu, limit=%llu\n",
- bdevname(bdev, b),
- (unsigned long long)(sector + blks),
- (unsigned long long)maxsector);
- goto out_error;
- }
}
-
- mlog(ML_REQUEST|ML_IOC,
- "Request 0x%p (user_ioc 0x%p) passed validation checks\n",
- r, user_iocp);
-
if (bdev_get_integrity(bdev))
it = (struct oracleasm_integrity_v2 *)ioc->check_asm_ioc;
else
case ASM_READ:
rw = READ;
- if (it && asm_integrity_check(it, bdev) < 0)
+ if (it && asm_integrity_check(it, bdev) < 0) {
+ ret = -ENOMEM;
goto out_error;
+ }
break;
case ASM_WRITE:
rw = WRITE;
- if (it && asm_integrity_check(it, bdev) < 0)
+ if (it && asm_integrity_check(it, bdev) < 0) {
+ ret = -ENOMEM;
goto out_error;
+ }
break;
case ASM_NOOP:
/* Trigger an errorless completion */
r->r_count = 0;
- break;
+ goto out_error;
}
- /* Not really an error, but hey, it's an end_io call */
- ret = 0;
- if (r->r_count == 0)
- goto out_error;
-
- ret = -ENOMEM;
-
iov.iov_base = (void __user *)ioc->buffer_asm_ioc;
iov.iov_len = r->r_count;
iov_iter_init(&iter, rw, &iov, 1, r->r_count);
if (IS_ERR(r->r_bio)) {
ret = PTR_ERR(r->r_bio);
r->r_bio = NULL;
+ ret = -ENOMEM;
goto out_error;
}
r->r_bio->bi_bdev = bdev;
if (r->r_bio->bi_iter.bi_size != r->r_count) {
- mlog(ML_ERROR|ML_BIO, "Only mapped partial ioc buffer\n");
+ pr_err("%s: Only mapped partial ioc buffer\n", __func__);
bio_unmap_user(r->r_bio);
r->r_bio = NULL;
ret = -ENOMEM;
goto out_error;
}
- mlog(ML_BIO, "Mapped bio 0x%p to request 0x%p\n", r->r_bio, r);
-
/* Block layer always uses 512-byte sector addressing,
* regardless of logical and physical block size.
*/
ret = asm_integrity_map(it, r, rw == READ);
if (ret < 0) {
- mlog(ML_ERROR|ML_BIO,
- "Could not attach integrity payload\n");
+ pr_err("%s: Could not attach integrity payload\n",
+ __func__);
bio_unmap_user(r->r_bio);
+ ret = -ENOMEM;
goto out_error;
}
}
atomic_set(&r->r_bio_count, 1);
- mlog(ML_REQUEST|ML_BIO,
- "Submitting bio 0x%p for request 0x%p\n", r->r_bio, r);
submit_bio(rw, r->r_bio);
-out:
- ret = asm_update_user_ioc(file, r);
+out_error:
+ if (ret)
+ asm_end_ioc(r, 0, ret);
+ else
+ ret = asm_update_user_ioc(file, r);
- mlog_exit(ret);
- return ret;
+ trace_ioc(ioc, ret, "submit");
-out_error:
- mlog(ML_REQUEST, "Submit-side error %d for request 0x%p\n",
- ret, r);
- asm_end_ioc(r, 0, ret);
- goto out;
+ return ret;
} /* asm_submit_io() */
DECLARE_WAITQUEUE(wait, tsk);
DECLARE_WAITQUEUE(to_wait, tsk);
- mlog_entry("(0x%p, 0x%p, 0x%p)\n", file, iocp, to);
+ trace_ioc(iocp, 0, "maybe_wait");
if (copy_from_user(&p, &(iocp->reserved_asm_ioc),
- sizeof(p))) {
- ret = -EFAULT;
- goto out;
- }
+ sizeof(p)))
+ return -EFAULT;
- mlog(ML_REQUEST|ML_IOC, "User asm_ioc 0x%p has key 0x%p\n",
- iocp, (struct asm_request *)(unsigned long)p);
r = (struct asm_request *)(unsigned long)p;
- if (!r) {
- ret = -EINVAL;
- goto out;
- }
+ if (!r)
+ return -EINVAL;
spin_lock_irq(&afi->f_lock);
/* Is it valid? It's surely ugly */
if (!r->r_file || (r->r_file != afi) ||
list_empty(&r->r_list) || !(r->r_status & ASM_SUBMITTED)) {
spin_unlock_irq(&afi->f_lock);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
- mlog(ML_REQUEST|ML_IOC,
- "asm_request 0x%p is valid...we think\n", r);
- if (!(r->r_status & (ASM_COMPLETED |
- ASM_BUSY | ASM_ERROR))) {
+ if (!(r->r_status & (ASM_COMPLETED | ASM_BUSY | ASM_ERROR))) {
spin_unlock_irq(&afi->f_lock);
add_wait_queue(&afi->f_wait, &wait);
add_wait_queue(&to->wait, &to_wait);
break;
io_schedule();
if (signal_pending(tsk)) {
- mlog(ML_REQUEST,
- "Signal pending waiting for request 0x%p\n",
- r);
ret = -EINTR;
break;
}
remove_wait_queue(&afi->f_wait, &wait);
remove_wait_queue(&to->wait, &to_wait);
- if (ret)
- goto out;
+ return ret;
}
ret = 0;
* happens and we're safe.
*/
if (r->r_status & ASM_FREE)
- goto out; /* FIXME: Eek, holding lock */
- mlog_bug_on_msg(list_empty(&afi->f_complete),
- "Completion list is empty\n");
+ return 0;
+
+ BUG_ON(list_empty(&afi->f_complete)); /* Completion list is empty */
- mlog(ML_REQUEST|ML_IOC,
- "Removing request 0x%p for asm_ioc 0x%p\n", r, iocp);
+ trace_req(r, 0, 0, "delist");
list_del_init(&r->r_list);
r->r_file = NULL;
r->r_status |= ASM_FREE;
ret = asm_update_user_ioc(file, r);
- mlog(ML_REQUEST, "Freeing request 0x%p\n", r);
asm_request_free(r);
-out:
- mlog_exit(ret);
return ret;
} /* asm_maybe_wait_io() */
struct asm_request *r;
struct asmfs_file_info *afi = ASMFS_FILE(file);
- mlog_entry("(0x%p, 0x%p)\n", file, ioc);
-
spin_lock_irq(&afi->f_lock);
if (list_empty(&afi->f_complete)) {
spin_unlock_irq(&afi->f_lock);
*ioc = NULL;
- mlog_exit(0);
return 0;
}
spin_unlock_irq(&afi->f_lock);
*ioc = r->r_ioc;
+ trace_ioc(r->r_ioc, 0, "complete");
ret = asm_update_user_ioc(file, r);
asm_request_free(r);
- mlog_exit(ret);
return ret;
} /* asm_complete_io() */
DECLARE_WAITQUEUE(wait, tsk);
DECLARE_WAITQUEUE(to_wait, tsk);
- mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p)\n", file, io, to, status);
-
/* Early check - expensive stuff follows */
ret = -ETIMEDOUT;
if (to->timed_out)
remove_wait_queue(&to->wait, &to_wait);
out:
- mlog_exit(ret);
return ret;
} /* asm_wait_completion() */
asm_ioc *iocp;
asm_ioc tmp;
- mlog_entry("(0x%p, 0x%p)\n", file, io);
-
for (i = 0; i < io->io_reqlen; i++) {
ret = -EFAULT;
if (get_user(iocp,
if (copy_from_user(&tmp, iocp, sizeof(tmp)))
break;
- mlog(ML_IOC, "Submitting user asm_ioc 0x%p\n", iocp);
ret = asm_submit_io(file, iocp, &tmp);
if (ret)
break;
}
- mlog_exit(ret);
return ret;
} /* asm_submit_io_native() */
u32 i;
asm_ioc *iocp;
- mlog_entry("(0x%p, 0x%p, 0x%p)\n", file, io, to);
-
for (i = 0; i < io->io_waitlen; i++) {
if (get_user(iocp,
((asm_ioc **)((unsigned long)(io->io_waitreqs))) + i)) {
break;
}
- mlog_exit(ret);
return ret;
} /* asm_maybe_wait_io_native() */
u32 i;
asm_ioc *iocp;
- mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p)\n", file, io, to, status);
-
for (i = 0; i < io->io_complen; i++) {
ret = asm_complete_io(file, &iocp);
if (ret)
}
- mlog_exit(ret ? ret : i);
return (ret ? ret : i);
} /* asm_complete_ios_native() */
{
asm_ioc32 *ioc_32 = (asm_ioc32 *)ioc;
- mlog_entry("(0x%p)\n", ioc);
-
/*
* Promote the 32bit pointers at the end of the asm_ioc32
* into the asm_ioc64.
*
* Promotion must be done from the tail backwards.
*/
- mlog(ML_IOC, "Promoting (0x%X, 0x%X)\n",
- ioc_32->check_asm_ioc,
- ioc_32->buffer_asm_ioc);
ioc->check_asm_ioc = (u64)ioc_32->check_asm_ioc;
ioc->buffer_asm_ioc = (u64)ioc_32->buffer_asm_ioc;
- mlog(ML_IOC, "Promoted to (0x%"MLFu64", 0x%"MLFu64")\n",
- ioc->check_asm_ioc,
- ioc->buffer_asm_ioc);
-
- mlog_exit_void();
} /* asm_promote_64() */
asm_ioc32 *iocp;
asm_ioc tmp;
- mlog_entry("(0x%p, 0x%p)\n", file, io);
-
for (i = 0; i < io->io_reqlen; i++) {
ret = -EFAULT;
/*
asm_promote_64(&tmp);
- mlog(ML_IOC, "Submitting user asm_ioc 0x%p\n", iocp);
ret = asm_submit_io(file, (asm_ioc *)iocp, &tmp);
if (ret)
break;
}
- mlog_exit(ret);
return ret;
} /* asm_submit_io_thunk() */
u32 iocp_32;
asm_ioc *iocp;
- mlog_entry("(0x%p, 0x%p, 0x%p)\n", file, io, to);
-
for (i = 0; i < io->io_waitlen; i++) {
/*
* io->io_waitreqs is an asm_ioc32**, but the pointers
break;
}
- mlog_exit(ret);
return ret;
} /* asm_maybe_wait_io_thunk() */
u32 iocp_32;
asm_ioc *iocp;
- mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p)\n", file, io, to, status);
-
for (i = 0; i < io->io_complen; i++) {
ret = asm_complete_io(file, &iocp);
if (ret)
i--; /* Reset this completion */
}
- mlog_exit(ret ? ret : i);
return (ret ? ret : i);
} /* asm_complete_ios_thunk() */
u32 status = 0;
struct timeout to;
- mlog_entry("(0x%p, 0x%p, %d)\n", file, io, bpl);
-
init_timeout(&to);
if (io->io_timeout) {
struct timespec ts;
- mlog(ML_ABI, "Passed timeout 0x%"MLFu64"\n",
- io->io_timeout);
ret = -EFAULT;
if (asm_fill_timeout(&ts, (unsigned long)(io->io_timeout),
bpl))
ret = 0;
if (io->io_requests) {
- mlog(ML_ABI,
- "oracleasm_io_v2 has requests; reqlen %d\n",
- io->io_reqlen);
ret = -EINVAL;
if (bpl == ASM_BPL_32)
ret = asm_submit_io_32(file, io);
}
if (io->io_waitreqs) {
- mlog(ML_ABI, "oracleasm_io_v2 has waits; waitlen %d\n",
- io->io_waitlen);
ret = -EINVAL;
if (bpl == ASM_BPL_32)
ret = asm_maybe_wait_io_32(file, io, &to);
}
if (io->io_completions) {
- mlog(ML_ABI,
- "oracleasm_io_v2 has completes; complen %d\n",
- io->io_complen);
ret = -EINVAL;
if (bpl == ASM_BPL_32)
ret = asm_complete_ios_32(file, io, &to,
out:
if (put_user(status, (u32 *)(unsigned long)(io->io_statusp)))
ret = -EFAULT;
- mlog_exit(ret);
return ret;
} /* asm_do_io() */
struct asmfs_file_info *afi = ASMFS_FILE(file);
struct bio *bio;
- mlog_entry("(0x%p)\n", file);
-
spin_lock_irq(&afi->f_lock);
while (afi->f_bio_free) {
bio = afi->f_bio_free;
afi->f_bio_free = bio->bi_private;
spin_unlock_irq(&afi->f_lock);
- mlog(ML_BIO, "Unmapping bio 0x%p\n", bio);
+ trace_bio(bio, 0, "unmap");
asm_integrity_unmap(bio);
bio_unmap_user(bio);
spin_lock_irq(&afi->f_lock);
}
spin_unlock_irq(&afi->f_lock);
-
- mlog_exit_void();
}
static int asmfs_file_open(struct inode * inode, struct file * file)
{
struct asmfs_inode_info * aii;
- struct asmfs_file_info * afi;
-
- mlog_entry("(0x%p, 0x%p)\n", inode, file);
+ struct asmfs_file_info *afi;
- mlog_bug_on_msg(ASMFS_FILE(file),
- "Trying to reopen filp 0x%p\n", file);
+ BUG_ON(ASMFS_FILE(file));
- mlog(ML_ABI, "Opening filp 0x%p\n", file);
- afi = (struct asmfs_file_info *)kmalloc(sizeof(*afi),
- GFP_KERNEL);
- if (!afi) {
- mlog_exit(-ENOMEM);
+ afi = (struct asmfs_file_info *)kmalloc(sizeof(*afi), GFP_KERNEL);
+ if (!afi)
return -ENOMEM;
- }
afi->f_file = file;
afi->f_bio_free = NULL;
file->private_data = afi;
- mlog(ML_ABI, "Filp 0x%p has afi 0x%p\n", file, afi);
-
- mlog_exit(0);
return 0;
} /* asmfs_file_open() */
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
- mlog_entry("(0x%p, 0x%p)\n", inode, file);
-
aii = ASMFS_I(ASMFS_F2I(file));
afi = ASMFS_FILE(file);
- mlog(ML_ABI, "Release for filp 0x%p (afi = 0x%p)\n", file, afi);
-
/*
* Shouldn't need the lock, no one else has a reference
* asm_close_disk will need to take it when completing I/O
iput(&d->vfs_inode);
}
- mlog(ML_ABI|ML_REQUEST,
- "There are still I/Os hanging off of afi 0x%p\n",
- afi);
io_schedule();
} while (1);
set_task_state(tsk, TASK_RUNNING);
/* And cleanup any pages from those I/Os */
asm_cleanup_bios(file);
- mlog(ML_ABI, "Done with afi 0x%p from filp 0x%p\n", afi, file);
file->private_data = NULL;
kfree(afi);
- mlog_exit(0);
return 0;
} /* asmfs_file_release() */
struct oracleasm_abi_info *abi_info;
int ret;
- mlog_entry("(0x%p, 0x%p, %u)\n", file, buf, (unsigned int)size);
-
- if (size != sizeof(struct oracleasm_abi_info)) {
- mlog_exit(-EINVAL);
+ if (size != sizeof(struct oracleasm_abi_info))
return -EINVAL;
- }
abi_info = (struct oracleasm_abi_info *)buf;
if (!abi_info->ai_status)
abi_info->ai_status = ret;
- mlog_exit(size);
return size;
}
struct asmfs_sb_info *asb = ASMFS_SB(ASMFS_F2I(file)->i_sb);
int ret;
- mlog_entry("(0x%p, 0x%p, %u)\n", file, buf, (unsigned int)size);
-
- if (size != sizeof(struct oracleasm_get_iid_v2)) {
- mlog_exit(-EINVAL);
+ if (size != sizeof(struct oracleasm_get_iid_v2))
return -EINVAL;
- }
iid_info = (struct oracleasm_get_iid_v2 *)buf;
out:
iid_info->gi_abi.ai_status = ret;
- mlog_exit(size);
return size;
}
struct asmfs_sb_info *asb = ASMFS_SB(ASMFS_F2I(file)->i_sb);
int ret;
- mlog_entry("(0x%p, 0x%p, %u)\n", file, buf, (unsigned int)size);
-
- if (size != sizeof(struct oracleasm_get_iid_v2)) {
- mlog_exit(-EINVAL);
+ if (size != sizeof(struct oracleasm_get_iid_v2))
return -EINVAL;
- }
iid_info = (struct oracleasm_get_iid_v2 *)buf;
out:
iid_info->gi_abi.ai_status = ret;
- mlog_exit(size);
return size;
}
unsigned int lsecsz = 0;
int ret;
- mlog_entry("(0x%p, 0x%p, %u)\n", file, buf, (unsigned int)size);
-
- if (size != sizeof(struct oracleasm_query_disk_v2)) {
- mlog_exit(-EINVAL);
+ if (size != sizeof(struct oracleasm_query_disk_v2))
return -EINVAL;
- }
qd_info = (struct oracleasm_query_disk_v2 *)buf;
& ASM_LSECSZ_MASK;
}
- mlog(ML_ABI|ML_DISK,
- "Querydisk returning qd_max_sectors = %u and "
- "qd_hardsect_size = %u, lsecsz = %u, qd_integrity = %u\n",
- qd_info->qd_max_sectors, lsecsz, qd_info->qd_hardsect_size,
- asm_integrity_format(bdev));
+ trace_querydisk(bdev, qd_info);
ret = 0;
out:
qd_info->qd_abi.ai_status = ret;
- mlog_exit(size);
return size;
}
struct file *filp;
int ret;
- mlog_entry("(0x%p, 0x%p, %u)\n", file, buf, (unsigned int)size);
-
- if (size != sizeof(struct oracleasm_open_disk_v2)) {
- mlog_exit(-EINVAL);
+ if (size != sizeof(struct oracleasm_open_disk_v2))
return -EINVAL;
- }
if (copy_from_user(&od_info,
(struct oracleasm_open_disk_v2 __user *)buf,
- sizeof(struct oracleasm_open_disk_v2))) {
- mlog_exit(-EFAULT);
+ sizeof(struct oracleasm_open_disk_v2)))
return -EFAULT;
- }
od_info.od_handle = 0; /* Unopened */
asm_close_disk(file,
(unsigned long)od_info.od_handle);
/* Ignore close errors, this is the real error */
- mlog_exit(-EFAULT);
return -EFAULT;
}
- mlog_exit(size);
return size;
}
struct oracleasm_close_disk_v2 cd_info;
int ret;
- mlog_entry("(0x%p, 0x%p, %u)\n", file, buf, (unsigned int)size);
-
- if (size != sizeof(struct oracleasm_close_disk_v2)) {
- mlog_exit(-EINVAL);
+ if (size != sizeof(struct oracleasm_close_disk_v2))
return -EINVAL;
- }
if (copy_from_user(&cd_info,
(struct oracleasm_close_disk_v2 __user *)buf,
- sizeof(struct oracleasm_close_disk_v2))) {
- mlog_exit(-EFAULT);
+ sizeof(struct oracleasm_close_disk_v2)))
return -EFAULT;
- }
ret = asmfs_verify_abi(&cd_info.cd_abi);
if (ret)
cd_info.cd_abi.ai_status = ret;
if (copy_to_user((struct oracleasm_close_disk_v2 __user *)buf,
&cd_info,
- sizeof(struct oracleasm_close_disk_v2))) {
- mlog_exit(-EFAULT);
+ sizeof(struct oracleasm_close_disk_v2)))
return -EFAULT;
- }
- mlog_exit(size);
return size;
}
struct oracleasm_io_v2 io_info;
int ret;
- mlog_entry("(0x%p, 0x%p, %u)\n", file, buf, (unsigned int)size);
-
- if (size != sizeof(struct oracleasm_io_v2)) {
- mlog_exit(-EINVAL);
+ if (size != sizeof(struct oracleasm_io_v2))
return -EINVAL;
- }
if (copy_from_user(&io_info,
(struct oracleasm_io_v2 __user *)buf,
- sizeof(struct oracleasm_io_v2))) {
- mlog_exit(-EFAULT);
+ sizeof(struct oracleasm_io_v2)))
return -EFAULT;
- }
ret = asmfs_verify_abi(&io_info.io_abi);
if (ret)
out_error:
user_abi_info = (struct oracleasm_abi_info __user *)buf;
- if (put_user(ret, &(user_abi_info->ai_status))) {
- mlog_exit(-EFAULT);
+ if (put_user(ret, &(user_abi_info->ai_status)))
return -EFAULT;
- }
- mlog_exit(size);
return size;
}
struct oracleasm_io_v2 io_info;
int ret;
- mlog_entry("(0x%p, 0x%p, %u)\n", file, buf, (unsigned int)size);
-
- if (size != sizeof(struct oracleasm_io_v2)) {
- mlog_exit(-EINVAL);
+ if (size != sizeof(struct oracleasm_io_v2))
return -EINVAL;
- }
if (copy_from_user(&io_info,
(struct oracleasm_io_v2 __user *)buf,
- sizeof(struct oracleasm_io_v2))) {
- mlog_exit(-EFAULT);
+ sizeof(struct oracleasm_io_v2)))
return -EFAULT;
- }
ret = asmfs_verify_abi(&io_info.io_abi);
if (ret)
out_error:
user_abi_info = (struct oracleasm_abi_info __user *)buf;
- if (put_user(ret, &(user_abi_info->ai_status))) {
- mlog_exit(-EFAULT);
+ if (put_user(ret, &(user_abi_info->ai_status)))
return -EFAULT;
- }
- mlog_exit(size);
return size;
}
#endif /* BITS_PER_LONG == 64 */
asm_cleanup_bios(file);
user_abi_info = (struct oracleasm_abi_info __user *)buf;
- if (get_user(op, &((user_abi_info)->ai_type))) {
- mlog_exit(-EFAULT);
+ if (get_user(op, &((user_abi_info)->ai_type)))
return -EFAULT;
- }
switch (op) {
default:
goto out_diskcache;
}
- asm_proc = proc_mkdir(ASM_PROC_PATH, NULL);
- if (asm_proc == NULL) {
- pr_err("oracleasmfs: Unable to register proc directory\n");
- goto out_proc;
- }
-
- ret = mlog_init_proc(asm_proc);
- if (ret) {
- pr_err("oracleasmfs: Unable to register proc mlog\n");
- goto out_mlog;
- }
-
init_asmfs_dir_operations();
ret = register_filesystem(&asmfs_fs_type);
if (ret) {
return 0;
out_register:
- mlog_remove_proc(asm_proc);
-
-out_mlog:
- remove_proc_entry(ASM_PROC_PATH, NULL);
-
-out_proc:
destroy_asmdiskcache();
out_diskcache:
static void __exit exit_asmfs_fs(void)
{
unregister_filesystem(&asmfs_fs_type);
- mlog_remove_proc(asm_proc);
- remove_proc_entry(ASM_PROC_PATH, NULL);
destroy_asmdiskcache();
destroy_requestcache();
destroy_inodecache();
module_exit(exit_asmfs_fs)
MODULE_LICENSE("GPL");
MODULE_VERSION(ASM_MODULE_VERSION);
-MODULE_AUTHOR("Joel Becker <joel.becker@oracle.com>");
+MODULE_AUTHOR("Joel Becker, Martin K. Petersen <martin.petersen@oracle.com>");
MODULE_DESCRIPTION("Kernel driver backing the Generic Linux ASM Library.");
+++ /dev/null
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * Copyright (C) 2005 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- */
-
-#ifndef O2CLUSTER_MASKLOG_H
-#define O2CLUSTER_MASKLOG_H
-
-/*
- * For now this is a trivial wrapper around printk() that gives the critical
- * ability to enable sets of debugging output at run-time. In the future this
- * will almost certainly be redirected to relayfs so that it can pay a
- * substantially lower heisenberg tax.
- *
- * Callers associate the message with a bitmask and a global bitmask is
- * maintained with help from /proc. If any of the bits match the message is
- * output.
- *
- * We must have efficient bit tests on i386 and it seems gcc still emits crazy
- * code for the 64bit compare. It emits very good code for the dual unsigned
- * long tests, though, completely avoiding tests that can never pass if the
- * caller gives a constant bitmask that fills one of the longs with all 0s. So
- * the desire is to have almost all of the calls decided on by comparing just
- * one of the longs. This leads to having infrequently given bits that are
- * frequently matched in the high bits.
- *
- * _ERROR and _NOTICE are used for messages that always go to the console and
- * have appropriate KERN_ prefixes. We wrap these in our function instead of
- * just calling printk() so that this can eventually make its way through
- * relayfs along with the debugging messages. Everything else gets KERN_DEBUG.
- * The inline tests and macro dance give GCC the opportunity to quite cleverly
- * only emit the appropriage printk() when the caller passes in a constant
- * mask, as is almost always the case.
- *
- * All this bitmask nonsense is hidden from the /proc interface so that Joel
- * doesn't have an aneurism. Reading the file gives a straight forward
- * indication of which bits are on or off:
- * ENTRY off
- * EXIT off
- * ERROR off
- * NOTICE on
- *
- * Writing changes the state of a given bit and requires a strictly formatted
- * single write() call:
- *
- * write(fd, "ENTRY on", 8);
- *
- * would turn the entry bit on. "1" is also accepted in the place of "on", and
- * "off" and "0" behave as expected.
- *
- * Some trivial shell can flip all the bits on or off:
- *
- * log_mask="/proc/fs/oracleasm/log_mask"
- * cat $log_mask | (
- * while read bit status; do
- * # $1 is "on" or "off", say
- * echo "$bit $1" > $log_mask
- * done
- * )
- */
-
-/* for task_struct */
-#include <linux/sched.h>
-
-/* bits that are frequently given and infrequently matched in the low word */
-/* NOTE: If you add a flag, you need to also update mlog.c! */
-#define ML_ENTRY 0x0000000000000001ULL /* func call entry */
-#define ML_EXIT 0x0000000000000002ULL /* func call exit */
-#define ML_DISK 0x0000000000000004ULL /* Disk information */
-#define ML_REQUEST 0x0000000000000010ULL /* I/O requests */
-#define ML_BIO 0x0000000000000020ULL /* bios backing I/O */
-#define ML_IOC 0x0000000000000040ULL /* asm_iocs */
-#define ML_ABI 0x0000000000000100ULL /* ABI entry points */
-/* bits that are infrequently given and frequently matched in the high word */
-#define ML_ERROR 0x0000000100000000ULL /* sent to KERN_ERR */
-#define ML_NOTICE 0x0000000200000000ULL /* setn to KERN_NOTICE */
-
-#define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE)
-#define MLOG_INITIAL_NOT_MASK (ML_ENTRY|ML_EXIT)
-#ifndef MLOG_MASK_PREFIX
-#define MLOG_MASK_PREFIX 0
-#endif
-
-#define MLOG_MAX_BITS 64
-
-struct mlog_bits {
- unsigned long words[MLOG_MAX_BITS / BITS_PER_LONG];
-};
-
-extern struct mlog_bits mlog_and_bits, mlog_not_bits;
-
-#if BITS_PER_LONG == 32
-
-#define __mlog_test_u64(mask, bits) \
- ( (u32)(mask & 0xffffffff) & bits.words[0] || \
- ((u64)(mask) >> 32) & bits.words[1] )
-#define __mlog_set_u64(mask, bits) do { \
- bits.words[0] |= (u32)(mask & 0xffffffff); \
- bits.words[1] |= (u64)(mask) >> 32; \
-} while (0)
-#define __mlog_clear_u64(mask, bits) do { \
- bits.words[0] &= ~((u32)(mask & 0xffffffff)); \
- bits.words[1] &= ~((u64)(mask) >> 32); \
-} while (0)
-#define MLOG_BITS_RHS(mask) { \
- { \
- [0] = (u32)(mask & 0xffffffff), \
- [1] = (u64)(mask) >> 32, \
- } \
-}
-
-#else /* 32bit long above, 64bit long below */
-
-#define __mlog_test_u64(mask, bits) ((mask) & bits.words[0])
-#define __mlog_set_u64(mask, bits) do { \
- bits.words[0] |= (mask); \
-} while (0)
-#define __mlog_clear_u64(mask, bits) do { \
- bits.words[0] &= ~(mask); \
-} while (0)
-#define MLOG_BITS_RHS(mask) { { (mask) } }
-
-#endif
-
-/*
- * smp_processor_id() "helpfully" screams when called outside preemptible
- * regions in current kernels. sles doesn't have the variants that don't
- * scream. just do this instead of trying to guess which we're building
- * against.. *sigh*.
- */
-#define __mlog_cpu_guess ({ \
- unsigned long _cpu = get_cpu(); \
- put_cpu(); \
- _cpu; \
-})
-
-/* In the following two macros, the whitespace after the ',' just
- * before ##args is intentional. Otherwise, gcc 2.95 will eat the
- * previous token if args expands to nothing.
- */
-#define __mlog_printk(level, fmt, args...) \
- printk(level "(%u,%lu):%s:%d " fmt, current->pid, \
- __mlog_cpu_guess, __PRETTY_FUNCTION__, __LINE__ , \
- ##args)
-
-#define mlog(mask, fmt, args...) do { \
- u64 __m = MLOG_MASK_PREFIX | (mask); \
- if (__mlog_test_u64(__m, mlog_and_bits) && \
- !__mlog_test_u64(__m, mlog_not_bits)) { \
- if (__m & ML_ERROR) \
- __mlog_printk(KERN_ERR, "ERROR: "fmt , ##args); \
- else if (__m & ML_NOTICE) \
- __mlog_printk(KERN_NOTICE, fmt , ##args); \
- else __mlog_printk(KERN_INFO, fmt , ##args); \
- } \
-} while (0)
-
-#define mlog_errno(st) do { \
- if ((st) != -ERESTARTSYS && (st) != -EINTR) \
- mlog(ML_ERROR, "status = %lld\n", (long long)(st)); \
-} while (0)
-
-#define mlog_entry(fmt, args...) do { \
- mlog(ML_ENTRY, "ENTRY:" fmt , ##args); \
-} while (0)
-
-#define mlog_entry_void() do { \
- mlog(ML_ENTRY, "ENTRY:\n"); \
-} while (0)
-
-/* We disable this for old compilers since they don't have support for
- * __builtin_types_compatible_p.
- */
-#if (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)) && \
- !defined(__CHECKER__)
-#define mlog_exit(st) do { \
- if (__builtin_types_compatible_p(typeof(st), unsigned long)) \
- mlog(ML_EXIT, "EXIT: %lu\n", (unsigned long) (st)); \
- else if (__builtin_types_compatible_p(typeof(st), signed long)) \
- mlog(ML_EXIT, "EXIT: %ld\n", (signed long) (st)); \
- else if (__builtin_types_compatible_p(typeof(st), unsigned int) \
- || __builtin_types_compatible_p(typeof(st), unsigned short) \
- || __builtin_types_compatible_p(typeof(st), unsigned char)) \
- mlog(ML_EXIT, "EXIT: %u\n", (unsigned int) (st)); \
- else if (__builtin_types_compatible_p(typeof(st), signed int) \
- || __builtin_types_compatible_p(typeof(st), signed short) \
- || __builtin_types_compatible_p(typeof(st), signed char)) \
- mlog(ML_EXIT, "EXIT: %d\n", (signed int) (st)); \
- else if (__builtin_types_compatible_p(typeof(st), long long)) \
- mlog(ML_EXIT, "EXIT: %lld\n", (long long) (st)); \
- else \
- mlog(ML_EXIT, "EXIT: %llu\n", (unsigned long long) (st)); \
-} while (0)
-#else
-#define mlog_exit(st) do { \
- mlog(ML_EXIT, "EXIT: %lld\n", (long long) (st)); \
-} while (0)
-#endif
-
-#define mlog_exit_ptr(ptr) do { \
- mlog(ML_EXIT, "EXIT: %p\n", ptr); \
-} while (0)
-
-#define mlog_exit_void() do { \
- mlog(ML_EXIT, "EXIT\n"); \
-} while (0)
-
-#define mlog_bug_on_msg(cond, fmt, args...) do { \
- if (cond) { \
- mlog(ML_ERROR, "bug expression: " #cond "\n"); \
- mlog(ML_ERROR, fmt, ##args); \
- BUG(); \
- } \
-} while (0)
-
-#if (BITS_PER_LONG == 32) || defined(CONFIG_X86_64)
-#define MLFi64 "lld"
-#define MLFu64 "llu"
-#define MLFx64 "llx"
-#else
-#define MLFi64 "ld"
-#define MLFu64 "lu"
-#define MLFx64 "lx"
-#endif
-
-#include <linux/proc_fs.h>
-int mlog_init_proc(struct proc_dir_entry *parent);
-void mlog_remove_proc(struct proc_dir_entry *parent);
-
-#endif /* O2CLUSTER_MASKLOG_H */
--- /dev/null
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM oracleasm
+
+#if !defined(_TRACE_ORACLEASM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ORACLEASM_H
+
+#include <linux/tracepoint.h>
+
+struct asm_disk_info;
+struct asm_ioc64;
+
+#define show_status(flags) \
+ __print_flags(flags, "|", \
+ { ASM_BUSY , "BUSY" }, \
+ { ASM_SUBMITTED , "SUBM" }, \
+ { ASM_COMPLETED , "COMP" }, \
+ { ASM_FREE , "FREE" }, \
+ { ASM_CANCELLED , "CANCEL" }, \
+ { ASM_ERROR , "ERR" }, \
+ { ASM_WARN , "WARN" }, \
+ { ASM_PARTIAL , "PARTIAL" }, \
+ { ASM_BADKEY , "BADKEY" }, \
+ { ASM_BAD_DATA , "DATA" }, \
+ { ASM_LOCAL_ERROR , "LOCAL" })
+
+#define show_op(flags) \
+ __print_flags(flags, "|", \
+ { ASM_READ , "READ" }, \
+ { ASM_WRITE , "WRITE" })
+
+#define show_iflags(flags) \
+ __print_flags(flags, "|", \
+ { ASM_IFLAG_REMAPPED , "REMAP" }, \
+ { ASM_IFLAG_IP_CHECKSUM , "IP" }, \
+ { ASM_IFLAG_CTRL_NOCHECK, "!CTRL" }, \
+ { ASM_IFLAG_DISK_NOCHECK, "!DISK" })
+
+#define show_ifmt(flags) \
+ __print_flags(flags, "|", \
+ { ASM_IMODE_512_512 , "512N" }, \
+ { ASM_IMODE_512_4K , "512E" }, \
+ { ASM_IMODE_4K_4K , "4KN" }, \
+ { ASM_IFMT_IP_CHECKSUM , "IP" }, \
+ { ASM_IFMT_DISK , "DISK" }, \
+ { ASM_IFMT_ATO , "ATO" })
+
+TRACE_EVENT(disk,
+
+	TP_PROTO(struct asm_disk_info *d, const char *action),
+
+ TP_ARGS(d, action),
+
+ TP_STRUCT__entry(
+ __string(action , action )
+ __field(void * , disk )
+ __field(dev_t , dev )
+ ),
+
+ TP_fast_assign(
+ __assign_str(action, action);
+ __entry->disk = d;
+ __entry->dev = d->d_bdev ? d->d_bdev->bd_dev : 0;
+ ),
+
+ TP_printk("%-9s dsk=%p dev=%u:%u", __get_str(action), __entry->disk,
+ MAJOR(__entry->dev), MINOR(__entry->dev))
+);
+
+TRACE_EVENT(req,
+
+	TP_PROTO(struct asm_request *r, unsigned int done, int error, const char *action),
+
+ TP_ARGS(r, done, error, action),
+
+ TP_STRUCT__entry(
+ __string(action , action )
+ __field(void * , req )
+ __field(dev_t , dev )
+ __field(void * , ioc )
+ __field(unsigned int , bytes )
+ __field(unsigned int , done )
+ __field(int , error )
+ ),
+
+ TP_fast_assign(
+ __assign_str(action, action);
+ __entry->req = r;
+ __entry->dev = r->r_disk ? r->r_disk->d_bdev->bd_dev : 0;
+ __entry->ioc = r->r_ioc;
+ __entry->bytes = r->r_count;
+ __entry->done = done;
+ __entry->error = error;
+ ),
+
+ TP_printk("%-10s req=%p dev=%u:%u ioc=%p bytes=%u done=%u error=%d",
+ __get_str(action), __entry->req, MAJOR(__entry->dev),
+ MINOR(__entry->dev), __entry->ioc, __entry->bytes,
+ __entry->done, __entry->error)
+);
+
+TRACE_EVENT(bio,
+
+	TP_PROTO(struct bio *bio, int error, const char *action),
+
+ TP_ARGS(bio, error, action),
+
+ TP_STRUCT__entry(
+ __string(action , action )
+ __field(void * , bio )
+ __field(dev_t , dev )
+ __field(void * , req )
+ __field(int , error )
+ ),
+
+ TP_fast_assign(
+ __assign_str(action, action);
+ __entry->bio = bio;
+ __entry->dev = bio->bi_bdev ? bio->bi_bdev->bd_dev : 0;
+ __entry->req = bio->bi_private;
+ __entry->error = error;
+ ),
+
+ TP_printk("%-10s bio=%p dev=%u:%u req=%p error=%d",
+ __get_str(action), __entry->bio, MAJOR(__entry->dev),
+ MINOR(__entry->dev), __entry->req, __entry->error)
+);
+
+TRACE_EVENT(ioc,
+
+	TP_PROTO(struct _asm_ioc64 *ioc, int ret, const char *action),
+
+ TP_ARGS(ioc, ret, action),
+
+ TP_STRUCT__entry(
+ __string(action , action )
+ __field(void * , ioc )
+ __field(u8 , op )
+ __field(sector_t , block )
+ __field(unsigned int , count )
+ __field(u16 , status )
+ __field(s32 , error )
+ __field(s32 , warn )
+ __field(bool , integrity )
+ __field(int , ret )
+ ),
+
+ TP_fast_assign(
+ __assign_str(action, action);
+ __entry->ioc = ioc;
+ __entry->op = ioc->operation_asm_ioc;
+ __entry->block = ioc->first_asm_ioc;
+ __entry->count = ioc->rcount_asm_ioc;
+ __entry->status = ioc->status_asm_ioc;
+ __entry->error = ioc->error_asm_ioc;
+ __entry->warn = ioc->warn_asm_ioc ? true : false;
+ __entry->integrity = ioc->check_asm_ioc;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("%-10s ioc=%p op=%s block=%llu bytes=%u status=%s "
+ "error=%d warn=%d integrity=%u ret=%d",
+ __get_str(action), __entry->ioc, show_op(__entry->op),
+ (unsigned long long)__entry->block, __entry->count,
+ show_status(__entry->status), __entry->error, __entry->warn,
+ __entry->integrity, __entry->ret)
+);
+
+TRACE_EVENT(querydisk,
+
+ TP_PROTO(struct block_device *bdev, struct oracleasm_query_disk_v2 *qd),
+
+ TP_ARGS(bdev, qd),
+
+ TP_STRUCT__entry(
+ __field(void * , bdev )
+ __field(void * , qd )
+ __field(dev_t , dev )
+ __field(sector_t , max )
+ __field(unsigned int , pbs )
+ __field(unsigned int , lbs )
+ __field(unsigned char , integrity )
+ ),
+
+ TP_fast_assign(
+ __entry->bdev = bdev;
+ __entry->qd = qd;
+		__entry->dev = bdev->bd_dev;
+ __entry->max = qd->qd_max_sectors;
+ __entry->pbs = qd->qd_hardsect_size;
+ __entry->lbs = 1 << (qd->qd_feature >> ASM_LSECSZ_SHIFT);
+ __entry->integrity = qd->qd_feature & ASM_INTEGRITY_QDF_MASK;
+ ),
+
+ TP_printk(" dev=%u:%u max_blocks=%llu pbs=%u lbs=%u integrity=%s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->max, __entry->pbs, __entry->lbs,
+ show_ifmt(__entry->integrity))
+);
+
+TRACE_EVENT(integrity,
+
+ TP_PROTO(struct oracleasm_integrity_v2 *it,
+ struct asm_request *r,
+ unsigned int nr_pages),
+
+ TP_ARGS(it, r, nr_pages),
+
+ TP_STRUCT__entry(
+ __field(void * , ioc )
+ __field(unsigned int , bytes )
+ __field(unsigned int , pages )
+ __field(unsigned int , format )
+ __field(unsigned int , flags )
+ ),
+
+ TP_fast_assign(
+ __entry->ioc = r->r_ioc;
+ __entry->bytes = it->it_bytes;
+ __entry->pages = nr_pages;
+ __entry->format = it->it_format;
+ __entry->flags = it->it_flags;
+ ),
+
+ TP_printk(" ioc=%p prot_bytes=%u nr_pages=%u format=%s flags=%s",
+ __entry->ioc, __entry->bytes, __entry->pages,
+ show_ifmt(__entry->format), show_iflags(__entry->flags))
+);
+
+#endif /* _TRACE_ORACLEASM_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>