* Return: %0 on success, negative error code otherwise
*/
static int pci_mmap_resource(struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
struct vm_area_struct *vma, int sparse)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
}
static int pci_mmap_resource_sparse(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
struct vm_area_struct *vma)
{
return pci_mmap_resource(kobj, attr, vma, 1);
}
static int pci_mmap_resource_dense(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
struct vm_area_struct *vma)
{
return pci_mmap_resource(kobj, attr, vma, 0);
}
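
With the callback prototype const-qualified, the attribute objects themselves can be declared const and placed in read-only memory. A minimal sketch of the wiring, reusing the sparse wrapper above; example_sparse_attr, the "resource0_sparse" name, and example_publish() are hypothetical, not part of this patch:

#include <linux/sysfs.h>

/* Hypothetical attribute: name and mode are illustrative only. */
static const struct bin_attribute example_sparse_attr = {
	.attr	= { .name = "resource0_sparse", .mode = 0600 },
	.mmap	= pci_mmap_resource_sparse,
};

/* Registration is unchanged: sysfs_create_bin_file() already takes a
 * const struct bin_attribute pointer. */
static int example_publish(struct kobject *kobj)
{
	return sysfs_create_bin_file(kobj, &example_sparse_attr);
}
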
static int global_mmio_mmap(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
struct vm_area_struct *vma)
{
struct ocxl_afu *afu = to_afu(kobj_to_dev(kobj));
static DEVICE_ATTR_RO(published);
static int p2pmem_alloc_mmap(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, struct vm_area_struct *vma)
+ const struct bin_attribute *attr, struct vm_area_struct *vma)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
size_t len = vma->vm_end - vma->vm_start;
* memory space.
*/
static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
struct vm_area_struct *vma)
{
struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
 * memory space. Returns -ENOSYS if the operation isn't supported.
*/
static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
struct vm_area_struct *vma)
{
struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
*
* Use the regular PCI mapping routines to map a PCI resource into userspace.
*/
-static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
+static int pci_mmap_resource(struct kobject *kobj, const struct bin_attribute *attr,
struct vm_area_struct *vma, int write_combine)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
}
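
The function body is elided above; as a rough sketch of what the write_combine argument controls, here is a hypothetical helper (example_mmap_bar and the validation shown are illustrative; the in-tree code performs more checks and delegates to PCI core helpers):

#include <linux/pci.h>
#include <linux/mm.h>

/* Hypothetical sketch of the write-combine vs. uncached mapping choice. */
static int example_mmap_bar(struct pci_dev *pdev, int bar,
			    struct vm_area_struct *vma, int write_combine)
{
	unsigned long pfn = pci_resource_start(pdev, bar) >> PAGE_SHIFT;
	size_t len = vma->vm_end - vma->vm_start;

	/* Refuse mappings that run past the end of the BAR. */
	if (vma->vm_pgoff + (len >> PAGE_SHIFT) >
	    (pci_resource_len(pdev, bar) >> PAGE_SHIFT))
		return -EINVAL;

	if (write_combine)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return io_remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
				  len, vma->vm_page_prot);
}
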
static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
struct vm_area_struct *vma)
{
return pci_mmap_resource(kobj, attr, vma, 0);
}
static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
struct vm_area_struct *vma)
{
return pci_mmap_resource(kobj, attr, vma, 1);
static int
intel_pmt_mmap(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, struct vm_area_struct *vma)
+ const struct bin_attribute *attr, struct vm_area_struct *vma)
{
struct intel_pmt_entry *entry = container_of(attr,
struct intel_pmt_entry,
* The ring buffer is allocated as contiguous memory by vmbus_open
*/
static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
struct vm_area_struct *vma)
{
struct vmbus_channel *channel
char *, loff_t, size_t);
loff_t (*llseek)(struct file *, struct kobject *, struct bin_attribute *,
loff_t, int);
- int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
+ int (*mmap)(struct file *, struct kobject *, const struct bin_attribute *attr,
struct vm_area_struct *vma);
};
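
For reference, a hypothetical callback conforming to the updated member (example_mmap is illustrative only); the point of the change is that implementations may read the attribute but can no longer modify it:

#include <linux/sysfs.h>
#include <linux/mm.h>

/* Hypothetical callback matching the const-qualified prototype. */
static int example_mmap(struct file *filp, struct kobject *kobj,
			const struct bin_attribute *attr,
			struct vm_area_struct *vma)
{
	/* attr->size may still be consulted; writes to *attr are ruled out. */
	if (vma->vm_end - vma->vm_start > PAGE_ALIGN(attr->size))
		return -EINVAL;

	return -ENOSYS;	/* the actual mapping step is omitted in this sketch */
}
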