 #define HV_PCI_MAP_ATTR_READ          0x01
 #define HV_PCI_MAP_ATTR_WRITE          0x02
+#define HV_PCI_MAP_ATTR_RELAXED_ORDER  0x04
 
 #define HV_PCI_DEVICE_BUILD(b,d,f)     \
        ((((b) & 0xff) << 16) | \
 
        u64 *pglist = p->pglist;
        unsigned long npages = p->npages;
 
+       /* VPCI maj=1, min=[0,1] only supports read and write */
+       if (vpci_major < 2)
+               prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
+
        while (npages != 0) {
                long num;
 
                                   unsigned long attrs)
 {
        unsigned long flags, order, first_page, npages, n;
+       unsigned long prot = 0;
        struct iommu *iommu;
        struct page *page;
        void *ret;
 
        npages = size >> IO_PAGE_SHIFT;
 
+       if (attrs & DMA_ATTR_WEAK_ORDERING)
+               prot = HV_PCI_MAP_ATTR_RELAXED_ORDER;
+
        nid = dev->archdata.numa_node;
        page = alloc_pages_node(nid, gfp, order);
        if (unlikely(!page))
        local_irq_save(flags);
 
        iommu_batch_start(dev,
-                         (HV_PCI_MAP_ATTR_READ |
+                         (HV_PCI_MAP_ATTR_READ | prot |
                           HV_PCI_MAP_ATTR_WRITE),
                          entry);
 
        if (direction != DMA_TO_DEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;
 
+       if (attrs & DMA_ATTR_WEAK_ORDERING)
+               prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;
+
        local_irq_save(flags);
 
        iommu_batch_start(dev, prot, entry);
        if (direction != DMA_TO_DEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;
 
+       if (attrs & DMA_ATTR_WEAK_ORDERING)
+               prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;
+
        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;