        its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
 }
 
+static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
+{
+       its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
+}
+
+static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
+{
+       its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
+}
+
+static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
+{
+       its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
+}
+
+static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
+                                       u32 vpe_db_lpi)
+{
+       its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
+}
+
 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
 {
        /* Let's fixup BE commands */
                                           struct its_cmd_block *cmd,
                                           struct its_cmd_desc *desc)
 {
-       unsigned long vpt_addr;
+       unsigned long vpt_addr, vconf_addr;
        u64 target;
-
-       vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
-       target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
+       bool alloc;
 
        its_encode_cmd(cmd, GITS_CMD_VMAPP);
        its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
        its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
+
+       if (!desc->its_vmapp_cmd.valid) {
+               if (is_v4_1(its)) {
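+                       /*
+                        * Alloc is only set when the last ITS drops its
+                        * mapping of this VPE (vmapp_count reaches zero).
+                        */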
+                       alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
+                       its_encode_alloc(cmd, alloc);
+               }
+
+               goto out;
+       }
+
+       vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
+       target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
+
        its_encode_target(cmd, target);
        its_encode_vpt_addr(cmd, vpt_addr);
        its_encode_vpt_size(cmd, LPI_NRBITS - 1);
 
+       if (!is_v4_1(its))
+               goto out;
+
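+       /* The VCONF address is that of the VM's vLPI property table */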
+       vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
+
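+       /* Alloc is only set by the first ITS to map this VPE */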
+       alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
+
+       its_encode_alloc(cmd, alloc);
+
+       /* We can only signal PTZ when alloc==1. Why do we have two bits? */
+       its_encode_ptz(cmd, alloc);
+       its_encode_vconf_addr(cmd, vconf_addr);
+       its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
+
+out:
        its_fixup_cmd(cmd);
 
        return valid_vpe(its, desc->its_vmapp_cmd.vpe);
 
        vpe->vpe_id = vpe_id;
        vpe->vpt_page = vpt_page;
-       vpe->vpe_proxy_event = -1;
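+       /* GICv4.1 tracks a per-VPE map count instead of a proxy event */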
+       if (gic_rdists->has_rvpeid)
+               atomic_set(&vpe->vmapp_count, 0);
+       else
+               vpe->vpe_proxy_event = -1;
 
        return 0;
 }
 
        irq_hw_number_t         vpe_db_lpi;
        /* VPE resident */
        bool                    resident;
-       /* VPE proxy mapping */
-       int                     vpe_proxy_event;
+       union {
+               /* GICv4.0 implementations */
+               struct {
+                       /* VPE proxy mapping */
+                       int     vpe_proxy_event;
+                       /* Implementation Defined Area Invalid */
+                       bool    idai;
+               };
+               /* GICv4.1 implementations */
+               struct {
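+                       /* Number of ITSs this VPE is mapped on */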
+                       atomic_t vmapp_count;
+               };
+       };
+
        /*
         * This collection ID is used to indirect the target
         * redistributor for this VPE. The ID itself isn't involved in
        u16                     col_idx;
        /* Unique (system-wide) VPE identifier */
        u16                     vpe_id;
-       /* Implementation Defined Area Invalid */
-       bool                    idai;
        /* Pending VLPIs on schedule out? */
        bool                    pending_last;
 };