size_t i;
        unsigned long flags;
        struct arm_smmu_master *master;
-       struct arm_smmu_cmdq_batch cmds = {};
+       struct arm_smmu_cmdq_batch cmds;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_cmdq_ent cmd = {
                .opcode = CMDQ_OP_CFGI_CD,
                .cfgi   = {
                        .ssid   = ssid,
                        .leaf   = leaf,
                },
        };
 
+       cmds.num = 0;
+
        spin_lock_irqsave(&smmu_domain->devices_lock, flags);
        list_for_each_entry(master, &smmu_domain->devices, domain_head) {
                for (i = 0; i < master->num_streams; i++) {
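For context, the batch declared above embeds the full command buffer, so "cmds = {}" zero-fills the whole aggregate on every call. The definition below is quoted from arm-smmu-v3.h for reference (as of the tree this patch appears to target) and is not part of the diff; with CMDQ_BATCH_ENTRIES == BITS_PER_LONG and CMDQ_ENT_DWORDS == 2, that is roughly 1KB per call on arm64:

struct arm_smmu_cmdq_batch {
        u64                     cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS];
        int                     num;
};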
        unsigned long flags;
        struct arm_smmu_cmdq_ent cmd;
        struct arm_smmu_master *master;
-       struct arm_smmu_cmdq_batch cmds = {};
+       struct arm_smmu_cmdq_batch cmds;
 
        if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
                return 0;
 
        arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
 
+       cmds.num = 0;
+
        spin_lock_irqsave(&smmu_domain->devices_lock, flags);
        list_for_each_entry(master, &smmu_domain->devices, domain_head) {
                if (!master->ats_enabled)
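The only member the batching code reads before writing is num: each slot of cmds[] is rebuilt from scratch by arm_smmu_cmdq_build_cmd() (which starts with a memset of the slot) before the batch is issued. The helper below is quoted approximately from arm-smmu-v3.c for reference, not as part of the diff:

static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
                                    struct arm_smmu_cmdq_batch *cmds,
                                    struct arm_smmu_cmdq_ent *cmd)
{
        if (cmds->num == CMDQ_BATCH_ENTRIES) {
                arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false);
                cmds->num = 0;
        }
        arm_smmu_cmdq_build_cmd(&cmds->cmds[cmds->num * CMDQ_ENT_DWORDS], cmd);
        cmds->num++;
}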
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        unsigned long end = iova + size, num_pages = 0, tg = 0;
        size_t inv_range = granule;
-       struct arm_smmu_cmdq_batch cmds = {};
+       struct arm_smmu_cmdq_batch cmds;
 
        if (!size)
                return;
                num_pages = size >> tg;
        }
 
+       cmds.num = 0;
+
        while (iova < end) {
                if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
                        /*
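To see why initializing only num is safe, here is a minimal stand-alone model of the batching pattern. The names (cmdq_batch, build_cmd, batch_add) and the opcode value are illustrative, not the kernel's; only the shape of the pattern is taken from the code above:

/*
 * Stand-alone sketch, not kernel code: every slot in cmds[] is fully
 * written by build_cmd() before anything reads it, so only the counter
 * needs a defined starting value.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define BATCH_ENTRIES   64
#define ENT_DWORDS      2

struct cmdq_batch {
        uint64_t cmds[BATCH_ENTRIES * ENT_DWORDS];      /* ~1KB on LP64 */
        int num;
};

static void build_cmd(uint64_t *cmd, uint64_t opcode)
{
        /* Mirrors the kernel's per-command rebuild: the slot is zeroed
         * here anyway, making any earlier zeroing of the array redundant. */
        memset(cmd, 0, ENT_DWORDS * sizeof(uint64_t));
        cmd[0] = opcode;
}

static void batch_add(struct cmdq_batch *b, uint64_t opcode)
{
        build_cmd(&b->cmds[b->num * ENT_DWORDS], opcode);
        b->num++;
}

int main(void)
{
        struct cmdq_batch cmds;         /* uninitialized, as in the patch */

        cmds.num = 0;                   /* the only member read before write */
        batch_add(&cmds, 0x5);          /* illustrative CFGI_CD-style opcode */
        printf("batched %d command(s)\n", cmds.num);
        return 0;
}

The practical effect of the patch is that the compiler no longer has to emit a memset of the large aggregate on each of these invalidation paths.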