diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index e8d7dbe495f030..97e78a351cf35b 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2775,14 +2775,15 @@ void arm_smmu_domain_inv_range(struct arm_smmu_domain *smmu_domain,
 	rcu_read_unlock();
 }
 
-static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
-					 unsigned long iova, size_t granule,
-					 void *cookie)
+static void arm_smmu_tlb_inv_range_nosync(struct iommu_iotlb_gather *gather,
+					  unsigned long iova, size_t size,
+					  size_t granule, void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
 	struct iommu_domain *domain = &smmu_domain->domain;
 
-	iommu_iotlb_gather_add_page(domain, gather, iova, granule);
+	iommu_iotlb_gather_add_range_pgsize(domain, gather, iova, size,
+					    granule);
 }
 
 static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
@@ -2796,7 +2797,7 @@ static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
 static const struct iommu_flush_ops arm_smmu_flush_ops = {
 	.tlb_flush_all	= arm_smmu_tlb_inv_context,
 	.tlb_flush_walk = arm_smmu_tlb_inv_walk,
-	.tlb_add_page	= arm_smmu_tlb_inv_page_nosync,
+	.tlb_add_range	= arm_smmu_tlb_inv_range_nosync,
 };
 
 static bool arm_smmu_dbm_capable(struct arm_smmu_device *smmu)
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 40e33257d3c2c5..87292a7f094687 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -596,6 +596,10 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
 
 		__arm_v7s_set_pte(ptep, 0, num_entries, &iop->cfg);
 
+		if (!iommu_iotlb_gather_queued(gather))
+			iommu_iotlb_gather_add_range(gather, iova,
+						     num_entries * blk_size);
+
 		for (i = 0; i < num_entries; i++) {
 			if (ARM_V7S_PTE_IS_TABLE(pte[i], lvl)) {
 				/* Also flush any partial walks */
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 0208e5897c299a..d51531330f8dea 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -666,9 +666,21 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 		/* Clear the remaining entries */
 		__arm_lpae_clear_pte(ptep, &iop->cfg, i);
 
-		if (gather && !iommu_iotlb_gather_queued(gather))
-			for (int j = 0; j < i; j++)
-				io_pgtable_tlb_add_page(iop, gather, iova + j * size, size);
+		if (gather && !iommu_iotlb_gather_queued(gather)) {
+			if (iop->cfg.tlb && iop->cfg.tlb->tlb_add_range) {
+				iop->cfg.tlb->tlb_add_range(gather, iova,
+							    i * size, size,
+							    iop->cookie);
+			} else {
+				iommu_iotlb_gather_add_range(gather, iova,
+							     i * size);
+
+				for (int j = 0; j < i; j++)
+					io_pgtable_tlb_add_page(iop, gather,
+								iova + j * size,
+								size);
+			}
+		}
 
 		return i * size;
 	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c
index cbc5d6aa2daa23..75d699dc28e7b0 100644
--- a/drivers/iommu/io-pgtable-dart.c
+++ b/drivers/iommu/io-pgtable-dart.c
@@ -330,6 +330,9 @@ static size_t dart_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
 		i++;
 	}
 
+	if (i && !iommu_iotlb_gather_queued(gather))
+		iommu_iotlb_gather_add_range(gather, iova, i * pgsize);
+
 	return i * pgsize;
 }
 
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 2be990c108de2b..a2f80a92f51f2c 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -828,7 +828,6 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain,
 {
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
 
-	iommu_iotlb_gather_add_range(gather, iova, pgsize * pgcount);
 	return dom->iop->unmap_pages(dom->iop, iova, pgsize, pgcount, gather);
 }
 
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index c1a34445d244fb..893ea67d322644 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -340,6 +340,7 @@ static size_t sprd_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 	spin_lock_irqsave(&dom->pgtlock, flags);
 	memset(pgt_base_iova, 0, pgcount * sizeof(u32));
 	spin_unlock_irqrestore(&dom->pgtlock, flags);
+	iommu_iotlb_gather_add_range(iotlb_gather, iova, size);
 
 	return size;
 }
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index be3f1ce696ba29..b9aa4bbc82acad 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -655,6 +655,7 @@ static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova
 
 	memset(pte_addr, 0, sizeof(*pte_addr));
 	sun50i_table_flush(sun50i_domain, pte_addr, 1);
+	iommu_iotlb_gather_add_range(gather, iova, SZ_4K);
 
 	return SZ_4K;
 }
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 587fc13197f122..5865b8f6c6e67a 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -897,6 +897,8 @@ static size_t viommu_unmap_pages(struct iommu_domain *domain, unsigned long iova
 	if (unmapped < size)
 		return 0;
 
+	iommu_iotlb_gather_add_range(gather, iova, unmapped);
+
 	/* Device already removed all mappings after detach. */
 	if (!vdomain->nr_endpoints)
 		return unmapped;
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index e19872e37e067f..b109c95b5ff53d 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -42,6 +42,9 @@ struct iommu_flush_ops {
 			       void *cookie);
 	void (*tlb_add_page)(struct iommu_iotlb_gather *gather,
 			     unsigned long iova, size_t granule, void *cookie);
+	void (*tlb_add_range)(struct iommu_iotlb_gather *gather,
+			      unsigned long iova, size_t size, size_t granule,
+			      void *cookie);
 };
 
 /**
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index e587d4ac4d3310..d8fcdb61e44c42 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -1034,30 +1034,31 @@ static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gathe
 }
 
 /**
- * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
+ * iommu_iotlb_gather_add_range_pgsize - Include pgsize in the gather
  * @domain: IOMMU domain to be invalidated
  * @gather: TLB gather data
- * @iova: start of page to invalidate
- * @size: size of page to invalidate
+ * @iova: start of the range to invalidate
+ * @size: size of the range to invalidate
+ * @pgsize: page granularity of the invalidation
  *
- * Helper for IOMMU drivers to build invalidation commands based on individual
- * pages, or with page size/table level hints which cannot be gathered if they
- * differ.
+ * Helper for IOMMU drivers to build invalidation commands when using the pgsize
+ * hint. Unlike iommu_iotlb_gather_add_range() this also flushes if the range is
+ * disjoint.
  */
-static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
-					       struct iommu_iotlb_gather *gather,
-					       unsigned long iova, size_t size)
+static inline void iommu_iotlb_gather_add_range_pgsize(
+	struct iommu_domain *domain, struct iommu_iotlb_gather *gather,
+	unsigned long iova, size_t size, size_t pgsize)
 {
 	/*
 	 * If the new page is disjoint from the current range or is mapped at
 	 * a different granularity, then sync the TLB so that the gather
 	 * structure can be rewritten.
 	 */
-	if ((gather->pgsize && gather->pgsize != size) ||
+	if ((gather->pgsize && gather->pgsize != pgsize) ||
 	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
 		iommu_iotlb_sync(domain, gather);
 
-	gather->pgsize = size;
+	gather->pgsize = pgsize;
 	iommu_iotlb_gather_add_range(gather, iova, size);
 }
 
