diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index c90b1bcf311b..f37b39801f69 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -2213,6 +2213,14 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, cfg->cbndx = ret; + if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_GEOMETRY))) { + /* Geometry is not set; use the default geometry */ + domain->geometry.aperture_start = 0; + domain->geometry.aperture_end = (1UL << ias) - 1; + if (domain->geometry.aperture_end >= SZ_1G * 4ULL) + domain->geometry.aperture_end = (SZ_1G * 4ULL) - 1; + } + if (arm_smmu_is_slave_side_secure(smmu_domain)) { smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) { .quirks = quirks, @@ -2261,12 +2269,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap; domain->geometry.aperture_end = (1UL << ias) - 1; domain->geometry.force_aperture = true; - if (smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST)) { - domain->geometry.aperture_start = - smmu_domain->pgtbl_cfg.iova_base; - domain->geometry.aperture_end = - smmu_domain->pgtbl_cfg.iova_end; - } /* Assign an asid */ ret = arm_smmu_init_asid(domain, smmu); @@ -3792,7 +3794,6 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, ret = 0; break; } - case DOMAIN_ATTR_CB_STALL_DISABLE: if (*((int *)data)) smmu_domain->attributes |= @@ -3805,6 +3806,44 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, 1 << DOMAIN_ATTR_NO_CFRE; ret = 0; break; + case DOMAIN_ATTR_GEOMETRY: { + struct iommu_domain_geometry *geometry = + (struct iommu_domain_geometry *)data; + + if (smmu_domain->smmu != NULL) { + dev_err(smmu_domain->smmu->dev, + "cannot set geometry attribute while attached\n"); + ret = -EBUSY; + break; + } + + if (geometry->aperture_start >= SZ_1G * 4ULL || + geometry->aperture_end >= SZ_1G * 4ULL) { + pr_err("fastmap does not support IOVAs >= 4GB\n"); + ret = -EINVAL; + break; + } + if 
(smmu_domain->attributes + & (1 << DOMAIN_ATTR_GEOMETRY)) { + if (geometry->aperture_start + < domain->geometry.aperture_start) + domain->geometry.aperture_start = + geometry->aperture_start; + + if (geometry->aperture_end + > domain->geometry.aperture_end) + domain->geometry.aperture_end = + geometry->aperture_end; + } else { + smmu_domain->attributes |= 1 << DOMAIN_ATTR_GEOMETRY; + domain->geometry.aperture_start = + geometry->aperture_start; + domain->geometry.aperture_end = geometry->aperture_end; + } + ret = 0; + break; + } + default: ret = -ENODEV; } diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c index 68e97f1e88d2..ec4ba8670b9a 100644 --- a/drivers/iommu/dma-mapping-fast.c +++ b/drivers/iommu/dma-mapping-fast.c @@ -173,7 +173,9 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping, iommu_tlbiall(mapping->domain); mapping->have_stale_tlbs = false; - av8l_fast_clear_stale_ptes(mapping->pgtbl_ops, mapping->base, + av8l_fast_clear_stale_ptes(mapping->pgtbl_ops, + mapping->domain->geometry.aperture_start, + mapping->base, mapping->base + mapping->size - 1, skip_sync); } @@ -799,7 +801,7 @@ static const struct dma_map_ops fast_smmu_dma_ops = { * * Creates a mapping structure which holds information about used/unused IO * address ranges, which is required to perform mapping with IOMMU aware - * functions. The only VA range supported is [0, 4GB). + * functions. The only VA range supported is [0, 4GB]. * * The client device need to be attached to the mapping with * fast_smmu_attach_device function. 
diff --git a/drivers/iommu/io-pgtable-fast.c b/drivers/iommu/io-pgtable-fast.c index cdcf77097abf..8f26083ba76a 100644 --- a/drivers/iommu/io-pgtable-fast.c +++ b/drivers/iommu/io-pgtable-fast.c @@ -45,6 +45,7 @@ struct av8l_fast_io_pgtable { struct page **pages; /* page table memory */ int nr_pages; dma_addr_t base; + dma_addr_t start; dma_addr_t end; }; @@ -184,13 +185,13 @@ static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep) } void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops, u64 base, - u64 end, bool skip_sync) + u64 start, u64 end, bool skip_sync) { int i; struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops); - av8l_fast_iopte *pmdp = data->pmds; + av8l_fast_iopte *pmdp = iopte_pmd_offset(data->pmds, base, start); - for (i = base >> AV8L_FAST_PAGE_SHIFT; + for (i = start >> AV8L_FAST_PAGE_SHIFT; i <= (end >> AV8L_FAST_PAGE_SHIFT); ++i) { if (!(*pmdp & AV8L_FAST_PTE_VALID)) { *pmdp = 0; @@ -730,7 +731,7 @@ static int __init av8l_fast_positive_testing(void) } /* sweep up TLB proving PTEs */ - av8l_fast_clear_stale_ptes(ops, base, max, false); + av8l_fast_clear_stale_ptes(ops, base, base, max, false); /* map the entire 4GB VA space with 8K map calls */ for (iova = base; iova < max; iova += SZ_8K) { @@ -751,7 +752,7 @@ static int __init av8l_fast_positive_testing(void) } /* sweep up TLB proving PTEs */ - av8l_fast_clear_stale_ptes(ops, base, max, false); + av8l_fast_clear_stale_ptes(ops, base, base, max, false); /* map the entire 4GB VA space with 16K map calls */ for (iova = base; iova < max; iova += SZ_16K) { @@ -772,7 +773,7 @@ static int __init av8l_fast_positive_testing(void) } /* sweep up TLB proving PTEs */ - av8l_fast_clear_stale_ptes(ops, base, base, max, false); /* map the entire 4GB VA space with 64K map calls */ for (iova = base; iova < max; iova += SZ_64K) { diff --git a/include/linux/io-pgtable-fast.h b/include/linux/io-pgtable-fast.h index b97f27b08b0c..1d5e993c9d22 
100644 --- a/include/linux/io-pgtable-fast.h +++ b/include/linux/io-pgtable-fast.h @@ -75,8 +75,8 @@ av8l_fast_iova_to_phys_public(struct io_pgtable_ops *ops, */ #define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0xa -void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops, u64 base, u64 end, - bool skip_sync); +void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops, u64 base, + u64 start, u64 end, bool skip_sync); void av8l_register_notify(struct notifier_block *nb); #else /* !CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB */ @@ -85,6 +85,7 @@ void av8l_register_notify(struct notifier_block *nb); static inline void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops, u64 base, + u64 start, u64 end, bool skip_sync) {