iommu/arm-smmu: Add support for deferred SMR programming

The SMMU driver currently programs the SMRs with a client's SID and mask
pair early during bootup, or while a client device binds with its
driver. This is not ideal in environments where the SMRs may already
contain the same SID and mask pair, where that pair was programmed into
the SMRs by another VM. An example of this is when two VMs share the same
hardware block and ownership of said hardware block is mutually
exclusive between both VMs and can be transferred dynamically.

To avoid stream matching conflict faults, there needs to be coordination
between the VMs so that only one of the two identical SMRs is valid at
any point in time.

Thus, add support for the qcom,iommu-defer-smr-config property.
When present, this property will defer configuring the SMRs for an IOMMU
client to a valid state until a time of the client driver's choosing.
The client driver can alter the validity of the SMRs through the
qcom_iommu_sid_switch() function.

Change-Id: I4505a81ac640d9f87a26d4d136b77089fb01a9c3
Signed-off-by: Isaac J. Manjarres <quic_isaacm@quicinc.com>
This commit is contained in:
Isaac J. Manjarres
2021-11-12 17:42:40 -08:00
committed by Gerrit - the friendly Code Review server
parent 27f0dff20d
commit 7010e5aadf

View File

@@ -1760,12 +1760,31 @@ static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
return true;
}
/*
 * Resolve the device-tree node that carries this client's SMMU
 * properties: the node referenced by "qcom,iommu-group" when present,
 * otherwise the device's own OF node.  Returns NULL for devices that
 * have no OF node at all.
 */
static struct device_node *arm_smmu_get_of_node(struct device *dev)
{
	struct device_node *group_np;

	if (!dev->of_node)
		return NULL;

	group_np = of_parse_phandle(dev->of_node, "qcom,iommu-group", 0);
	if (group_np)
		return group_np;

	return dev->of_node;
}
/*
 * Check whether this client opted out of having its SMRs marked valid
 * at allocation time via the "qcom,iommu-defer-smr-config" DT property.
 * When deferred, validity is driven later by qcom_iommu_sid_switch().
 */
static bool dev_defer_smr_configuration(struct device *dev)
{
	return of_property_read_bool(arm_smmu_get_of_node(dev),
				     "qcom,iommu-defer-smr-config");
}
static int arm_smmu_master_alloc_smes(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
struct arm_smmu_device *smmu = cfg->smmu;
struct arm_smmu_smr *smrs = smmu->smrs;
bool config_smrs = !dev_defer_smr_configuration(dev);
int i, idx, ret;
mutex_lock(&smmu->stream_map_mutex);
@@ -1787,7 +1806,11 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
if (smrs && smmu->s2crs[idx].count == 0) {
smrs[idx].id = sid;
smrs[idx].mask = mask;
smrs[idx].valid = true;
smrs[idx].valid = config_smrs;
} else if (smrs) {
WARN_ON(smrs[idx].valid != config_smrs);
ret = -EINVAL;
goto out_err;
}
smmu->s2crs[idx].count++;
cfg->smendx[i] = (s16)idx;
@@ -1854,17 +1877,6 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
return 0;
}
/*
 * NOTE(review): duplicate of arm_smmu_get_of_node() — this copy is the
 * one the patch removes; callers are redirected to the arm_smmu_ variant.
 * Returns the "qcom,iommu-group" phandle target if set, else the
 * device's own OF node, or NULL when the device has no OF node.
 */
static struct device_node *arm_iommu_get_of_node(struct device *dev)
{
struct device_node *np;
if (!dev->of_node)
return NULL;
np = of_parse_phandle(dev->of_node, "qcom,iommu-group", 0);
return np ? np : dev->of_node;
}
static int arm_smmu_setup_default_domain(struct device *dev,
struct iommu_domain *domain)
{
@@ -1874,7 +1886,7 @@ static int arm_smmu_setup_default_domain(struct device *dev,
u32 val;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
np = arm_iommu_get_of_node(dev);
np = arm_smmu_get_of_node(dev);
if (!np)
return 0;
@@ -2615,14 +2627,12 @@ static int __arm_smmu_sid_switch(struct device *dev, void *data)
return 0;
smmu = cfg->smmu;
mutex_lock(&smmu->stream_map_mutex);
for_each_cfg_sme(cfg, fwspec, i, idx) {
if (dir == SID_RELEASE) {
arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), 0);
arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), 0);
} else {
arm_smmu_write_sme(smmu, idx);
}
smmu->smrs[idx].valid = dir == SID_ACQUIRE;
arm_smmu_write_sme(smmu, idx);
}
mutex_unlock(&smmu->stream_map_mutex);
return 0;
}