NVIDIA: SAUCE: iommu/arm-smmu-v3: add suspend/resume support

- Add pm ops with suspend and resume functions.
- Split the reset function into IRQ setup and IRQ reset: arm_smmu_setup_irqs()
  registers the irq handlers and is called once, during boot;
  arm_smmu_reset_irqs() clears irq status and enables irqs, and is called
  during both boot and resume.
- The SMMU can be in translation or passthrough mode. Save the mode in the
  bypass member of the arm_smmu_device structure and use it during resume to
  re-enable the same mode.

http://nvbugs/4267541
http://nvbugs/4638077
http://nvbugs/5088995

Signed-off-by: Pritesh Raithatha <praithatha@nvidia.com>
Signed-off-by: Ashish Mhetre <amhetre@nvidia.com>
Signed-off-by: Vishwaroop A <va@nvidia.com>
Acked-by: Noah Wager <noah.wager@canonical.com>
Acked-by: Jacob Martin <jacob.martin@canonical.com>
Signed-off-by: Noah Wager <noah.wager@canonical.com>
commit be979fd7a1
parent 11a6051481
committed by Noah Wager
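For orientation, the resume path added by this patch boils down to the condensed
sketch below. It is assembled from the diff that follows (error handling and the
rest of arm_smmu_device_reset() are omitted) and assumes the existing
arm-smmu-v3 driver definitions; it is not a standalone implementation.

    /*
     * Condensed sketch of the resume path added by this patch; identifiers
     * come from the diff below and the existing arm-smmu-v3 driver.
     */
    static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
    {
            struct arm_smmu_device *smmu = dev_get_drvdata(dev);

            /*
             * Re-run the device reset. The irq handlers were already
             * registered by arm_smmu_setup_irqs() at probe time;
             * arm_smmu_reset_irqs(), called from arm_smmu_device_reset(),
             * only clears irq status and re-enables interrupt generation.
             */
            return arm_smmu_device_reset(smmu);
    }

    static const struct dev_pm_ops arm_smmu_pm_ops = {
            /* Resume-only wiring: no suspend callback is installed here. */
            SET_SYSTEM_SLEEP_PM_OPS(NULL, arm_smmu_runtime_resume)
    };

Inside arm_smmu_device_reset(), the smmu->bypass flag saved at probe time
("Set bypass mode according to firmware probing result") decides whether
CR0_SMMUEN is set again or the SMMU is left in bypass, so the pre-suspend
operating mode is restored on resume.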
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -3159,15 +3159,6 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
 	int ret, nvec = ARM_SMMU_MAX_MSIS;
 	struct device *dev = smmu->dev;
 
-	/* Clear the MSI address regs */
-	writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
-	writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);
-
-	if (smmu->features & ARM_SMMU_FEAT_PRI)
-		writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
-	else
-		nvec--;
-
 	if (!(smmu->features & ARM_SMMU_FEAT_MSI))
 		return;
 
@@ -3176,6 +3167,9 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
 		return;
 	}
 
+	if (!(smmu->features & ARM_SMMU_FEAT_PRI))
+		nvec--;
+
 	/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
 	ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
 	if (ret) {
@@ -3237,9 +3231,9 @@ static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
 	}
 }
 
-static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
+static int arm_smmu_reset_irqs(struct arm_smmu_device *smmu)
 {
-	int ret, irq;
+	int ret;
 	u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;
 
 	/* Disable IRQs first */
@@ -3250,7 +3244,35 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
 		return ret;
 	}
 
-	irq = smmu->combined_irq;
+	if (!smmu->combined_irq) {
+		/*
+		 * Clear the MSI address regs. These registers will be reset
+		 * in arm_smmu_write_msi_msg callback function by irq_domain
+		 * upon a new MSI message.
+		 */
+		writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
+		writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);
+
+		if (smmu->features & ARM_SMMU_FEAT_PRI)
+			writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
+	}
+
+	if (smmu->features & ARM_SMMU_FEAT_PRI)
+		irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
+
+	/* Enable interrupt generation on the SMMU */
+	ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
+				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
+	if (ret)
+		dev_warn(smmu->dev, "failed to enable irqs\n");
+
+	return ret;
+}
+
+static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
+{
+	int ret = 0, irq = smmu->combined_irq;
+
 	if (irq) {
 		/*
 		 * Cavium ThunderX2 implementation doesn't support unique irq
@@ -3266,16 +3288,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
 	} else
 		arm_smmu_setup_unique_irqs(smmu);
 
-	if (smmu->features & ARM_SMMU_FEAT_PRI)
-		irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
-
-	/* Enable interrupt generation on the SMMU */
-	ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
-				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
-	if (ret)
-		dev_warn(smmu->dev, "failed to enable irqs\n");
-
-	return 0;
+	return ret;
 }
 
 static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
@@ -3289,7 +3302,7 @@ static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
 	return ret;
 }
 
-static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
+static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
 {
 	int ret;
 	u32 reg, enables;
@@ -3397,9 +3410,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
 		}
 	}
 
-	ret = arm_smmu_setup_irqs(smmu);
+	ret = arm_smmu_reset_irqs(smmu);
 	if (ret) {
-		dev_err(smmu->dev, "failed to setup irqs\n");
+		dev_err(smmu->dev, "failed to reset irqs\n");
 		return ret;
 	}
 
@@ -3407,7 +3420,7 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
 	enables &= ~(CR0_EVTQEN | CR0_PRIQEN);
 
 	/* Enable the SMMU interface, or ensure bypass */
-	if (!bypass || disable_bypass) {
+	if (!smmu->bypass || disable_bypass) {
 		enables |= CR0_SMMUEN;
 	} else {
 		ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
@@ -3799,7 +3812,6 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 	resource_size_t ioaddr;
 	struct arm_smmu_device *smmu;
 	struct device *dev = &pdev->dev;
-	bool bypass;
 
 	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
 	if (!smmu)
@@ -3815,7 +3827,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 	}
 
 	/* Set bypass mode according to firmware probing result */
-	bypass = !!ret;
+	smmu->bypass = !!ret;
 
 	/* Base address */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -3878,8 +3890,12 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 	/* Check for RMRs and install bypass STEs if any */
 	arm_smmu_rmr_install_bypass_ste(smmu);
 
+	ret = arm_smmu_setup_irqs(smmu);
+	if (ret)
+		return ret;
+
 	/* Reset the device */
-	ret = arm_smmu_device_reset(smmu, bypass);
+	ret = arm_smmu_device_reset(smmu);
 	if (ret)
 		goto err_disable;
 
@@ -3936,10 +3952,22 @@ static void arm_smmu_driver_unregister(struct platform_driver *drv)
 	platform_driver_unregister(drv);
 }
 
+static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
+{
+	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
+
+	return arm_smmu_device_reset(smmu);
+}
+
+static const struct dev_pm_ops arm_smmu_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(NULL, arm_smmu_runtime_resume)
+};
+
 static struct platform_driver arm_smmu_driver = {
 	.driver = {
 		.name = "arm-smmu-v3",
 		.of_match_table = arm_smmu_of_match,
+		.pm = &arm_smmu_pm_ops,
 		.suppress_bind_attrs = true,
 	},
 	.probe = arm_smmu_device_probe,
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -685,6 +685,8 @@ struct arm_smmu_device {
 
 	struct rb_root streams;
 	struct mutex streams_mutex;
+
+	bool bypass;
 };
 
 struct arm_smmu_stream {