@@ -13,10 +13,13 @@
#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -99,6 +102,8 @@ enum state {
#define OVERRUN_ERROR_THRESHOLD 3
+static DEFINE_PER_CPU(struct freq_qos_request, qos_req);
+
struct dcmi_graph_entity {
struct v4l2_async_subdev asd;
@@ -133,6 +138,7 @@ struct stm32_dcmi {
struct resource *res;
struct reset_control *rstc;
int sequence;
+ int irq;
struct list_head buffers;
struct dcmi_buf *active;
@@ -173,6 +179,11 @@ struct stm32_dcmi {
struct media_device mdev;
struct media_pad vid_cap_pad;
struct media_pipeline pipeline;
+
+ u32 min_frequency;
+ cpumask_var_t boosted;
+ struct irq_affinity_notify notify;
+ struct mutex freq_lock;
};
static inline struct stm32_dcmi *notifier_to_dcmi(struct v4l2_async_notifier *n)
@@ -722,6 +733,156 @@ static void dcmi_pipeline_stop(struct stm32_dcmi *dcmi)
dcmi_pipeline_s_stream(dcmi, 0);
}
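+
+/*
+ * Read the optional device-tree property giving the minimum CPU
+ * frequency to request while streaming; default to no requirement.
+ */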
+static void dcmi_get_min_frequency(struct stm32_dcmi *dcmi)
+{
+ struct device_node *np = dcmi->mdev.dev->of_node;
+
+ dcmi->min_frequency = FREQ_QOS_MIN_DEFAULT_VALUE;
+
+ of_property_read_u32(np, "st,stm32-dcmi-min-frequency",
+ &dcmi->min_frequency);
+}
+
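+/*
+ * Called when the IRQ affinity changes while the notifier is
+ * registered: move the frequency QoS requests so that only CPUs
+ * which may handle the DCMI interrupt stay boosted.
+ */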
+static void dcmi_irq_notifier_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct stm32_dcmi *dcmi = container_of(notify,
+ struct stm32_dcmi,
+ notify);
+ struct cpufreq_policy *p;
+ int cpu;
+
+ mutex_lock(&dcmi->freq_lock);
+ /*
+ * For each currently boosted CPU, check whether it is still part
+ * of the new affinity mask; if not, remove its request.
+ */
+ for_each_cpu(cpu, dcmi->boosted) {
+ if (cpumask_test_cpu(cpu, mask))
+ continue;
+
+ p = cpufreq_cpu_get(cpu);
+ if (!p)
+ continue;
+
+ freq_qos_remove_request(&per_cpu(qos_req, cpu));
+ cpumask_andnot(dcmi->boosted, dcmi->boosted, p->cpus);
+
+ cpufreq_cpu_put(p);
+ }
+
+ /*
+ * For each CPU in the new affinity mask, add a request if it is
+ * not already boosted.
+ */
+ for_each_cpu(cpu, mask) {
+ if (cpumask_test_cpu(cpu, dcmi->boosted))
+ continue;
+
+ p = cpufreq_cpu_get(cpu);
+ if (!p)
+ continue;
+
+ freq_qos_add_request(&p->constraints, &per_cpu(qos_req, cpu),
+ FREQ_QOS_MIN, dcmi->min_frequency);
+ cpumask_or(dcmi->boosted, dcmi->boosted, p->cpus);
+ cpufreq_cpu_put(p);
+ }
+
+ mutex_unlock(&dcmi->freq_lock);
+}
+
+static void dcmi_irq_notifier_release(struct kref *ref)
+{
+ /*
+ * Required by the affinity notifier infrastructure; there is
+ * nothing to free here.
+ */
+}
+
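+/*
+ * At probe time, install a default (no-op) QoS request for each CPU
+ * that may handle the DCMI interrupt; streaming start then only has
+ * to update the request values.
+ */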
+static void dcmi_get_cpu_policy(struct stm32_dcmi *dcmi)
+{
+ struct cpufreq_policy *p;
+ int cpu;
+
+ if (!alloc_cpumask_var(&dcmi->boosted, GFP_KERNEL)) {
+ dev_warn(dcmi->dev, "Failed to allocate boosted cpumask\n");
+ return;
+ }
+
+ mutex_lock(&dcmi->freq_lock);
+
+ for_each_cpu(cpu, irq_get_affinity_mask(dcmi->irq)) {
+ if (cpumask_test_cpu(cpu, dcmi->boosted))
+ continue;
+
+ p = cpufreq_cpu_get(cpu);
+ if (!p)
+ continue;
+
+ freq_qos_add_request(&p->constraints, &per_cpu(qos_req, cpu),
+ FREQ_QOS_MIN, FREQ_QOS_MIN_DEFAULT_VALUE);
+
+ cpumask_or(dcmi->boosted, dcmi->boosted, p->cpus);
+
+ cpufreq_cpu_put(p);
+ }
+
+ mutex_unlock(&dcmi->freq_lock);
+}
+
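+/*
+ * Driver removal path: drop every QoS request still tracked in the
+ * boosted mask, then release the mask.
+ */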
+static void dcmi_put_cpu_policy(struct stm32_dcmi *dcmi)
+{
+ struct cpufreq_policy *p;
+ int cpu;
+
+ mutex_lock(&dcmi->freq_lock);
+
+ /*
+ * Iterate over the boosted mask rather than the current IRQ
+ * affinity: requests must be dropped even if the affinity has
+ * changed since they were added.
+ */
+ for_each_cpu(cpu, dcmi->boosted) {
+
+ p = cpufreq_cpu_get(cpu);
+ if (!p)
+ continue;
+
+ freq_qos_remove_request(&per_cpu(qos_req, cpu));
+ cpumask_andnot(dcmi->boosted, dcmi->boosted, p->cpus);
+
+ cpufreq_cpu_put(p);
+ }
+
+ free_cpumask_var(dcmi->boosted);
+
+ mutex_unlock(&dcmi->freq_lock);
+}
+
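+/*
+ * Update the active QoS requests to @freq and (un)register the IRQ
+ * affinity notifier, which is only needed while a boost is in place.
+ */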
+static void dcmi_set_min_frequency(struct stm32_dcmi *dcmi, s32 freq)
+{
+ struct irq_affinity_notify *notify = &dcmi->notify;
+ int cpu;
+
+ mutex_lock(&dcmi->freq_lock);
+
+ for_each_cpu(cpu, irq_get_affinity_mask(dcmi->irq)) {
+ if (!cpumask_test_cpu(cpu, dcmi->boosted))
+ continue;
+
+ if (!freq_qos_request_active(&per_cpu(qos_req, cpu)))
+ continue;
+
+ freq_qos_update_request(&per_cpu(qos_req, cpu), freq);
+ }
+
+ mutex_unlock(&dcmi->freq_lock);
+
+ if (freq != FREQ_QOS_MIN_DEFAULT_VALUE) {
+ notify->notify = dcmi_irq_notifier_notify;
+ notify->release = dcmi_irq_notifier_release;
+ irq_set_affinity_notifier(dcmi->irq, notify);
+ } else {
+ irq_set_affinity_notifier(dcmi->irq, NULL);
+ }
+}
+
static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
@@ -736,11 +897,13 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
goto err_release_buffers;
}
+ dcmi_set_min_frequency(dcmi, dcmi->min_frequency);
+
ret = media_pipeline_start(&dcmi->vdev->entity, &dcmi->pipeline);
if (ret < 0) {
dev_err(dcmi->dev, "%s: Failed to start streaming, media pipeline start error (%d)\n",
__func__, ret);
- goto err_pm_put;
+ goto err_drop_qos;
}
ret = dcmi_pipeline_start(dcmi);
@@ -835,7 +998,8 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
err_media_pipeline_stop:
media_pipeline_stop(&dcmi->vdev->entity);
-err_pm_put:
+err_drop_qos:
+ dcmi_set_min_frequency(dcmi, FREQ_QOS_MIN_DEFAULT_VALUE);
pm_runtime_put(dcmi->dev);
err_release_buffers:
@@ -863,6 +1027,8 @@ static void dcmi_stop_streaming(struct vb2_queue *vq)
media_pipeline_stop(&dcmi->vdev->entity);
+ dcmi_set_min_frequency(dcmi, FREQ_QOS_MIN_DEFAULT_VALUE);
+
spin_lock_irq(&dcmi->irqlock);
/* Disable interruptions */
@@ -1838,7 +2004,6 @@ static int dcmi_probe(struct platform_device *pdev)
struct vb2_queue *q;
struct dma_chan *chan;
struct clk *mclk;
- int irq;
int ret = 0;
match = of_match_device(of_match_ptr(stm32_dcmi_of_match), &pdev->dev);
@@ -1879,9 +2044,9 @@ static int dcmi_probe(struct platform_device *pdev)
dcmi->bus.bus_width = ep.bus.parallel.bus_width;
dcmi->bus.data_shift = ep.bus.parallel.data_shift;
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return irq ? irq : -ENXIO;
+ dcmi->irq = platform_get_irq(pdev, 0);
+ if (dcmi->irq <= 0)
+ return dcmi->irq ? dcmi->irq : -ENXIO;
dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!dcmi->res) {
@@ -1895,11 +2060,12 @@ static int dcmi_probe(struct platform_device *pdev)
return PTR_ERR(dcmi->regs);
}
- ret = devm_request_threaded_irq(&pdev->dev, irq, dcmi_irq_callback,
+ ret = devm_request_threaded_irq(&pdev->dev, dcmi->irq,
+ dcmi_irq_callback,
dcmi_irq_thread, IRQF_ONESHOT,
dev_name(&pdev->dev), dcmi);
if (ret) {
- dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
+ dev_err(&pdev->dev, "Unable to request irq %d\n", dcmi->irq);
return ret;
}
@@ -2022,6 +2188,9 @@ static int dcmi_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Probe done\n");
+ dcmi_get_min_frequency(dcmi);
+ dcmi_get_cpu_policy(dcmi);
+
platform_set_drvdata(pdev, dcmi);
pm_runtime_enable(&pdev->dev);
@@ -2049,6 +2218,8 @@ static int dcmi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
+ dcmi_put_cpu_policy(dcmi);
+
v4l2_async_notifier_unregister(&dcmi->notifier);
v4l2_async_notifier_cleanup(&dcmi->notifier);
media_entity_cleanup(&dcmi->vdev->entity);
Before starting streaming, set a cpufreq minimum frequency requirement.
The cpufreq governor will adapt the frequencies accordingly, which
minimizes the latency when handling interrupts.

The frequency requirement is retrieved from the device-tree node.

While streaming, be notified if the IRQ affinity changes, thanks to the
irq_affinity_notify callback.

Signed-off-by: Benjamin Gaignard <benjamin.gaignard@st.com>
---
version 3:
- add a cpumask field to track boosted CPUs
- add irq_affinity_notify callback
- protect cpumask field with a mutex

 drivers/media/platform/stm32/stm32-dcmi.c | 187 ++++++++++++++++++++++++++++--
 1 file changed, 179 insertions(+), 8 deletions(-)
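
For context, here is a minimal sketch of a device-tree fragment using
the new property. The node layout and the 650000 value are illustrative
assumptions, not part of this patch; only the
st,stm32-dcmi-min-frequency property is introduced here, and since its
value is handed straight to freq_qos it is expressed in kHz, like any
cpufreq frequency:

	&dcmi {
		status = "okay";
		/* Illustrative: request at least 650 MHz on the CPUs
		 * handling the DCMI interrupt while streaming.
		 */
		st,stm32-dcmi-min-frequency = <650000>;
	};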