@@ -276,6 +276,26 @@ static struct dma_info sh_dmac_info = {
.flags = DMAC_CHANNELS_TEI_CAPABLE,
};
+#if defined(CONFIG_CPU_SUBTYPE_SH7723) || defined(CONFIG_CPU_SUBTYPE_SH7724)
+static int sh_dma_notifier(struct notifier_block *nb, unsigned long val,
+			   void *args)
+{
+	if (!in_nmi())
+		return NOTIFY_DONE;
+
+	/*
+	 * An NMI usually stops all active DMA transfers.
+	 * Reset the DMA channels so that the transfers can be resumed.
+	 */
+	dmaor_reset(0);
+	dmaor_reset(1);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block sh_dma_nb = {
+	.notifier_call = sh_dma_notifier,
+	.priority = 0,
+};
+#endif
+
#ifdef CONFIG_CPU_SH4
static unsigned int get_dma_error_irq(int n)
{
@@ -330,6 +350,12 @@ static int __init sh_dmac_init(void)
return i;
#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7723) || defined(CONFIG_CPU_SUBTYPE_SH7724)
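+	/* Hook into the die notifier chain so NMIs reach sh_dma_notifier() */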
+	i = register_die_notifier(&sh_dma_nb);
+	if (unlikely(i != 0))
+		return i;
+#endif
+
return register_dmac(info);
}
@@ -338,6 +364,10 @@ static void __exit sh_dmac_exit(void)
{
#ifdef CONFIG_CPU_SH4
int n;
+#if defined(CONFIG_CPU_SUBTYPE_SH7723) || defined(CONFIG_CPU_SUBTYPE_SH7724)
+	unregister_die_notifier(&sh_dma_nb);
+#endif
+
for (n = 0; n < NR_DMAE; n++) {
free_irq(get_dma_error_irq(n), (void *)dmae_name[n]);
}
@@ -849,6 +849,22 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
}
#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7723) || defined(CONFIG_CPU_SUBTYPE_SH7724)
+static int dmae_notifier_call(struct notifier_block *nb, unsigned long val,
+			      void *args)
+{
+	struct sh_dmae_device *shdev = container_of(nb, struct sh_dmae_device,
+						    nb);
+
+	if (!in_nmi())
+		return NOTIFY_DONE;
+
+	/*
+	 * An NMI usually stops all active DMA transfers.
+	 * Reset the DMA channels so that the transfers can be resumed.
+	 */
+	sh_dmae_rst(shdev);
+
+	return NOTIFY_OK;
+}
+#endif
+
static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
@@ -1085,6 +1101,18 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7723) || defined(CONFIG_CPU_SUBTYPE_SH7724)
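+	/* NMI handling: dmae_notifier_call() resets this controller */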
+	shdev->nb.notifier_call = dmae_notifier_call;
+	shdev->nb.priority = 0;
+
+	err = register_die_notifier(&shdev->nb);
+	if (err) {
+		dev_err(&pdev->dev, "Registering NMI notifier failed\n");
+		goto eirqres;
+	}
+#endif
+
if (chanirq_res->start == chanirq_res->end &&
!platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
/* Special case - all multiplexed */
@@ -1157,6 +1185,10 @@ static int __exit sh_dmae_remove(struct platform_device *pdev)
struct resource *res;
int errirq = platform_get_irq(pdev, 0);
+#if defined(CONFIG_CPU_SUBTYPE_SH7723) || defined(CONFIG_CPU_SUBTYPE_SH7724)
+	unregister_die_notifier(&shdev->nb);
+#endif
+
dma_async_device_unregister(&shdev->common);
if (errirq > 0)
@@ -43,6 +43,9 @@ struct sh_dmae_device {
struct dma_device common;
struct sh_dmae_chan *chan[SH_DMAC_MAX_CHANNELS];
struct sh_dmae_pdata *pdata;
+#if defined(CONFIG_CPU_SUBTYPE_SH7723) || defined(CONFIG_CPU_SUBTYPE_SH7724)
+	struct notifier_block nb;	/* die-chain (NMI) notifier */
+#endif
u32 __iomem *chan_reg;
u16 __iomem *dmars;
};