* [PATCH] irqchip/irq-ath79-intc: add irq cascade driver for QCA9556 SoCs
From: John Crispin @ 2018-05-07 13:37 UTC (permalink / raw)
To: Thomas Gleixner, Jason Cooper, Marc Zyngier
Cc: linux-kernel, linux-mips, John Crispin
The QCA ATH79 MIPS target is being converted to pure OF. Right now the
platform code sets up the IRQ cascade found on the QCA9556 and newer
SoCs and uses fixed IRQ numbers for the peripherals attached to the
cascade. This patch adds a proper driver based on the code previously
located inside arch/mips/ath79/irq.c.
Signed-off-by: John Crispin <john@phrozen.org>
---
drivers/irqchip/Makefile | 1 +
drivers/irqchip/irq-ath79-intc.c | 108 +++++++++++++++++++++++++++++++++++++++
2 files changed, 109 insertions(+)
create mode 100644 drivers/irqchip/irq-ath79-intc.c
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index d27e3e3619e0..f63c94a92e25 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_IRQCHIP) += irqchip.o

obj-$(CONFIG_ALPINE_MSI) += irq-alpine-msi.o
obj-$(CONFIG_ATH79) += irq-ath79-cpu.o
+obj-$(CONFIG_ATH79) += irq-ath79-intc.o
obj-$(CONFIG_ATH79) += irq-ath79-misc.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
diff --git a/drivers/irqchip/irq-ath79-intc.c b/drivers/irqchip/irq-ath79-intc.c
new file mode 100644
index 000000000000..ba15b1ac98b3
--- /dev/null
+++ b/drivers/irqchip/irq-ath79-intc.c
@@ -0,0 +1,108 @@
+/*
+ * Atheros QCA955X specific interrupt cascade handling
+ *
+ * Copyright (C) 2018 John Crispin <john@phrozen.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+
+#define ATH79_MAX_INTC_CASCADE 3
+
+struct ath79_intc {
+ struct irq_chip chip;
+ u32 irq;
+ u32 pending_mask;
+ u32 irq_mask[ATH79_MAX_INTC_CASCADE];
+};
+
+static void ath79_intc_irq_handler(struct irq_desc *desc)
+{
+ struct irq_domain *domain = irq_desc_get_handler_data(desc);
+ struct ath79_intc *intc = domain->host_data;
+ u32 pending;
+
+ pending = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
+ pending &= intc->pending_mask;
+
+ if (pending) {
+ int i;
+
+ for (i = 0; i < domain->hwirq_max; i++)
+ if (pending & intc->irq_mask[i])
+ generic_handle_irq(irq_find_mapping(domain, i));
+ } else {
+ spurious_interrupt();
+ }
+}
+
+static void ath79_intc_irq_unmask(struct irq_data *d)
+{
+}
+
+static void ath79_intc_irq_mask(struct irq_data *d)
+{
+}
+
+static int ath79_intc_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct ath79_intc *intc = d->host_data;
+
+ irq_set_chip_and_handler(irq, &intc->chip, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ath79_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = ath79_intc_map,
+};
+
+static int __init qca9556_intc_of_init(
+ struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *domain;
+ struct ath79_intc *intc;
+ int cnt, i;
+
+ cnt = of_property_count_u32_elems(node, "qcom,pending-bits");
+ if (cnt > ATH79_MAX_INTC_CASCADE)
+ panic("Too many INTC pending bits\n");
+
+ intc = kzalloc(sizeof(*intc), GFP_KERNEL);
+ if (!intc)
+ panic("Failed to allocate INTC memory\n");
+ intc->chip.name = "INTC";
+ intc->chip.irq_unmask = ath79_intc_irq_unmask,
+ intc->chip.irq_mask = ath79_intc_irq_mask,
+
+ of_property_read_u32_array(node, "qcom,pending-bits", intc->irq_mask,
+ cnt);
+ for (i = 0; i < cnt; i++)
+ intc->pending_mask |= intc->irq_mask[i];
+
+ intc->irq = irq_of_parse_and_map(node, 0);
+ if (!intc->irq)
+ panic("Failed to get INTC IRQ");
+
+ domain = irq_domain_add_linear(node, cnt, &ath79_irq_domain_ops,
+ intc);
+ irq_set_chained_handler_and_data(intc->irq, ath79_intc_irq_handler,
+ domain);
+
+ return 0;
+}
+IRQCHIP_DECLARE(qca9556_intc, "qcom,qca9556-intc",
+ qca9556_intc_of_init);
--
2.11.0
* Re: [PATCH] irqchip/irq-ath79-intc: add irq cascade driver for QCA9556 SoCs
From: Marc Zyngier @ 2018-05-08 7:39 UTC (permalink / raw)
To: John Crispin, Thomas Gleixner, Jason Cooper; +Cc: linux-kernel, linux-mips
Hi John,
On 07/05/18 14:37, John Crispin wrote:
> The QCA ATH79 MIPS target is being converted to pure OF. Right now the
> platform code sets up the IRQ cascade found on the QCA9556 and newer
> SoCs and uses fixed IRQ numbers for the peripherals attached to the
> cascade. This patch adds a proper driver based on the code previously
> located inside arch/mips/ath79/irq.c.
>
> Signed-off-by: John Crispin <john@phrozen.org>
> ---
> drivers/irqchip/Makefile | 1 +
> drivers/irqchip/irq-ath79-intc.c | 108 +++++++++++++++++++++++++++++++++++++++
> 2 files changed, 109 insertions(+)
> create mode 100644 drivers/irqchip/irq-ath79-intc.c
>
> diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
> index d27e3e3619e0..f63c94a92e25 100644
> --- a/drivers/irqchip/Makefile
> +++ b/drivers/irqchip/Makefile
> @@ -3,6 +3,7 @@ obj-$(CONFIG_IRQCHIP) += irqchip.o
>
> obj-$(CONFIG_ALPINE_MSI) += irq-alpine-msi.o
> obj-$(CONFIG_ATH79) += irq-ath79-cpu.o
> +obj-$(CONFIG_ATH79) += irq-ath79-intc.o
> obj-$(CONFIG_ATH79) += irq-ath79-misc.o
> obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
> obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
> diff --git a/drivers/irqchip/irq-ath79-intc.c b/drivers/irqchip/irq-ath79-intc.c
> new file mode 100644
> index 000000000000..ba15b1ac98b3
> --- /dev/null
> +++ b/drivers/irqchip/irq-ath79-intc.c
> @@ -0,0 +1,108 @@
> +/*
> + * Atheros QCA955X specific interrupt cascade handling
> + *
> + * Copyright (C) 2018 John Crispin <john@phrozen.org>
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License version 2 as published
> + * by the Free Software Foundation.
> + */
> +
> +#include <linux/interrupt.h>
> +#include <linux/irqchip.h>
> +#include <linux/of.h>
> +#include <linux/of_irq.h>
> +#include <linux/irqdomain.h>
> +
> +#include <asm/irq_cpu.h>
> +#include <asm/mach-ath79/ath79.h>
> +#include <asm/mach-ath79/ar71xx_regs.h>
> +
> +#define ATH79_MAX_INTC_CASCADE 3
Why 3? Is that a property of the HW? Or could it be inferred from the DT?
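If it can come from the DT, something along these lines would avoid the
fixed bound (a rough, untested sketch only; it adds an nr_irqs field and
a flexible array member that the current patch doesn't have):

	struct ath79_intc {
		struct irq_chip chip;
		u32 irq;
		u32 pending_mask;
		int nr_irqs;
		u32 irq_mask[];	/* one entry per "qcom,pending-bits" cell */
	};

	...

	cnt = of_property_count_u32_elems(node, "qcom,pending-bits");
	if (cnt < 0)
		return cnt;

	intc = kzalloc(sizeof(*intc) + cnt * sizeof(u32), GFP_KERNEL);
	if (!intc)
		return -ENOMEM;
	intc->nr_irqs = cnt;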
> +
> +struct ath79_intc {
> + struct irq_chip chip;
> + u32 irq;
> + u32 pending_mask;
> + u32 irq_mask[ATH79_MAX_INTC_CASCADE];
> +};
> +
> +static void ath79_intc_irq_handler(struct irq_desc *desc)
> +{
> + struct irq_domain *domain = irq_desc_get_handler_data(desc);
> + struct ath79_intc *intc = domain->host_data;
> + u32 pending;
> +
> + pending = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
> + pending &= intc->pending_mask;
Isn't this "pending_mask" more of an "enabled"?
> +
> + if (pending) {
> + int i;
> +
> + for (i = 0; i < domain->hwirq_max; i++)
Don't. This is an implementation detail of the irq domain, and you're
not supposed to access that field.
> + if (pending & intc->irq_mask[i])
What are you trying to do here? Can't you directly infer the pending
interrupt from the pending field?
> + generic_handle_irq(irq_find_mapping(domain, i));
> + } else {
> + spurious_interrupt();
> + }
Missing chained_irq_enter/exit calls.
> +}
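Roughly what I'd expect the flow handler to look like (untested sketch,
assuming the cascade count is kept in struct ath79_intc as an nr_irqs
field, as in the sketch above, instead of peeking at domain->hwirq_max):

	static void ath79_intc_irq_handler(struct irq_desc *desc)
	{
		struct irq_chip *chip = irq_desc_get_chip(desc);
		struct irq_domain *domain = irq_desc_get_handler_data(desc);
		struct ath79_intc *intc = domain->host_data;
		u32 pending;
		int i;

		chained_irq_enter(chip, desc);

		pending = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
		pending &= intc->pending_mask;

		if (!pending) {
			spurious_interrupt();
		} else {
			/* walk the per-cascade masks to find the source */
			for (i = 0; i < intc->nr_irqs; i++) {
				if (pending & intc->irq_mask[i])
					generic_handle_irq(irq_find_mapping(domain, i));
			}
		}

		chained_irq_exit(chip, desc);
	}

(plus an include of linux/irqchip/chained_irq.h)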
> +
> +static void ath79_intc_irq_unmask(struct irq_data *d)
> +{
> +}
> +
> +static void ath79_intc_irq_mask(struct irq_data *d)
> +{
> +}
So you cannot mask or unmask an interrupt? What is this thing? An OR gate?
> +
> +static int ath79_intc_map(struct irq_domain *d, unsigned int irq,
> + irq_hw_number_t hw)
> +{
> + struct ath79_intc *intc = d->host_data;
> +
> + irq_set_chip_and_handler(irq, &intc->chip, handle_level_irq);
> +
> + return 0;
> +}
> +
> +static const struct irq_domain_ops ath79_irq_domain_ops = {
> + .xlate = irq_domain_xlate_onecell,
> + .map = ath79_intc_map,
> +};
> +
> +static int __init qca9556_intc_of_init(
> + struct device_node *node, struct device_node *parent)
> +{
> + struct irq_domain *domain;
> + struct ath79_intc *intc;
> + int cnt, i;
> +
> + cnt = of_property_count_u32_elems(node, "qcom,pending-bits");
Where is this binding documented? What does "pending_bits" even mean if
it is statically defined?
> + if (cnt > ATH79_MAX_INTC_CASCADE)
> + panic("Too many INTC pending bits\n");
> +
> + intc = kzalloc(sizeof(*intc), GFP_KERNEL);
> + if (!intc)
> + panic("Failed to allocate INTC memory\n");
> + intc->chip.name = "INTC";
> + intc->chip.irq_unmask = ath79_intc_irq_unmask,
> + intc->chip.irq_mask = ath79_intc_irq_mask,
> +
> + of_property_read_u32_array(node, "qcom,pending-bits", intc->irq_mask,
> + cnt);
> + for (i = 0; i < cnt; i++)
> + intc->pending_mask |= intc->irq_mask[i];
> +
> + intc->irq = irq_of_parse_and_map(node, 0);
> + if (!intc->irq)
> + panic("Failed to get INTC IRQ");
Do you really need the panics in this function? That seems a bit of a
harsh treatment for something that is not necessarily fatal.
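i.e. something along these lines instead (untested):

	intc->irq = irq_of_parse_and_map(node, 0);
	if (!intc->irq) {
		pr_err("%pOF: failed to map parent interrupt\n", node);
		kfree(intc);
		return -EINVAL;
	}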
> +
> + domain = irq_domain_add_linear(node, cnt, &ath79_irq_domain_ops,
> + intc);
> + irq_set_chained_handler_and_data(intc->irq, ath79_intc_irq_handler,
> + domain);
> +
> + return 0;
> +}
> +IRQCHIP_DECLARE(qca9556_intc, "qcom,qca9556-intc",
> + qca9556_intc_of_init);
>
Thanks,
M.
--
Jazz is not dead. It just smells funny...