LKML Archive on lore.kernel.org
help / color / mirror / Atom feed
* [PATCH V2] virtio-mmio: harden interrupt
@ 2021-11-26  4:41 Jason Wang
  2021-12-08 20:27 ` Michael S. Tsirkin
  0 siblings, 1 reply; 5+ messages in thread
From: Jason Wang @ 2021-11-26  4:41 UTC (permalink / raw)
  To: mst, jasowang, virtualization, linux-kernel

This patch tries to make sure the virtio interrupt handler for MMIO
won't be called after a reset and before virtio_device_ready(). We
can't use IRQF_NO_AUTOEN since we're using shared interrupt
(IRQF_SHARED). So this patch tracks the interrupt enabling status in a
new intr_soft_enabled variable and toggles it in
vm_disable_cbs()/vm_enable_cbs(). The MMIO interrupt handler will check
intr_soft_enabled before processing the actual interrupt.

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
Changes since V1:
- Silence compiler warnings
 drivers/virtio/virtio_mmio.c | 37 ++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 56128b9c46eb..c517afdd2cc5 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -90,6 +90,7 @@ struct virtio_mmio_device {
 	/* a list of queues so we can dispatch IRQs */
 	spinlock_t lock;
 	struct list_head virtqueues;
+	bool intr_soft_enabled;
 };
 
 struct virtio_mmio_vq_info {
@@ -100,7 +101,37 @@ struct virtio_mmio_vq_info {
 	struct list_head node;
 };
 
+/* disable irq handlers */
+static void vm_disable_cbs(struct virtio_device *vdev)
+{
+	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+	int irq = platform_get_irq(vm_dev->pdev, 0);
 
+	/*
+	 * The below synchronize() guarantees that any
+	 * interrupt for this line arriving after
+	 * synchronize_irq() has completed is guaranteed to see
+	 * intr_soft_enabled == false.
+	 */
+	WRITE_ONCE(vm_dev->intr_soft_enabled, false);
+	synchronize_irq(irq);
+}
+
+/* enable irq handlers */
+static void vm_enable_cbs(struct virtio_device *vdev)
+{
+	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+	int irq = platform_get_irq(vm_dev->pdev, 0);
+
+	disable_irq(irq);
+	/*
+	 * The above disable_irq() provides TSO ordering and
+	 * as such promotes the below store to store-release.
+	 */
+	WRITE_ONCE(vm_dev->intr_soft_enabled, true);
+	enable_irq(irq);
+	return;
+}
 
 /* Configuration interface */
 
@@ -262,6 +293,8 @@ static void vm_reset(struct virtio_device *vdev)
 
 	/* 0 status means a reset. */
 	writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);
+	/* Disable VQ/configuration callbacks. */
+	vm_disable_cbs(vdev);
 }
 
 
@@ -288,6 +321,9 @@ static irqreturn_t vm_interrupt(int irq, void *opaque)
 	unsigned long flags;
 	irqreturn_t ret = IRQ_NONE;
 
+	if (!READ_ONCE(vm_dev->intr_soft_enabled))
+		return IRQ_NONE;
+
 	/* Read and acknowledge interrupts */
 	status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
 	writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);
@@ -529,6 +565,7 @@ static bool vm_get_shm_region(struct virtio_device *vdev,
 }
 
 static const struct virtio_config_ops virtio_mmio_config_ops = {
+	.enable_cbs     = vm_enable_cbs,
 	.get		= vm_get,
 	.set		= vm_set,
 	.generation	= vm_generation,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH V2] virtio-mmio: harden interrupt
  2021-11-26  4:41 [PATCH V2] virtio-mmio: harden interrupt Jason Wang
@ 2021-12-08 20:27 ` Michael S. Tsirkin
  2021-12-09  2:06   ` Jason Wang
  0 siblings, 1 reply; 5+ messages in thread
From: Michael S. Tsirkin @ 2021-12-08 20:27 UTC (permalink / raw)
  To: Jason Wang; +Cc: virtualization, linux-kernel

On Fri, Nov 26, 2021 at 12:41:02PM +0800, Jason Wang wrote:
> This patch tries to make sure the virtio interrupt handler for MMIO
> won't be called after a reset and before virtio_device_ready(). We
> can't use IRQF_NO_AUTOEN since we're using shared interrupt
> (IRQF_SHARED). So this patch tracks the interrupt enabling status in a
> new intr_soft_enabled variable and toggle it during in
> vm_disable/enable_interrupts(). The MMIO interrupt handler will check
> intr_soft_enabled before processing the actual interrupt.
> 
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
> Changes since V1:
> - Silent compling warnings
>  drivers/virtio/virtio_mmio.c | 37 ++++++++++++++++++++++++++++++++++++
>  1 file changed, 37 insertions(+)
> 
> diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
> index 56128b9c46eb..c517afdd2cc5 100644
> --- a/drivers/virtio/virtio_mmio.c
> +++ b/drivers/virtio/virtio_mmio.c
> @@ -90,6 +90,7 @@ struct virtio_mmio_device {
>  	/* a list of queues so we can dispatch IRQs */
>  	spinlock_t lock;
>  	struct list_head virtqueues;
> +	bool intr_soft_enabled;
>  };
>  
>  struct virtio_mmio_vq_info {
> @@ -100,7 +101,37 @@ struct virtio_mmio_vq_info {
>  	struct list_head node;
>  };
>  
> +/* disable irq handlers */
> +static void vm_disable_cbs(struct virtio_device *vdev)
> +{
> +	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
> +	int irq = platform_get_irq(vm_dev->pdev, 0);
>  
> +	/*
> +	 * The below synchronize() guarantees that any
> +	 * interrupt for this line arriving after
> +	 * synchronize_irq() has completed is guaranteed to see
> +	 * intx_soft_enabled == false.
> +	 */
> +	WRITE_ONCE(vm_dev->intr_soft_enabled, false);
> +	synchronize_irq(irq);
> +}
> +
> +/* enable irq handlers */
> +static void vm_enable_cbs(struct virtio_device *vdev)
> +{
> +	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
> +	int irq = platform_get_irq(vm_dev->pdev, 0);
> +
> +	disable_irq(irq);
> +	/*
> +	 * The above disable_irq() provides TSO ordering and
> +	 * as such promotes the below store to store-release.
> +	 */
> +	WRITE_ONCE(vm_dev->intr_soft_enabled, true);
> +	enable_irq(irq);
> +	return;
> +}
>  
>  /* Configuration interface */
>  
> @@ -262,6 +293,8 @@ static void vm_reset(struct virtio_device *vdev)
>  
>  	/* 0 status means a reset. */
>  	writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);

There was a discussion about reading status to make sure it is clear.
The spec says we should, this can't hurt as a further hardening measure.
In fact, let's do it in the core maybe? Spec says it applies to all
devices ...

> +	/* Disable VQ/configuration callbacks. */
> +	vm_disable_cbs(vdev);
>  }
>  
>  
> @@ -288,6 +321,9 @@ static irqreturn_t vm_interrupt(int irq, void *opaque)
>  	unsigned long flags;
>  	irqreturn_t ret = IRQ_NONE;
>  
> +	if (!READ_ONCE(vm_dev->intr_soft_enabled))
> +		return IRQ_NONE;
> +

So if the write is seen before reset happened (should not happen, but we
are talking a buggy device) then it won't be acknowledged and device
will keep pulling the interrupt. I think as long as we are hardening
this, let's go the full mile and try to avoid DoS if we can, do the
check before invoking the callback, but do not skip the read.
Whether to still return IRQ_NONE is a good question.




>  	/* Read and acknowledge interrupts */
>  	status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
>  	writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);
> @@ -529,6 +565,7 @@ static bool vm_get_shm_region(struct virtio_device *vdev,
>  }
>  
>  static const struct virtio_config_ops virtio_mmio_config_ops = {
> +	.enable_cbs     = vm_enable_cbs,
>  	.get		= vm_get,
>  	.set		= vm_set,
>  	.generation	= vm_generation,
> -- 
> 2.25.1


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH V2] virtio-mmio: harden interrupt
  2021-12-08 20:27 ` Michael S. Tsirkin
@ 2021-12-09  2:06   ` Jason Wang
  2021-12-09  6:55     ` Michael S. Tsirkin
  0 siblings, 1 reply; 5+ messages in thread
From: Jason Wang @ 2021-12-09  2:06 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: virtualization, linux-kernel

On Thu, Dec 9, 2021 at 4:27 AM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Fri, Nov 26, 2021 at 12:41:02PM +0800, Jason Wang wrote:
> > This patch tries to make sure the virtio interrupt handler for MMIO
> > won't be called after a reset and before virtio_device_ready(). We
> > can't use IRQF_NO_AUTOEN since we're using shared interrupt
> > (IRQF_SHARED). So this patch tracks the interrupt enabling status in a
> > new intr_soft_enabled variable and toggle it during in
> > vm_disable/enable_interrupts(). The MMIO interrupt handler will check
> > intr_soft_enabled before processing the actual interrupt.
> >
> > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > ---
> > Changes since V1:
> > - Silent compling warnings
> >  drivers/virtio/virtio_mmio.c | 37 ++++++++++++++++++++++++++++++++++++
> >  1 file changed, 37 insertions(+)
> >
> > diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
> > index 56128b9c46eb..c517afdd2cc5 100644
> > --- a/drivers/virtio/virtio_mmio.c
> > +++ b/drivers/virtio/virtio_mmio.c
> > @@ -90,6 +90,7 @@ struct virtio_mmio_device {
> >       /* a list of queues so we can dispatch IRQs */
> >       spinlock_t lock;
> >       struct list_head virtqueues;
> > +     bool intr_soft_enabled;
> >  };
> >
> >  struct virtio_mmio_vq_info {
> > @@ -100,7 +101,37 @@ struct virtio_mmio_vq_info {
> >       struct list_head node;
> >  };
> >
> > +/* disable irq handlers */
> > +static void vm_disable_cbs(struct virtio_device *vdev)
> > +{
> > +     struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
> > +     int irq = platform_get_irq(vm_dev->pdev, 0);
> >
> > +     /*
> > +      * The below synchronize() guarantees that any
> > +      * interrupt for this line arriving after
> > +      * synchronize_irq() has completed is guaranteed to see
> > +      * intx_soft_enabled == false.
> > +      */
> > +     WRITE_ONCE(vm_dev->intr_soft_enabled, false);
> > +     synchronize_irq(irq);
> > +}
> > +
> > +/* enable irq handlers */
> > +static void vm_enable_cbs(struct virtio_device *vdev)
> > +{
> > +     struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
> > +     int irq = platform_get_irq(vm_dev->pdev, 0);
> > +
> > +     disable_irq(irq);
> > +     /*
> > +      * The above disable_irq() provides TSO ordering and
> > +      * as such promotes the below store to store-release.
> > +      */
> > +     WRITE_ONCE(vm_dev->intr_soft_enabled, true);
> > +     enable_irq(irq);
> > +     return;
> > +}
> >
> >  /* Configuration interface */
> >
> > @@ -262,6 +293,8 @@ static void vm_reset(struct virtio_device *vdev)
> >
> >       /* 0 status means a reset. */
> >       writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);
>
> There was a discussion about reading status to make sure it is clear.
> The spec says we should, this can't hurt as a further hardening measure.
> In fact, let's do it in the core maybe? Spec says it applies to all
> devices ...

We can do that, but I'm not sure if we break some existing device.

>
> > +     /* Disable VQ/configuration callbacks. */
> > +     vm_disable_cbs(vdev);
> >  }
> >
> >
> > @@ -288,6 +321,9 @@ static irqreturn_t vm_interrupt(int irq, void *opaque)
> >       unsigned long flags;
> >       irqreturn_t ret = IRQ_NONE;
> >
> > +     if (!READ_ONCE(vm_dev->intr_soft_enabled))
> > +             return IRQ_NONE;
> > +
>
> So if the write is seen before reset happened (should not happen, but we
> are talking a buggy device) then it won't be acknowledged and device
> will keep pulling the interrupt. I think as long as we are hardening
> this, let's go the full mile and try to avoid DoS if we can, do the
> check before invoking the callback, but do not skip the read.
> Whether to still return IRQ_NONE is a good question.

Did you mean something like this:

        /* Read and acknowledge interrupts */
        status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
        writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);

        if (status)
                ret = IRQ_HANDLED;

       if (!READ_ONCE(vm_dev->intr_soft_enabled))
               return ret;

Thanks

>
>
>
>
> >       /* Read and acknowledge interrupts */
> >       status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
> >       writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);
> > @@ -529,6 +565,7 @@ static bool vm_get_shm_region(struct virtio_device *vdev,
> >  }
> >
> >  static const struct virtio_config_ops virtio_mmio_config_ops = {
> > +     .enable_cbs     = vm_enable_cbs,
> >       .get            = vm_get,
> >       .set            = vm_set,
> >       .generation     = vm_generation,
> > --
> > 2.25.1
>


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH V2] virtio-mmio: harden interrupt
  2021-12-09  2:06   ` Jason Wang
@ 2021-12-09  6:55     ` Michael S. Tsirkin
  2021-12-09  7:57       ` Jason Wang
  0 siblings, 1 reply; 5+ messages in thread
From: Michael S. Tsirkin @ 2021-12-09  6:55 UTC (permalink / raw)
  To: Jason Wang; +Cc: virtualization, linux-kernel

On Thu, Dec 09, 2021 at 10:06:34AM +0800, Jason Wang wrote:
> On Thu, Dec 9, 2021 at 4:27 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> >
> > On Fri, Nov 26, 2021 at 12:41:02PM +0800, Jason Wang wrote:
> > > This patch tries to make sure the virtio interrupt handler for MMIO
> > > won't be called after a reset and before virtio_device_ready(). We
> > > can't use IRQF_NO_AUTOEN since we're using shared interrupt
> > > (IRQF_SHARED). So this patch tracks the interrupt enabling status in a
> > > new intr_soft_enabled variable and toggle it during in
> > > vm_disable/enable_interrupts(). The MMIO interrupt handler will check
> > > intr_soft_enabled before processing the actual interrupt.
> > >
> > > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > > ---
> > > Changes since V1:
> > > - Silent compling warnings
> > >  drivers/virtio/virtio_mmio.c | 37 ++++++++++++++++++++++++++++++++++++
> > >  1 file changed, 37 insertions(+)
> > >
> > > diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
> > > index 56128b9c46eb..c517afdd2cc5 100644
> > > --- a/drivers/virtio/virtio_mmio.c
> > > +++ b/drivers/virtio/virtio_mmio.c
> > > @@ -90,6 +90,7 @@ struct virtio_mmio_device {
> > >       /* a list of queues so we can dispatch IRQs */
> > >       spinlock_t lock;
> > >       struct list_head virtqueues;
> > > +     bool intr_soft_enabled;
> > >  };
> > >
> > >  struct virtio_mmio_vq_info {
> > > @@ -100,7 +101,37 @@ struct virtio_mmio_vq_info {
> > >       struct list_head node;
> > >  };
> > >
> > > +/* disable irq handlers */
> > > +static void vm_disable_cbs(struct virtio_device *vdev)
> > > +{
> > > +     struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
> > > +     int irq = platform_get_irq(vm_dev->pdev, 0);
> > >
> > > +     /*
> > > +      * The below synchronize() guarantees that any
> > > +      * interrupt for this line arriving after
> > > +      * synchronize_irq() has completed is guaranteed to see
> > > +      * intx_soft_enabled == false.
> > > +      */
> > > +     WRITE_ONCE(vm_dev->intr_soft_enabled, false);
> > > +     synchronize_irq(irq);
> > > +}
> > > +
> > > +/* enable irq handlers */
> > > +static void vm_enable_cbs(struct virtio_device *vdev)
> > > +{
> > > +     struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
> > > +     int irq = platform_get_irq(vm_dev->pdev, 0);
> > > +
> > > +     disable_irq(irq);
> > > +     /*
> > > +      * The above disable_irq() provides TSO ordering and
> > > +      * as such promotes the below store to store-release.
> > > +      */
> > > +     WRITE_ONCE(vm_dev->intr_soft_enabled, true);
> > > +     enable_irq(irq);
> > > +     return;
> > > +}
> > >
> > >  /* Configuration interface */
> > >
> > > @@ -262,6 +293,8 @@ static void vm_reset(struct virtio_device *vdev)
> > >
> > >       /* 0 status means a reset. */
> > >       writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);
> >
> > There was a discussion about reading status to make sure it is clear.
> > The spec says we should, this can't hurt as a further hardening measure.
> > In fact, let's do it in the core maybe? Spec says it applies to all
> > devices ...
> 
> We can do that, but I'm not sure if we break some existing device.

Hmm. Have anything specific in mind?

> >
> > > +     /* Disable VQ/configuration callbacks. */
> > > +     vm_disable_cbs(vdev);
> > >  }
> > >
> > >
> > > @@ -288,6 +321,9 @@ static irqreturn_t vm_interrupt(int irq, void *opaque)
> > >       unsigned long flags;
> > >       irqreturn_t ret = IRQ_NONE;
> > >
> > > +     if (!READ_ONCE(vm_dev->intr_soft_enabled))
> > > +             return IRQ_NONE;
> > > +
> >
> > So if the write is seen before reset happened (should not happen, but we
> > are talking a buggy device) then it won't be acknowledged and device
> > will keep pulling the interrupt. I think as long as we are hardening
> > this, let's go the full mile and try to avoid DoS if we can, do the
> > check before invoking the callback, but do not skip the read.
> > Whether to still return IRQ_NONE is a good question.
> 
> Did you mean something like this:
> 
>         /* Read and acknowledge interrupts */
>         status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
>         writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);
> 
>         if (status)
>                 ret = IRQ_HANDLED;
> 
>        if (!READ_ONCE(vm_dev->intr_soft_enabled))
>                return ret;
> 
> Thanks

Maybe. Or is

        if (!READ_ONCE(vm_dev->intr_soft_enabled))
                return IRQ_NONE;

better here?


> >
> >
> >
> >
> > >       /* Read and acknowledge interrupts */
> > >       status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
> > >       writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);
> > > @@ -529,6 +565,7 @@ static bool vm_get_shm_region(struct virtio_device *vdev,
> > >  }
> > >
> > >  static const struct virtio_config_ops virtio_mmio_config_ops = {
> > > +     .enable_cbs     = vm_enable_cbs,
> > >       .get            = vm_get,
> > >       .set            = vm_set,
> > >       .generation     = vm_generation,
> > > --
> > > 2.25.1
> >


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH V2] virtio-mmio: harden interrupt
  2021-12-09  6:55     ` Michael S. Tsirkin
@ 2021-12-09  7:57       ` Jason Wang
  0 siblings, 0 replies; 5+ messages in thread
From: Jason Wang @ 2021-12-09  7:57 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: virtualization, linux-kernel

On Thu, Dec 9, 2021 at 2:55 PM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Thu, Dec 09, 2021 at 10:06:34AM +0800, Jason Wang wrote:
> > On Thu, Dec 9, 2021 at 4:27 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> > >
> > > On Fri, Nov 26, 2021 at 12:41:02PM +0800, Jason Wang wrote:
> > > > This patch tries to make sure the virtio interrupt handler for MMIO
> > > > won't be called after a reset and before virtio_device_ready(). We
> > > > can't use IRQF_NO_AUTOEN since we're using shared interrupt
> > > > (IRQF_SHARED). So this patch tracks the interrupt enabling status in a
> > > > new intr_soft_enabled variable and toggle it during in
> > > > vm_disable/enable_interrupts(). The MMIO interrupt handler will check
> > > > intr_soft_enabled before processing the actual interrupt.
> > > >
> > > > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > > > ---
> > > > Changes since V1:
> > > > - Silent compling warnings
> > > >  drivers/virtio/virtio_mmio.c | 37 ++++++++++++++++++++++++++++++++++++
> > > >  1 file changed, 37 insertions(+)
> > > >
> > > > diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
> > > > index 56128b9c46eb..c517afdd2cc5 100644
> > > > --- a/drivers/virtio/virtio_mmio.c
> > > > +++ b/drivers/virtio/virtio_mmio.c
> > > > @@ -90,6 +90,7 @@ struct virtio_mmio_device {
> > > >       /* a list of queues so we can dispatch IRQs */
> > > >       spinlock_t lock;
> > > >       struct list_head virtqueues;
> > > > +     bool intr_soft_enabled;
> > > >  };
> > > >
> > > >  struct virtio_mmio_vq_info {
> > > > @@ -100,7 +101,37 @@ struct virtio_mmio_vq_info {
> > > >       struct list_head node;
> > > >  };
> > > >
> > > > +/* disable irq handlers */
> > > > +static void vm_disable_cbs(struct virtio_device *vdev)
> > > > +{
> > > > +     struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
> > > > +     int irq = platform_get_irq(vm_dev->pdev, 0);
> > > >
> > > > +     /*
> > > > +      * The below synchronize() guarantees that any
> > > > +      * interrupt for this line arriving after
> > > > +      * synchronize_irq() has completed is guaranteed to see
> > > > +      * intx_soft_enabled == false.
> > > > +      */
> > > > +     WRITE_ONCE(vm_dev->intr_soft_enabled, false);
> > > > +     synchronize_irq(irq);
> > > > +}
> > > > +
> > > > +/* enable irq handlers */
> > > > +static void vm_enable_cbs(struct virtio_device *vdev)
> > > > +{
> > > > +     struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
> > > > +     int irq = platform_get_irq(vm_dev->pdev, 0);
> > > > +
> > > > +     disable_irq(irq);
> > > > +     /*
> > > > +      * The above disable_irq() provides TSO ordering and
> > > > +      * as such promotes the below store to store-release.
> > > > +      */
> > > > +     WRITE_ONCE(vm_dev->intr_soft_enabled, true);
> > > > +     enable_irq(irq);
> > > > +     return;
> > > > +}
> > > >
> > > >  /* Configuration interface */
> > > >
> > > > @@ -262,6 +293,8 @@ static void vm_reset(struct virtio_device *vdev)
> > > >
> > > >       /* 0 status means a reset. */
> > > >       writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);
> > >
> > > There was a discussion about reading status to make sure it is clear.
> > > The spec says we should, this can't hurt as a further hardening measure.
> > > In fact, let's do it in the core maybe? Spec says it applies to all
> > > devices ...
> >
> > We can do that, but I'm not sure if we break some existing device.
>
> Hmm. Have anything specific in mind?

No, I can send a patch to do that.

>
> > >
> > > > +     /* Disable VQ/configuration callbacks. */
> > > > +     vm_disable_cbs(vdev);
> > > >  }
> > > >
> > > >
> > > > @@ -288,6 +321,9 @@ static irqreturn_t vm_interrupt(int irq, void *opaque)
> > > >       unsigned long flags;
> > > >       irqreturn_t ret = IRQ_NONE;
> > > >
> > > > +     if (!READ_ONCE(vm_dev->intr_soft_enabled))
> > > > +             return IRQ_NONE;
> > > > +
> > >
> > > So if the write is seen before reset happened (should not happen, but we
> > > are talking a buggy device) then it won't be acknowledged and device
> > > will keep pulling the interrupt. I think as long as we are hardening
> > > this, let's go the full mile and try to avoid DoS if we can, do the
> > > check before invoking the callback, but do not skip the read.
> > > Whether to still return IRQ_NONE is a good question.
> >
> > Did you mean something like this:
> >
> >         /* Read and acknowledge interrupts */
> >         status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
> >         writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);
> >
> >         if (status)
> >                 ret = IRQ_HANDLED;
> >
> >        if (!READ_ONCE(vm_dev->intr_soft_enabled))
> >                return ret;
> >
> > Thanks
>
> Maybe. Or is
>
>         if (!READ_ONCE(vm_dev->intr_soft_enabled))
>                 return IRQ_NONE;
>
> better here?

Yes, I just paste part of the code, the ret is initialized to IRQ_NONE.

Thanks

>
>
> > >
> > >
> > >
> > >
> > > >       /* Read and acknowledge interrupts */
> > > >       status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
> > > >       writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);
> > > > @@ -529,6 +565,7 @@ static bool vm_get_shm_region(struct virtio_device *vdev,
> > > >  }
> > > >
> > > >  static const struct virtio_config_ops virtio_mmio_config_ops = {
> > > > +     .enable_cbs     = vm_enable_cbs,
> > > >       .get            = vm_get,
> > > >       .set            = vm_set,
> > > >       .generation     = vm_generation,
> > > > --
> > > > 2.25.1
> > >
>


^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2021-12-09  7:57 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-11-26  4:41 [PATCH V2] virtio-mmio: harden interrupt Jason Wang
2021-12-08 20:27 ` Michael S. Tsirkin
2021-12-09  2:06   ` Jason Wang
2021-12-09  6:55     ` Michael S. Tsirkin
2021-12-09  7:57       ` Jason Wang

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).