diff mbox series

[SRU,Bionic,1/1] s390/qeth: don't clobber buffer on async TX completion

Message ID db2a22e43c2812e798505eb8edef6edc524c78c7.1533829928.git.joseph.salisbury@canonical.com
State New
Headers show
Series s390/qeth: don't clobber buffer on async TX completion | expand

Commit Message

Joseph Salisbury Aug. 14, 2018, 7:12 p.m. UTC
From: Julian Wiedmann <jwi@linux.ibm.com>

BugLink: https://bugs.launchpad.net/bugs/1786057

If qeth_qdio_output_handler() detects that a transmit requires async
completion, it replaces the pending buffer's metadata object
(qeth_qdio_out_buffer) so that this queue buffer can be re-used while
the data is pending completion.

Later when the CQ indicates async completion of such a metadata object,
qeth_qdio_cq_handler() tries to free any data associated with this
object (since HW has now completed the transfer). By calling
qeth_clear_output_buffer(), it erroneously operates on the queue buffer
that _previously_ belonged to this transfer ... but which has been
potentially re-used several times by now.
This results in double-frees of the buffer's data, and failing
transmits as the buffer descriptor is scrubbed in mid-air.

The correct way of handling this situation is to
1. scrub the queue buffer when it is prepared for re-use, and
2. later obtain the data addresses from the async-completion notifier
   (i.e. the AOB), instead of the queue buffer.

All this only affects qeth devices used for af_iucv HiperTransport.

Fixes: 0da9581ddb0f ("qeth: exploit asynchronous delivery of storage blocks")
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit ce28867fd20c23cd769e78b4d619c4755bf71a1c)
Signed-off-by: Joseph Salisbury <joseph.salisbury@canonical.com>
---
 drivers/s390/net/qeth_core.h      | 11 +++++++++++
 drivers/s390/net/qeth_core_main.c | 22 ++++++++++++++++------
 2 files changed, 27 insertions(+), 6 deletions(-)

Comments

Kleber Sacilotto de Souza Aug. 21, 2018, 2:08 p.m. UTC | #1
On 08/14/18 21:12, Joseph Salisbury wrote:
> From: Julian Wiedmann <jwi@linux.ibm.com>
> 
> BugLink: https://bugs.launchpad.net/bugs/1786057
> 
> If qeth_qdio_output_handler() detects that a transmit requires async
> completion, it replaces the pending buffer's metadata object
> (qeth_qdio_out_buffer) so that this queue buffer can be re-used while
> the data is pending completion.
> 
> Later when the CQ indicates async completion of such a metadata object,
> qeth_qdio_cq_handler() tries to free any data associated with this
> object (since HW has now completed the transfer). By calling
> qeth_clear_output_buffer(), it erroneously operates on the queue buffer
> that _previously_ belonged to this transfer ... but which has been
> potentially re-used several times by now.
> This results in double-frees of the buffer's data, and failing
> transmits as the buffer descriptor is scrubbed in mid-air.
> 
> The correct way of handling this situation is to
> 1. scrub the queue buffer when it is prepared for re-use, and
> 2. later obtain the data addresses from the async-completion notifier
>    (i.e. the AOB), instead of the queue buffer.
> 
> All this only affects qeth devices used for af_iucv HiperTransport.
> 
> Fixes: 0da9581ddb0f ("qeth: exploit asynchronous delivery of storage blocks")
> Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
> Signed-off-by: David S. Miller <davem@davemloft.net>
> (cherry picked from commit ce28867fd20c23cd769e78b4d619c4755bf71a1c)
> Signed-off-by: Joseph Salisbury <joseph.salisbury@canonical.com>

Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>

> ---
>  drivers/s390/net/qeth_core.h      | 11 +++++++++++
>  drivers/s390/net/qeth_core_main.c | 22 ++++++++++++++++------
>  2 files changed, 27 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
> index 0d2f96f..c957ab3 100644
> --- a/drivers/s390/net/qeth_core.h
> +++ b/drivers/s390/net/qeth_core.h
> @@ -829,6 +829,17 @@ struct qeth_trap_id {
>  /*some helper functions*/
>  #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
>  
> +static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
> +					  unsigned int elements)
> +{
> +	unsigned int i;
> +
> +	for (i = 0; i < elements; i++)
> +		memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element));
> +	buf->element[14].sflags = 0;
> +	buf->element[15].sflags = 0;
> +}
> +
>  /**
>   * qeth_get_elements_for_range() -	find number of SBALEs to cover range.
>   * @start:				Start of the address range.
> diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
> index 307fb5a..7b08ea7 100644
> --- a/drivers/s390/net/qeth_core_main.c
> +++ b/drivers/s390/net/qeth_core_main.c
> @@ -73,9 +73,6 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
>  		struct qeth_qdio_out_buffer *buf,
>  		enum iucv_tx_notify notification);
>  static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
> -static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
> -		struct qeth_qdio_out_buffer *buf,
> -		enum qeth_qdio_buffer_states newbufstate);
>  static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
>  
>  struct workqueue_struct *qeth_wq;
> @@ -488,6 +485,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
>  	struct qaob *aob;
>  	struct qeth_qdio_out_buffer *buffer;
>  	enum iucv_tx_notify notification;
> +	unsigned int i;
>  
>  	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
>  	QETH_CARD_TEXT(card, 5, "haob");
> @@ -512,10 +510,18 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
>  	qeth_notify_skbs(buffer->q, buffer, notification);
>  
>  	buffer->aob = NULL;
> -	qeth_clear_output_buffer(buffer->q, buffer,
> -				 QETH_QDIO_BUF_HANDLED_DELAYED);
> +	/* Free dangling allocations. The attached skbs are handled by
> +	 * qeth_cleanup_handled_pending().
> +	 */
> +	for (i = 0;
> +	     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
> +	     i++) {
> +		if (aob->sba[i] && buffer->is_header[i])
> +			kmem_cache_free(qeth_core_header_cache,
> +					(void *) aob->sba[i]);
> +	}
> +	atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
>  
> -	/* from here on: do not touch buffer anymore */
>  	qdio_release_aob(aob);
>  }
>  
> @@ -3764,6 +3770,10 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
>  			QETH_CARD_TEXT(queue->card, 5, "aob");
>  			QETH_CARD_TEXT_(queue->card, 5, "%lx",
>  					virt_to_phys(buffer->aob));
> +
> +			/* prepare the queue slot for re-use: */
> +			qeth_scrub_qdio_buffer(buffer->buffer,
> +					       QETH_MAX_BUFFER_ELEMENTS(card));
>  			if (qeth_init_qdio_out_buf(queue, bidx)) {
>  				QETH_CARD_TEXT(card, 2, "outofbuf");
>  				qeth_schedule_recovery(card);
>
diff mbox series

Patch

diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 0d2f96f..c957ab3 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -829,6 +829,17 @@  struct qeth_trap_id {
 /*some helper functions*/
 #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
 
+static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
+					  unsigned int elements)
+{
+	unsigned int i;
+
+	for (i = 0; i < elements; i++)
+		memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element));
+	buf->element[14].sflags = 0;
+	buf->element[15].sflags = 0;
+}
+
 /**
  * qeth_get_elements_for_range() -	find number of SBALEs to cover range.
  * @start:				Start of the address range.
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 307fb5a..7b08ea7 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -73,9 +73,6 @@  static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
 		struct qeth_qdio_out_buffer *buf,
 		enum iucv_tx_notify notification);
 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
-static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-		struct qeth_qdio_out_buffer *buf,
-		enum qeth_qdio_buffer_states newbufstate);
 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
 
 struct workqueue_struct *qeth_wq;
@@ -488,6 +485,7 @@  static void qeth_qdio_handle_aob(struct qeth_card *card,
 	struct qaob *aob;
 	struct qeth_qdio_out_buffer *buffer;
 	enum iucv_tx_notify notification;
+	unsigned int i;
 
 	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
 	QETH_CARD_TEXT(card, 5, "haob");
@@ -512,10 +510,18 @@  static void qeth_qdio_handle_aob(struct qeth_card *card,
 	qeth_notify_skbs(buffer->q, buffer, notification);
 
 	buffer->aob = NULL;
-	qeth_clear_output_buffer(buffer->q, buffer,
-				 QETH_QDIO_BUF_HANDLED_DELAYED);
+	/* Free dangling allocations. The attached skbs are handled by
+	 * qeth_cleanup_handled_pending().
+	 */
+	for (i = 0;
+	     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
+	     i++) {
+		if (aob->sba[i] && buffer->is_header[i])
+			kmem_cache_free(qeth_core_header_cache,
+					(void *) aob->sba[i]);
+	}
+	atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
 
-	/* from here on: do not touch buffer anymore */
 	qdio_release_aob(aob);
 }
 
@@ -3764,6 +3770,10 @@  void qeth_qdio_output_handler(struct ccw_device *ccwdev,
 			QETH_CARD_TEXT(queue->card, 5, "aob");
 			QETH_CARD_TEXT_(queue->card, 5, "%lx",
 					virt_to_phys(buffer->aob));
+
+			/* prepare the queue slot for re-use: */
+			qeth_scrub_qdio_buffer(buffer->buffer,
+					       QETH_MAX_BUFFER_ELEMENTS(card));
 			if (qeth_init_qdio_out_buf(queue, bidx)) {
 				QETH_CARD_TEXT(card, 2, "outofbuf");
 				qeth_schedule_recovery(card);