
[net-next] crypto: algif - explicitly mark end of data

Message ID: 20150330212543.7133.97653.stgit@tstruk-mobl1
State: Changes Requested, archived
Delegated to: David Miller

Commit Message

tadeusz.struk@intel.com March 30, 2015, 9:25 p.m. UTC
After the TX sgl is expanded, we need to explicitly mark the end of data
at the last buffer that contains data.

Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
---
 crypto/algif_skcipher.c |   13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)
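
For context, sg_mark_end() is what terminates a scatterlist walk: sg_next()
keeps advancing until it reaches an entry carrying the end marker, so an
over-allocated table has to be re-terminated at the last entry that actually
holds data. A minimal fragment sketching the semantics (the names sg and
used are illustrative, not taken from the patch):

        struct scatterlist sg[8];
        unsigned int used = 3;                  /* only the first 3 entries hold data */

        sg_init_table(sg, ARRAY_SIZE(sg));      /* end marker starts out on sg[7] */
        /* ... fill sg[0..used-1] with sg_set_page() ... */
        sg_mark_end(&sg[used - 1]);             /* walkers now stop after sg[2] */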



Comments

David Miller April 1, 2015, 7:32 p.m. UTC | #1
From: Tadeusz Struk <tadeusz.struk@intel.com>
Date: Mon, 30 Mar 2015 14:25:43 -0700

> +	int mark = 0, err = -ENOMEM;
>  
>  	lock_sock(sk);
>  	req = kmalloc(reqlen, GFP_KERNEL);
 ...
> @@ -573,10 +572,11 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
>  			kfree(sreq->tsg);
>  			sreq->tsg = tmp;
>  			tx_nents *= 2;
> +			mark = 1;
 ...
> @@ -604,6 +604,9 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
>  		iov_iter_advance(&msg->msg_iter, used);
>  	}
>  
> +	if (mark)
> +		sg_mark_end(sreq->tsg + txbufs - 1);

Please use type 'bool' and true/false for 'mark'.
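
For what it's worth, the requested cleanup would be an incremental diff
roughly along these lines (a sketch of the suggestion, not a posted
follow-up):

-	int mark = 0, err = -ENOMEM;
+	bool mark = false;
+	int err = -ENOMEM;
 ...
-			mark = 1;
+			mark = true;

The if (mark) test and the sg_mark_end() call can stay as they are.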

Patch

diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 8276f21..9492dd5 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -509,11 +509,10 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
 	struct skcipher_async_req *sreq;
 	struct ablkcipher_request *req;
 	struct skcipher_async_rsgl *last_rsgl = NULL;
-	unsigned int len = 0, tx_nents = skcipher_all_sg_nents(ctx);
+	unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
 	unsigned int reqlen = sizeof(struct skcipher_async_req) +
 				GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
-	int i = 0;
-	int err = -ENOMEM;
+	int mark = 0, err = -ENOMEM;
 
 	lock_sock(sk);
 	req = kmalloc(reqlen, GFP_KERNEL);
@@ -555,7 +554,7 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
 			     iov_iter_count(&msg->msg_iter));
 		used = min_t(unsigned long, used, sg->length);
 
-		if (i == tx_nents) {
+		if (txbufs == tx_nents) {
 			struct scatterlist *tmp;
 			int x;
 			/* Ran out of tx slots in async request
@@ -573,10 +572,11 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
 			kfree(sreq->tsg);
 			sreq->tsg = tmp;
 			tx_nents *= 2;
+			mark = 1;
 		}
 		/* Need to take over the tx sgl from ctx
 		 * to the asynch req - these sgls will be freed later */
-		sg_set_page(sreq->tsg + i++, sg_page(sg), sg->length,
+		sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
 			    sg->offset);
 
 		if (list_empty(&sreq->list)) {
@@ -604,6 +604,9 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
 		iov_iter_advance(&msg->msg_iter, used);
 	}
 
+	if (mark)
+		sg_mark_end(sreq->tsg + txbufs - 1);
+
 	ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
 				     len, sreq->iv);
 	err = ctx->enc ? crypto_ablkcipher_encrypt(req) :
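
Taken together, the hunks above implement this pattern: when the async
request runs out of tx slots the table is doubled, the already-populated
entries are carried over, and, once every buffer has been copied in, the end
is re-marked at the last entry that actually holds data (hence the mark
flag: sg_mark_end() can only run once the loop has finished placing buffers,
and is only needed when the table was grown past the number of buffers in
use). A self-contained sketch of the underlying scatterlist handling; the
helper name grow_and_terminate_sgl and its parameters are illustrative, not
code from algif_skcipher.c, and unlike the patch it marks the end inside the
helper rather than after the copy loop:

#include <linux/scatterlist.h>
#include <linux/slab.h>

/*
 * Illustrative sketch: double a scatterlist table, carry over the entries
 * already in use, and terminate the walk at the last one that holds data.
 */
static struct scatterlist *grow_and_terminate_sgl(struct scatterlist *old_sg,
                                                  unsigned int old_nents,
                                                  unsigned int used)
{
        struct scatterlist *new_sg;
        unsigned int i;

        new_sg = kcalloc(old_nents * 2, sizeof(*new_sg), GFP_KERNEL);
        if (!new_sg)
                return NULL;

        /* Places the end marker on the last allocated slot for now. */
        sg_init_table(new_sg, old_nents * 2);

        for (i = 0; i < used; i++)
                sg_set_page(new_sg + i, sg_page(old_sg + i),
                            old_sg[i].length, old_sg[i].offset);

        /*
         * Consumers must stop at the last populated entry, not at the last
         * allocated slot, so mark the end explicitly.
         */
        if (used)
                sg_mark_end(new_sg + used - 1);

        kfree(old_sg);
        return new_sg;
}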