From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1753887AbbIGPhb (ORCPT ); Mon, 7 Sep 2015 11:37:31 -0400
Received: from smtp.citrix.com ([66.165.176.89]:24544 "EHLO SMTP.CITRIX.COM"
	rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP
	id S1753199AbbIGPfM (ORCPT ); Mon, 7 Sep 2015 11:35:12 -0400
X-IronPort-AV: E=Sophos;i="5.17,485,1437436800"; d="scan'208";a="298261792"
From: Julien Grall
To:
CC: , , , , "Julien Grall" , =?UTF-8?q?Roger=20Pau=20Monn=C3=A9?= ,
	Konrad Rzeszutek Wilk , "Boris Ostrovsky" , David Vrabel
Subject: [PATCH v4 06/20] block/xen-blkfront: Split blkif_queue_request in 2
Date: Mon, 7 Sep 2015 16:33:44 +0100
Message-ID: <1441640038-23615-7-git-send-email-julien.grall@citrix.com>
X-Mailer: git-send-email 2.1.4
In-Reply-To: <1441640038-23615-1-git-send-email-julien.grall@citrix.com>
References: <1441640038-23615-1-git-send-email-julien.grall@citrix.com>
MIME-Version: 1.0
Content-Type: text/plain; charset="UTF-8"
Content-Transfer-Encoding: 8bit
X-DLP: MIA2
Sender: linux-kernel-owner@vger.kernel.org
List-ID:
X-Mailing-List: linux-kernel@vger.kernel.org

Currently, blkif_queue_request has 2 distinct execution paths:
    - Send a discard request
    - Send a read/write request

The function also allocates grants to use for generating the request,
although grants are only needed for the read/write path.

Rather than keeping a single function with 2 distinct execution paths,
split it in 2. This also removes one level of indentation.

Signed-off-by: Julien Grall
Reviewed-by: Roger Pau Monné

---
Cc: Konrad Rzeszutek Wilk
Cc: Boris Ostrovsky
Cc: David Vrabel

    Roger, if you really want I can drop the else clause in
    blkif_queue_request, IMHO it's clearer here. I've kept your
    Reviewed-by regardless; let me know if that's not fine.

    Changes in v3:
        - Fix errors reported by checkpatch.pl
        - Add Roger's Reviewed-by

    Changes in v2:
        - Patch added
---
 drivers/block/xen-blkfront.c | 277 ++++++++++++++++++++++++-------------------
 1 file changed, 153 insertions(+), 124 deletions(-)

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 432e105..b11f084 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -395,13 +395,35 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
 	return 0;
 }
 
-/*
- * Generate a Xen blkfront IO request from a blk layer request.  Reads
- * and writes are handled as expected.
- *
- * @req: a request struct
- */
-static int blkif_queue_request(struct request *req)
+static int blkif_queue_discard_req(struct request *req)
+{
+	struct blkfront_info *info = req->rq_disk->private_data;
+	struct blkif_request *ring_req;
+	unsigned long id;
+
+	/* Fill out a communications ring structure. */
+	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
+	id = get_id_from_freelist(info);
+	info->shadow[id].request = req;
+
+	ring_req->operation = BLKIF_OP_DISCARD;
+	ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
+	ring_req->u.discard.id = id;
+	ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
+	if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
+		ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
+	else
+		ring_req->u.discard.flag = 0;
+
+	info->ring.req_prod_pvt++;
+
+	/* Keep a private copy so we can reissue requests when recovering. */
+	info->shadow[id].req = *ring_req;
+
+	return 0;
+}
+
+static int blkif_queue_rw_req(struct request *req)
 {
 	struct blkfront_info *info = req->rq_disk->private_data;
 	struct blkif_request *ring_req;
@@ -421,9 +443,6 @@ static int blkif_queue_request(struct request *req)
 	struct scatterlist *sg;
 	int nseg, max_grefs;
 
-	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
-		return 1;
-
 	max_grefs = req->nr_phys_segments;
 	if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
 		/*
@@ -453,139 +472,131 @@ static int blkif_queue_request(struct request *req)
 	id = get_id_from_freelist(info);
 	info->shadow[id].request = req;
 
-	if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
-		ring_req->operation = BLKIF_OP_DISCARD;
-		ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
-		ring_req->u.discard.id = id;
-		ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
-		if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
-			ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
-		else
-			ring_req->u.discard.flag = 0;
+	BUG_ON(info->max_indirect_segments == 0 &&
+	       req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
+	BUG_ON(info->max_indirect_segments &&
+	       req->nr_phys_segments > info->max_indirect_segments);
+	nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg);
+	ring_req->u.rw.id = id;
+	if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+		/*
+		 * The indirect operation can only be a BLKIF_OP_READ or
+		 * BLKIF_OP_WRITE
+		 */
+		BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
+		ring_req->operation = BLKIF_OP_INDIRECT;
+		ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
+			BLKIF_OP_WRITE : BLKIF_OP_READ;
+		ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
+		ring_req->u.indirect.handle = info->handle;
+		ring_req->u.indirect.nr_segments = nseg;
 	} else {
-		BUG_ON(info->max_indirect_segments == 0 &&
-		       req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
-		BUG_ON(info->max_indirect_segments &&
-		       req->nr_phys_segments > info->max_indirect_segments);
-		nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg);
-		ring_req->u.rw.id = id;
-		if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+		ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
+		ring_req->u.rw.handle = info->handle;
+		ring_req->operation = rq_data_dir(req) ?
+			BLKIF_OP_WRITE : BLKIF_OP_READ;
+		if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
 			/*
-			 * The indirect operation can only be a BLKIF_OP_READ or
-			 * BLKIF_OP_WRITE
+			 * Ideally we can do an unordered flush-to-disk.
+			 * In case the backend onlysupports barriers, use that.
+			 * A barrier request a superset of FUA, so we can
+			 * implement it the same way.  (It's also a FLUSH+FUA,
+			 * since it is guaranteed ordered WRT previous writes.)
 			 */
-			BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
-			ring_req->operation = BLKIF_OP_INDIRECT;
-			ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
-				BLKIF_OP_WRITE : BLKIF_OP_READ;
-			ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
-			ring_req->u.indirect.handle = info->handle;
-			ring_req->u.indirect.nr_segments = nseg;
-		} else {
-			ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
-			ring_req->u.rw.handle = info->handle;
-			ring_req->operation = rq_data_dir(req) ?
-				BLKIF_OP_WRITE : BLKIF_OP_READ;
-			if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
-				/*
-				 * Ideally we can do an unordered flush-to-disk. In case the
-				 * backend onlysupports barriers, use that. A barrier request
-				 * a superset of FUA, so we can implement it the same
-				 * way.  (It's also a FLUSH+FUA, since it is
-				 * guaranteed ordered WRT previous writes.)
-				 */
-				switch (info->feature_flush &
-					((REQ_FLUSH|REQ_FUA))) {
-				case REQ_FLUSH|REQ_FUA:
-					ring_req->operation =
-						BLKIF_OP_WRITE_BARRIER;
-					break;
-				case REQ_FLUSH:
-					ring_req->operation =
-						BLKIF_OP_FLUSH_DISKCACHE;
-					break;
-				default:
-					ring_req->operation = 0;
-				}
+			switch (info->feature_flush &
+				((REQ_FLUSH|REQ_FUA))) {
+			case REQ_FLUSH|REQ_FUA:
+				ring_req->operation =
+					BLKIF_OP_WRITE_BARRIER;
+				break;
+			case REQ_FLUSH:
+				ring_req->operation =
+					BLKIF_OP_FLUSH_DISKCACHE;
+				break;
+			default:
+				ring_req->operation = 0;
 			}
-			ring_req->u.rw.nr_segments = nseg;
 		}
-		for_each_sg(info->shadow[id].sg, sg, nseg, i) {
-			fsect = sg->offset >> 9;
-			lsect = fsect + (sg->length >> 9) - 1;
+		ring_req->u.rw.nr_segments = nseg;
+	}
+	for_each_sg(info->shadow[id].sg, sg, nseg, i) {
+		fsect = sg->offset >> 9;
+		lsect = fsect + (sg->length >> 9) - 1;
 
-			if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
-			    (i % SEGS_PER_INDIRECT_FRAME == 0)) {
-				unsigned long uninitialized_var(pfn);
+		if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
+		    (i % SEGS_PER_INDIRECT_FRAME == 0)) {
+			unsigned long uninitialized_var(pfn);
 
-				if (segments)
-					kunmap_atomic(segments);
+			if (segments)
+				kunmap_atomic(segments);
 
-				n = i / SEGS_PER_INDIRECT_FRAME;
-				if (!info->feature_persistent) {
-					struct page *indirect_page;
-
-					/* Fetch a pre-allocated page to use for indirect grefs */
-					BUG_ON(list_empty(&info->indirect_pages));
-					indirect_page = list_first_entry(&info->indirect_pages,
-									 struct page, lru);
-					list_del(&indirect_page->lru);
-					pfn = page_to_pfn(indirect_page);
-				}
-				gnt_list_entry = get_grant(&gref_head, pfn, info);
-				info->shadow[id].indirect_grants[n] = gnt_list_entry;
-				segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
-				ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
+			n = i / SEGS_PER_INDIRECT_FRAME;
+			if (!info->feature_persistent) {
+				struct page *indirect_page;
+
+				/*
+				 * Fetch a pre-allocated page to use for
+				 * indirect grefs
+				 */
+				BUG_ON(list_empty(&info->indirect_pages));
+				indirect_page = list_first_entry(&info->indirect_pages,
+								 struct page, lru);
+				list_del(&indirect_page->lru);
+				pfn = page_to_pfn(indirect_page);
 			}
+			gnt_list_entry = get_grant(&gref_head, pfn, info);
+			info->shadow[id].indirect_grants[n] = gnt_list_entry;
+			segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
+			ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
+		}
 
-			gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info);
-			ref = gnt_list_entry->gref;
+		gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info);
+		ref = gnt_list_entry->gref;
 
-			info->shadow[id].grants_used[i] = gnt_list_entry;
+		info->shadow[id].grants_used[i] = gnt_list_entry;
 
-			if (rq_data_dir(req) && info->feature_persistent) {
-				char *bvec_data;
-				void *shared_data;
+		if (rq_data_dir(req) && info->feature_persistent) {
+			char *bvec_data;
+			void *shared_data;
 
-				BUG_ON(sg->offset + sg->length > PAGE_SIZE);
+			BUG_ON(sg->offset + sg->length > PAGE_SIZE);
 
-				shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
-				bvec_data = kmap_atomic(sg_page(sg));
+			shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
+			bvec_data = kmap_atomic(sg_page(sg));
 
-				/*
-				 * this does not wipe data stored outside the
-				 * range sg->offset..sg->offset+sg->length.
-				 * Therefore, blkback *could* see data from
-				 * previous requests. This is OK as long as
-				 * persistent grants are shared with just one
-				 * domain. It may need refactoring if this
-				 * changes
-				 */
-				memcpy(shared_data + sg->offset,
-				       bvec_data   + sg->offset,
-				       sg->length);
+			/*
+			 * this does not wipe data stored outside the
+			 * range sg->offset..sg->offset+sg->length.
+			 * Therefore, blkback *could* see data from
+			 * previous requests. This is OK as long as
+			 * persistent grants are shared with just one
+			 * domain. It may need refactoring if this
+			 * changes
+			 */
+			memcpy(shared_data + sg->offset,
+			       bvec_data   + sg->offset,
+			       sg->length);
 
-				kunmap_atomic(bvec_data);
-				kunmap_atomic(shared_data);
-			}
-			if (ring_req->operation != BLKIF_OP_INDIRECT) {
-				ring_req->u.rw.seg[i] =
-						(struct blkif_request_segment) {
-							.gref       = ref,
-							.first_sect = fsect,
-							.last_sect  = lsect };
-			} else {
-				n = i % SEGS_PER_INDIRECT_FRAME;
-				segments[n] =
+			kunmap_atomic(bvec_data);
+			kunmap_atomic(shared_data);
+		}
+		if (ring_req->operation != BLKIF_OP_INDIRECT) {
+			ring_req->u.rw.seg[i] =
 					(struct blkif_request_segment) {
-							.gref       = ref,
-							.first_sect = fsect,
-							.last_sect  = lsect };
-			}
+						.gref       = ref,
+						.first_sect = fsect,
+						.last_sect  = lsect };
+		} else {
+			n = i % SEGS_PER_INDIRECT_FRAME;
+			segments[n] =
+				(struct blkif_request_segment) {
+						.gref       = ref,
+						.first_sect = fsect,
+						.last_sect  = lsect };
 		}
-		if (segments)
-			kunmap_atomic(segments);
 	}
+	if (segments)
+		kunmap_atomic(segments);
 
 	info->ring.req_prod_pvt++;
 
@@ -598,6 +609,24 @@ static int blkif_queue_request(struct request *req)
 	return 0;
 }
 
+/*
+ * Generate a Xen blkfront IO request from a blk layer request.  Reads
+ * and writes are handled as expected.
+ *
+ * @req: a request struct
+ */
+static int blkif_queue_request(struct request *req)
+{
+	struct blkfront_info *info = req->rq_disk->private_data;
+
+	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
+		return 1;
+
+	if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE)))
+		return blkif_queue_discard_req(req);
+	else
+		return blkif_queue_rw_req(req);
+}
 
 static inline void flush_requests(struct blkfront_info *info)
 {
-- 
2.1.4