LKML Archive on lore.kernel.org
* [PATCH] nvme: trace: add disk name to tracepoints
@ 2018-05-30 13:20 Johannes Thumshirn
  2018-05-30 23:06 ` Sagi Grimberg
  2018-05-31 16:47 ` Christoph Hellwig
  0 siblings, 2 replies; 13+ messages in thread
From: Johannes Thumshirn @ 2018-05-30 13:20 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, Sagi Grimberg
  Cc: Linux Kernel Mailinglist, Linux NVMe Mailinglist,
	Hannes Reinecke, Johannes Thumshirn

Add disk name to tracepoints so we can better distinguish between
individual disks in the trace output.
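
For illustration, with the tracepoints enabled through tracefs the output
would look roughly like the following (assuming tracefs is mounted at the
usual /sys/kernel/debug/tracing; the device name and all field values below
are made up):

  # values below are illustrative, not captured from a real run
  # echo 1 > /sys/kernel/debug/tracing/events/nvme/nvme_setup_nvm_cmd/enable
  # echo 1 > /sys/kernel/debug/tracing/events/nvme/nvme_complete_rq/enable
  # cat /sys/kernel/debug/tracing/trace_pipe
  ... nvme_setup_nvm_cmd: nvme0n1: qid=1, nsid=1, cmdid=72, flags=0x0, meta=0x0, cmd=(nvme_cmd_read ...)
  ... nvme_complete_rq: nvme0n1: cmdid=72, qid=1, res=0, retries=0, flags=0x0, status=0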

Signed-off-by: Johannes Thumshirn <jthumshirn@suse.de>
---
 drivers/nvme/host/core.c  |  4 ++--
 drivers/nvme/host/trace.h | 32 ++++++++++++++++++++------------
 2 files changed, 22 insertions(+), 14 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 99b857e5a7a9..eb112ff2bede 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -647,9 +647,9 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 
 	cmd->common.command_id = req->tag;
 	if (ns)
-		trace_nvme_setup_nvm_cmd(req->q->id, cmd);
+		trace_nvme_setup_nvm_cmd(ns->disk->disk_name, req->q->id, cmd);
 	else
-		trace_nvme_setup_admin_cmd(cmd);
+		trace_nvme_setup_admin_cmd(ns->disk->disk_name, cmd);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index ea91fccd1bc0..3d3f91b933db 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -76,9 +76,10 @@ const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
 	nvme_trace_parse_nvm_cmd(p, opcode, cdw10)
 
 TRACE_EVENT(nvme_setup_admin_cmd,
-	    TP_PROTO(struct nvme_command *cmd),
-	    TP_ARGS(cmd),
+	    TP_PROTO(char *disk_name, struct nvme_command *cmd),
+	    TP_ARGS(disk_name, cmd),
 	    TP_STRUCT__entry(
+		    __string(name, disk_name)
 		    __field(u8, opcode)
 		    __field(u8, flags)
 		    __field(u16, cid)
@@ -86,6 +87,7 @@ TRACE_EVENT(nvme_setup_admin_cmd,
 		    __array(u8, cdw10, 24)
 	    ),
 	    TP_fast_assign(
+		    __assign_str(name, disk_name);
 		    __entry->opcode = cmd->common.opcode;
 		    __entry->flags = cmd->common.flags;
 		    __entry->cid = cmd->common.command_id;
@@ -93,17 +95,19 @@ TRACE_EVENT(nvme_setup_admin_cmd,
 		    memcpy(__entry->cdw10, cmd->common.cdw10,
 			   sizeof(__entry->cdw10));
 	    ),
-	    TP_printk(" cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
-		      __entry->cid, __entry->flags, __entry->metadata,
+	    TP_printk("%s: cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+		      __get_str(name), __entry->cid, __entry->flags,
+		      __entry->metadata,
 		      show_admin_opcode_name(__entry->opcode),
 		      __parse_nvme_admin_cmd(__entry->opcode, __entry->cdw10))
 );
 
 
 TRACE_EVENT(nvme_setup_nvm_cmd,
-	    TP_PROTO(int qid, struct nvme_command *cmd),
-	    TP_ARGS(qid, cmd),
+	    TP_PROTO(char *disk_name, int qid, struct nvme_command *cmd),
+	    TP_ARGS(disk_name, qid, cmd),
 	    TP_STRUCT__entry(
+		    __string(name, disk_name)
 		    __field(int, qid)
 		    __field(u8, opcode)
 		    __field(u8, flags)
@@ -113,6 +117,7 @@ TRACE_EVENT(nvme_setup_nvm_cmd,
 		    __array(u8, cdw10, 24)
 	    ),
 	    TP_fast_assign(
+		    __assign_str(name, disk_name);
 		    __entry->qid = qid;
 		    __entry->opcode = cmd->common.opcode;
 		    __entry->flags = cmd->common.flags;
@@ -122,9 +127,9 @@ TRACE_EVENT(nvme_setup_nvm_cmd,
 		    memcpy(__entry->cdw10, cmd->common.cdw10,
 			   sizeof(__entry->cdw10));
 	    ),
-	    TP_printk("qid=%d, nsid=%u, cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
-		      __entry->qid, __entry->nsid, __entry->cid,
-		      __entry->flags, __entry->metadata,
+	    TP_printk("%s: qid=%d, nsid=%u, cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+		      __get_str(name), __entry->qid, __entry->nsid,
+		      __entry->cid, __entry->flags, __entry->metadata,
 		      show_opcode_name(__entry->opcode),
 		      __parse_nvme_cmd(__entry->opcode, __entry->cdw10))
 );
@@ -133,6 +138,7 @@ TRACE_EVENT(nvme_complete_rq,
 	    TP_PROTO(struct request *req),
 	    TP_ARGS(req),
 	    TP_STRUCT__entry(
+		    __string(name, req->rq_disk->disk_name)
 		    __field(int, qid)
 		    __field(int, cid)
 		    __field(u64, result)
@@ -141,6 +147,7 @@ TRACE_EVENT(nvme_complete_rq,
 		    __field(u16, status)
 	    ),
 	    TP_fast_assign(
+		    __assign_str(name, req->rq_disk->disk_name);
 		    __entry->qid = req->q->id;
 		    __entry->cid = req->tag;
 		    __entry->result = le64_to_cpu(nvme_req(req)->result.u64);
@@ -148,9 +155,10 @@ TRACE_EVENT(nvme_complete_rq,
 		    __entry->flags = nvme_req(req)->flags;
 		    __entry->status = nvme_req(req)->status;
 	    ),
-	    TP_printk("cmdid=%u, qid=%d, res=%llu, retries=%u, flags=0x%x, status=%u",
-		      __entry->cid, __entry->qid, __entry->result,
-		      __entry->retries, __entry->flags, __entry->status)
+	    TP_printk("%s: cmdid=%u, qid=%d, res=%llu, retries=%u, flags=0x%x, status=%u",
+		      __get_str(name), __entry->cid, __entry->qid,
+		      __entry->result, __entry->retries, __entry->flags,
+		      __entry->status)
 
 );
 
-- 
2.16.3

* Re: [PATCH] nvme: trace: add disk name to tracepoints
  2018-05-30 13:20 [PATCH] nvme: trace: add disk name to tracepoints Johannes Thumshirn
@ 2018-05-30 23:06 ` Sagi Grimberg
  2018-05-31 16:47 ` Christoph Hellwig
  1 sibling, 0 replies; 13+ messages in thread
From: Sagi Grimberg @ 2018-05-30 23:06 UTC (permalink / raw)
  To: Johannes Thumshirn, Christoph Hellwig, Keith Busch
  Cc: Linux Kernel Mailinglist, Linux NVMe Mailinglist, Hannes Reinecke

> Add disk name to tracepoints so we can better distinguish between
> individual disks in the trace output.

Looks useful,

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>

* Re: [PATCH] nvme: trace: add disk name to tracepoints
  2018-05-30 13:20 [PATCH] nvme: trace: add disk name to tracepoints Johannes Thumshirn
  2018-05-30 23:06 ` Sagi Grimberg
@ 2018-05-31 16:47 ` Christoph Hellwig
  1 sibling, 0 replies; 13+ messages in thread
From: Christoph Hellwig @ 2018-05-31 16:47 UTC (permalink / raw)
  To: Johannes Thumshirn
  Cc: Christoph Hellwig, Keith Busch, Sagi Grimberg,
	Linux Kernel Mailinglist, Linux NVMe Mailinglist,
	Hannes Reinecke

On Wed, May 30, 2018 at 03:20:49PM +0200, Johannes Thumshirn wrote:
> Add disk name to tracepoints so we can better distinguish between
> individual disks in the trace output.

Doesn't apply to the nvme-4.18 branch, please resend.

* Re: [PATCH] nvme: trace: add disk name to tracepoints
  2018-06-28 14:34             ` Sagi Grimberg
@ 2018-06-28 15:02               ` Keith Busch
  0 siblings, 0 replies; 13+ messages in thread
From: Keith Busch @ 2018-06-28 15:02 UTC (permalink / raw)
  To: Sagi Grimberg
  Cc: Keith Busch, Christoph Hellwig, Linux Kernel Mailinglist,
	Linux NVMe Mailinglist, Johannes Thumshirn

On Thu, Jun 28, 2018 at 05:34:07PM +0300, Sagi Grimberg wrote:
> 
> > Thinking more on this, not using the hw qid really limits the utility
> > out of using these trace events: We may not be able to match a completion
> > to the submission without it since cmdid alone isn't enough to match up
> > the two events.
> > 
> > Here's an updated proposal and actually tested. I was also able to
> > combine admin and io submissions.
> 
> This looks good!
> 
> One thing that is missing is the controller id when we have multiple
> controllers/subsystems in the host like:
> 
> TP_printk("nvme%d: qid=%d, nsid=%u, cmdid=%u, flags=0x%x, meta=0x%llx,
> cmd=(%s %s)",
> 
> But Johannes can add it I guess.

Yep, I'm sort of going off on a tangent from what Johannes was adding.
We definitely need both. I have some time today, so I'll send out a
series with the combined results.

* Re: [PATCH] nvme: trace: add disk name to tracepoints
  2018-06-28  0:59           ` Keith Busch
  2018-06-28  7:44             ` Johannes Thumshirn
@ 2018-06-28 14:34             ` Sagi Grimberg
  2018-06-28 15:02               ` Keith Busch
  1 sibling, 1 reply; 13+ messages in thread
From: Sagi Grimberg @ 2018-06-28 14:34 UTC (permalink / raw)
  To: Keith Busch
  Cc: Keith Busch, Christoph Hellwig, Linux Kernel Mailinglist,
	Linux NVMe Mailinglist, Johannes Thumshirn


> Thinking more on this, not using the hw qid really limits the utility
> of these trace events: We may not be able to match a completion
> to the submission without it since cmdid alone isn't enough to match up
> the two events.
> 
> Here's an updated proposal and actually tested. I was also able to
> combine admin and io submissions.

This looks good!

One thing that is missing is the controller id when we have multiple
controllers/subsystems in the host like:

TP_printk("nvme%d: qid=%d, nsid=%u, cmdid=%u, flags=0x%x, meta=0x%llx, 
cmd=(%s %s)",

But Johannes can add it I guess.


* Re: [PATCH] nvme: trace: add disk name to tracepoints
  2018-06-28  0:59           ` Keith Busch
@ 2018-06-28  7:44             ` Johannes Thumshirn
  2018-06-28 14:34             ` Sagi Grimberg
  1 sibling, 0 replies; 13+ messages in thread
From: Johannes Thumshirn @ 2018-06-28  7:44 UTC (permalink / raw)
  To: Keith Busch
  Cc: Sagi Grimberg, Keith Busch, Christoph Hellwig,
	Linux NVMe Mailinglist, Linux Kernel Mailinglist

On Wed, Jun 27, 2018 at 06:59:43PM -0600, Keith Busch wrote:
> Thinking more on this, not using the hw qid really limits the utility
> of these trace events: We may not be able to match a completion
> to the submission without it since cmdid alone isn't enough to match up
> the two events.
> 
> Here's an updated proposal and actually tested. I was also able to
> combine admin and io submissions.

Awesome, thanks.

Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
-- 
Johannes Thumshirn                                          Storage
jthumshirn@suse.de                                +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850

* Re: [PATCH] nvme: trace: add disk name to tracepoints
  2018-06-27 23:43         ` Keith Busch
@ 2018-06-28  0:59           ` Keith Busch
  2018-06-28  7:44             ` Johannes Thumshirn
  2018-06-28 14:34             ` Sagi Grimberg
  0 siblings, 2 replies; 13+ messages in thread
From: Keith Busch @ 2018-06-28  0:59 UTC (permalink / raw)
  To: Sagi Grimberg
  Cc: Keith Busch, Christoph Hellwig, Linux Kernel Mailinglist,
	Linux NVMe Mailinglist, Johannes Thumshirn

Thinking more on this, not using the hw qid really limits the utility
of these trace events: We may not be able to match a completion
to the submission without it since cmdid alone isn't enough to match up
the two events.

Here's an updated proposal and actually tested. I was also able to
combine admin and io submissions.
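
As a rough sketch of the result (not real trace output; the opcode names
exist in trace.h but every value below is made up), an admin command would
now show up with qid=0 and an I/O command with its hw queue index:

  # illustrative output only, not captured from a real run:
  # cat /sys/kernel/debug/tracing/trace_pipe
  ... nvme_setup_nvm_cmd: qid=0, nsid=0, cmdid=8, flags=0x0, meta=0x0, cmd=(nvme_admin_identify ...)
  ... nvme_setup_nvm_cmd: qid=2, nsid=1, cmdid=65, flags=0x0, meta=0x0, cmd=(nvme_cmd_write ...)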

---
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b429d51..c647883 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -466,6 +466,12 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
 
+unsigned int blk_mq_request_hctx_idx(struct request *rq)
+{
+	return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu)->queue_num;
+}
+EXPORT_SYMBOL_GPL(blk_mq_request_hctx_idx);
+
 static void __blk_mq_free_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 46df030..f46bde2 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -652,10 +654,7 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 	}
 
 	cmd->common.command_id = req->tag;
-	if (ns)
-		trace_nvme_setup_nvm_cmd(req->q->id, cmd);
-	else
-		trace_nvme_setup_admin_cmd(cmd);
+	trace_nvme_setup_nvm_cmd(req, cmd);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 01390f0..bca3451 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -75,34 +75,9 @@ const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
 #define __parse_nvme_cmd(opcode, cdw10) \
 	nvme_trace_parse_nvm_cmd(p, opcode, cdw10)
 
-TRACE_EVENT(nvme_setup_admin_cmd,
-	    TP_PROTO(struct nvme_command *cmd),
-	    TP_ARGS(cmd),
-	    TP_STRUCT__entry(
-		    __field(u8, opcode)
-		    __field(u8, flags)
-		    __field(u16, cid)
-		    __field(u64, metadata)
-		    __array(u8, cdw10, 24)
-	    ),
-	    TP_fast_assign(
-		    __entry->opcode = cmd->common.opcode;
-		    __entry->flags = cmd->common.flags;
-		    __entry->cid = cmd->common.command_id;
-		    __entry->metadata = le64_to_cpu(cmd->common.metadata);
-		    memcpy(__entry->cdw10, cmd->common.cdw10,
-			   sizeof(__entry->cdw10));
-	    ),
-	    TP_printk(" cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
-		      __entry->cid, __entry->flags, __entry->metadata,
-		      show_admin_opcode_name(__entry->opcode),
-		      __parse_nvme_admin_cmd(__entry->opcode, __entry->cdw10))
-);
-
-
 TRACE_EVENT(nvme_setup_nvm_cmd,
-	    TP_PROTO(int qid, struct nvme_command *cmd),
-	    TP_ARGS(qid, cmd),
+	    TP_PROTO(struct request *req, struct nvme_command *cmd),
+	    TP_ARGS(req, cmd),
 	    TP_STRUCT__entry(
 		    __field(int, qid)
 		    __field(u8, opcode)
@@ -113,7 +88,7 @@ TRACE_EVENT(nvme_setup_nvm_cmd,
 		    __array(u8, cdw10, 24)
 	    ),
 	    TP_fast_assign(
-		    __entry->qid = qid;
+		    __entry->qid = blk_mq_request_hctx_idx(req) + !!req->rq_disk;
 		    __entry->opcode = cmd->common.opcode;
 		    __entry->flags = cmd->common.flags;
 		    __entry->cid = cmd->common.command_id;
@@ -125,8 +100,12 @@ TRACE_EVENT(nvme_setup_nvm_cmd,
 	    TP_printk("qid=%d, nsid=%u, cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
 		      __entry->qid, __entry->nsid, __entry->cid,
 		      __entry->flags, __entry->metadata,
-		      show_opcode_name(__entry->opcode),
-		      __parse_nvme_cmd(__entry->opcode, __entry->cdw10))
+		      __entry->qid ?
+				show_opcode_name(__entry->opcode) :
+				show_admin_opcode_name(__entry->opcode),
+		      __entry->qid ?
+				__parse_nvme_cmd(__entry->opcode, __entry->cdw10) :
+				__parse_nvme_admin_cmd(__entry->opcode, __entry->cdw10))
 );
 
 TRACE_EVENT(nvme_complete_rq,
@@ -141,7 +120,7 @@ TRACE_EVENT(nvme_complete_rq,
 		    __field(u16, status)
 	    ),
 	    TP_fast_assign(
-		    __entry->qid = req->q->id;
+		    __entry->qid = blk_mq_request_hctx_idx(req) + !!req->rq_disk;
 		    __entry->cid = req->tag;
 		    __entry->result = le64_to_cpu(nvme_req(req)->result.u64);
 		    __entry->retries = nvme_req(req)->retries;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index e3147eb..af91b2d 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -248,6 +248,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 }
 
 
+unsigned int blk_mq_request_hctx_idx(struct request *rq);
 int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);
 void blk_mq_end_request(struct request *rq, blk_status_t error);
--

* Re: [PATCH] nvme: trace: add disk name to tracepoints
  2018-06-27  8:06       ` Sagi Grimberg
  2018-06-27 10:00         ` Johannes Thumshirn
@ 2018-06-27 23:43         ` Keith Busch
  2018-06-28  0:59           ` Keith Busch
  1 sibling, 1 reply; 13+ messages in thread
From: Keith Busch @ 2018-06-27 23:43 UTC (permalink / raw)
  To: Sagi Grimberg
  Cc: Johannes Thumshirn, Keith Busch, Linux Kernel Mailinglist,
	Christoph Hellwig, Linux NVMe Mailinglist

On Wed, Jun 27, 2018 at 11:06:22AM +0300, Sagi Grimberg wrote:
> 
> > > Not related to your patch, but I did notice that the req->q->id isn't
> > > really useful here since that's not the hardware context identifier.
> > > That's just some ida assigned software identifier. For the admin command
> > > completion trace, it's actually a little confusing to see the qid in the
> > > trace.
> > 
> > It was actually requested by Martin so we can easily see which request
> > got dispatched/completed on which request queue.
> 
> Would be good in the future to display the hwqid, but we'll need some work
> before we can do that.

I'd really like to see the nvme qid, and it would allow for nice trace
filters.  We could use a little help from blk-mq to get the hctx's
queue_num from a struct request. I think the following should get us that
(haven't tested it just yet):

---
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b429d515b568..c6478833464d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -466,6 +466,12 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
 
+unsigned int blk_mq_request_hctx_idx(struct request *rq)
+{
+	return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu)->queue_num;
+}
+EXPORT_SYMBOL_GPL(blk_mq_request_hctx_idx);
+
 static void __blk_mq_free_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 46df030b2c3f..6a30c154aa99 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -653,7 +653,7 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 
 	cmd->common.command_id = req->tag;
 	if (ns)
-		trace_nvme_setup_nvm_cmd(req->q->id, cmd);
+		trace_nvme_setup_nvm_cmd(req, cmd);
 	else
 		trace_nvme_setup_admin_cmd(cmd);
 	return ret;
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 01390f0e1671..95ebe803424b 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -79,6 +79,7 @@ TRACE_EVENT(nvme_setup_admin_cmd,
 	    TP_PROTO(struct nvme_command *cmd),
 	    TP_ARGS(cmd),
 	    TP_STRUCT__entry(
+		    __field(int, qid)
 		    __field(u8, opcode)
 		    __field(u8, flags)
 		    __field(u16, cid)
@@ -86,6 +87,7 @@ TRACE_EVENT(nvme_setup_admin_cmd,
 		    __array(u8, cdw10, 24)
 	    ),
 	    TP_fast_assign(
+		    __entry->qid = 0;
 		    __entry->opcode = cmd->common.opcode;
 		    __entry->flags = cmd->common.flags;
 		    __entry->cid = cmd->common.command_id;
@@ -93,16 +95,16 @@ TRACE_EVENT(nvme_setup_admin_cmd,
 		    memcpy(__entry->cdw10, cmd->common.cdw10,
 			   sizeof(__entry->cdw10));
 	    ),
-	    TP_printk(" cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
-		      __entry->cid, __entry->flags, __entry->metadata,
+	    TP_printk("qid=%d, cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+		      __entry->qid, __entry->cid, __entry->flags, __entry->metadata,
 		      show_admin_opcode_name(__entry->opcode),
 		      __parse_nvme_admin_cmd(__entry->opcode, __entry->cdw10))
 );
 
 
 TRACE_EVENT(nvme_setup_nvm_cmd,
-	    TP_PROTO(int qid, struct nvme_command *cmd),
-	    TP_ARGS(qid, cmd),
+	    TP_PROTO(struct request *req, struct nvme_command *cmd),
+	    TP_ARGS(req, cmd),
 	    TP_STRUCT__entry(
 		    __field(int, qid)
 		    __field(u8, opcode)
@@ -113,7 +115,7 @@ TRACE_EVENT(nvme_setup_nvm_cmd,
 		    __array(u8, cdw10, 24)
 	    ),
 	    TP_fast_assign(
-		    __entry->qid = qid;
+		    __entry->qid = blk_mq_request_hctx_idx(req) + !!req->rq_disk;
 		    __entry->opcode = cmd->common.opcode;
 		    __entry->flags = cmd->common.flags;
 		    __entry->cid = cmd->common.command_id;
@@ -141,7 +143,7 @@ TRACE_EVENT(nvme_complete_rq,
 		    __field(u16, status)
 	    ),
 	    TP_fast_assign(
-		    __entry->qid = req->q->id;
+		    __entry->qid = blk_mq_request_hctx_idx(req) + !!req->rq_disk;
 		    __entry->cid = req->tag;
 		    __entry->result = le64_to_cpu(nvme_req(req)->result.u64);
 		    __entry->retries = nvme_req(req)->retries;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index e3147eb74222..af91b2d31a04 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -248,6 +248,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 }
 
 
+unsigned int blk_mq_request_hctx_idx(struct request *rq);
 int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);
 void blk_mq_end_request(struct request *rq, blk_status_t error);
--
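
To illustrate the trace filters mentioned above (standard tracefs
event-filter syntax; the qid value is only an example), the events could be
restricted to a single hw queue:

  # hypothetical usage, assuming the qid field added by the patch above
  # echo 'qid == 3' > /sys/kernel/debug/tracing/events/nvme/nvme_setup_nvm_cmd/filter
  # echo 'qid == 3' > /sys/kernel/debug/tracing/events/nvme/nvme_complete_rq/filter
  # echo 1 > /sys/kernel/debug/tracing/events/nvme/enable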

* Re: [PATCH] nvme: trace: add disk name to tracepoints
  2018-06-27  8:06       ` Sagi Grimberg
@ 2018-06-27 10:00         ` Johannes Thumshirn
  2018-06-27 23:43         ` Keith Busch
  1 sibling, 0 replies; 13+ messages in thread
From: Johannes Thumshirn @ 2018-06-27 10:00 UTC (permalink / raw)
  To: Sagi Grimberg
  Cc: Keith Busch, Keith Busch, Linux Kernel Mailinglist,
	Christoph Hellwig, Linux NVMe Mailinglist

On Wed, Jun 27, 2018 at 11:06:22AM +0300, Sagi Grimberg wrote:
> Maybe change the display to request_queue_id=%d?

request_queue_id is pretty long, how about rqid?

-- 
Johannes Thumshirn                                          Storage
jthumshirn@suse.de                                +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850

* Re: [PATCH] nvme: trace: add disk name to tracepoints
  2018-06-27  7:33     ` Johannes Thumshirn
@ 2018-06-27  8:06       ` Sagi Grimberg
  2018-06-27 10:00         ` Johannes Thumshirn
  2018-06-27 23:43         ` Keith Busch
  0 siblings, 2 replies; 13+ messages in thread
From: Sagi Grimberg @ 2018-06-27  8:06 UTC (permalink / raw)
  To: Johannes Thumshirn, Keith Busch
  Cc: Keith Busch, Linux Kernel Mailinglist, Christoph Hellwig,
	Linux NVMe Mailinglist


>> Not related to your patch, but I did notice that the req->q->id isn't
>> really useful here since that's not the hardware context identifier.
>> That's just some ida assigned software identifier. For the admin command
>> completion trace, it's actually a little confusing to see the qid in the
>> trace.
> 
> It was actually requested by Martin so we can easily see which request
> got dispatched/completed on which request queue.

Would be good in the future to display the hwqid, but we'll need some work
before we can do that.

Maybe change the display to request_queue_id=%d?

* Re: [PATCH] nvme: trace: add disk name to tracepoints
  2018-06-26 15:01   ` Keith Busch
@ 2018-06-27  7:33     ` Johannes Thumshirn
  2018-06-27  8:06       ` Sagi Grimberg
  0 siblings, 1 reply; 13+ messages in thread
From: Johannes Thumshirn @ 2018-06-27  7:33 UTC (permalink / raw)
  To: Keith Busch
  Cc: Keith Busch, Linux Kernel Mailinglist, Christoph Hellwig,
	Linux NVMe Mailinglist, Sagi Grimberg

On Tue, Jun 26, 2018 at 09:01:06AM -0600, Keith Busch wrote:
> Not related to your patch, but I did notice that the req->q->id isn't
> really useful here since that's not the hardware context identifier.
> That's just some ida assigned software identifier. For the admin command
> completion trace, it's actually a little confusing to see the qid in the
> trace.

It was actually requested by Martin so we can easily see which request
got dispatched/completed on which request queue.

-- 
Johannes Thumshirn                                          Storage
jthumshirn@suse.de                                +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850

* Re: [PATCH] nvme: trace: add disk name to tracepoints
  2018-06-26 13:51 ` [PATCH] nvme: trace: add " Johannes Thumshirn
@ 2018-06-26 15:01   ` Keith Busch
  2018-06-27  7:33     ` Johannes Thumshirn
  0 siblings, 1 reply; 13+ messages in thread
From: Keith Busch @ 2018-06-26 15:01 UTC (permalink / raw)
  To: Johannes Thumshirn
  Cc: Christoph Hellwig, Sagi Grimberg, Keith Busch,
	Linux Kernel Mailinglist, Linux NVMe Mailinglist

On Tue, Jun 26, 2018 at 03:51:41PM +0200, Johannes Thumshirn wrote:
> @@ -655,9 +655,9 @@ blk_status_t nvme_setup_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
>  	cmd->common.command_id = req->tag;
>  	nvme_req(req)->ctrl = ctrl;
>  	if (ns)
> -		trace_nvme_setup_nvm_cmd(req->q->id, cmd);
> +		trace_nvme_setup_nvm_cmd(req, cmd, ns->disk->disk_name);

Not related to your patch, but I did notice that the req->q->id isn't
really useful here since that's not the hardware context identifier.
That's just some ida assigned software identifier. For the admin command
completion trace, it's actually a little confusing to see the qid in the
trace.

* [PATCH] nvme: trace: add disk name to tracepoints
  2018-06-26 13:51 [PATCH v4 0/2] nvme: add controller id and " Johannes Thumshirn
@ 2018-06-26 13:51 ` Johannes Thumshirn
  2018-06-26 15:01   ` Keith Busch
  0 siblings, 1 reply; 13+ messages in thread
From: Johannes Thumshirn @ 2018-06-26 13:51 UTC (permalink / raw)
  To: Christoph Hellwig, Sagi Grimberg, Keith Busch
  Cc: Linux NVMe Mailinglist, Linux Kernel Mailinglist, Johannes Thumshirn

Add the disk name to tracepoints so we can better distinguish between
individual disks in the trace output and tell them apart from admin
commands, which are represented without a disk name.
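
For example, with this patch the trace output would look roughly like the
following (controller instance, disk name and all field values are made up
for illustration):

  # illustrative only; values are not from a real trace
  # cat /sys/kernel/debug/tracing/trace_pipe
  ... nvme_setup_admin_cmd: nvme0: cmdid=12, flags=0x0, meta=0x0, cmd=(nvme_admin_get_log_page ...)
  ... nvme_setup_nvm_cmd: nvme0: disk=nvme0n1, qid=1, nsid=1, cmdid=34, flags=0x0, meta=0x0, cmd=(nvme_cmd_read ...)
  ... nvme_complete_rq: nvme0: qid=1, cmdid=34, res=0, retries=0, flags=0x0, status=0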

Signed-off-by: Johannes Thumshirn <jthumshirn@suse.de>
---
 drivers/nvme/host/core.c  |  4 ++--
 drivers/nvme/host/trace.h | 39 +++++++++++++++++++++++++--------------
 2 files changed, 27 insertions(+), 16 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 62602af9eb77..cc1340948245 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -655,9 +655,9 @@ blk_status_t nvme_setup_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	cmd->common.command_id = req->tag;
 	nvme_req(req)->ctrl = ctrl;
 	if (ns)
-		trace_nvme_setup_nvm_cmd(req->q->id, cmd);
+		trace_nvme_setup_nvm_cmd(req, cmd, ns->disk->disk_name);
 	else
-		trace_nvme_setup_admin_cmd(cmd);
+		trace_nvme_setup_admin_cmd(req, cmd);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 01390f0e1671..371a816cd225 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -76,9 +76,10 @@ const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
 	nvme_trace_parse_nvm_cmd(p, opcode, cdw10)
 
 TRACE_EVENT(nvme_setup_admin_cmd,
-	    TP_PROTO(struct nvme_command *cmd),
-	    TP_ARGS(cmd),
+	    TP_PROTO(struct request *req, struct nvme_command *cmd),
+	    TP_ARGS(req, cmd),
 	    TP_STRUCT__entry(
+		    __field(int, ctrl_id)
 		    __field(u8, opcode)
 		    __field(u8, flags)
 		    __field(u16, cid)
@@ -86,6 +87,7 @@ TRACE_EVENT(nvme_setup_admin_cmd,
 		    __array(u8, cdw10, 24)
 	    ),
 	    TP_fast_assign(
+		    __entry->ctrl_id = nvme_req(req)->ctrl->cntlid;
 		    __entry->opcode = cmd->common.opcode;
 		    __entry->flags = cmd->common.flags;
 		    __entry->cid = cmd->common.command_id;
@@ -93,17 +95,21 @@ TRACE_EVENT(nvme_setup_admin_cmd,
 		    memcpy(__entry->cdw10, cmd->common.cdw10,
 			   sizeof(__entry->cdw10));
 	    ),
-	    TP_printk(" cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
-		      __entry->cid, __entry->flags, __entry->metadata,
+	    TP_printk("nvme%d: cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+		      __entry->ctrl_id, __entry->cid, __entry->flags,
+		      __entry->metadata,
 		      show_admin_opcode_name(__entry->opcode),
 		      __parse_nvme_admin_cmd(__entry->opcode, __entry->cdw10))
 );
 
 
 TRACE_EVENT(nvme_setup_nvm_cmd,
-	    TP_PROTO(int qid, struct nvme_command *cmd),
-	    TP_ARGS(qid, cmd),
+	    TP_PROTO(struct request *req, struct nvme_command *cmd,
+		     char *disk_name),
+	    TP_ARGS(req, cmd, disk_name),
 	    TP_STRUCT__entry(
+		    __string(name, disk_name)
+		    __field(int, ctrl_id)
 		    __field(int, qid)
 		    __field(u8, opcode)
 		    __field(u8, flags)
@@ -113,7 +119,9 @@ TRACE_EVENT(nvme_setup_nvm_cmd,
 		    __array(u8, cdw10, 24)
 	    ),
 	    TP_fast_assign(
-		    __entry->qid = qid;
+		    __assign_str(name, disk_name);
+		    __entry->ctrl_id = nvme_req(req)->ctrl->cntlid;
+		    __entry->qid = req->q->id;
 		    __entry->opcode = cmd->common.opcode;
 		    __entry->flags = cmd->common.flags;
 		    __entry->cid = cmd->common.command_id;
@@ -122,10 +130,10 @@ TRACE_EVENT(nvme_setup_nvm_cmd,
 		    memcpy(__entry->cdw10, cmd->common.cdw10,
 			   sizeof(__entry->cdw10));
 	    ),
-	    TP_printk("qid=%d, nsid=%u, cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
-		      __entry->qid, __entry->nsid, __entry->cid,
-		      __entry->flags, __entry->metadata,
-		      show_opcode_name(__entry->opcode),
+	    TP_printk("nvme%d: disk=%s, qid=%d, nsid=%u, cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+		      __entry->ctrl_id, __get_str(name), __entry->qid,
+		      __entry->nsid, __entry->cid, __entry->flags,
+		      __entry->metadata, show_opcode_name(__entry->opcode),
 		      __parse_nvme_cmd(__entry->opcode, __entry->cdw10))
 );
 
@@ -133,6 +141,7 @@ TRACE_EVENT(nvme_complete_rq,
 	    TP_PROTO(struct request *req),
 	    TP_ARGS(req),
 	    TP_STRUCT__entry(
+		    __field(int, ctrl_id)
 		    __field(int, qid)
 		    __field(int, cid)
 		    __field(u64, result)
@@ -141,6 +150,7 @@ TRACE_EVENT(nvme_complete_rq,
 		    __field(u16, status)
 	    ),
 	    TP_fast_assign(
+		    __entry->ctrl_id = nvme_req(req)->ctrl->cntlid;
 		    __entry->qid = req->q->id;
 		    __entry->cid = req->tag;
 		    __entry->result = le64_to_cpu(nvme_req(req)->result.u64);
@@ -148,9 +158,10 @@ TRACE_EVENT(nvme_complete_rq,
 		    __entry->flags = nvme_req(req)->flags;
 		    __entry->status = nvme_req(req)->status;
 	    ),
-	    TP_printk("qid=%d, cmdid=%u, res=%llu, retries=%u, flags=0x%x, status=%u",
-		      __entry->qid, __entry->cid, __entry->result,
-		      __entry->retries, __entry->flags, __entry->status)
+	    TP_printk("nvme%d: qid=%d, cmdid=%u, res=%llu, retries=%u, flags=0x%x, status=%u",
+		      __entry->ctrl_id, __entry->qid, __entry->cid,
+		      __entry->result,  __entry->retries, __entry->flags,
+		      __entry->status)
 
 );
 
-- 
2.16.4


end of thread

Thread overview: 13+ messages
2018-05-30 13:20 [PATCH] nvme: trace: add disk name to tracepoints Johannes Thumshirn
2018-05-30 23:06 ` Sagi Grimberg
2018-05-31 16:47 ` Christoph Hellwig
2018-06-26 13:51 [PATCH v4 0/2] nvme: add controller id and " Johannes Thumshirn
2018-06-26 13:51 ` [PATCH] nvme: trace: add " Johannes Thumshirn
2018-06-26 15:01   ` Keith Busch
2018-06-27  7:33     ` Johannes Thumshirn
2018-06-27  8:06       ` Sagi Grimberg
2018-06-27 10:00         ` Johannes Thumshirn
2018-06-27 23:43         ` Keith Busch
2018-06-28  0:59           ` Keith Busch
2018-06-28  7:44             ` Johannes Thumshirn
2018-06-28 14:34             ` Sagi Grimberg
2018-06-28 15:02               ` Keith Busch
