From: Olof Johansson <olof@lixom.net>
To: Dan Williams <dan.j.williams@intel.com>, christopher.leech@intel.com
Cc: Jeff Garzik <jeff@garzik.org>,
	neilb@suse.de, linux-raid@vger.kernel.org, akpm@osdl.org,
	linux-kernel@vger.kernel.org
Subject: [PATCH] [v2] dmaengine: clean up and abstract function types (was Re: [PATCH 08/19] dmaengine: enable multiple clients and operations)
Date: Fri, 15 Sep 2006 15:02:36 -0500
Message-ID: <20060915150236.7a907f1c@localhost.localdomain>
In-Reply-To: <20060915144423.500b529d@localhost.localdomain>

[Bad day, forgot a quilt refresh.]

Clean up dmaengine a bit. Make client registration specify which
channel function type the client will use, and make devices register
which function types they provide.
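
As a usage sketch (hypothetical names: my_dma_event, my_client,
my_memcpy_functions; compare the net/core/dev.c and ioatdma.c hunks
below), a memcpy client now names its function type at registration,
and a driver publishes its entry points in the device's funcs[] table:

	/* client side: ask for channels providing DMAFUNC_MEMCPY */
	static void my_dma_event(struct dma_client *client,
			struct dma_chan *chan, enum dma_event event)
	{
		/* track channel addition/removal for this client */
	}

	struct dma_client *my_client;

	my_client = dma_async_client_register(DMAFUNC_MEMCPY, my_dma_event);
	if (!my_client)
		return -ENOMEM;
	dma_async_client_chan_request(my_client, 1);

	/* driver side: one struct dma_function table per supported type */
	device->common.funcs[DMAFUNC_MEMCPY] = &my_memcpy_functions;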

Also exorcise most of the memcpy-specific references from the generic
DMA engine code. A few remain in the iovec code (drivers/dma/iovlock.c).
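
For reference, the renamed generic calls then look like this from the
client side (a minimal sketch; chan is a channel previously handed to
the client via its event callback, and dest/src/len are placeholders):

	dma_cookie_t cookie;

	cookie = dma_async_buf_to_buf(chan, dest, src, len);
	dma_async_issue_pending(chan);

	/* @last and @used may be NULL when only the status is wanted */
	while (dma_async_complete(chan, cookie, NULL, NULL) == DMA_IN_PROGRESS)
		cpu_relax();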


Signed-off-by: Olof Johansson <olof@lixom.net>

Index: linux-2.6/drivers/dma/dmaengine.c
===================================================================
--- linux-2.6.orig/drivers/dma/dmaengine.c
+++ linux-2.6/drivers/dma/dmaengine.c
@@ -73,14 +73,14 @@ static LIST_HEAD(dma_client_list);
 
 /* --- sysfs implementation --- */
 
-static ssize_t show_memcpy_count(struct class_device *cd, char *buf)
+static ssize_t show_count(struct class_device *cd, char *buf)
 {
 	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
 	unsigned long count = 0;
 	int i;
 
 	for_each_possible_cpu(i)
-		count += per_cpu_ptr(chan->local, i)->memcpy_count;
+		count += per_cpu_ptr(chan->local, i)->count;
 
 	return sprintf(buf, "%lu\n", count);
 }
@@ -105,7 +105,7 @@ static ssize_t show_in_use(struct class_
 }
 
 static struct class_device_attribute dma_class_attrs[] = {
-	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
+	__ATTR(count, S_IRUGO, show_count, NULL),
 	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
 	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
 	__ATTR_NULL
@@ -142,6 +142,10 @@ static struct dma_chan *dma_client_chan_
 
 	/* Find a channel, any DMA engine will do */
 	list_for_each_entry(device, &dma_device_list, global_node) {
+		/* Skip devices that don't provide the right function */
+		if (!device->funcs[client->type])
+			continue;
+
 		list_for_each_entry(chan, &device->channels, device_node) {
 			if (chan->client)
 				continue;
@@ -241,7 +245,8 @@ static void dma_chans_rebalance(void)
  * dma_async_client_register - allocate and register a &dma_client
  * @event_callback: callback for notification of channel addition/removal
  */
-struct dma_client *dma_async_client_register(dma_event_callback event_callback)
+struct dma_client *dma_async_client_register(enum dma_function_type type,
+		dma_event_callback event_callback)
 {
 	struct dma_client *client;
 
@@ -254,6 +259,7 @@ struct dma_client *dma_async_client_regi
 	client->chans_desired = 0;
 	client->chan_count = 0;
 	client->event_callback = event_callback;
+	client->type = type;
 
 	mutex_lock(&dma_list_mutex);
 	list_add_tail(&client->global_node, &dma_client_list);
@@ -402,11 +408,11 @@ subsys_initcall(dma_bus_init);
 EXPORT_SYMBOL(dma_async_client_register);
 EXPORT_SYMBOL(dma_async_client_unregister);
 EXPORT_SYMBOL(dma_async_client_chan_request);
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
-EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
-EXPORT_SYMBOL(dma_async_memcpy_complete);
-EXPORT_SYMBOL(dma_async_memcpy_issue_pending);
+EXPORT_SYMBOL(dma_async_buf_to_buf);
+EXPORT_SYMBOL(dma_async_buf_to_pg);
+EXPORT_SYMBOL(dma_async_pg_to_pg);
+EXPORT_SYMBOL(dma_async_complete);
+EXPORT_SYMBOL(dma_async_issue_pending);
 EXPORT_SYMBOL(dma_async_device_register);
 EXPORT_SYMBOL(dma_async_device_unregister);
 EXPORT_SYMBOL(dma_chan_cleanup);
Index: linux-2.6/drivers/dma/ioatdma.c
===================================================================
--- linux-2.6.orig/drivers/dma/ioatdma.c
+++ linux-2.6/drivers/dma/ioatdma.c
@@ -40,6 +40,7 @@
 #define to_ioat_device(dev) container_of(dev, struct ioat_device, common)
 #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
 
+
 /* internal functions */
 static int __devinit ioat_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
 static void __devexit ioat_remove(struct pci_dev *pdev);
@@ -681,6 +682,14 @@ out:
 	return err;
 }
 
+struct dma_function ioat_memcpy_functions = {
+	.buf_to_buf = ioat_dma_memcpy_buf_to_buf,
+	.buf_to_pg = ioat_dma_memcpy_buf_to_pg,
+	.pg_to_pg = ioat_dma_memcpy_pg_to_pg,
+	.complete = ioat_dma_is_complete,
+	.issue_pending = ioat_dma_memcpy_issue_pending,
+};
+
 static int __devinit ioat_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
 {
@@ -756,11 +765,8 @@ static int __devinit ioat_probe(struct p
 
 	device->common.device_alloc_chan_resources = ioat_dma_alloc_chan_resources;
 	device->common.device_free_chan_resources = ioat_dma_free_chan_resources;
-	device->common.device_memcpy_buf_to_buf = ioat_dma_memcpy_buf_to_buf;
-	device->common.device_memcpy_buf_to_pg = ioat_dma_memcpy_buf_to_pg;
-	device->common.device_memcpy_pg_to_pg = ioat_dma_memcpy_pg_to_pg;
-	device->common.device_memcpy_complete = ioat_dma_is_complete;
-	device->common.device_memcpy_issue_pending = ioat_dma_memcpy_issue_pending;
+	device->common.funcs[DMAFUNC_MEMCPY] = &ioat_memcpy_functions;
+
 	printk(KERN_INFO "Intel(R) I/OAT DMA Engine found, %d channels\n",
 		device->common.chancnt);
 
Index: linux-2.6/include/linux/dmaengine.h
===================================================================
--- linux-2.6.orig/include/linux/dmaengine.h
+++ linux-2.6/include/linux/dmaengine.h
@@ -67,14 +67,14 @@ enum dma_status {
 /**
  * struct dma_chan_percpu - the per-CPU part of struct dma_chan
  * @refcount: local_t used for open-coded "bigref" counting
- * @memcpy_count: transaction counter
+ * @count: transaction counter
  * @bytes_transferred: byte counter
  */
 
 struct dma_chan_percpu {
 	local_t refcount;
 	/* stats */
-	unsigned long memcpy_count;
+	unsigned long count;
 	unsigned long bytes_transferred;
 };
 
@@ -138,6 +138,15 @@ static inline void dma_chan_put(struct d
 typedef void (*dma_event_callback) (struct dma_client *client,
 		struct dma_chan *chan, enum dma_event event);
 
+/*
+ * enum dma_function_type - one entry for each function type a device can provide
+ */
+enum dma_function_type {
+	DMAFUNC_MEMCPY = 0,
+	DMAFUNC_XOR,
+	DMAFUNC_MAX
+};
+
 /**
  * struct dma_client - info on the entity making use of DMA services
  * @event_callback: func ptr to call when something happens
@@ -152,11 +161,35 @@ struct dma_client {
 	unsigned int		chan_count;
 	unsigned int		chans_desired;
 
+	enum dma_function_type	type;
+
 	spinlock_t		lock;
 	struct list_head	channels;
 	struct list_head	global_node;
 };
 
+/* struct dma_function - per-type table of channel function pointers
+ * @buf_to_buf: buf pointer to buf pointer
+ * @buf_to_pg: buf pointer to struct page
+ * @pg_to_pg: struct page/offset to struct page/offset
+ * @complete: poll the status of a DMA transaction
+ * @issue_pending: push appended descriptors to hardware */
+struct dma_function {
+	dma_cookie_t (*buf_to_buf)(struct dma_chan *chan,
+				void *dest, void *src, size_t len);
+	dma_cookie_t (*buf_to_pg)(struct dma_chan *chan,
+				struct page *page, unsigned int offset,
+				void *kdata, size_t len);
+	dma_cookie_t (*pg_to_pg)(struct dma_chan *chan,
+				struct page *dest_pg, unsigned int dest_off,
+				struct page *src_pg, unsigned int src_off,
+				size_t len);
+	enum dma_status (*complete)(struct dma_chan *chan,
+				dma_cookie_t cookie, dma_cookie_t *last,
+				dma_cookie_t *used);
+	void (*issue_pending)(struct dma_chan *chan);
+};
+
 /**
  * struct dma_device - info on the entity supplying DMA services
  * @chancnt: how many DMA channels are supported
@@ -168,14 +201,8 @@ struct dma_client {
  * @device_alloc_chan_resources: allocate resources and return the
  *	number of allocated descriptors
  * @device_free_chan_resources: release DMA channel's resources
- * @device_memcpy_buf_to_buf: memcpy buf pointer to buf pointer
- * @device_memcpy_buf_to_pg: memcpy buf pointer to struct page
- * @device_memcpy_pg_to_pg: memcpy struct page/offset to struct page/offset
- * @device_memcpy_complete: poll the status of an IOAT DMA transaction
- * @device_memcpy_issue_pending: push appended descriptors to hardware
  */
 struct dma_device {
-
 	unsigned int chancnt;
 	struct list_head channels;
 	struct list_head global_node;
@@ -185,31 +212,24 @@ struct dma_device {
 
 	int dev_id;
 
+	struct dma_function *funcs[DMAFUNC_MAX];
+
 	int (*device_alloc_chan_resources)(struct dma_chan *chan);
 	void (*device_free_chan_resources)(struct dma_chan *chan);
-	dma_cookie_t (*device_memcpy_buf_to_buf)(struct dma_chan *chan,
-			void *dest, void *src, size_t len);
-	dma_cookie_t (*device_memcpy_buf_to_pg)(struct dma_chan *chan,
-			struct page *page, unsigned int offset, void *kdata,
-			size_t len);
-	dma_cookie_t (*device_memcpy_pg_to_pg)(struct dma_chan *chan,
-			struct page *dest_pg, unsigned int dest_off,
-			struct page *src_pg, unsigned int src_off, size_t len);
-	enum dma_status (*device_memcpy_complete)(struct dma_chan *chan,
-			dma_cookie_t cookie, dma_cookie_t *last,
-			dma_cookie_t *used);
-	void (*device_memcpy_issue_pending)(struct dma_chan *chan);
 };
 
+#define CHAN2FUNCS(chan) ((chan)->device->funcs[(chan)->client->type])
+
 /* --- public DMA engine API --- */
 
-struct dma_client *dma_async_client_register(dma_event_callback event_callback);
+struct dma_client *dma_async_client_register(enum dma_function_type type,
+		dma_event_callback event_callback);
 void dma_async_client_unregister(struct dma_client *client);
 void dma_async_client_chan_request(struct dma_client *client,
 		unsigned int number);
 
 /**
- * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
+ * dma_async_buf_to_buf - offloaded copy between virtual addresses
  * @chan: DMA channel to offload copy to
  * @dest: destination address (virtual)
  * @src: source address (virtual)
@@ -220,19 +240,19 @@ void dma_async_client_chan_request(struc
  * Both @dest and @src must stay memory resident (kernel memory or locked
  * user space pages).
  */
-static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
-	void *dest, void *src, size_t len)
+static inline dma_cookie_t dma_async_buf_to_buf(struct dma_chan *chan,
+		void *dest, void *src, size_t len)
 {
 	int cpu = get_cpu();
 	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
+	per_cpu_ptr(chan->local, cpu)->count++;
 	put_cpu();
 
-	return chan->device->device_memcpy_buf_to_buf(chan, dest, src, len);
+	return CHAN2FUNCS(chan)->buf_to_buf(chan, dest, src, len);
 }
 
 /**
- * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
+ * dma_async_buf_to_pg - offloaded copy from address to page
  * @chan: DMA channel to offload copy to
  * @page: destination page
  * @offset: offset in page to copy to
@@ -244,20 +264,21 @@ static inline dma_cookie_t dma_async_mem
  * Both @page/@offset and @kdata must stay memory resident (kernel memory or
  * locked user space pages)
  */
-static inline dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
-	struct page *page, unsigned int offset, void *kdata, size_t len)
+static inline dma_cookie_t dma_async_buf_to_pg(struct dma_chan *chan,
+		struct page *page, unsigned int offset,
+		void *kdata, size_t len)
 {
 	int cpu = get_cpu();
 	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
+	per_cpu_ptr(chan->local, cpu)->count++;
 	put_cpu();
 
-	return chan->device->device_memcpy_buf_to_pg(chan, page, offset,
-	                                             kdata, len);
+	return CHAN2FUNCS(chan)->buf_to_pg(chan, page, offset,
+						kdata, len);
 }
 
 /**
- * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
+ * dma_async_pg_to_pg - offloaded copy from page to page
  * @chan: DMA channel to offload copy to
  * @dest_pg: destination page
  * @dest_off: offset in page to copy to
@@ -270,33 +291,33 @@ static inline dma_cookie_t dma_async_mem
  * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
  * (kernel memory or locked user space pages).
  */
-static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
-	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
-	unsigned int src_off, size_t len)
+static inline dma_cookie_t dma_async_pg_to_pg(struct dma_chan *chan,
+		struct page *dest_pg, unsigned int dest_off,
+		struct page *src_pg, unsigned int src_off, size_t len)
 {
 	int cpu = get_cpu();
 	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
+	per_cpu_ptr(chan->local, cpu)->count++;
 	put_cpu();
 
-	return chan->device->device_memcpy_pg_to_pg(chan, dest_pg, dest_off,
-	                                            src_pg, src_off, len);
+	return CHAN2FUNCS(chan)->pg_to_pg(chan, dest_pg, dest_off,
+						src_pg, src_off, len);
 }
 
 /**
- * dma_async_memcpy_issue_pending - flush pending copies to HW
+ * dma_async_issue_pending - flush pending copies to HW
  * @chan: target DMA channel
  *
  * This allows drivers to push copies to HW in batches,
  * reducing MMIO writes where possible.
  */
-static inline void dma_async_memcpy_issue_pending(struct dma_chan *chan)
+static inline void dma_async_issue_pending(struct dma_chan *chan)
 {
-	return chan->device->device_memcpy_issue_pending(chan);
+	return CHAN2FUNCS(chan)->issue_pending(chan);
 }
 
 /**
- * dma_async_memcpy_complete - poll for transaction completion
+ * dma_async_complete - poll for transaction completion
  * @chan: DMA channel
  * @cookie: transaction identifier to check status of
  * @last: returns last completed cookie, can be NULL
@@ -306,10 +327,11 @@ static inline void dma_async_memcpy_issu
  * internal state and can be used with dma_async_is_complete() to check
  * the status of multiple cookies without re-checking hardware state.
  */
-static inline enum dma_status dma_async_memcpy_complete(struct dma_chan *chan,
-	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
+static inline enum dma_status dma_async_complete(struct dma_chan *chan,
+		dma_cookie_t cookie, dma_cookie_t *last,
+		dma_cookie_t *used)
 {
-	return chan->device->device_memcpy_complete(chan, cookie, last, used);
+	return CHAN2FUNCS(chan)->complete(chan, cookie, last, used);
 }
 
 /**
@@ -318,7 +340,7 @@ static inline enum dma_status dma_async_
  * @last_complete: last known completed transaction
  * @last_used: last cookie value handed out
  *
- * dma_async_is_complete() is used in dma_async_memcpy_complete()
+ * dma_async_is_complete() is used in dma_async_complete()
  * the test logic is separated for lightweight testing of multiple cookies
  */
 static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
Index: linux-2.6/net/core/dev.c
===================================================================
--- linux-2.6.orig/net/core/dev.c
+++ linux-2.6/net/core/dev.c
@@ -1945,7 +1945,7 @@ out:
 		struct dma_chan *chan;
 		rcu_read_lock();
 		list_for_each_entry_rcu(chan, &net_dma_client->channels, client_node)
-			dma_async_memcpy_issue_pending(chan);
+			dma_async_issue_pending(chan);
 		rcu_read_unlock();
 	}
 #endif
@@ -3467,7 +3467,7 @@ static void netdev_dma_event(struct dma_
 static int __init netdev_dma_register(void)
 {
 	spin_lock_init(&net_dma_event_lock);
-	net_dma_client = dma_async_client_register(netdev_dma_event);
+	net_dma_client = dma_async_client_register(DMAFUNC_MEMCPY, netdev_dma_event);
 	if (net_dma_client == NULL)
 		return -ENOMEM;
 
Index: linux-2.6/drivers/dma/iovlock.c
===================================================================
--- linux-2.6.orig/drivers/dma/iovlock.c
+++ linux-2.6/drivers/dma/iovlock.c
@@ -151,7 +151,7 @@ static dma_cookie_t dma_memcpy_to_kernel
 	while (len > 0) {
 		if (iov->iov_len) {
 			int copy = min_t(unsigned int, iov->iov_len, len);
-			dma_cookie = dma_async_memcpy_buf_to_buf(
+			dma_cookie = dma_async_buf_to_buf(
 					chan,
 					iov->iov_base,
 					kdata,
@@ -210,7 +210,7 @@ dma_cookie_t dma_memcpy_to_iovec(struct 
 			copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
 			copy = min_t(int, copy, iov[iovec_idx].iov_len);
 
-			dma_cookie = dma_async_memcpy_buf_to_pg(chan,
+			dma_cookie = dma_async_buf_to_pg(chan,
 					page_list->pages[page_idx],
 					iov_byte_offset,
 					kdata,
@@ -274,7 +274,7 @@ dma_cookie_t dma_memcpy_pg_to_iovec(stru
 			copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
 			copy = min_t(int, copy, iov[iovec_idx].iov_len);
 
-			dma_cookie = dma_async_memcpy_pg_to_pg(chan,
+			dma_cookie = dma_async_pg_to_pg(chan,
 					page_list->pages[page_idx],
 					iov_byte_offset,
 					page,
Index: linux-2.6/net/ipv4/tcp.c
===================================================================
--- linux-2.6.orig/net/ipv4/tcp.c
+++ linux-2.6/net/ipv4/tcp.c
@@ -1431,11 +1431,11 @@ skip_copy:
 		struct sk_buff *skb;
 		dma_cookie_t done, used;
 
-		dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+		dma_async_issue_pending(tp->ucopy.dma_chan);
 
-		while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
-		                                 tp->ucopy.dma_cookie, &done,
-		                                 &used) == DMA_IN_PROGRESS) {
+		while (dma_async_complete(tp->ucopy.dma_chan,
+		                          tp->ucopy.dma_cookie, &done,
+		                          &used) == DMA_IN_PROGRESS) {
 			/* do partial cleanup of sk_async_wait_queue */
 			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
 			       (dma_async_is_complete(skb->dma_cookie, done,
