LKML Archive on lore.kernel.org
* [Patch 0/5] prepare XPC and XPNET to support SGI UV
From: dcn @ 2008-03-25 19:25 UTC
To: jes, tony.luck, linux-kernel, linux-ia64
This set of five patches moves XPC and XPNET to drivers/misc/xp in preparation
for enabling X86_64 support.
* [Patch 1/5] add multi-page allocation to the uncached allocator
From: dcn @ 2008-03-25 19:25 UTC
To: jes, tony.luck, linux-kernel, linux-ia64
[-- Attachment #1: uncached-pages --]
[-- Type: text/plain, Size: 4725 bytes --]
Enable the uncached allocator to allocate multiple pages of contiguous
uncached memory.
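For illustration, here is a minimal sketch of a call site using the widened
interface (the caller and its error handling are hypothetical; the
two-argument signatures are the ones introduced by this patch):
	/* hypothetical caller: allocate 4 contiguous uncached pages,
	 * preferring the local node */
	unsigned long uc_addr = uncached_alloc_page(numa_node_id(), 4);
	if (uc_addr == 0)
		return -ENOMEM;	/* no contiguous run could be found */
	/* ... use the uncached region ... */
	uncached_free_page(uc_addr, 4);	/* free all 4 pages together */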
Signed-off-by: Dean Nelson <dcn@sgi.com>
---
arch/ia64/kernel/uncached.c | 21 ++++++++++++---------
drivers/char/mspec.c | 12 ++++++------
include/asm-ia64/uncached.h | 4 ++--
3 files changed, 20 insertions(+), 17 deletions(-)
Index: linux-2.6/arch/ia64/kernel/uncached.c
===================================================================
--- linux-2.6.orig/arch/ia64/kernel/uncached.c 2007-10-04 06:52:20.000000000 -0500
+++ linux-2.6/arch/ia64/kernel/uncached.c 2007-11-08 12:14:31.408258515 -0600
@@ -177,12 +177,13 @@
* uncached_alloc_page
*
* @starting_nid: node id of node to start with, or -1
+ * @n_pages: number of contiguous pages to allocate
*
- * Allocate 1 uncached page. Allocates on the requested node. If no
- * uncached pages are available on the requested node, roundrobin starting
- * with the next higher node.
+ * Allocate the specified number of contiguous uncached pages on the
+ * requested node. If not enough contiguous uncached pages are available
+ * on the requested node, roundrobin starting with the next higher node.
*/
-unsigned long uncached_alloc_page(int starting_nid)
+unsigned long uncached_alloc_page(int starting_nid, int n_pages)
{
unsigned long uc_addr;
struct uncached_pool *uc_pool;
@@ -202,7 +203,8 @@
if (uc_pool->pool == NULL)
continue;
do {
- uc_addr = gen_pool_alloc(uc_pool->pool, PAGE_SIZE);
+ uc_addr = gen_pool_alloc(uc_pool->pool,
+ n_pages * PAGE_SIZE);
if (uc_addr != 0)
return uc_addr;
} while (uncached_add_chunk(uc_pool, nid) == 0);
@@ -217,11 +219,12 @@
/*
* uncached_free_page
*
- * @uc_addr: uncached address of page to free
+ * @uc_addr: uncached address of first page to free
+ * @n_pages: number of contiguous pages to free
*
- * Free a single uncached page.
+ * Free the specified number of uncached pages.
*/
-void uncached_free_page(unsigned long uc_addr)
+void uncached_free_page(unsigned long uc_addr, int n_pages)
{
int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
struct gen_pool *pool = uncached_pools[nid].pool;
@@ -232,7 +235,7 @@
if ((uc_addr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET)
panic("uncached_free_page invalid address %lx\n", uc_addr);
- gen_pool_free(pool, uc_addr, PAGE_SIZE);
+ gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE);
}
EXPORT_SYMBOL(uncached_free_page);
Index: linux-2.6/drivers/char/mspec.c
===================================================================
--- linux-2.6.orig/drivers/char/mspec.c 2007-10-04 06:52:21.000000000 -0500
+++ linux-2.6/drivers/char/mspec.c 2007-11-08 12:07:17.458415225 -0600
@@ -180,7 +180,7 @@
my_page = vdata->maddr[index];
vdata->maddr[index] = 0;
if (!mspec_zero_block(my_page, PAGE_SIZE))
- uncached_free_page(my_page);
+ uncached_free_page(my_page, 1);
else
printk(KERN_WARNING "mspec_close(): "
"failed to zero page %ld\n", my_page);
@@ -209,7 +209,7 @@
index = (address - vdata->vm_start) >> PAGE_SHIFT;
maddr = (volatile unsigned long) vdata->maddr[index];
if (maddr == 0) {
- maddr = uncached_alloc_page(numa_node_id());
+ maddr = uncached_alloc_page(numa_node_id(), 1);
if (maddr == 0)
return NOPFN_OOM;
@@ -218,7 +218,7 @@
vdata->count++;
vdata->maddr[index] = maddr;
} else {
- uncached_free_page(maddr);
+ uncached_free_page(maddr, 1);
maddr = vdata->maddr[index];
}
spin_unlock(&vdata->lock);
@@ -367,7 +367,7 @@
int nasid;
unsigned long phys;
- scratch_page[nid] = uncached_alloc_page(nid);
+ scratch_page[nid] = uncached_alloc_page(nid, 1);
if (scratch_page[nid] == 0)
goto free_scratch_pages;
phys = __pa(scratch_page[nid]);
@@ -414,7 +414,7 @@
free_scratch_pages:
for_each_node(nid) {
if (scratch_page[nid] != 0)
- uncached_free_page(scratch_page[nid]);
+ uncached_free_page(scratch_page[nid], 1);
}
return ret;
}
@@ -431,7 +431,7 @@
for_each_node(nid) {
if (scratch_page[nid] != 0)
- uncached_free_page(scratch_page[nid]);
+ uncached_free_page(scratch_page[nid], 1);
}
}
}
Index: linux-2.6/include/asm-ia64/uncached.h
===================================================================
--- linux-2.6.orig/include/asm-ia64/uncached.h 2007-10-04 06:52:25.000000000 -0500
+++ linux-2.6/include/asm-ia64/uncached.h 2007-11-08 12:05:00.633469891 -0600
@@ -8,5 +8,5 @@
* Prototypes for the uncached page allocator
*/
-extern unsigned long uncached_alloc_page(int nid);
-extern void uncached_free_page(unsigned long);
+extern unsigned long uncached_alloc_page(int, int);
+extern void uncached_free_page(unsigned long, int);
--
* [Patch 3/5] prepare XPC and XPNET for future support of SGI's UV architecture
From: dcn @ 2008-03-25 19:25 UTC
To: jes, tony.luck, linux-kernel, linux-ia64
[-- Attachment #1: generic-xp --]
[-- Type: text/plain, Size: 202877 bytes --]
Prepared XPC and XPNET for future support of SGI's UV architecture. Made
changes so it compiles on x86_64. Added support for up to 256 partitions.
Cleaned up BTE error conversion for ia64 sn2.
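The BTE cleanup replaces direct calls to the sn2-only xp_bte_copy() wrapper
with an xp_remote_memcpy() function pointer that each architecture's init
routine fills in. A hedged sketch of a caller after the conversion (the
buffer names and partition variable are illustrative; the signature and
return values are taken from the patch below):
	enum xp_retval ret;
	/* pull the data across the partition boundary; on sn2 this is
	 * backed by bte_copy(), on UV by a (for now) stub */
	ret = xp_remote_memcpy(local_buf, remote_buf, nbytes);
	if (ret != xpSuccess)
		XPC_DEACTIVATE_PARTITION(part, ret);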
Signed-off-by: Dean Nelson <dcn@sgi.com>
---
arch/ia64/sn/kernel/setup.c | 4
drivers/misc/Kconfig | 2
drivers/misc/xp/Makefile | 5
drivers/misc/xp/xp.h | 382 ++++++++++----------
drivers/misc/xp/xp_main.c | 181 ++++++---
drivers/misc/xp/xp_nofault.S | 2
drivers/misc/xp/xp_sn2.c | 487 ++++++++++++++++++++++++++
drivers/misc/xp/xp_uv.c | 194 ++++++++++
drivers/misc/xp/xpc.h | 594 +++++++++++++++----------------
drivers/misc/xp/xpc_channel.c | 357 +++++++++----------
drivers/misc/xp/xpc_main.c | 273 +++++++-------
drivers/misc/xp/xpc_partition.c | 670 +++++++++++++++---------------------
drivers/misc/xp/xpnet.c | 280 ++++++---------
include/asm-ia64/sn/arch.h | 5
include/asm-ia64/sn/bte.h | 5
15 files changed, 2010 insertions(+), 1431 deletions(-)
Index: linux-2.6/drivers/misc/xp/xp.h
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xp.h 2008-03-25 13:47:59.962929996 -0500
+++ linux-2.6/drivers/misc/xp/xp.h 2008-03-25 13:51:08.822447754 -0500
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2004-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2004-2008 Silicon Graphics, Inc. All rights reserved.
*/
@@ -12,15 +12,28 @@
*/
-#ifndef _ASM_IA64_SN_XP_H
-#define _ASM_IA64_SN_XP_H
+#ifndef _DRIVERS_MISC_XP_XP_H
+#define _DRIVERS_MISC_XP_XP_H
#include <linux/cache.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
-#include <asm/sn/types.h>
-#include <asm/sn/bte.h>
+#ifdef CONFIG_IA64
+#include <asm/sn/arch.h>
+#endif
+
+/* >>> Add these two #defines to some linux header file some day. */
+#define BYTES_PER_WORD sizeof(void *)
+#define BITS_PER_WORD (BYTES_PER_WORD * BITS_PER_BYTE)
+
+#if defined(CONFIG_IA64)
+#define U64_ELL "l"
+#elif defined(CONFIG_X86_64)
+#define U64_ELL "ll"
+#else
+#error architecture is NOT supported
+#endif
#ifdef USE_DBUG_ON
@@ -30,6 +43,23 @@
#endif
+#ifndef is_shub1
+#define is_shub1() 0
+#endif
+
+#ifndef is_shub2
+#define is_shub2() 0
+#endif
+
+#ifndef is_shub
+#define is_shub() 0
+#endif
+
+#ifndef is_uv
+#define is_uv() 0
+#endif
+
+
/*
* Define the maximum number of logically defined partitions the system
* can support. It is constrained by the maximum number of hardware
@@ -41,60 +71,15 @@
* maximum number of nodes in the entire system divided by the minimum number
* of nodes that comprise an access protection grouping.
*/
-#define XP_MAX_PARTITIONS 64
-
-
-/*
- * Define the number of u64s required to represent all the C-brick nasids
- * as a bitmap. The cross-partition kernel modules deal only with
- * C-brick nasids, thus the need for bitmaps which don't account for
- * odd-numbered (non C-brick) nasids.
- */
-#define XP_MAX_PHYSNODE_ID (MAX_NUMALINK_NODES / 2)
-#define XP_NASID_MASK_BYTES ((XP_MAX_PHYSNODE_ID + 7) / 8)
-#define XP_NASID_MASK_WORDS ((XP_MAX_PHYSNODE_ID + 63) / 64)
-
+#define XP_NPARTITIONS 64 /* #of partitions allowed */
+#define XP_MAX_NPARTITIONS 256 /* max #of partitions possible */
-/*
- * Wrapper for bte_copy() that should it return a failure status will retry
- * the bte_copy() once in the hope that the failure was due to a temporary
- * aberration (i.e., the link going down temporarily).
- *
- * src - physical address of the source of the transfer.
- * vdst - virtual address of the destination of the transfer.
- * len - number of bytes to transfer from source to destination.
- * mode - see bte_copy() for definition.
- * notification - see bte_copy() for definition.
- *
- * Note: xp_bte_copy() should never be called while holding a spinlock.
- */
-static inline bte_result_t
-xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
-{
- bte_result_t ret;
- u64 pdst = ia64_tpa(vdst);
-
-
- /*
- * Ensure that the physically mapped memory is contiguous.
- *
- * We do this by ensuring that the memory is from region 7 only.
- * If the need should arise to use memory from one of the other
- * regions, then modify the BUG_ON() statement to ensure that the
- * memory from that region is always physically contiguous.
- */
- BUG_ON(REGION_NUMBER(vdst) != RGN_KERNEL);
-
- ret = bte_copy(src, pdst, len, mode, notification);
- if ((ret != BTE_SUCCESS) && BTE_ERROR_RETRY(ret)) {
- if (!in_interrupt()) {
- cond_resched();
- }
- ret = bte_copy(src, pdst, len, mode, notification);
- }
+#if XP_NPARTITIONS > XP_MAX_NPARTITIONS
+#error XP_NPARTITIONS exceeds MAXIMUM possible.
+#endif
- return ret;
-}
+#define XP_MIN_PARTID 1 /* inclusive */
+#define XP_MAX_PARTID (XP_NPARTITIONS - 1) /* inclusive */
/*
@@ -115,11 +100,11 @@ xp_bte_copy(u64 src, u64 vdst, u64 len,
#define XPC_MEM_CHANNEL 0 /* memory channel number */
#define XPC_NET_CHANNEL 1 /* network channel number */
-#define XPC_NCHANNELS 2 /* #of defined channels */
-#define XPC_MAX_NCHANNELS 8 /* max #of channels allowed */
+#define XPC_NCHANNELS 2 /* #of channels allowed */
+#define XPC_MAX_NCHANNELS 8 /* max #of channels possible */
#if XPC_NCHANNELS > XPC_MAX_NCHANNELS
-#error XPC_NCHANNELS exceeds MAXIMUM allowed.
+#error XPC_NCHANNELS exceeds MAXIMUM possible.
#endif
@@ -169,106 +154,103 @@ struct xpc_msg {
/*
* Define the return values and values passed to user's callout functions.
* (It is important to add new value codes at the end just preceding
- * xpcUnknownReason, which must have the highest numerical value.)
+ * xpUnknownReason, which must have the highest numerical value.)
*/
-enum xpc_retval {
- xpcSuccess = 0,
-
- xpcNotConnected, /* 1: channel is not connected */
- xpcConnected, /* 2: channel connected (opened) */
- xpcRETIRED1, /* 3: (formerly xpcDisconnected) */
-
- xpcMsgReceived, /* 4: message received */
- xpcMsgDelivered, /* 5: message delivered and acknowledged */
-
- xpcRETIRED2, /* 6: (formerly xpcTransferFailed) */
-
- xpcNoWait, /* 7: operation would require wait */
- xpcRetry, /* 8: retry operation */
- xpcTimeout, /* 9: timeout in xpc_allocate_msg_wait() */
- xpcInterrupted, /* 10: interrupted wait */
-
- xpcUnequalMsgSizes, /* 11: message size disparity between sides */
- xpcInvalidAddress, /* 12: invalid address */
+enum xp_retval {
+ xpSuccess = 0,
- xpcNoMemory, /* 13: no memory available for XPC structures */
- xpcLackOfResources, /* 14: insufficient resources for operation */
- xpcUnregistered, /* 15: channel is not registered */
- xpcAlreadyRegistered, /* 16: channel is already registered */
+ xpNotConnected, /* 1: channel is not connected */
+ xpConnected, /* 2: channel connected (opened) */
+ xpRETIRED1, /* 3: (formerly xpDisconnected) */
- xpcPartitionDown, /* 17: remote partition is down */
- xpcNotLoaded, /* 18: XPC module is not loaded */
- xpcUnloading, /* 19: this side is unloading XPC module */
+ xpMsgReceived, /* 4: message received */
+ xpMsgDelivered, /* 5: message delivered and acknowledged */
- xpcBadMagic, /* 20: XPC MAGIC string not found */
+ xpRETIRED2, /* 6: (formerly xpTransferFailed) */
- xpcReactivating, /* 21: remote partition was reactivated */
+ xpNoWait, /* 7: operation would require wait */
+ xpRetry, /* 8: retry operation */
+ xpTimeout, /* 9: timeout in xpc_allocate_msg_wait() */
+ xpInterrupted, /* 10: interrupted wait */
- xpcUnregistering, /* 22: this side is unregistering channel */
- xpcOtherUnregistering, /* 23: other side is unregistering channel */
+ xpUnequalMsgSizes, /* 11: message size disparity between sides */
+ xpInvalidAddress, /* 12: invalid address */
- xpcCloneKThread, /* 24: cloning kernel thread */
- xpcCloneKThreadFailed, /* 25: cloning kernel thread failed */
+ xpNoMemory, /* 13: no memory available for XPC structures */
+ xpLackOfResources, /* 14: insufficient resources for operation */
+ xpUnregistered, /* 15: channel is not registered */
+ xpAlreadyRegistered, /* 16: channel is already registered */
- xpcNoHeartbeat, /* 26: remote partition has no heartbeat */
+ xpPartitionDown, /* 17: remote partition is down */
+ xpNotLoaded, /* 18: XPC module is not loaded */
+ xpUnloading, /* 19: this side is unloading XPC module */
- xpcPioReadError, /* 27: PIO read error */
- xpcPhysAddrRegFailed, /* 28: registration of phys addr range failed */
+ xpBadMagic, /* 20: XPC MAGIC string not found */
- xpcBteDirectoryError, /* 29: maps to BTEFAIL_DIR */
- xpcBtePoisonError, /* 30: maps to BTEFAIL_POISON */
- xpcBteWriteError, /* 31: maps to BTEFAIL_WERR */
- xpcBteAccessError, /* 32: maps to BTEFAIL_ACCESS */
- xpcBtePWriteError, /* 33: maps to BTEFAIL_PWERR */
- xpcBtePReadError, /* 34: maps to BTEFAIL_PRERR */
- xpcBteTimeOutError, /* 35: maps to BTEFAIL_TOUT */
- xpcBteXtalkError, /* 36: maps to BTEFAIL_XTERR */
- xpcBteNotAvailable, /* 37: maps to BTEFAIL_NOTAVAIL */
- xpcBteUnmappedError, /* 38: unmapped BTEFAIL_ error */
+ xpReactivating, /* 21: remote partition was reactivated */
- xpcBadVersion, /* 39: bad version number */
- xpcVarsNotSet, /* 40: the XPC variables are not set up */
- xpcNoRsvdPageAddr, /* 41: unable to get rsvd page's phys addr */
- xpcInvalidPartid, /* 42: invalid partition ID */
- xpcLocalPartid, /* 43: local partition ID */
+ xpUnregistering, /* 22: this side is unregistering channel */
+ xpOtherUnregistering, /* 23: other side is unregistering channel */
- xpcOtherGoingDown, /* 44: other side going down, reason unknown */
- xpcSystemGoingDown, /* 45: system is going down, reason unknown */
- xpcSystemHalt, /* 46: system is being halted */
- xpcSystemReboot, /* 47: system is being rebooted */
- xpcSystemPoweroff, /* 48: system is being powered off */
+ xpCloneKThread, /* 24: cloning kernel thread */
+ xpCloneKThreadFailed, /* 25: cloning kernel thread failed */
- xpcDisconnecting, /* 49: channel disconnecting (closing) */
+ xpNoHeartbeat, /* 26: remote partition has no heartbeat */
- xpcOpenCloseError, /* 50: channel open/close protocol error */
+ xpPioReadError, /* 27: PIO read error */
+ xpPhysAddrRegFailed, /* 28: registration of phys addr range failed */
- xpcDisconnected, /* 51: channel disconnected (closed) */
+ xpRETIRED3, /* 29: (formerly xpBteDirectoryError) */
+ xpRETIRED4, /* 30: (formerly xpBtePoisonError) */
+ xpRETIRED5, /* 31: (formerly xpBteWriteError) */
+ xpRETIRED6, /* 32: (formerly xpBteAccessError) */
+ xpRETIRED7, /* 33: (formerly xpBtePWriteError) */
+ xpRETIRED8, /* 34: (formerly xpBtePReadError) */
+ xpRETIRED9, /* 35: (formerly xpBteTimeOutError) */
+ xpRETIRED10, /* 36: (formerly xpBteXtalkError) */
+ xpRETIRED11, /* 37: (formerly xpBteNotAvailable) */
+ xpRETIRED12, /* 38: (formerly xpBteUnmappedError) */
- xpcBteSh2Start, /* 52: BTE CRB timeout */
+ xpBadVersion, /* 39: bad version number */
+ xpVarsNotSet, /* 40: the XPC variables are not set up */
+ xpNoRsvdPageAddr, /* 41: unable to get rsvd page's phys addr */
+ xpInvalidPartid, /* 42: invalid partition ID */
+ xpLocalPartid, /* 43: local partition ID */
- /* 53: 0x1 BTE Error Response Short */
- xpcBteSh2RspShort = xpcBteSh2Start + BTEFAIL_SH2_RESP_SHORT,
+ xpOtherGoingDown, /* 44: other side going down, reason unknown */
+ xpSystemGoingDown, /* 45: system is going down, reason unknown */
+ xpSystemHalt, /* 46: system is being halted */
+ xpSystemReboot, /* 47: system is being rebooted */
+ xpSystemPoweroff, /* 48: system is being powered off */
- /* 54: 0x2 BTE Error Response Long */
- xpcBteSh2RspLong = xpcBteSh2Start + BTEFAIL_SH2_RESP_LONG,
+ xpDisconnecting, /* 49: channel disconnecting (closing) */
- /* 56: 0x4 BTE Error Response DSB */
- xpcBteSh2RspDSB = xpcBteSh2Start + BTEFAIL_SH2_RESP_DSP,
+ xpOpenCloseError, /* 50: channel open/close protocol error */
- /* 60: 0x8 BTE Error Response Access */
- xpcBteSh2RspAccess = xpcBteSh2Start + BTEFAIL_SH2_RESP_ACCESS,
+ xpDisconnected, /* 51: channel disconnected (closed) */
- /* 68: 0x10 BTE Error CRB timeout */
- xpcBteSh2CRBTO = xpcBteSh2Start + BTEFAIL_SH2_CRB_TO,
+ xpBteCopyError, /* 52: bte_copy() returned error */
+ xpSalError, /* 53: sn SAL error */
+ xpNeedMoreInfo, /* 54: more info is needed by SAL */
- /* 84: 0x20 BTE Error NACK limit */
- xpcBteSh2NACKLimit = xpcBteSh2Start + BTEFAIL_SH2_NACK_LIMIT,
+ xpUnsupported, /* 55: unsupported functionality or resource */
+ xpUnknownReason /* 56: unknown reason (must be last in list) */
+};
- /* 115: BTE end */
- xpcBteSh2End = xpcBteSh2Start + BTEFAIL_SH2_ALL,
- xpcUnknownReason /* 116: unknown reason -- must be last in list */
-};
+/* the following are valid xp_set_amo() ops */
+#define XP_AMO_OR 1 /* set variable to (variable | operand) */
+#define XP_AMO_AND 2 /* set variable to (variable & operand) */
+
+/* the following are valid xp_get_amo() ops */
+#define XP_AMO_LOAD 1 /* get variable contents */
+#define XP_AMO_CLEAR 2 /* get variable contents and clear variable */
+
+/* the following are valid xp_change_memprotect() ops */
+#define XP_MEMPROT_DISALLOW_ALL 0
+#define XP_MEMPROT_ALLOW_CPU_AMO 1
+#define XP_MEMPROT_ALLOW_CPU_MEM 2
+#define XP_MEMPROT_ALLOW_ALL 3 /* Shub 1.1 only */
/*
@@ -302,83 +284,83 @@ enum xpc_retval {
*
* Reason Code | Cause | Optional Data
* =====================+================================+=====================
- * xpcConnected | connection has been established| max #of entries
+ * xpConnected | connection has been established| max #of entries
* | to the specified partition on | allowed in message
* | the specified channel | queue
* ---------------------+--------------------------------+---------------------
- * xpcMsgReceived | an XPC message arrived from | address of payload
+ * xpMsgReceived | an XPC message arrived from | address of payload
* | the specified partition on the |
* | specified channel | [the user must call
* | | xpc_received() when
* | | finished with the
* | | payload]
* ---------------------+--------------------------------+---------------------
- * xpcMsgDelivered | notification that the message | NA
+ * xpMsgDelivered | notification that the message | NA
* | was delivered to the intended |
* | recipient and that they have |
* | acknowledged its receipt by |
* | calling xpc_received() |
* =====================+================================+=====================
- * xpcUnequalMsgSizes | can't connect to the specified | NULL
+ * xpUnequalMsgSizes | can't connect to the specified | NULL
* | partition on the specified |
* | channel because of mismatched |
* | message sizes |
* ---------------------+--------------------------------+---------------------
- * xpcNoMemory | insufficient memory available | NULL
+ * xpNoMemory | insufficient memory available | NULL
* | to allocate message queue |
* ---------------------+--------------------------------+---------------------
- * xpcLackOfResources | lack of resources to create | NULL
+ * xpLackOfResources | lack of resources to create | NULL
* | the necessary kthreads to |
* | support the channel |
* ---------------------+--------------------------------+---------------------
- * xpcUnregistering | this side's user has | NULL or NA
+ * xpUnregistering | this side's user has | NULL or NA
* | unregistered by calling |
* | xpc_disconnect() |
* ---------------------+--------------------------------+---------------------
- * xpcOtherUnregistering| the other side's user has | NULL or NA
+ * xpOtherUnregistering | the other side's user has | NULL or NA
* | unregistered by calling |
* | xpc_disconnect() |
* ---------------------+--------------------------------+---------------------
- * xpcNoHeartbeat | the other side's XPC is no | NULL or NA
+ * xpNoHeartbeat | the other side's XPC is no | NULL or NA
* | longer heartbeating |
* | |
* ---------------------+--------------------------------+---------------------
- * xpcUnloading | this side's XPC module is | NULL or NA
+ * xpUnloading | this side's XPC module is | NULL or NA
* | being unloaded |
* | |
* ---------------------+--------------------------------+---------------------
- * xpcOtherUnloading | the other side's XPC module is | NULL or NA
+ * xpOtherUnloading | the other side's XPC module is | NULL or NA
* | is being unloaded |
* | |
* ---------------------+--------------------------------+---------------------
- * xpcPioReadError | xp_nofault_PIOR() returned an | NULL or NA
+ * xpPioReadError | xp_nofault_PIOR() returned an | NULL or NA
* | error while sending an IPI |
* | |
* ---------------------+--------------------------------+---------------------
- * xpcInvalidAddress | the address either received or | NULL or NA
+ * xpInvalidAddress | the address either received or | NULL or NA
* | sent by the specified partition|
* | is invalid |
* ---------------------+--------------------------------+---------------------
- * xpcBteNotAvailable | attempt to pull data from the | NULL or NA
- * xpcBtePoisonError | specified partition over the |
- * xpcBteWriteError | specified channel via a |
- * xpcBteAccessError | bte_copy() failed |
- * xpcBteTimeOutError | |
- * xpcBteXtalkError | |
- * xpcBteDirectoryError | |
- * xpcBteGenericError | |
- * xpcBteUnmappedError | |
+ * xpBteNotAvailable | attempt to pull data from the | NULL or NA
+ * xpBtePoisonError | specified partition over the |
+ * xpBteWriteError | specified channel via a |
+ * xpBteAccessError | bte_copy() failed |
+ * xpBteTimeOutError | |
+ * xpBteXtalkError | |
+ * xpBteDirectoryError | |
+ * xpBteGenericError | |
+ * xpBteUnmappedError | |
* ---------------------+--------------------------------+---------------------
- * xpcUnknownReason | the specified channel to the | NULL or NA
+ * xpUnknownReason | the specified channel to the | NULL or NA
* | specified partition was |
* | unavailable for unknown reasons|
* =====================+================================+=====================
*/
-typedef void (*xpc_channel_func)(enum xpc_retval reason, partid_t partid,
+typedef void (*xpc_channel_func)(enum xp_retval reason, short partid,
int ch_number, void *data, void *key);
-typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
+typedef void (*xpc_notify_func)(enum xp_retval reason, short partid,
int ch_number, void *key);
@@ -418,12 +400,12 @@ struct xpc_registration {
struct xpc_interface {
void (*connect)(int);
void (*disconnect)(int);
- enum xpc_retval (*allocate)(partid_t, int, u32, void **);
- enum xpc_retval (*send)(partid_t, int, void *);
- enum xpc_retval (*send_notify)(partid_t, int, void *,
+ enum xp_retval (*allocate)(short, int, u32, void **);
+ enum xp_retval (*send)(short, int, void *);
+ enum xp_retval (*send_notify)(short, int, void *,
xpc_notify_func, void *);
- void (*received)(partid_t, int, void *);
- enum xpc_retval (*partid_to_nasids)(partid_t, void *);
+ void (*received)(short, int, void *);
+ enum xp_retval (*partid_to_nasids)(short, void *);
};
@@ -431,55 +413,91 @@ extern struct xpc_interface xpc_interfac
extern void xpc_set_interface(void (*)(int),
void (*)(int),
- enum xpc_retval (*)(partid_t, int, u32, void **),
- enum xpc_retval (*)(partid_t, int, void *),
- enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func,
+ enum xp_retval (*)(short, int, u32, void **),
+ enum xp_retval (*)(short, int, void *),
+ enum xp_retval (*)(short, int, void *, xpc_notify_func,
void *),
- void (*)(partid_t, int, void *),
- enum xpc_retval (*)(partid_t, void *));
+ void (*)(short, int, void *),
+ enum xp_retval (*)(short, void *));
extern void xpc_clear_interface(void);
-extern enum xpc_retval xpc_connect(int, xpc_channel_func, void *, u16,
+extern enum xp_retval xpc_connect(int, xpc_channel_func, void *, u16,
u16, u32, u32);
extern void xpc_disconnect(int);
-static inline enum xpc_retval
-xpc_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
+static inline enum xp_retval
+xpc_allocate(short partid, int ch_number, u32 flags, void **payload)
{
return xpc_interface.allocate(partid, ch_number, flags, payload);
}
-static inline enum xpc_retval
-xpc_send(partid_t partid, int ch_number, void *payload)
+static inline enum xp_retval
+xpc_send(short partid, int ch_number, void *payload)
{
return xpc_interface.send(partid, ch_number, payload);
}
-static inline enum xpc_retval
-xpc_send_notify(partid_t partid, int ch_number, void *payload,
+static inline enum xp_retval
+xpc_send_notify(short partid, int ch_number, void *payload,
xpc_notify_func func, void *key)
{
return xpc_interface.send_notify(partid, ch_number, payload, func, key);
}
static inline void
-xpc_received(partid_t partid, int ch_number, void *payload)
+xpc_received(short partid, int ch_number, void *payload)
{
return xpc_interface.received(partid, ch_number, payload);
}
-static inline enum xpc_retval
-xpc_partid_to_nasids(partid_t partid, void *nasids)
+static inline enum xp_retval
+xpc_partid_to_nasids(short partid, void *nasids)
{
return xpc_interface.partid_to_nasids(partid, nasids);
}
+extern short xp_partition_id;
+extern u8 xp_region_size;
+extern unsigned long xp_rtc_cycles_per_second;
+extern enum xp_retval (*xp_remote_memcpy)(void *, const void *, size_t);
+extern enum xp_retval (*xp_register_remote_amos)(u64, size_t);
+extern enum xp_retval (*xp_unregister_remote_amos)(u64, size_t);
+extern int xp_sizeof_nasid_mask;
+extern int xp_sizeof_amo;
+extern u64 *(*xp_alloc_amos)(int);
+extern void (*xp_free_amos)(u64 *, int);
+extern enum xp_retval (*xp_set_amo)(u64 *, int, u64, int);
+extern enum xp_retval (*xp_set_amo_with_interrupt)(u64 *, int, u64, int, int,
+ int, int);
+extern enum xp_retval (*xp_get_amo)(u64 *, int, u64 *);
+extern enum xp_retval (*xp_get_partition_rsvd_page_pa)(u64, u64 *, u64 *,
+ size_t *);
+extern enum xp_retval (*xp_change_memprotect)(u64, size_t, int, u64 *);
+extern void (*xp_change_memprotect_shub_wars_1_1)(int);
+extern void (*xp_allow_IPI_ops)(void);
+extern void (*xp_disallow_IPI_ops)(void);
+
+extern int (*xp_cpu_to_nasid)(int);
+extern int (*xp_node_to_nasid)(int);
extern u64 xp_nofault_PIOR_target;
extern int xp_nofault_PIOR(void *);
extern int xp_error_PIOR(void);
-#endif /* _ASM_IA64_SN_XP_H */
+static inline int
+xp_partid_mask_words(int npartitions)
+{
+ return DIV_ROUND_UP(npartitions, BITS_PER_WORD);
+}
+
+static inline int
+xp_nasid_mask_words(void)
+{
+ return DIV_ROUND_UP(xp_sizeof_nasid_mask, BYTES_PER_WORD);
+}
+
+
+#endif /* _DRIVERS_MISC_XP_XP_H */
Index: linux-2.6/drivers/misc/xp/xp_main.c
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xp_main.c 2008-03-25 13:47:59.962929996 -0500
+++ linux-2.6/drivers/misc/xp/xp_main.c 2008-03-25 13:51:08.838449746 -0500
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
@@ -17,41 +17,77 @@
#include <linux/kernel.h>
-#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/mutex.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/xp.h>
+#include <linux/device.h>
+#include "xp.h"
+/* Define the XP debug device structures to be used with dev_dbg() et al */
+
+struct device_driver xp_dbg_name = {
+ .name = "xp"
+};
+
+struct device xp_dbg_subname = {
+ .bus_id = {0}, /* set to "" */
+ .driver = &xp_dbg_name
+};
+
+struct device *xp = &xp_dbg_subname;
/*
* Target of nofault PIO read.
*/
u64 xp_nofault_PIOR_target;
+short xp_partition_id;
+u8 xp_region_size;
+unsigned long xp_rtc_cycles_per_second;
-/*
- * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
- * users of XPC.
- */
-struct xpc_registration xpc_registrations[XPC_NCHANNELS];
+enum xp_retval (*xp_remote_memcpy)(void *dst, const void *src, size_t len);
+
+enum xp_retval (*xp_register_remote_amos)(u64 paddr, size_t len);
+enum xp_retval (*xp_unregister_remote_amos)(u64 paddr, size_t len);
+
+int xp_sizeof_nasid_mask;
+int xp_sizeof_amo;
+
+u64 *(*xp_alloc_amos)(int n_amos);
+void (*xp_free_amos)(u64 *amos_page, int n_amos);
+
+enum xp_retval (*xp_set_amo)(u64 *amo_va, int op, u64 operand, int remote);
+enum xp_retval (*xp_set_amo_with_interrupt)(u64 *amo_va, int op, u64 operand,
+ int remote, int nasid,
+ int phys_cpuid, int vector);
+
+enum xp_retval (*xp_get_amo)(u64 *amo_va, int op, u64 *amo_value_addr);
+
+enum xp_retval (*xp_get_partition_rsvd_page_pa)(u64 buf, u64 *cookie,
+ u64 *paddr, size_t *len);
+
+enum xp_retval (*xp_change_memprotect)(u64 paddr, size_t len, int request,
+ u64 *nasid_array);
+void (*xp_change_memprotect_shub_wars_1_1)(int request);
+void (*xp_allow_IPI_ops)(void);
+void (*xp_disallow_IPI_ops)(void);
+
+int (*xp_cpu_to_nasid)(int cpuid);
+int (*xp_node_to_nasid)(int nid);
/*
* Initialize the XPC interface to indicate that XPC isn't loaded.
*/
-static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; }
+static enum xp_retval xpc_notloaded(void) { return xpNotLoaded; }
struct xpc_interface xpc_interface = {
(void (*)(int)) xpc_notloaded,
(void (*)(int)) xpc_notloaded,
- (enum xpc_retval (*)(partid_t, int, u32, void **)) xpc_notloaded,
- (enum xpc_retval (*)(partid_t, int, void *)) xpc_notloaded,
- (enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, void *))
+ (enum xp_retval (*)(short, int, u32, void **)) xpc_notloaded,
+ (enum xp_retval (*)(short, int, void *)) xpc_notloaded,
+ (enum xp_retval (*)(short, int, void *, xpc_notify_func, void *))
xpc_notloaded,
- (void (*)(partid_t, int, void *)) xpc_notloaded,
- (enum xpc_retval (*)(partid_t, void *)) xpc_notloaded
+ (void (*)(short, int, void *)) xpc_notloaded,
+ (enum xp_retval (*)(short, void *)) xpc_notloaded
};
@@ -61,12 +97,12 @@ struct xpc_interface xpc_interface = {
void
xpc_set_interface(void (*connect)(int),
void (*disconnect)(int),
- enum xpc_retval (*allocate)(partid_t, int, u32, void **),
- enum xpc_retval (*send)(partid_t, int, void *),
- enum xpc_retval (*send_notify)(partid_t, int, void *,
+ enum xp_retval (*allocate)(short, int, u32, void **),
+ enum xp_retval (*send)(short, int, void *),
+ enum xp_retval (*send_notify)(short, int, void *,
xpc_notify_func, void *),
- void (*received)(partid_t, int, void *),
- enum xpc_retval (*partid_to_nasids)(partid_t, void *))
+ void (*received)(short, int, void *),
+ enum xp_retval (*partid_to_nasids)(short, void *))
{
xpc_interface.connect = connect;
xpc_interface.disconnect = disconnect;
@@ -86,20 +122,27 @@ xpc_clear_interface(void)
{
xpc_interface.connect = (void (*)(int)) xpc_notloaded;
xpc_interface.disconnect = (void (*)(int)) xpc_notloaded;
- xpc_interface.allocate = (enum xpc_retval (*)(partid_t, int, u32,
+ xpc_interface.allocate = (enum xp_retval (*)(short, int, u32,
void **)) xpc_notloaded;
- xpc_interface.send = (enum xpc_retval (*)(partid_t, int, void *))
+ xpc_interface.send = (enum xp_retval (*)(short, int, void *))
xpc_notloaded;
- xpc_interface.send_notify = (enum xpc_retval (*)(partid_t, int, void *,
+ xpc_interface.send_notify = (enum xp_retval (*)(short, int, void *,
xpc_notify_func, void *)) xpc_notloaded;
- xpc_interface.received = (void (*)(partid_t, int, void *))
+ xpc_interface.received = (void (*)(short, int, void *))
xpc_notloaded;
- xpc_interface.partid_to_nasids = (enum xpc_retval (*)(partid_t, void *))
+ xpc_interface.partid_to_nasids = (enum xp_retval (*)(short, void *))
xpc_notloaded;
}
/*
+ * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
+ * users of XPC.
+ */
+struct xpc_registration xpc_registrations[XPC_NCHANNELS];
+
+
+/*
* Register for automatic establishment of a channel connection whenever
* a partition comes up.
*
@@ -117,13 +160,13 @@ xpc_clear_interface(void)
* nentries - max #of XPC message entries a message queue can contain.
* The actual number, which is determined when a connection
* is established and may be less than requested, will be
- * passed to the user via the xpcConnected callout.
+ * passed to the user via the xpConnected callout.
* assigned_limit - max number of kthreads allowed to be processing
* messages (per connection) at any given instant.
* idle_limit - max number of kthreads allowed to be idle at any given
* instant.
*/
-enum xpc_retval
+enum xp_retval
xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
u16 nentries, u32 assigned_limit, u32 idle_limit)
{
@@ -138,13 +181,13 @@ xpc_connect(int ch_number, xpc_channel_f
registration = &xpc_registrations[ch_number];
if (mutex_lock_interruptible(&registration->mutex) != 0) {
- return xpcInterrupted;
+ return xpInterrupted;
}
/* if XPC_CHANNEL_REGISTERED(ch_number) */
if (registration->func != NULL) {
mutex_unlock(&registration->mutex);
- return xpcAlreadyRegistered;
+ return xpAlreadyRegistered;
}
/* register the channel for connection */
@@ -159,7 +202,7 @@ xpc_connect(int ch_number, xpc_channel_f
xpc_interface.connect(ch_number);
- return xpcSuccess;
+ return xpSuccess;
}
@@ -214,43 +257,26 @@ xpc_disconnect(int ch_number)
return;
}
+extern enum xp_retval xp_init_sn2(void);
+extern enum xp_retval xp_init_uv(void);
int __init
xp_init(void)
{
- int ret, ch_number;
- u64 func_addr = *(u64 *) xp_nofault_PIOR;
- u64 err_func_addr = *(u64 *) xp_error_PIOR;
+ enum xp_retval ret;
+ int ch_number;
+ if (is_shub())
+ ret = xp_init_sn2();
+ else if (is_uv())
+ ret = xp_init_uv();
+ else
+ ret = xpUnsupported;
- if (!ia64_platform_is("sn2")) {
+ if (ret != xpSuccess) {
return -ENODEV;
}
- /*
- * Register a nofault code region which performs a cross-partition
- * PIO read. If the PIO read times out, the MCA handler will consume
- * the error and return to a kernel-provided instruction to indicate
- * an error. This PIO read exists because it is guaranteed to timeout
- * if the destination is down (AMO operations do not timeout on at
- * least some CPUs on Shubs <= v1.2, which unfortunately we have to
- * work around).
- */
- if ((ret = sn_register_nofault_code(func_addr, err_func_addr,
- err_func_addr, 1, 1)) != 0) {
- printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
- ret);
- }
- /*
- * Setup the nofault PIO read target. (There is no special reason why
- * SH_IPI_ACCESS was selected.)
- */
- if (is_shub2()) {
- xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
- } else {
- xp_nofault_PIOR_target = SH1_IPI_ACCESS;
- }
-
/* initialize the connection registration mutex */
for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
mutex_init(&xpc_registrations[ch_number].mutex);
@@ -260,17 +286,16 @@ xp_init(void)
}
module_init(xp_init);
+extern void xp_exit_sn2(void);
+extern void xp_exit_uv(void);
void __exit
xp_exit(void)
{
- u64 func_addr = *(u64 *) xp_nofault_PIOR;
- u64 err_func_addr = *(u64 *) xp_error_PIOR;
-
-
- /* unregister the PIO read nofault code region */
- (void) sn_register_nofault_code(func_addr, err_func_addr,
- err_func_addr, 1, 0);
+ if (is_shub())
+ xp_exit_sn2();
+ else if (is_uv())
+ xp_exit_uv();
}
module_exit(xp_exit);
@@ -279,8 +304,26 @@ MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition (XP) base");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(xp_nofault_PIOR);
-EXPORT_SYMBOL(xp_nofault_PIOR_target);
+EXPORT_SYMBOL(xp_partition_id);
+EXPORT_SYMBOL(xp_region_size);
+EXPORT_SYMBOL(xp_rtc_cycles_per_second);
+EXPORT_SYMBOL(xp_remote_memcpy);
+EXPORT_SYMBOL(xp_register_remote_amos);
+EXPORT_SYMBOL(xp_unregister_remote_amos);
+EXPORT_SYMBOL(xp_sizeof_nasid_mask);
+EXPORT_SYMBOL(xp_sizeof_amo);
+EXPORT_SYMBOL(xp_alloc_amos);
+EXPORT_SYMBOL(xp_free_amos);
+EXPORT_SYMBOL(xp_set_amo);
+EXPORT_SYMBOL(xp_set_amo_with_interrupt);
+EXPORT_SYMBOL(xp_get_amo);
+EXPORT_SYMBOL(xp_get_partition_rsvd_page_pa);
+EXPORT_SYMBOL(xp_change_memprotect);
+EXPORT_SYMBOL(xp_change_memprotect_shub_wars_1_1);
+EXPORT_SYMBOL(xp_allow_IPI_ops);
+EXPORT_SYMBOL(xp_disallow_IPI_ops);
+EXPORT_SYMBOL(xp_cpu_to_nasid);
+EXPORT_SYMBOL(xp_node_to_nasid);
EXPORT_SYMBOL(xpc_registrations);
EXPORT_SYMBOL(xpc_interface);
EXPORT_SYMBOL(xpc_clear_interface);
Index: linux-2.6/drivers/misc/xp/xpc.h
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xpc.h 2008-03-25 13:47:59.962929996 -0500
+++ linux-2.6/drivers/misc/xp/xpc.h 2008-03-25 13:51:08.858452237 -0500
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
@@ -11,8 +11,8 @@
* Cross Partition Communication (XPC) structures and macros.
*/
-#ifndef _ASM_IA64_SN_XPC_H
-#define _ASM_IA64_SN_XPC_H
+#ifndef _DRIVERS_MISC_XP_XPC_H
+#define _DRIVERS_MISC_XP_XPC_H
#include <linux/interrupt.h>
@@ -22,12 +22,15 @@
#include <linux/completion.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
-#include <asm/sn/bte.h>
-#include <asm/sn/clksupport.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/mspec.h>
-#include <asm/sn/shub_mmr.h>
-#include <asm/sn/xp.h>
+#if defined(CONFIG_IA64)
+#include <asm/sn/intr.h>
+#elif defined(CONFIG_X86_64)
+#define SGI_XPC_ACTIVATE 0x30
+#define SGI_XPC_NOTIFY 0xe7
+#else
+#error architecture is NOT supported
+#endif
+#include "xp.h"
/*
@@ -43,7 +46,7 @@
/*
* The next macros define word or bit representations for given
* C-brick nasid in either the SAL provided bit array representing
- * nasids in the partition/machine or the AMO_t array used for
+ * nasids in the partition/machine or the array of AMO variables used for
* inter-partition initiation communications.
*
* For SN2 machines, C-Bricks are always even numbered NASIDs. As
@@ -51,11 +54,7 @@
* passed from SAL always be packed for C-Bricks and the
* cross-partition interrupts use the same packing scheme.
*/
-#define XPC_NASID_W_INDEX(_n) (((_n) / 64) / 2)
-#define XPC_NASID_B_INDEX(_n) (((_n) / 2) & (64 - 1))
-#define XPC_NASID_IN_ARRAY(_n, _p) ((_p)[XPC_NASID_W_INDEX(_n)] & \
- (1UL << XPC_NASID_B_INDEX(_n)))
-#define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2)
+#define XPC_NASID_IN_ARRAY(_n, _p) ((_p)[BIT_WORD((_n)/2)] & BIT_MASK((_n)/2))
#define XPC_HB_DEFAULT_INTERVAL 5 /* incr HB every x secs */
#define XPC_HB_CHECK_DEFAULT_INTERVAL 20 /* check HB every x secs */
@@ -80,8 +79,8 @@
* The first cacheline of the reserved page contains the header
* (struct xpc_rsvd_page). Before SAL initialization has completed,
* SAL has set up the following fields of the reserved page header:
- * SAL_signature, SAL_version, partid, and nasids_size. The other
- * fields are set up by XPC. (xpc_rsvd_page points to the local
+ * SAL_signature, SAL_version, SAL_partid, and SAL_nasids_size. The
+ * other fields are set up by XPC. (xpc_rsvd_page points to the local
* partition's reserved page.)
*
* part_nasids mask
@@ -112,16 +111,16 @@
struct xpc_rsvd_page {
u64 SAL_signature; /* SAL: unique signature */
u64 SAL_version; /* SAL: version */
- u8 partid; /* SAL: partition ID */
+ u8 SAL_partid; /* SAL: partition ID */
u8 version;
- u8 pad1[6]; /* align to next u64 in cacheline */
- volatile u64 vars_pa;
+ u8 pad[6];
+ volatile u64 vars_pa; /* physical address of struct xpc_vars */
struct timespec stamp; /* time when reserved page was setup by XPC */
u64 pad2[9]; /* align to last u64 in cacheline */
- u64 nasids_size; /* SAL: size of each nasid mask in bytes */
+ u64 SAL_nasids_size; /* SAL: size of each nasid mask in bytes */
};
-#define XPC_RP_VERSION _XPC_VERSION(1,1) /* version 1.1 of the reserved page */
+#define XPC_RP_VERSION _XPC_VERSION(2,0) /* version 2.0 of the reserved page */
#define XPC_SUPPORTS_RP_STAMP(_version) \
(_version >= _XPC_VERSION(1,1))
@@ -162,65 +161,109 @@ xpc_compare_stamps(struct timespec *stam
*/
struct xpc_vars {
u8 version;
- u64 heartbeat;
- u64 heartbeating_to_mask;
- u64 heartbeat_offline; /* if 0, heartbeat should be changing */
+ short partid;
+ short npartitions; /* value of XPC_NPARTITIONS */
int act_nasid;
int act_phys_cpuid;
u64 vars_part_pa;
- u64 amos_page_pa; /* paddr of page of AMOs from MSPEC driver */
- AMO_t *amos_page; /* vaddr of page of AMOs from MSPEC driver */
+ u64 amos_page_pa; /* paddr of first page of AMOs variables */
+ u64 *amos_page; /* vaddr of first page of AMOs variables */
+ u64 heartbeat;
+ u64 heartbeat_offline; /* if 0, heartbeat should be changing */
+ u64 heartbeating_to_mask[BITS_TO_LONGS(XP_MAX_NPARTITIONS)];
};
-#define XPC_V_VERSION _XPC_VERSION(3,1) /* version 3.1 of the cross vars */
+#define XPC_V_VERSION _XPC_VERSION(4,0) /* version 4.0 of the cross vars */
#define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
(_version >= _XPC_VERSION(3,1))
static inline int
-xpc_hb_allowed(partid_t partid, struct xpc_vars *vars)
+xpc_hb_allowed(short partid, struct xpc_vars *vars)
{
- return ((vars->heartbeating_to_mask & (1UL << partid)) != 0);
+ return test_bit(partid, vars->heartbeating_to_mask);
+}
+
+static inline int
+xpc_any_hbs_allowed(struct xpc_vars *vars)
+{
+ return !bitmap_empty((unsigned long *)vars->heartbeating_to_mask,
+ vars->npartitions);
}
static inline void
-xpc_allow_hb(partid_t partid, struct xpc_vars *vars)
+xpc_allow_hb(short partid, struct xpc_vars *vars)
{
- u64 old_mask, new_mask;
+ set_bit(partid, vars->heartbeating_to_mask);
+}
- do {
- old_mask = vars->heartbeating_to_mask;
- new_mask = (old_mask | (1UL << partid));
- } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
- old_mask);
+static inline void
+xpc_disallow_hb(short partid, struct xpc_vars *vars)
+{
+ clear_bit(partid, vars->heartbeating_to_mask);
}
static inline void
-xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
+xpc_disallow_all_hbs(struct xpc_vars *vars)
{
- u64 old_mask, new_mask;
+ int nlongs = BITS_TO_LONGS(vars->npartitions);
+ int i;
- do {
- old_mask = vars->heartbeating_to_mask;
- new_mask = (old_mask & ~(1UL << partid));
- } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
- old_mask);
+ for (i = 0; i < nlongs; i++)
+ vars->heartbeating_to_mask[i] = 0;
}
/*
- * The AMOs page consists of a number of AMO variables which are divided into
- * four groups. The first two groups are used to identify an IRQ's sender.
- * These two groups consist of 64 and 128 AMO variables respectively. The last
- * two groups, consisting of just one AMO variable each, are used to identify
- * the remote partitions that are currently engaged (from the viewpoint of
- * the XPC running on the remote partition).
+ * The AMOs page(s) consists of a number of AMO variables which are divided into
+ * four groups. The first group consists of one AMO per partition, each of which
+ * reflects state changes of up to eight channels and is accompanied by the
+ * receipt of a NOTIFY IRQ. The second group represents a bitmap of nasids by
+ * which to identify an ACTIVATE IRQ's sender. The last two groups, each
+ * representing a bitmap of partids, are used to identify the remote partitions
+ * that are currently engaged (from the viewpoint of the XPC running on the
+ * remote partition).
+ *
+ * The following inline functions compute an AMO index into these AMO page(s).
*/
-#define XPC_NOTIFY_IRQ_AMOS 0
-#define XPC_ACTIVATE_IRQ_AMOS (XPC_NOTIFY_IRQ_AMOS + XP_MAX_PARTITIONS)
-#define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS)
-#define XPC_DISENGAGE_REQUEST_AMO (XPC_ENGAGED_PARTITIONS_AMO + 1)
+
+/* get offset to beginning of notify IRQ AMOs */
+static inline int
+xpc_notify_irq_amos(void)
+{
+ return 0;
+}
+
+/* get offset to beginning of activate IRQ AMOs */
+static inline int
+xpc_activate_irq_amos(int npartitions)
+{
+ return xpc_notify_irq_amos() + npartitions;
+}
+
+/* get offset to beginning of engaged partitions AMOs */
+static inline int
+xpc_engaged_partitions_amos(int npartitions)
+{
+ return xpc_activate_irq_amos(npartitions) + xp_nasid_mask_words();
+}
+
+/* get offset to beginning of disengaged request AMOs */
+static inline int
+xpc_disengage_request_amos(int npartitions)
+{
+ return xpc_engaged_partitions_amos(npartitions) +
+ xp_partid_mask_words(npartitions);
+}
+
+/* get total number of AMOs */
+static inline int
+xpc_number_of_amos(int npartitions)
+{
+ return xpc_disengage_request_amos(npartitions) +
+ xp_partid_mask_words(npartitions);
+}
/*
@@ -239,7 +282,7 @@ struct xpc_vars_part {
u64 openclose_args_pa; /* physical address of open and close args */
u64 GPs_pa; /* physical address of Get/Put values */
- u64 IPI_amo_pa; /* physical address of IPI AMO_t structure */
+ u64 IPI_amo_pa; /* physical address of IPI AMO variable */
int IPI_nasid; /* nasid of where to send IPIs */
int IPI_phys_cpuid; /* physical CPU ID of where to send IPIs */
@@ -266,10 +309,12 @@ struct xpc_vars_part {
#define XPC_RP_HEADER_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
#define XPC_RP_VARS_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_vars))
-#define XPC_RP_PART_NASIDS(_rp) (u64 *) ((u8 *) _rp + XPC_RP_HEADER_SIZE)
-#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words)
-#define XPC_RP_VARS(_rp) ((struct xpc_vars *) XPC_RP_MACH_NASIDS(_rp) + xp_nasid_mask_words)
-#define XPC_RP_VARS_PART(_rp) (struct xpc_vars_part *) ((u8 *) XPC_RP_VARS(rp) + XPC_RP_VARS_SIZE)
+#define XPC_RP_PART_NASIDS(_rp) (u64 *)((u8 *)(_rp) + XPC_RP_HEADER_SIZE)
+#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + \
+ xp_nasid_mask_words())
+#define XPC_RP_VARS(_rp) (struct xpc_vars *)(XPC_RP_MACH_NASIDS(_rp) + \
+ xp_nasid_mask_words())
+#define XPC_RP_VARS_PART(_rp) (struct xpc_vars_part *)((u8 *)XPC_RP_VARS(_rp) + XPC_RP_VARS_SIZE)
/*
@@ -428,11 +473,11 @@ struct xpc_notify {
* messages.
*/
struct xpc_channel {
- partid_t partid; /* ID of remote partition connected */
+ short partid; /* ID of remote partition connected */
spinlock_t lock; /* lock for updating this structure */
u32 flags; /* general flags */
- enum xpc_retval reason; /* reason why channel is disconnect'g */
+ enum xp_retval reason; /* reason why channel is disconnect'g */
int reason_line; /* line# disconnect initiated from */
u16 number; /* channel # */
@@ -481,16 +526,11 @@ struct xpc_channel {
/* kthread management related fields */
-// >>> rethink having kthreads_assigned_limit and kthreads_idle_limit; perhaps
-// >>> allow the assigned limit be unbounded and let the idle limit be dynamic
-// >>> dependent on activity over the last interval of time
atomic_t kthreads_assigned; /* #of kthreads assigned to channel */
u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */
atomic_t kthreads_idle; /* #of kthreads idle waiting for work */
u32 kthreads_idle_limit; /* limit on #of kthreads idle */
atomic_t kthreads_active; /* #of kthreads actively working */
- // >>> following field is temporary
- u32 kthreads_created; /* total #of kthreads created */
wait_queue_head_t idle_wq; /* idle kthread wait queue */
@@ -538,6 +578,8 @@ struct xpc_partition {
/* XPC HB infrastructure */
u8 remote_rp_version; /* version# of partition's rsvd pg */
+ short remote_npartitions; /* value of XPC_NPARTITIONS */
+ u32 flags; /* general flags */
struct timespec remote_rp_stamp;/* time when rsvd pg was initialized */
u64 remote_rp_pa; /* phys addr of partition's rsvd pg */
u64 remote_vars_pa; /* phys addr of partition's vars */
@@ -547,10 +589,11 @@ struct xpc_partition {
int remote_act_nasid; /* active part's act/deact nasid */
int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */
u32 act_IRQ_rcvd; /* IRQs since activation */
- spinlock_t act_lock; /* protect updating of act_state */
+ spinlock_t lock; /* protect updating of act_state and */
+ /* the general flags */
u8 act_state; /* from XPC HB viewpoint */
u8 remote_vars_version; /* version# of partition's vars */
- enum xpc_retval reason; /* reason partition is deactivating */
+ enum xp_retval reason; /* reason partition is deactivating */
int reason_line; /* line# deactivation initiated from */
int reactivate_nasid; /* nasid in partition to reactivate */
@@ -601,9 +644,9 @@ struct xpc_partition {
int remote_IPI_nasid; /* nasid of where to send IPIs */
int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */
- AMO_t *remote_IPI_amo_va; /* address of remote IPI AMO_t structure */
+ u64 *remote_IPI_amo_va; /* address of remote IPI AMO variable */
- AMO_t *local_IPI_amo_va; /* address of IPI AMO_t structure */
+ u64 *local_IPI_amo_va; /* address of IPI AMO variable */
u64 local_IPI_amo; /* IPI amo flags yet to be handled */
char IPI_owner[8]; /* IPI owner's name */
struct timer_list dropped_IPI_timer; /* dropped IPI timer */
@@ -618,14 +661,17 @@ struct xpc_partition {
} ____cacheline_aligned;
+/* struct xpc_partition flags */
+
+#define XPC_P_RAMOSREGISTERED 0x00000001 /* remote AMOs were registered */
/* struct xpc_partition act_state values (for XPC HB) */
-#define XPC_P_INACTIVE 0x00 /* partition is not active */
-#define XPC_P_ACTIVATION_REQ 0x01 /* created thread to activate */
-#define XPC_P_ACTIVATING 0x02 /* activation thread started */
-#define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */
-#define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */
+#define XPC_P_AS_INACTIVE 0x00 /* partition is not active */
+#define XPC_P_AS_ACTIVATION_REQ 0x01 /* created thread to activate */
+#define XPC_P_AS_ACTIVATING 0x02 /* activation thread started */
+#define XPC_P_AS_ACTIVE 0x03 /* xpc_partition_up() was called */
+#define XPC_P_AS_DEACTIVATING 0x04 /* partition deactivation initiated */
#define XPC_DEACTIVATE_PARTITION(_p, _reason) \
@@ -634,10 +680,10 @@ struct xpc_partition {
/* struct xpc_partition setup_state values */
-#define XPC_P_UNSET 0x00 /* infrastructure was never setup */
-#define XPC_P_SETUP 0x01 /* infrastructure is setup */
-#define XPC_P_WTEARDOWN 0x02 /* waiting to teardown infrastructure */
-#define XPC_P_TORNDOWN 0x03 /* infrastructure is torndown */
+#define XPC_P_SS_UNSET 0x00 /* infrastructure was never setup */
+#define XPC_P_SS_SETUP 0x01 /* infrastructure is setup */
+#define XPC_P_SS_WTEARDOWN 0x02 /* waiting to teardown infrastructure */
+#define XPC_P_SS_TORNDOWN 0x03 /* infrastructure is torndown */
@@ -646,7 +692,7 @@ struct xpc_partition {
* dropped IPIs. These occur whenever an IPI amo write doesn't complete until
* after the IPI was received.
*/
-#define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ)
+#define XPC_DROPPED_IPI_WAIT_INTERVAL (0.25 * HZ)
/* number of seconds to wait for other partitions to disengage */
@@ -656,7 +702,7 @@ struct xpc_partition {
#define XPC_DISENGAGE_PRINTMSG_INTERVAL 10
-#define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0]))
+#define XPC_PARTID(_p) ((short) ((_p) - &xpc_partitions[0]))
@@ -682,41 +728,41 @@ extern int xpc_exiting;
extern struct xpc_vars *xpc_vars;
extern struct xpc_rsvd_page *xpc_rsvd_page;
extern struct xpc_vars_part *xpc_vars_part;
-extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
+extern struct xpc_partition xpc_partitions[XP_NPARTITIONS + 1];
extern char *xpc_remote_copy_buffer;
extern void *xpc_remote_copy_buffer_base;
extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **);
extern struct xpc_rsvd_page *xpc_rsvd_page_init(void);
-extern void xpc_allow_IPI_ops(void);
-extern void xpc_restrict_IPI_ops(void);
extern int xpc_identify_act_IRQ_sender(void);
extern int xpc_partition_disengaged(struct xpc_partition *);
-extern enum xpc_retval xpc_mark_partition_active(struct xpc_partition *);
+extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *);
+extern void xpc_deactivate_partition(const int, struct xpc_partition *,
+ enum xp_retval);
extern void xpc_mark_partition_inactive(struct xpc_partition *);
+extern enum xp_retval xpc_register_remote_amos(struct xpc_partition *);
+extern void xpc_unregister_remote_amos(struct xpc_partition *);
extern void xpc_discovery(void);
extern void xpc_check_remote_hb(void);
-extern void xpc_deactivate_partition(const int, struct xpc_partition *,
- enum xpc_retval);
-extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *);
+extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *);
/* found in xpc_channel.c */
extern void xpc_initiate_connect(int);
extern void xpc_initiate_disconnect(int);
-extern enum xpc_retval xpc_initiate_allocate(partid_t, int, u32, void **);
-extern enum xpc_retval xpc_initiate_send(partid_t, int, void *);
-extern enum xpc_retval xpc_initiate_send_notify(partid_t, int, void *,
+extern enum xp_retval xpc_initiate_allocate(short, int, u32, void **);
+extern enum xp_retval xpc_initiate_send(short, int, void *);
+extern enum xp_retval xpc_initiate_send_notify(short, int, void *,
xpc_notify_func, void *);
-extern void xpc_initiate_received(partid_t, int, void *);
-extern enum xpc_retval xpc_setup_infrastructure(struct xpc_partition *);
-extern enum xpc_retval xpc_pull_remote_vars_part(struct xpc_partition *);
+extern void xpc_initiate_received(short, int, void *);
+extern enum xp_retval xpc_setup_infrastructure(struct xpc_partition *);
+extern enum xp_retval xpc_pull_remote_vars_part(struct xpc_partition *);
extern void xpc_process_channel_activity(struct xpc_partition *);
extern void xpc_connected_callout(struct xpc_channel *);
extern void xpc_deliver_msg(struct xpc_channel *);
extern void xpc_disconnect_channel(const int, struct xpc_channel *,
- enum xpc_retval, unsigned long *);
-extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval);
-extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval);
+ enum xp_retval, unsigned long *);
+extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval);
+extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval);
extern void xpc_teardown_infrastructure(struct xpc_partition *);
@@ -769,7 +815,7 @@ xpc_part_deref(struct xpc_partition *par
DBUG_ON(refs < 0);
- if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) {
+ if (refs == 0 && part->setup_state == XPC_P_SS_WTEARDOWN) {
wake_up(&part->teardown_wq);
}
}
@@ -781,7 +827,7 @@ xpc_part_ref(struct xpc_partition *part)
atomic_inc(&part->references);
- setup = (part->setup_state == XPC_P_SETUP);
+ setup = (part->setup_state == XPC_P_SS_SETUP);
if (!setup) {
xpc_part_deref(part);
}
@@ -811,145 +857,123 @@ xpc_part_ref(struct xpc_partition *part)
static inline void
xpc_mark_partition_engaged(struct xpc_partition *part)
{
- unsigned long irq_flags;
- AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
- (XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t)));
-
-
- local_irq_save(irq_flags);
+ u64 *amo_va = __va(part->remote_amos_page_pa +
+ (xpc_engaged_partitions_amos(part->remote_npartitions) +
+ BIT_WORD(xp_partition_id)) * xp_sizeof_amo);
/* set bit corresponding to our partid in remote partition's AMO */
- FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR,
- (1UL << sn_partition_id));
- /*
- * We must always use the nofault function regardless of whether we
- * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
- * didn't, we'd never know that the other partition is down and would
- * keep sending IPIs and AMOs to it until the heartbeat times out.
- */
- (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
- variable), xp_nofault_PIOR_target));
-
- local_irq_restore(irq_flags);
+ (void)xp_set_amo(amo_va, XP_AMO_OR, BIT_MASK(xp_partition_id), 1);
}
static inline void
xpc_mark_partition_disengaged(struct xpc_partition *part)
{
- unsigned long irq_flags;
- AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
- (XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t)));
-
-
- local_irq_save(irq_flags);
+ u64 *amo_va = __va(part->remote_amos_page_pa +
+ (xpc_engaged_partitions_amos(part->remote_npartitions) +
+ BIT_WORD(xp_partition_id)) * xp_sizeof_amo);
/* clear bit corresponding to our partid in remote partition's AMO */
- FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
- ~(1UL << sn_partition_id));
- /*
- * We must always use the nofault function regardless of whether we
- * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
- * didn't, we'd never know that the other partition is down and would
- * keep sending IPIs and AMOs to it until the heartbeat times out.
- */
- (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
- variable), xp_nofault_PIOR_target));
-
- local_irq_restore(irq_flags);
+ (void)xp_set_amo(amo_va, XP_AMO_AND, ~BIT_MASK(xp_partition_id), 1);
}
static inline void
xpc_request_partition_disengage(struct xpc_partition *part)
{
- unsigned long irq_flags;
- AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
- (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
-
-
- local_irq_save(irq_flags);
+ u64 *amo_va = __va(part->remote_amos_page_pa +
+ (xpc_disengage_request_amos(part->remote_npartitions) +
+ BIT_WORD(xp_partition_id)) * xp_sizeof_amo);
/* set bit corresponding to our partid in remote partition's AMO */
- FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR,
- (1UL << sn_partition_id));
- /*
- * We must always use the nofault function regardless of whether we
- * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
- * didn't, we'd never know that the other partition is down and would
- * keep sending IPIs and AMOs to it until the heartbeat times out.
- */
- (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
- variable), xp_nofault_PIOR_target));
-
- local_irq_restore(irq_flags);
+ (void)xp_set_amo(amo_va, XP_AMO_OR, BIT_MASK(xp_partition_id), 1);
}
static inline void
xpc_cancel_partition_disengage_request(struct xpc_partition *part)
{
- unsigned long irq_flags;
- AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
- (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
-
-
- local_irq_save(irq_flags);
+ u64 *amo_va = __va(part->remote_amos_page_pa +
+ (xpc_disengage_request_amos(part->remote_npartitions) +
+ BIT_WORD(xp_partition_id)) * xp_sizeof_amo);
/* clear bit corresponding to our partid in remote partition's AMO */
- FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
- ~(1UL << sn_partition_id));
- /*
- * We must always use the nofault function regardless of whether we
- * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
- * didn't, we'd never know that the other partition is down and would
- * keep sending IPIs and AMOs to it until the heartbeat times out.
- */
- (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
- variable), xp_nofault_PIOR_target));
-
- local_irq_restore(irq_flags);
+ (void)xp_set_amo(amo_va, XP_AMO_AND, ~BIT_MASK(xp_partition_id), 1);
}
-static inline u64
-xpc_partition_engaged(u64 partid_mask)
+static inline int
+xpc_any_partition_engaged(void)
{
- AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
+ enum xp_retval ret;
+ int w_index;
+ u64 *amo_va = (u64 *)((u64)xpc_vars->amos_page +
+ xpc_engaged_partitions_amos(xpc_vars->npartitions) *
+ xp_sizeof_amo);
+ u64 amo;
+
+ for (w_index = 0; w_index < xp_partid_mask_words(xpc_vars->npartitions);
+ w_index++) {
+ ret = xp_get_amo(amo_va, XP_AMO_LOAD, &amo);
+ BUG_ON(ret != xpSuccess); /* should never happen */
+ if (amo != 0)
+ return 1;
-
- /* return our partition's AMO variable ANDed with partid_mask */
- return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) &
- partid_mask);
+ amo_va = (u64 *)((u64)amo_va + xp_sizeof_amo);
+ }
+ return 0;
}
static inline u64
-xpc_partition_disengage_requested(u64 partid_mask)
+xpc_partition_engaged(short partid)
{
- AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
+ enum xp_retval ret;
+ u64 *amo_va = (u64 *)((u64)xpc_vars->amos_page +
+ (xpc_engaged_partitions_amos(xpc_vars->npartitions) +
+ BIT_WORD(partid)) * xp_sizeof_amo);
+ u64 amo;
+
+ /* return our partition's AMO variable ANDed with partid mask */
+ ret = xp_get_amo(amo_va, XP_AMO_LOAD, &amo);
+ BUG_ON(ret != xpSuccess); /* should never happen */
+ return (amo & BIT_MASK(partid));
+}
+static inline u64
+xpc_partition_disengage_requested(short partid)
+{
+ enum xp_retval ret;
+ u64 *amo_va = (u64 *)((u64)xpc_vars->amos_page +
+ (xpc_disengage_request_amos(xpc_vars->npartitions) +
+ BIT_WORD(partid)) * xp_sizeof_amo);
+ u64 amo;
- /* return our partition's AMO variable ANDed with partid_mask */
- return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) &
- partid_mask);
+ /* return our partition's AMO variable ANDed with partid mask */
+ ret = xp_get_amo(amo_va, XP_AMO_LOAD, &amo);
+ BUG_ON(ret != xpSuccess); /* should never happen */
+ return (amo & BIT_MASK(partid));
}
static inline void
-xpc_clear_partition_engaged(u64 partid_mask)
+xpc_clear_partition_engaged(short partid)
{
- AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
+ enum xp_retval ret;
+ u64 *amo_va = (u64 *)((u64)xpc_vars->amos_page +
+ (xpc_engaged_partitions_amos(xpc_vars->npartitions) +
+ BIT_WORD(partid)) * xp_sizeof_amo);
-
- /* clear bit(s) based on partid_mask in our partition's AMO */
- FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
- ~partid_mask);
+ /* clear bit corresponding to partid in our partition's AMO */
+ ret = xp_set_amo(amo_va, XP_AMO_AND, ~BIT_MASK(partid), 0);
+ BUG_ON(ret != xpSuccess); /* should never happen */
}
static inline void
-xpc_clear_partition_disengage_request(u64 partid_mask)
+xpc_clear_partition_disengage_request(short partid)
{
- AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
-
+ enum xp_retval ret;
+ u64 *amo_va = (u64 *)((u64)xpc_vars->amos_page +
+ (xpc_disengage_request_amos(xpc_vars->npartitions) +
+ BIT_WORD(partid)) * xp_sizeof_amo);
- /* clear bit(s) based on partid_mask in our partition's AMO */
- FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
- ~partid_mask);
+ /* clear bit corresponding to partid in our partition's AMO */
+ ret = xp_set_amo(amo_va, XP_AMO_AND, ~BIT_MASK(partid), 0);
+ BUG_ON(ret != xpSuccess); /* should never happen */
}
@@ -961,40 +985,6 @@ xpc_clear_partition_disengage_request(u6
* the other that is associated with channel activity (SGI_XPC_NOTIFY).
*/
-static inline u64
-xpc_IPI_receive(AMO_t *amo)
-{
- return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR);
-}
-
-
-static inline enum xpc_retval
-xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
-{
- int ret = 0;
- unsigned long irq_flags;
-
-
- local_irq_save(irq_flags);
-
- FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flag);
- sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
-
- /*
- * We must always use the nofault function regardless of whether we
- * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
- * didn't, we'd never know that the other partition is down and would
- * keep sending IPIs and AMOs to it until the heartbeat times out.
- */
- ret = xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
- xp_nofault_PIOR_target));
-
- local_irq_restore(irq_flags);
-
- return ((ret == 0) ? xpcSuccess : xpcPioReadError);
-}
-
-
/*
* IPIs associated with SGI_XPC_ACTIVATE IRQ.
*/
@@ -1004,44 +994,53 @@ xpc_IPI_send(AMO_t *amo, u64 flag, int n
*/
static inline void
xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid,
- int to_phys_cpuid)
+ int to_phys_cpuid, int remote_amo, int npartitions)
{
- int w_index = XPC_NASID_W_INDEX(from_nasid);
- int b_index = XPC_NASID_B_INDEX(from_nasid);
- AMO_t *amos = (AMO_t *) __va(amos_page_pa +
- (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));
-
-
- (void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
- to_phys_cpuid, SGI_XPC_ACTIVATE);
+ enum xp_retval ret;
+ /* SN nodes are always even-numbered nasids */
+ u64 *amo_va = (u64 *)__va(amos_page_pa +
+ (xpc_activate_irq_amos(npartitions) +
+ BIT_WORD(from_nasid/2)) * xp_sizeof_amo);
+
+ ret = xp_set_amo_with_interrupt(amo_va, XP_AMO_OR,
+ BIT_MASK(from_nasid/2),
+ remote_amo, to_nasid,
+ to_phys_cpuid, SGI_XPC_ACTIVATE);
+ BUG_ON(!remote_amo && ret != xpSuccess); /* should never happen */
}
static inline void
xpc_IPI_send_activate(struct xpc_vars *vars)
{
- xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0),
- vars->act_nasid, vars->act_phys_cpuid);
+ xpc_activate_IRQ_send(vars->amos_page_pa, xp_node_to_nasid(0),
+ vars->act_nasid, vars->act_phys_cpuid, 1,
+ vars->npartitions);
}
static inline void
xpc_IPI_send_activated(struct xpc_partition *part)
{
- xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
- part->remote_act_nasid, part->remote_act_phys_cpuid);
+ xpc_activate_IRQ_send(part->remote_amos_page_pa, xp_node_to_nasid(0),
+ part->remote_act_nasid,
+ part->remote_act_phys_cpuid, 1,
+ part->remote_npartitions);
}
static inline void
xpc_IPI_send_reactivate(struct xpc_partition *part)
{
xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid,
- xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
+ xpc_vars->act_nasid, xpc_vars->act_phys_cpuid, 0,
+ xpc_vars->npartitions);
}
static inline void
xpc_IPI_send_disengage(struct xpc_partition *part)
{
- xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
- part->remote_act_nasid, part->remote_act_phys_cpuid);
+ xpc_activate_IRQ_send(part->remote_amos_page_pa, xp_node_to_nasid(0),
+ part->remote_act_nasid,
+ part->remote_act_phys_cpuid, 1,
+ part->remote_npartitions);
}
@@ -1061,26 +1060,25 @@ xpc_notify_IRQ_send(struct xpc_channel *
unsigned long *irq_flags)
{
struct xpc_partition *part = &xpc_partitions[ch->partid];
- enum xpc_retval ret;
+ enum xp_retval ret;
+
+ if (unlikely(part->act_state == XPC_P_AS_DEACTIVATING))
+ return;
- if (likely(part->act_state != XPC_P_DEACTIVATING)) {
- ret = xpc_IPI_send(part->remote_IPI_amo_va,
- (u64) ipi_flag << (ch->number * 8),
+ ret = xp_set_amo_with_interrupt(part->remote_IPI_amo_va, XP_AMO_OR,
+ ((u64)ipi_flag << (ch->number * 8)), 1,
part->remote_IPI_nasid,
part->remote_IPI_phys_cpuid,
SGI_XPC_NOTIFY);
- dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
- ipi_flag_string, ch->partid, ch->number, ret);
- if (unlikely(ret != xpcSuccess)) {
- if (irq_flags != NULL) {
- spin_unlock_irqrestore(&ch->lock, *irq_flags);
- }
- XPC_DEACTIVATE_PARTITION(part, ret);
- if (irq_flags != NULL) {
- spin_lock_irqsave(&ch->lock, *irq_flags);
- }
- }
+ dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
+ ipi_flag_string, ch->partid, ch->number, ret);
+ if (unlikely(ret != xpSuccess)) {
+ if (irq_flags != NULL)
+ spin_unlock_irqrestore(&ch->lock, *irq_flags);
+ XPC_DEACTIVATE_PARTITION(part, ret);
+ if (irq_flags != NULL)
+ spin_lock_irqsave(&ch->lock, *irq_flags);
}
}
@@ -1097,11 +1095,14 @@ static inline void
xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
char *ipi_flag_string)
{
- struct xpc_partition *part = &xpc_partitions[ch->partid];
+ enum xp_retval ret;
+ u64 *amo_va = xpc_partitions[ch->partid].local_IPI_amo_va;
+ /* set IPI flag corresponding to channel in partition's local AMO */
+ ret = xp_set_amo(amo_va, XP_AMO_OR, ((u64)ipi_flag << (ch->number * 8)),
+ 0);
+ BUG_ON(ret != xpSuccess); /* should never happen */
- FETCHOP_STORE_OP(TO_AMO((u64) &part->local_IPI_amo_va->variable),
- FETCHOP_OR, ((u64) ipi_flag << (ch->number * 8)));
dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
ipi_flag_string, ch->partid, ch->number);
}
@@ -1126,8 +1127,8 @@ xpc_notify_IRQ_send_local(struct xpc_cha
#define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff))
#define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8))
-#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x0f0f0f0f0f0f0f0f))
-#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x1010101010101010))
+#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0fUL)
+#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010UL)
static inline void
@@ -1185,53 +1186,16 @@ xpc_IPI_send_local_msgrequest(struct xpc
}
-/*
- * Memory for XPC's AMO variables is allocated by the MSPEC driver. These
- * pages are located in the lowest granule. The lowest granule uses 4k pages
- * for cached references and an alternate TLB handler to never provide a
- * cacheable mapping for the entire region. This will prevent speculative
- * reading of cached copies of our lines from being issued which will cause
- * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64
- * AMO variables (based on XP_MAX_PARTITIONS) for message notification and an
- * additional 128 AMO variables (based on XP_NASID_MASK_WORDS) for partition
- * activation and 2 AMO variables for partition deactivation.
- */
-static inline AMO_t *
+static inline u64 *
xpc_IPI_init(int index)
{
- AMO_t *amo = xpc_vars->amos_page + index;
-
-
- (void) xpc_IPI_receive(amo); /* clear AMO variable */
- return amo;
-}
-
-
-
-static inline enum xpc_retval
-xpc_map_bte_errors(bte_result_t error)
-{
- if (error == BTE_SUCCESS)
- return xpcSuccess;
-
- if (is_shub2()) {
- if (BTE_VALID_SH2_ERROR(error))
- return xpcBteSh2Start + error;
- return xpcBteUnmappedError;
- }
- switch (error) {
- case BTE_SUCCESS: return xpcSuccess;
- case BTEFAIL_DIR: return xpcBteDirectoryError;
- case BTEFAIL_POISON: return xpcBtePoisonError;
- case BTEFAIL_WERR: return xpcBteWriteError;
- case BTEFAIL_ACCESS: return xpcBteAccessError;
- case BTEFAIL_PWERR: return xpcBtePWriteError;
- case BTEFAIL_PRERR: return xpcBtePReadError;
- case BTEFAIL_TOUT: return xpcBteTimeOutError;
- case BTEFAIL_XTERR: return xpcBteXtalkError;
- case BTEFAIL_NOTAVAIL: return xpcBteNotAvailable;
- default: return xpcBteUnmappedError;
- }
+ enum xp_retval ret;
+ u64 *amo_va = (u64 *)((u64)xpc_vars->amos_page + index *
+ xp_sizeof_amo);
+
+ ret = xp_get_amo(amo_va, XP_AMO_CLEAR, NULL);
+ BUG_ON(ret != xpSuccess); /* should never happen */
+ return amo_va;
}
@@ -1243,11 +1207,13 @@ xpc_map_bte_errors(bte_result_t error)
static inline void
xpc_check_for_channel_activity(struct xpc_partition *part)
{
+ enum xp_retval ret;
u64 IPI_amo;
unsigned long irq_flags;
- IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va);
+ ret = xp_get_amo(part->local_IPI_amo_va, XP_AMO_CLEAR, &IPI_amo);
+ BUG_ON(ret != xpSuccess); /* should never happen */
if (IPI_amo == 0) {
return;
}
@@ -1256,12 +1222,12 @@ xpc_check_for_channel_activity(struct xp
part->local_IPI_amo |= IPI_amo;
spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
- dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n",
- XPC_PARTID(part), IPI_amo);
+ dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%" U64_ELL
+ "x\n", XPC_PARTID(part), IPI_amo);
xpc_wakeup_channel_mgr(part);
}
-#endif /* _ASM_IA64_SN_XPC_H */
+#endif /* _DRIVERS_MISC_XP_XPC_H */
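
For reviewers tracing the new AMO addressing: each AMO array now holds one
64-bit word per 64 partitions, so a partid selects a word with BIT_WORD() and
a bit within it with BIT_MASK(), and the word's byte offset is scaled by
xp_sizeof_amo. A minimal userspace sketch of that arithmetic (the stride and
base index below are illustrative stand-ins, not the driver's actual values):

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_U64	64
#define BIT_WORD(nr)	((nr) / BITS_PER_U64)
#define BIT_MASK(nr)	(1ULL << ((nr) % BITS_PER_U64))

int main(void)
{
	uint64_t xp_sizeof_amo = 64;	/* assumed AMO stride in bytes */
	uint64_t engaged_base = 2;	/* assumed index of first engaged-partitions AMO */
	int partid;

	for (partid = 0; partid < 130; partid += 63) {
		uint64_t offset = (engaged_base + BIT_WORD(partid)) *
				  xp_sizeof_amo;
		printf("partid %3d -> word %d (offset 0x%llx), mask 0x%016llx\n",
		       partid, (int)BIT_WORD(partid),
		       (unsigned long long)offset,
		       (unsigned long long)BIT_MASK(partid));
	}
	return 0;
}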
Index: linux-2.6/drivers/misc/xp/xpc_channel.c
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xpc_channel.c 2008-03-25 13:47:59.962929996 -0500
+++ linux-2.6/drivers/misc/xp/xpc_channel.c 2008-03-25 13:51:08.874454229 -0500
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
@@ -23,9 +23,11 @@
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/completion.h>
-#include <asm/sn/bte.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/xpc.h>
+#include "xpc.h"
+
+#ifdef CONFIG_X86_64
+#define cmpxchg_rel(ptr, o, n) cmpxchg(ptr, o, n)
+#endif
/*
@@ -57,7 +59,7 @@ xpc_kzalloc_cacheline_aligned(size_t siz
* Set up the initial values for the XPartition Communication channels.
*/
static void
-xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
+xpc_initialize_channels(struct xpc_partition *part, short partid)
{
int ch_number;
struct xpc_channel *ch;
@@ -96,12 +98,12 @@ xpc_initialize_channels(struct xpc_parti
* Setup the infrastructure necessary to support XPartition Communication
* between the specified remote partition and the local one.
*/
-enum xpc_retval
+enum xp_retval
xpc_setup_infrastructure(struct xpc_partition *part)
{
int ret, cpuid;
struct timer_list *timer;
- partid_t partid = XPC_PARTID(part);
+ short partid = XPC_PARTID(part);
/*
@@ -121,7 +123,7 @@ xpc_setup_infrastructure(struct xpc_part
GFP_KERNEL);
if (part->channels == NULL) {
dev_err(xpc_chan, "can't get memory for channels\n");
- return xpcNoMemory;
+ return xpNoMemory;
}
part->nchannels = XPC_NCHANNELS;
@@ -136,7 +138,7 @@ xpc_setup_infrastructure(struct xpc_part
part->channels = NULL;
dev_err(xpc_chan, "can't get memory for local get/put "
"values\n");
- return xpcNoMemory;
+ return xpNoMemory;
}
part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
@@ -148,7 +150,7 @@ xpc_setup_infrastructure(struct xpc_part
part->local_GPs = NULL;
kfree(part->channels);
part->channels = NULL;
- return xpcNoMemory;
+ return xpNoMemory;
}
@@ -165,7 +167,7 @@ xpc_setup_infrastructure(struct xpc_part
part->local_GPs = NULL;
kfree(part->channels);
part->channels = NULL;
- return xpcNoMemory;
+ return xpNoMemory;
}
part->remote_openclose_args = xpc_kzalloc_cacheline_aligned(
@@ -181,7 +183,7 @@ xpc_setup_infrastructure(struct xpc_part
part->local_GPs = NULL;
kfree(part->channels);
part->channels = NULL;
- return xpcNoMemory;
+ return xpNoMemory;
}
@@ -193,8 +195,8 @@ xpc_setup_infrastructure(struct xpc_part
/* local_IPI_amo were set to 0 by an earlier memset() */
- /* Initialize this partitions AMO_t structure */
- part->local_IPI_amo_va = xpc_IPI_init(partid);
+ /* Initialize this partition's AMO structure */
+ part->local_IPI_amo_va = xpc_IPI_init(xpc_notify_irq_amos() + partid);
spin_lock_init(&part->IPI_lock);
@@ -217,7 +219,7 @@ xpc_setup_infrastructure(struct xpc_part
part->local_GPs = NULL;
kfree(part->channels);
part->channels = NULL;
- return xpcLackOfResources;
+ return xpLackOfResources;
}
/* Setup a timer to check for dropped IPIs */
@@ -225,14 +227,14 @@ xpc_setup_infrastructure(struct xpc_part
init_timer(timer);
timer->function = (void (*)(unsigned long)) xpc_dropped_IPI_check;
timer->data = (unsigned long) part;
- timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT;
+ timer->expires = jiffies + XPC_DROPPED_IPI_WAIT_INTERVAL;
add_timer(timer);
/*
- * With the setting of the partition setup_state to XPC_P_SETUP, we're
- * declaring that this partition is ready to go.
+ * With the setting of the partition setup_state to XPC_P_SS_SETUP,
+ * we're declaring that this partition is ready to go.
*/
- part->setup_state = XPC_P_SETUP;
+ part->setup_state = XPC_P_SS_SETUP;
/*
@@ -247,12 +249,12 @@ xpc_setup_infrastructure(struct xpc_part
__pa(part->local_openclose_args);
xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va);
cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */
- xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid);
+ xpc_vars_part[partid].IPI_nasid = xp_cpu_to_nasid(cpuid);
xpc_vars_part[partid].IPI_phys_cpuid = cpu_physical_id(cpuid);
xpc_vars_part[partid].nchannels = part->nchannels;
xpc_vars_part[partid].magic = XPC_VP_MAGIC1;
- return xpcSuccess;
+ return xpSuccess;
}
@@ -260,35 +262,32 @@ xpc_setup_infrastructure(struct xpc_part
* Create a wrapper that hides the underlying mechanism for pulling a cacheline
* (or multiple cachelines) from a remote partition.
*
- * src must be a cacheline aligned physical address on the remote partition.
- * dst must be a cacheline aligned virtual address on this partition.
- * cnt must be an cacheline sized
+ * src must be a cacheline-aligned physical address on the remote partition.
+ * dst must be a cacheline-aligned virtual address on this partition.
+ * cnt must be a multiple of the cacheline size.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
const void *src, size_t cnt)
{
- bte_result_t bte_ret;
+ enum xp_retval ret;
DBUG_ON((u64) src != L1_CACHE_ALIGN((u64) src));
DBUG_ON((u64) dst != L1_CACHE_ALIGN((u64) dst));
DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
- if (part->act_state == XPC_P_DEACTIVATING) {
+ if (part->act_state == XPC_P_AS_DEACTIVATING) {
return part->reason;
}
- bte_ret = xp_bte_copy((u64) src, (u64) dst, (u64) cnt,
- (BTE_NORMAL | BTE_WACQUIRE), NULL);
- if (bte_ret == BTE_SUCCESS) {
- return xpcSuccess;
+ ret = xp_remote_memcpy(dst, src, cnt);
+ if (ret != xpSuccess) {
+ dev_dbg(xpc_chan, "xp_remote_memcpy() from partition %d failed,"
+ " ret=%d\n", XPC_PARTID(part), ret);
}
- dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n",
- XPC_PARTID(part), bte_ret);
-
- return xpc_map_bte_errors(bte_ret);
+ return ret;
}
@@ -296,7 +295,7 @@ xpc_pull_remote_cachelines(struct xpc_pa
* Pull the remote per partition specific variables from the specified
* partition.
*/
-enum xpc_retval
+enum xp_retval
xpc_pull_remote_vars_part(struct xpc_partition *part)
{
u8 buffer[L1_CACHE_BYTES * 2];
@@ -304,8 +303,8 @@ xpc_pull_remote_vars_part(struct xpc_par
(struct xpc_vars_part *) L1_CACHE_ALIGN((u64) buffer);
struct xpc_vars_part *pulled_entry;
u64 remote_entry_cacheline_pa, remote_entry_pa;
- partid_t partid = XPC_PARTID(part);
- enum xpc_retval ret;
+ short partid = XPC_PARTID(part);
+ enum xp_retval ret;
/* pull the cacheline that contains the variables we're interested in */
@@ -315,7 +314,7 @@ xpc_pull_remote_vars_part(struct xpc_par
DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2);
remote_entry_pa = part->remote_vars_part_pa +
- sn_partition_id * sizeof(struct xpc_vars_part);
+ xp_partition_id * sizeof(struct xpc_vars_part);
remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));
@@ -325,7 +324,7 @@ xpc_pull_remote_vars_part(struct xpc_par
ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline,
(void *) remote_entry_cacheline_pa,
L1_CACHE_BYTES);
- if (ret != xpcSuccess) {
+ if (ret != xpSuccess) {
dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
"partition %d, ret=%d\n", partid, ret);
return ret;
@@ -339,13 +338,14 @@ xpc_pull_remote_vars_part(struct xpc_par
if (pulled_entry->magic != 0) {
dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
- "partition %d has bad magic value (=0x%lx)\n",
- partid, sn_partition_id, pulled_entry->magic);
- return xpcBadMagic;
+ "partition %d has bad magic value (=0x%"
+ U64_ELL "x)\n", partid, xp_partition_id,
+ pulled_entry->magic);
+ return xpBadMagic;
}
/* they've not been initialized yet */
- return xpcRetry;
+ return xpRetry;
}
if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) {
@@ -358,8 +358,8 @@ xpc_pull_remote_vars_part(struct xpc_par
dev_err(xpc_chan, "partition %d's XPC vars_part for "
"partition %d are not valid\n", partid,
- sn_partition_id);
- return xpcInvalidAddress;
+ xp_partition_id);
+ return xpInvalidAddress;
}
/* the variables we imported look to be valid */
@@ -367,8 +367,7 @@ xpc_pull_remote_vars_part(struct xpc_par
part->remote_GPs_pa = pulled_entry->GPs_pa;
part->remote_openclose_args_pa =
pulled_entry->openclose_args_pa;
- part->remote_IPI_amo_va =
- (AMO_t *) __va(pulled_entry->IPI_amo_pa);
+ part->remote_IPI_amo_va = __va(pulled_entry->IPI_amo_pa);
part->remote_IPI_nasid = pulled_entry->IPI_nasid;
part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;
@@ -382,10 +381,10 @@ xpc_pull_remote_vars_part(struct xpc_par
}
if (pulled_entry->magic == XPC_VP_MAGIC1) {
- return xpcRetry;
+ return xpRetry;
}
- return xpcSuccess;
+ return xpSuccess;
}
@@ -397,7 +396,7 @@ xpc_get_IPI_flags(struct xpc_partition *
{
unsigned long irq_flags;
u64 IPI_amo;
- enum xpc_retval ret;
+ enum xp_retval ret;
/*
@@ -416,7 +415,7 @@ xpc_get_IPI_flags(struct xpc_partition *
part->remote_openclose_args,
(void *) part->remote_openclose_args_pa,
XPC_OPENCLOSE_ARGS_SIZE);
- if (ret != xpcSuccess) {
+ if (ret != xpSuccess) {
XPC_DEACTIVATE_PARTITION(part, ret);
dev_dbg(xpc_chan, "failed to pull openclose args from "
@@ -432,7 +431,7 @@ xpc_get_IPI_flags(struct xpc_partition *
ret = xpc_pull_remote_cachelines(part, part->remote_GPs,
(void *) part->remote_GPs_pa,
XPC_GP_SIZE);
- if (ret != xpcSuccess) {
+ if (ret != xpSuccess) {
XPC_DEACTIVATE_PARTITION(part, ret);
dev_dbg(xpc_chan, "failed to pull GPs from partition "
@@ -450,18 +449,13 @@ xpc_get_IPI_flags(struct xpc_partition *
/*
* Allocate the local message queue and the notify queue.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_allocate_local_msgqueue(struct xpc_channel *ch)
{
unsigned long irq_flags;
int nentries;
size_t nbytes;
-
- // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
- // >>> iterations of the for-loop, bail if set?
-
- // >>> should we impose a minimum #of entries? like 4 or 8?
for (nentries = ch->local_nentries; nentries > 0; nentries--) {
nbytes = nentries * ch->msg_size;
@@ -489,19 +483,19 @@ xpc_allocate_local_msgqueue(struct xpc_c
ch->local_nentries = nentries;
}
spin_unlock_irqrestore(&ch->lock, irq_flags);
- return xpcSuccess;
+ return xpSuccess;
}
dev_dbg(xpc_chan, "can't get memory for local message queue and notify "
"queue, partid=%d, channel=%d\n", ch->partid, ch->number);
- return xpcNoMemory;
+ return xpNoMemory;
}
/*
* Allocate the cached remote message queue.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
{
unsigned long irq_flags;
@@ -511,10 +505,6 @@ xpc_allocate_remote_msgqueue(struct xpc_
DBUG_ON(ch->remote_nentries <= 0);
- // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
- // >>> iterations of the for-loop, bail if set?
-
- // >>> should we impose a minimum #of entries? like 4 or 8?
for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
nbytes = nentries * ch->msg_size;
@@ -534,12 +524,12 @@ xpc_allocate_remote_msgqueue(struct xpc_
ch->remote_nentries = nentries;
}
spin_unlock_irqrestore(&ch->lock, irq_flags);
- return xpcSuccess;
+ return xpSuccess;
}
dev_dbg(xpc_chan, "can't get memory for cached remote message queue, "
"partid=%d, channel=%d\n", ch->partid, ch->number);
- return xpcNoMemory;
+ return xpNoMemory;
}
@@ -548,20 +538,20 @@ xpc_allocate_remote_msgqueue(struct xpc_
*
* Note: Assumes all of the channel sizes are filled in.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_allocate_msgqueues(struct xpc_channel *ch)
{
unsigned long irq_flags;
- enum xpc_retval ret;
+ enum xp_retval ret;
DBUG_ON(ch->flags & XPC_C_SETUP);
- if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) {
+ if ((ret = xpc_allocate_local_msgqueue(ch)) != xpSuccess) {
return ret;
}
- if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpcSuccess) {
+ if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpSuccess) {
kfree(ch->local_msgqueue_base);
ch->local_msgqueue = NULL;
kfree(ch->notify_queue);
@@ -573,7 +563,7 @@ xpc_allocate_msgqueues(struct xpc_channe
ch->flags |= XPC_C_SETUP;
spin_unlock_irqrestore(&ch->lock, irq_flags);
- return xpcSuccess;
+ return xpSuccess;
}
@@ -586,7 +576,7 @@ xpc_allocate_msgqueues(struct xpc_channe
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
- enum xpc_retval ret;
+ enum xp_retval ret;
DBUG_ON(!spin_is_locked(&ch->lock));
@@ -603,7 +593,7 @@ xpc_process_connect(struct xpc_channel *
ret = xpc_allocate_msgqueues(ch);
spin_lock_irqsave(&ch->lock, *irq_flags);
- if (ret != xpcSuccess) {
+ if (ret != xpSuccess) {
XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
}
if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) {
@@ -641,7 +631,7 @@ xpc_process_connect(struct xpc_channel *
* Notify those who wanted to be notified upon delivery of their message.
*/
static void
-xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
+xpc_notify_senders(struct xpc_channel *ch, enum xp_retval reason, s64 put)
{
struct xpc_notify *notify;
u8 notify_type;
@@ -671,16 +661,17 @@ xpc_notify_senders(struct xpc_channel *c
if (notify->func != NULL) {
dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
- "msg_number=%ld, partid=%d, channel=%d\n",
- (void *) notify, get, ch->partid, ch->number);
+ "msg_number=%" U64_ELL "d, partid=%d, "
+ "channel=%d\n", (void *) notify, get,
+ ch->partid, ch->number);
notify->func(reason, ch->partid, ch->number,
notify->key);
dev_dbg(xpc_chan, "notify->func() returned, "
- "notify=0x%p, msg_number=%ld, partid=%d, "
- "channel=%d\n", (void *) notify, get,
- ch->partid, ch->number);
+ "notify=0x%p, msg_number=%" U64_ELL "d, "
+ "partid=%d, channel=%d\n", (void *) notify,
+ get, ch->partid, ch->number);
}
}
}
@@ -761,9 +752,9 @@ xpc_process_disconnect(struct xpc_channe
DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
- if (part->act_state == XPC_P_DEACTIVATING) {
+ if (part->act_state == XPC_P_AS_DEACTIVATING) {
/* can't proceed until the other side disengages from us */
- if (xpc_partition_engaged(1UL << ch->partid)) {
+ if (xpc_partition_engaged(ch->partid)) {
return;
}
@@ -795,7 +786,7 @@ xpc_process_disconnect(struct xpc_channe
if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
spin_unlock_irqrestore(&ch->lock, *irq_flags);
- xpc_disconnect_callout(ch, xpcDisconnected);
+ xpc_disconnect_callout(ch, xpDisconnected);
spin_lock_irqsave(&ch->lock, *irq_flags);
}
@@ -816,7 +807,7 @@ xpc_process_disconnect(struct xpc_channe
/* we won't lose the CPU since we're holding ch->lock */
complete(&ch->wdisconnect_wait);
} else if (ch->delayed_IPI_flags) {
- if (part->act_state != XPC_P_DEACTIVATING) {
+ if (part->act_state != XPC_P_AS_DEACTIVATING) {
/* time to take action on any delayed IPI flags */
spin_lock(&part->IPI_lock);
XPC_SET_IPI_FLAGS(part->local_IPI_amo, ch->number,
@@ -839,7 +830,7 @@ xpc_process_openclose_IPI(struct xpc_par
struct xpc_openclose_args *args =
&part->remote_openclose_args[ch_number];
struct xpc_channel *ch = &part->channels[ch_number];
- enum xpc_retval reason;
+ enum xp_retval reason;
@@ -921,10 +912,10 @@ again:
if (!(ch->flags & XPC_C_DISCONNECTING)) {
reason = args->reason;
- if (reason <= xpcSuccess || reason > xpcUnknownReason) {
- reason = xpcUnknownReason;
- } else if (reason == xpcUnregistering) {
- reason = xpcOtherUnregistering;
+ if (reason <= xpSuccess || reason > xpUnknownReason) {
+ reason = xpUnknownReason;
+ } else if (reason == xpUnregistering) {
+ reason = xpOtherUnregistering;
}
XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
@@ -944,7 +935,7 @@ again:
" channel=%d\n", ch->partid, ch->number);
if (ch->flags & XPC_C_DISCONNECTED) {
- DBUG_ON(part->act_state != XPC_P_DEACTIVATING);
+ DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING);
spin_unlock_irqrestore(&ch->lock, irq_flags);
return;
}
@@ -981,7 +972,7 @@ again:
"channel=%d\n", args->msg_size, args->local_nentries,
ch->partid, ch->number);
- if (part->act_state == XPC_P_DEACTIVATING ||
+ if (part->act_state == XPC_P_AS_DEACTIVATING ||
(ch->flags & XPC_C_ROPENREQUEST)) {
spin_unlock_irqrestore(&ch->lock, irq_flags);
return;
@@ -1014,7 +1005,7 @@ again:
if (ch->flags & XPC_C_OPENREQUEST) {
if (args->msg_size != ch->msg_size) {
- XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
+ XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
&irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
return;
@@ -1034,18 +1025,18 @@ again:
if (IPI_flags & XPC_IPI_OPENREPLY) {
- dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, "
- "local_nentries=%d, remote_nentries=%d) received from "
- "partid=%d, channel=%d\n", args->local_msgqueue_pa,
- args->local_nentries, args->remote_nentries,
- ch->partid, ch->number);
+ dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%"
+ U64_ELL "x, local_nentries=%d, remote_nentries=%d) "
+ "received from partid=%d, channel=%d\n",
+ args->local_msgqueue_pa, args->local_nentries,
+ args->remote_nentries, ch->partid, ch->number);
if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
spin_unlock_irqrestore(&ch->lock, irq_flags);
return;
}
if (!(ch->flags & XPC_C_OPENREQUEST)) {
- XPC_DISCONNECT_CHANNEL(ch, xpcOpenCloseError,
+ XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
&irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
return;
@@ -1097,7 +1088,7 @@ again:
/*
* Attempt to establish a channel connection to a remote partition.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_connect_channel(struct xpc_channel *ch)
{
unsigned long irq_flags;
@@ -1105,12 +1096,12 @@ xpc_connect_channel(struct xpc_channel *
if (mutex_trylock(&registration->mutex) == 0) {
- return xpcRetry;
+ return xpRetry;
}
if (!XPC_CHANNEL_REGISTERED(ch->number)) {
mutex_unlock(&registration->mutex);
- return xpcUnregistered;
+ return xpUnregistered;
}
spin_lock_irqsave(&ch->lock, irq_flags);
@@ -1153,10 +1144,10 @@ xpc_connect_channel(struct xpc_channel *
* the channel lock as needed.
*/
mutex_unlock(&registration->mutex);
- XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
+ XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
&irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
- return xpcUnequalMsgSizes;
+ return xpUnequalMsgSizes;
}
} else {
ch->msg_size = registration->msg_size;
@@ -1179,7 +1170,7 @@ xpc_connect_channel(struct xpc_channel *
spin_unlock_irqrestore(&ch->lock, irq_flags);
- return xpcSuccess;
+ return xpSuccess;
}
@@ -1268,7 +1259,7 @@ xpc_process_msg_IPI(struct xpc_partition
* Notify senders that messages sent have been
* received and delivered by the other side.
*/
- xpc_notify_senders(ch, xpcMsgDelivered,
+ xpc_notify_senders(ch, xpMsgDelivered,
ch->remote_GP.get);
}
@@ -1280,9 +1271,9 @@ xpc_process_msg_IPI(struct xpc_partition
ch->w_remote_GP.get = ch->remote_GP.get;
- dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
- "channel=%d\n", ch->w_remote_GP.get, ch->partid,
- ch->number);
+ dev_dbg(xpc_chan, "w_remote_GP.get changed to %" U64_ELL "d, "
+ "partid=%d, channel=%d\n", ch->w_remote_GP.get,
+ ch->partid, ch->number);
/*
* If anyone was waiting for message queue entries to become
@@ -1308,9 +1299,9 @@ xpc_process_msg_IPI(struct xpc_partition
ch->w_remote_GP.put = ch->remote_GP.put;
- dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
- "channel=%d\n", ch->w_remote_GP.put, ch->partid,
- ch->number);
+ dev_dbg(xpc_chan, "w_remote_GP.put changed to %" U64_ELL "d, "
+ "partid=%d, channel=%d\n", ch->w_remote_GP.put,
+ ch->partid, ch->number);
nmsgs_sent = ch->w_remote_GP.put - ch->w_local_GP.get;
if (nmsgs_sent > 0) {
@@ -1371,7 +1362,7 @@ xpc_process_channel_activity(struct xpc_
continue;
}
- if (part->act_state == XPC_P_DEACTIVATING) {
+ if (part->act_state == XPC_P_AS_DEACTIVATING) {
continue;
}
@@ -1411,7 +1402,7 @@ xpc_process_channel_activity(struct xpc_
* at the same time.
*/
void
-xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
+xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
{
unsigned long irq_flags;
int ch_number;
@@ -1454,7 +1445,7 @@ xpc_partition_going_down(struct xpc_part
void
xpc_teardown_infrastructure(struct xpc_partition *part)
{
- partid_t partid = XPC_PARTID(part);
+ short partid = XPC_PARTID(part);
/*
@@ -1468,8 +1459,8 @@ xpc_teardown_infrastructure(struct xpc_p
DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
DBUG_ON(atomic_read(&part->nchannels_active) != 0);
- DBUG_ON(part->setup_state != XPC_P_SETUP);
- part->setup_state = XPC_P_WTEARDOWN;
+ DBUG_ON(part->setup_state != XPC_P_SS_SETUP);
+ part->setup_state = XPC_P_SS_WTEARDOWN;
xpc_vars_part[partid].magic = 0;
@@ -1486,7 +1477,7 @@ xpc_teardown_infrastructure(struct xpc_p
/* now we can begin tearing down the infrastructure */
- part->setup_state = XPC_P_TORNDOWN;
+ part->setup_state = XPC_P_SS_TORNDOWN;
/* in case we've still got outstanding timers registered... */
del_timer_sync(&part->dropped_IPI_timer);
@@ -1512,14 +1503,14 @@ xpc_teardown_infrastructure(struct xpc_p
void
xpc_initiate_connect(int ch_number)
{
- partid_t partid;
+ short partid;
struct xpc_partition *part;
struct xpc_channel *ch;
DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
- for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+ for (partid = XP_MIN_PARTID; partid <= XP_MAX_PARTID; partid++) {
part = &xpc_partitions[partid];
if (xpc_part_ref(part)) {
@@ -1542,13 +1533,13 @@ xpc_connected_callout(struct xpc_channel
/* let the registerer know that a connection has been established */
if (ch->func != NULL) {
- dev_dbg(xpc_chan, "ch->func() called, reason=xpcConnected, "
+ dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, "
"partid=%d, channel=%d\n", ch->partid, ch->number);
- ch->func(xpcConnected, ch->partid, ch->number,
+ ch->func(xpConnected, ch->partid, ch->number,
(void *) (u64) ch->local_nentries, ch->key);
- dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, "
+ dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, "
"partid=%d, channel=%d\n", ch->partid, ch->number);
}
}
@@ -1571,7 +1562,7 @@ void
xpc_initiate_disconnect(int ch_number)
{
unsigned long irq_flags;
- partid_t partid;
+ short partid;
struct xpc_partition *part;
struct xpc_channel *ch;
@@ -1579,7 +1570,7 @@ xpc_initiate_disconnect(int ch_number)
DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
/* initiate the channel disconnect for every active partition */
- for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+ for (partid = XP_MIN_PARTID; partid <= XP_MAX_PARTID; partid++) {
part = &xpc_partitions[partid];
if (xpc_part_ref(part)) {
@@ -1591,7 +1582,7 @@ xpc_initiate_disconnect(int ch_number)
if (!(ch->flags & XPC_C_DISCONNECTED)) {
ch->flags |= XPC_C_WDISCONNECT;
- XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering,
+ XPC_DISCONNECT_CHANNEL(ch, xpUnregistering,
&irq_flags);
}
@@ -1617,7 +1608,7 @@ xpc_initiate_disconnect(int ch_number)
*/
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
- enum xpc_retval reason, unsigned long *irq_flags)
+ enum xp_retval reason, unsigned long *irq_flags)
{
u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);
@@ -1654,7 +1645,7 @@ xpc_disconnect_channel(const int line, s
} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
!(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
- /* start a kthread that will do the xpcDisconnecting callout */
+ /* start a kthread that will do the xpDisconnecting callout */
xpc_create_kthreads(ch, 1, 1);
}
@@ -1668,7 +1659,7 @@ xpc_disconnect_channel(const int line, s
void
-xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
+xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason)
{
/*
* Let the channel's registerer know that the channel is being
@@ -1692,14 +1683,14 @@ xpc_disconnect_callout(struct xpc_channe
* Wait for a message entry to become available for the specified channel,
* but don't wait any longer than 1 jiffy.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
- enum xpc_retval ret;
+ enum xp_retval ret;
if (ch->flags & XPC_C_DISCONNECTING) {
- DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true?
+ DBUG_ON(ch->reason == xpInterrupted);
return ch->reason;
}
@@ -1709,11 +1700,11 @@ xpc_allocate_msg_wait(struct xpc_channel
if (ch->flags & XPC_C_DISCONNECTING) {
ret = ch->reason;
- DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true?
+ DBUG_ON(ch->reason == xpInterrupted);
} else if (ret == 0) {
- ret = xpcTimeout;
+ ret = xpTimeout;
} else {
- ret = xpcInterrupted;
+ ret = xpInterrupted;
}
return ret;
@@ -1724,12 +1715,12 @@ xpc_allocate_msg_wait(struct xpc_channel
* Allocate an entry for a message from the message queue associated with the
* specified channel.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
struct xpc_msg **address_of_msg)
{
struct xpc_msg *msg;
- enum xpc_retval ret;
+ enum xp_retval ret;
s64 put;
@@ -1742,7 +1733,7 @@ xpc_allocate_msg(struct xpc_channel *ch,
}
if (!(ch->flags & XPC_C_CONNECTED)) {
xpc_msgqueue_deref(ch);
- return xpcNotConnected;
+ return xpNotConnected;
}
@@ -1751,7 +1742,7 @@ xpc_allocate_msg(struct xpc_channel *ch,
* If none are available, we'll make sure that we grab the latest
* GP values.
*/
- ret = xpcTimeout;
+ ret = xpTimeout;
while (1) {
@@ -1783,17 +1774,17 @@ xpc_allocate_msg(struct xpc_channel *ch,
* that will cause the IPI handler to fetch the latest
* GP values as if an IPI was sent by the other side.
*/
- if (ret == xpcTimeout) {
+ if (ret == xpTimeout) {
xpc_IPI_send_local_msgrequest(ch);
}
if (flags & XPC_NOWAIT) {
xpc_msgqueue_deref(ch);
- return xpcNoWait;
+ return xpNoWait;
}
ret = xpc_allocate_msg_wait(ch);
- if (ret != xpcInterrupted && ret != xpcTimeout) {
+ if (ret != xpInterrupted && ret != xpTimeout) {
xpc_msgqueue_deref(ch);
return ret;
}
@@ -1808,13 +1799,13 @@ xpc_allocate_msg(struct xpc_channel *ch,
DBUG_ON(msg->flags != 0);
msg->number = put;
- dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
- "msg_number=%ld, partid=%d, channel=%d\n", put + 1,
+ dev_dbg(xpc_chan, "w_local_GP.put changed to %" U64_ELL "d; msg=0x%p, "
+ "msg_number=%" U64_ELL "d, partid=%d, channel=%d\n", put + 1,
(void *) msg, msg->number, ch->partid, ch->number);
*address_of_msg = msg;
- return xpcSuccess;
+ return xpSuccess;
}
@@ -1831,15 +1822,15 @@ xpc_allocate_msg(struct xpc_channel *ch,
* payload - address of the allocated payload area pointer (filled in on
* return) in which the user-defined message is constructed.
*/
-enum xpc_retval
-xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
+enum xp_retval
+xpc_initiate_allocate(short partid, int ch_number, u32 flags, void **payload)
{
struct xpc_partition *part = &xpc_partitions[partid];
- enum xpc_retval ret = xpcUnknownReason;
+ enum xp_retval ret = xpUnknownReason;
struct xpc_msg *msg = NULL;
- DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+ DBUG_ON(partid < XP_MIN_PARTID || partid > XP_MAX_PARTID);
DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
*payload = NULL;
@@ -1901,8 +1892,8 @@ xpc_send_msgs(struct xpc_channel *ch, s6
/* we just set the new value of local_GP->put */
- dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, "
- "channel=%d\n", put, ch->partid, ch->number);
+ dev_dbg(xpc_chan, "local_GP->put changed to %" U64_ELL "d, "
+ "partid=%d, channel=%d\n", put, ch->partid, ch->number);
send_IPI = 1;
@@ -1925,11 +1916,11 @@ xpc_send_msgs(struct xpc_channel *ch, s6
* local message queue's Put value and sends an IPI to the partition the
* message is being sent to.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
xpc_notify_func func, void *key)
{
- enum xpc_retval ret = xpcSuccess;
+ enum xp_retval ret = xpSuccess;
struct xpc_notify *notify = notify;
s64 put, msg_number = msg->number;
@@ -1959,7 +1950,7 @@ xpc_send_msg(struct xpc_channel *ch, str
notify->key = key;
notify->type = notify_type;
- // >>> is a mb() needed here?
+ /* >>> is a mb() needed here? */
if (ch->flags & XPC_C_DISCONNECTING) {
/*
@@ -2022,18 +2013,18 @@ xpc_send_msg(struct xpc_channel *ch, str
* payload - pointer to the payload area allocated via
* xpc_initiate_allocate().
*/
-enum xpc_retval
-xpc_initiate_send(partid_t partid, int ch_number, void *payload)
+enum xp_retval
+xpc_initiate_send(short partid, int ch_number, void *payload)
{
struct xpc_partition *part = &xpc_partitions[partid];
struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
- enum xpc_retval ret;
+ enum xp_retval ret;
dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
partid, ch_number);
- DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+ DBUG_ON(partid < XP_MIN_PARTID || partid > XP_MAX_PARTID);
DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
DBUG_ON(msg == NULL);
@@ -2073,19 +2064,19 @@ xpc_initiate_send(partid_t partid, int c
* receipt. THIS FUNCTION MUST BE NON-BLOCKING.
* key - user-defined key to be passed to the function when it's called.
*/
-enum xpc_retval
-xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload,
+enum xp_retval
+xpc_initiate_send_notify(short partid, int ch_number, void *payload,
xpc_notify_func func, void *key)
{
struct xpc_partition *part = &xpc_partitions[partid];
struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
- enum xpc_retval ret;
+ enum xp_retval ret;
dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
partid, ch_number);
- DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+ DBUG_ON(partid < XP_MIN_PARTID || partid > XP_MAX_PARTID);
DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
DBUG_ON(msg == NULL);
DBUG_ON(func == NULL);
@@ -2103,7 +2094,7 @@ xpc_pull_remote_msg(struct xpc_channel *
struct xpc_msg *remote_msg, *msg;
u32 msg_index, nmsgs;
u64 msg_offset;
- enum xpc_retval ret;
+ enum xp_retval ret;
if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
@@ -2133,12 +2124,13 @@ xpc_pull_remote_msg(struct xpc_channel *
msg_offset);
if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
- nmsgs * ch->msg_size)) != xpcSuccess) {
+ nmsgs * ch->msg_size)) != xpSuccess) {
dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
- " msg %ld from partition %d, channel=%d, "
- "ret=%d\n", nmsgs, ch->next_msg_to_pull,
- ch->partid, ch->number, ret);
+ " msg %" U64_ELL "d from partition %d, "
+ "channel=%d, ret=%d\n", nmsgs,
+ ch->next_msg_to_pull, ch->partid, ch->number,
+ ret);
XPC_DEACTIVATE_PARTITION(part, ret);
@@ -2191,8 +2183,8 @@ xpc_get_deliverable_msg(struct xpc_chann
if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) {
/* we got the entry referenced by get */
- dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, "
- "partid=%d, channel=%d\n", get + 1,
+ dev_dbg(xpc_chan, "w_local_GP.get changed to %" U64_ELL
+ "d, partid=%d, channel=%d\n", get + 1,
ch->partid, ch->number);
/* pull the message from the remote partition */
@@ -2234,18 +2226,18 @@ xpc_deliver_msg(struct xpc_channel *ch)
if (ch->func != NULL) {
dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, "
- "msg_number=%ld, partid=%d, channel=%d\n",
- (void *) msg, msg->number, ch->partid,
- ch->number);
+ "msg_number=%" U64_ELL "d, partid=%d, "
+ "channel=%d\n", (void *) msg, msg->number,
+ ch->partid, ch->number);
/* deliver the message to its intended recipient */
- ch->func(xpcMsgReceived, ch->partid, ch->number,
+ ch->func(xpMsgReceived, ch->partid, ch->number,
&msg->payload, ch->key);
dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, "
- "msg_number=%ld, partid=%d, channel=%d\n",
- (void *) msg, msg->number, ch->partid,
- ch->number);
+ "msg_number=%" U64_ELL "d, partid=%d, "
+ "channel=%d\n", (void *) msg, msg->number,
+ ch->partid, ch->number);
}
atomic_dec(&ch->kthreads_active);
@@ -2299,8 +2291,8 @@ xpc_acknowledge_msgs(struct xpc_channel
/* we just set the new value of local_GP->get */
- dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, "
- "channel=%d\n", get, ch->partid, ch->number);
+ dev_dbg(xpc_chan, "local_GP->get changed to %" U64_ELL "d, "
+ "partid=%d, channel=%d\n", get, ch->partid, ch->number);
send_IPI = (msg_flags & XPC_M_INTERRUPT);
@@ -2336,7 +2328,7 @@ xpc_acknowledge_msgs(struct xpc_channel
* xpc_initiate_allocate().
*/
void
-xpc_initiate_received(partid_t partid, int ch_number, void *payload)
+xpc_initiate_received(short partid, int ch_number, void *payload)
{
struct xpc_partition *part = &xpc_partitions[partid];
struct xpc_channel *ch;
@@ -2344,13 +2336,14 @@ xpc_initiate_received(partid_t partid, i
s64 get, msg_number = msg->number;
- DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+ DBUG_ON(partid < XP_MIN_PARTID || partid > XP_MAX_PARTID);
DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
ch = &part->channels[ch_number];
- dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
- (void *) msg, msg_number, ch->partid, ch->number);
+ dev_dbg(xpc_chan, "msg=0x%p, msg_number=%" U64_ELL "d, partid=%d, "
+ "channel=%d\n", (void *) msg, msg_number, ch->partid,
+ ch->number);
DBUG_ON((((u64) msg - (u64) ch->remote_msgqueue) / ch->msg_size) !=
msg_number % ch->remote_nentries);
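
A note on the openclose/msgrequest IPI plumbing above: it depends on the
8-bits-per-channel packing done by XPC_GET_IPI_FLAGS()/XPC_SET_IPI_FLAGS(),
where the low nibble of each byte carries openclose flags and bit 0x10 the
msgrequest flag. A standalone sketch of that packing (flag values here are
illustrative; the real encodings are the XPC_IPI_* definitions in xpc.h):

#include <stdio.h>
#include <stdint.h>

#define GET_IPI_FLAGS(amo, c)		((uint8_t)(((amo) >> ((c) * 8)) & 0xff))
#define SET_IPI_FLAGS(amo, c, f)	((amo) |= ((uint64_t)(f) << ((c) * 8)))
#define ANY_OPENCLOSE_SET(amo)		((amo) & 0x0f0f0f0f0f0f0f0fULL)
#define ANY_MSG_SET(amo)		((amo) & 0x1010101010101010ULL)

int main(void)
{
	uint64_t amo = 0;

	SET_IPI_FLAGS(amo, 0, 0x01);	/* an openclose-type flag, channel 0 */
	SET_IPI_FLAGS(amo, 5, 0x10);	/* a msgrequest-type flag, channel 5 */

	printf("amo = 0x%016llx\n", (unsigned long long)amo);
	printf("channel 5 flags = 0x%02x\n", GET_IPI_FLAGS(amo, 5));
	printf("openclose pending? %s\n", ANY_OPENCLOSE_SET(amo) ? "yes" : "no");
	printf("msgrequest pending? %s\n", ANY_MSG_SET(amo) ? "yes" : "no");
	return 0;
}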
Index: linux-2.6/drivers/misc/xp/xpc_main.c
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xpc_main.c 2008-03-25 13:47:59.962929996 -0500
+++ linux-2.6/drivers/misc/xp/xpc_main.c 2008-03-25 13:51:08.894456720 -0500
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
@@ -56,10 +56,16 @@
#include <linux/reboot.h>
#include <linux/completion.h>
#include <linux/kdebug.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/sn_sal.h>
+#if defined(CONFIG_IA64)
+#include <asm/sn/clksupport.h>
+#include <asm/sn/shub_mmr.h>
+#elif defined(CONFIG_X86_64)
+#define rtc_time() 1 /* will deal with this on X86_64 shortly */
+#else
+#error architecture is NOT supported
+#endif
#include <asm/uaccess.h>
-#include <asm/sn/xpc.h>
+#include "xpc.h"
/* define two XPC debug device structures to be used with dev_dbg() et al */
@@ -204,7 +210,7 @@ xpc_timeout_partition_disengage_request(
(void) xpc_partition_disengaged(part);
DBUG_ON(part->disengage_request_timeout != 0);
- DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0);
+ DBUG_ON(xpc_partition_engaged(XPC_PARTID(part)) != 0);
}
@@ -343,14 +349,14 @@ xpc_initiate_discovery(void *ignore)
* the XPC per partition variables from the remote partition and waiting for
* the remote partition to pull ours.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_make_first_contact(struct xpc_partition *part)
{
- enum xpc_retval ret;
+ enum xp_retval ret;
- while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) {
- if (ret != xpcRetry) {
+ while ((ret = xpc_pull_remote_vars_part(part)) != xpSuccess) {
+ if (ret != xpRetry) {
XPC_DEACTIVATE_PARTITION(part, ret);
return ret;
}
@@ -361,7 +367,7 @@ xpc_make_first_contact(struct xpc_partit
/* wait a 1/4 of a second or so */
(void) msleep_interruptible(250);
- if (part->act_state == XPC_P_DEACTIVATING) {
+ if (part->act_state == XPC_P_AS_DEACTIVATING) {
return part->reason;
}
}
@@ -385,7 +391,7 @@ xpc_make_first_contact(struct xpc_partit
static void
xpc_channel_mgr(struct xpc_partition *part)
{
- while (part->act_state != XPC_P_DEACTIVATING ||
+ while (part->act_state != XPC_P_AS_DEACTIVATING ||
atomic_read(&part->nchannels_active) > 0 ||
!xpc_partition_disengaged(part)) {
@@ -410,13 +416,10 @@ xpc_channel_mgr(struct xpc_partition *pa
(atomic_read(&part->channel_mgr_requests) > 0 ||
(volatile u64) part->local_IPI_amo != 0 ||
((volatile u8) part->act_state ==
- XPC_P_DEACTIVATING &&
+ XPC_P_AS_DEACTIVATING &&
atomic_read(&part->nchannels_active) == 0 &&
xpc_partition_disengaged(part))));
atomic_set(&part->channel_mgr_requests, 1);
-
- // >>> Does it need to wakeup periodically as well? In case we
- // >>> miscalculated the #of kthreads to wakeup or create?
}
}
@@ -443,7 +446,7 @@ xpc_partition_up(struct xpc_partition *p
dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part));
- if (xpc_setup_infrastructure(part) != xpcSuccess) {
+ if (xpc_setup_infrastructure(part) != xpSuccess) {
return;
}
@@ -456,7 +459,7 @@ xpc_partition_up(struct xpc_partition *p
(void) xpc_part_ref(part); /* this will always succeed */
- if (xpc_make_first_contact(part) == xpcSuccess) {
+ if (xpc_make_first_contact(part) == xpSuccess) {
xpc_channel_mgr(part);
}
@@ -469,30 +472,30 @@ xpc_partition_up(struct xpc_partition *p
static int
xpc_activating(void *__partid)
{
- partid_t partid = (u64) __partid;
+ short partid = (u64) __partid;
struct xpc_partition *part = &xpc_partitions[partid];
unsigned long irq_flags;
struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
int ret;
- DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+ DBUG_ON(partid < XP_MIN_PARTID || partid > XP_MAX_PARTID);
- spin_lock_irqsave(&part->act_lock, irq_flags);
+ spin_lock_irqsave(&part->lock, irq_flags);
- if (part->act_state == XPC_P_DEACTIVATING) {
- part->act_state = XPC_P_INACTIVE;
- spin_unlock_irqrestore(&part->act_lock, irq_flags);
+ if (part->act_state == XPC_P_AS_DEACTIVATING) {
+ part->act_state = XPC_P_AS_INACTIVE;
+ spin_unlock_irqrestore(&part->lock, irq_flags);
part->remote_rp_pa = 0;
return 0;
}
/* indicate the thread is activating */
- DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ);
- part->act_state = XPC_P_ACTIVATING;
+ DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ);
+ part->act_state = XPC_P_AS_ACTIVATING;
XPC_SET_REASON(part, 0, 0);
- spin_unlock_irqrestore(&part->act_lock, irq_flags);
+ spin_unlock_irqrestore(&part->lock, irq_flags);
dev_dbg(xpc_part, "bringing partition %d up\n", partid);
@@ -512,24 +515,20 @@ xpc_activating(void *__partid)
set_cpus_allowed(current, CPU_MASK_ALL);
/*
- * Register the remote partition's AMOs with SAL so it can handle
- * and cleanup errors within that address range should the remote
- * partition go down. We don't unregister this range because it is
- * difficult to tell when outstanding writes to the remote partition
- * are finished and thus when it is safe to unregister. This should
- * not result in wasted space in the SAL xp_addr_region table because
- * we should get the same page for remote_amos_page_pa after module
- * reloads and system reboots.
+ * Register the remote partition's AMOs so any errors within that
+ * address range can be handled and cleaned up should the remote
+ * partition go down.
*/
- if (sn_register_xp_addr_region(part->remote_amos_page_pa,
- PAGE_SIZE, 1) < 0) {
- dev_warn(xpc_part, "xpc_partition_up(%d) failed to register "
- "xp_addr region\n", partid);
-
- spin_lock_irqsave(&part->act_lock, irq_flags);
- part->act_state = XPC_P_INACTIVE;
- XPC_SET_REASON(part, xpcPhysAddrRegFailed, __LINE__);
- spin_unlock_irqrestore(&part->act_lock, irq_flags);
+ ret = xpc_register_remote_amos(part);
+ if (ret != xpSuccess) {
+ dev_warn(xpc_part, "xpc_activating() failed to register remote "
+ "AMOs for partition %d, ret=%d\n", partid,
+ ret);
+
+ spin_lock_irqsave(&part->lock, irq_flags);
+ part->act_state = XPC_P_AS_INACTIVE;
+ XPC_SET_REASON(part, xpPhysAddrRegFailed, __LINE__);
+ spin_unlock_irqrestore(&part->lock, irq_flags);
part->remote_rp_pa = 0;
return 0;
}
@@ -540,14 +539,16 @@ xpc_activating(void *__partid)
/*
* xpc_partition_up() holds this thread and marks this partition as
- * XPC_P_ACTIVE by calling xpc_hb_mark_active().
+ * XPC_P_AS_ACTIVE by calling xpc_hb_mark_active().
*/
- (void) xpc_partition_up(part);
+ (void)xpc_partition_up(part);
xpc_disallow_hb(partid, xpc_vars);
xpc_mark_partition_inactive(part);
- if (part->reason == xpcReactivating) {
+ xpc_unregister_remote_amos(part);
+
+ if (part->reason == xpReactivating) {
/* interrupting ourselves results in activating partition */
xpc_IPI_send_reactivate(part);
}
@@ -559,27 +560,27 @@ xpc_activating(void *__partid)
void
xpc_activate_partition(struct xpc_partition *part)
{
- partid_t partid = XPC_PARTID(part);
+ short partid = XPC_PARTID(part);
unsigned long irq_flags;
pid_t pid;
- spin_lock_irqsave(&part->act_lock, irq_flags);
+ spin_lock_irqsave(&part->lock, irq_flags);
- DBUG_ON(part->act_state != XPC_P_INACTIVE);
+ DBUG_ON(part->act_state != XPC_P_AS_INACTIVE);
- part->act_state = XPC_P_ACTIVATION_REQ;
- XPC_SET_REASON(part, xpcCloneKThread, __LINE__);
+ part->act_state = XPC_P_AS_ACTIVATION_REQ;
+ XPC_SET_REASON(part, xpCloneKThread, __LINE__);
- spin_unlock_irqrestore(&part->act_lock, irq_flags);
+ spin_unlock_irqrestore(&part->lock, irq_flags);
pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0);
if (unlikely(pid <= 0)) {
- spin_lock_irqsave(&part->act_lock, irq_flags);
- part->act_state = XPC_P_INACTIVE;
- XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__);
- spin_unlock_irqrestore(&part->act_lock, irq_flags);
+ spin_lock_irqsave(&part->lock, irq_flags);
+ part->act_state = XPC_P_AS_INACTIVE;
+ XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
+ spin_unlock_irqrestore(&part->lock, irq_flags);
}
}
@@ -588,7 +589,7 @@ xpc_activate_partition(struct xpc_partit
* Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
* partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
* than one partition, we use an AMO_t structure per partition to indicate
- * whether a partition has sent an IPI or not. >>> If it has, then wake up the
+ * whether a partition has sent an IPI or not. If it has, then wake up the
* associated kthread to handle it.
*
* All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
@@ -603,11 +604,11 @@ xpc_activate_partition(struct xpc_partit
irqreturn_t
xpc_notify_IRQ_handler(int irq, void *dev_id)
{
- partid_t partid = (partid_t) (u64) dev_id;
+ short partid = (short) (u64) dev_id;
struct xpc_partition *part = &xpc_partitions[partid];
- DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+ DBUG_ON(partid < XP_MIN_PARTID || partid > XP_MAX_PARTID);
if (xpc_part_ref(part)) {
xpc_check_for_channel_activity(part);
@@ -630,7 +631,7 @@ xpc_dropped_IPI_check(struct xpc_partiti
xpc_check_for_channel_activity(part);
part->dropped_IPI_timer.expires = jiffies +
- XPC_P_DROPPED_IPI_WAIT;
+ XPC_DROPPED_IPI_WAIT_INTERVAL;
add_timer(&part->dropped_IPI_timer);
xpc_part_deref(part);
}
@@ -664,7 +665,6 @@ xpc_activate_kthreads(struct xpc_channel
if (needed + assigned > ch->kthreads_assigned_limit) {
needed = ch->kthreads_assigned_limit - assigned;
- // >>>should never be less than 0
if (needed <= 0) {
return;
}
@@ -718,7 +718,7 @@ xpc_kthread_waitmsgs(struct xpc_partitio
static int
xpc_daemonize_kthread(void *args)
{
- partid_t partid = XPC_UNPACK_ARG1(args);
+ short partid = XPC_UNPACK_ARG1(args);
u16 ch_number = XPC_UNPACK_ARG2(args);
struct xpc_partition *part = &xpc_partitions[partid];
struct xpc_channel *ch;
@@ -775,7 +775,7 @@ xpc_daemonize_kthread(void *args)
ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
spin_unlock_irqrestore(&ch->lock, irq_flags);
- xpc_disconnect_callout(ch, xpcDisconnecting);
+ xpc_disconnect_callout(ch, xpDisconnecting);
spin_lock_irqsave(&ch->lock, irq_flags);
ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
@@ -856,8 +856,8 @@ xpc_create_kthreads(struct xpc_channel *
* then we'll deadlock if all other kthreads assigned
* to this channel are blocked in the channel's
* registerer, because the only thing that will unblock
- * them is the xpcDisconnecting callout that this
- * failed kernel_thread would have made.
+ * them is the xpDisconnecting callout that this failed
+ * kernel_thread would have made.
*/
if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
@@ -876,14 +876,12 @@ xpc_create_kthreads(struct xpc_channel *
* to function.
*/
spin_lock_irqsave(&ch->lock, irq_flags);
- XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
+ XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
&irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
}
break;
}
-
- ch->kthreads_created++; // >>> temporary debug only!!!
}
}
@@ -892,14 +890,14 @@ void
xpc_disconnect_wait(int ch_number)
{
unsigned long irq_flags;
- partid_t partid;
+ short partid;
struct xpc_partition *part;
struct xpc_channel *ch;
int wakeup_channel_mgr;
/* now wait for all callouts to the caller's function to cease */
- for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+ for (partid = XP_MIN_PARTID; partid <= XP_MAX_PARTID; partid++) {
part = &xpc_partitions[partid];
if (!xpc_part_ref(part)) {
@@ -920,7 +918,7 @@ xpc_disconnect_wait(int ch_number)
wakeup_channel_mgr = 0;
if (ch->delayed_IPI_flags) {
- if (part->act_state != XPC_P_DEACTIVATING) {
+ if (part->act_state != XPC_P_AS_DEACTIVATING) {
spin_lock(&part->IPI_lock);
XPC_SET_IPI_FLAGS(part->local_IPI_amo,
ch->number, ch->delayed_IPI_flags);
@@ -943,9 +941,9 @@ xpc_disconnect_wait(int ch_number)
static void
-xpc_do_exit(enum xpc_retval reason)
+xpc_do_exit(enum xp_retval reason)
{
- partid_t partid;
+ short partid;
int active_part_count, printed_waiting_msg = 0;
struct xpc_partition *part;
unsigned long printmsg_time, disengage_request_timeout = 0;
@@ -984,11 +982,13 @@ xpc_do_exit(enum xpc_retval reason)
do {
active_part_count = 0;
- for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+ for (partid = XP_MIN_PARTID; partid <= XP_MAX_PARTID;
+ partid++) {
part = &xpc_partitions[partid];
if (xpc_partition_disengaged(part) &&
- part->act_state == XPC_P_INACTIVE) {
+ part->act_state == XPC_P_AS_INACTIVE) {
+ xpc_unregister_remote_amos(part);
continue;
}
@@ -1003,7 +1003,7 @@ xpc_do_exit(enum xpc_retval reason)
}
}
- if (xpc_partition_engaged(-1UL)) {
+ if (xpc_any_partition_engaged()) {
if (time_after(jiffies, printmsg_time)) {
dev_info(xpc_part, "waiting for remote "
"partitions to disengage, timeout in "
@@ -1035,7 +1035,7 @@ xpc_do_exit(enum xpc_retval reason)
} while (1);
- DBUG_ON(xpc_partition_engaged(-1UL));
+ DBUG_ON(xpc_any_partition_engaged());
/* indicate to others that our reserved page is uninitialized */
@@ -1043,9 +1043,9 @@ xpc_do_exit(enum xpc_retval reason)
/* now it's time to eliminate our heartbeat */
del_timer_sync(&xpc_hb_timer);
- DBUG_ON(xpc_vars->heartbeating_to_mask != 0);
+ DBUG_ON(xpc_any_hbs_allowed(xpc_vars) != 0);
- if (reason == xpcUnloading) {
+ if (reason == xpUnloading) {
/* take ourselves off of the reboot_notifier_list */
(void) unregister_reboot_notifier(&xpc_reboot_notifier);
@@ -1054,7 +1054,8 @@ xpc_do_exit(enum xpc_retval reason)
}
/* close down protections for IPI operations */
- xpc_restrict_IPI_ops();
+ xp_disallow_IPI_ops();
+ xp_change_memprotect_shub_wars_1_1(XP_MEMPROT_DISALLOW_ALL);
/* clear the interface to XPC's functions */
@@ -1074,21 +1075,21 @@ xpc_do_exit(enum xpc_retval reason)
static int
xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
{
- enum xpc_retval reason;
+ enum xp_retval reason;
switch (event) {
case SYS_RESTART:
- reason = xpcSystemReboot;
+ reason = xpSystemReboot;
break;
case SYS_HALT:
- reason = xpcSystemHalt;
+ reason = xpSystemHalt;
break;
case SYS_POWER_OFF:
- reason = xpcSystemPoweroff;
+ reason = xpSystemPoweroff;
break;
default:
- reason = xpcSystemGoingDown;
+ reason = xpSystemGoingDown;
}
xpc_do_exit(reason);
@@ -1096,6 +1097,7 @@ xpc_system_reboot(struct notifier_block
}
+#ifdef CONFIG_IA64
/*
* Notify other partitions to disengage from all references to our memory.
*/
@@ -1103,29 +1105,28 @@ static void
xpc_die_disengage(void)
{
struct xpc_partition *part;
- partid_t partid;
- unsigned long engaged;
+ short partid;
long time, printmsg_time, disengage_request_timeout;
/* keep xpc_hb_checker thread from doing anything (just in case) */
xpc_exiting = 1;
- xpc_vars->heartbeating_to_mask = 0; /* indicate we're deactivated */
+ xpc_disallow_all_hbs(xpc_vars); /* indicate we're deactivated */
- for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+ for (partid = XP_MIN_PARTID; partid <= XP_MAX_PARTID; partid++) {
part = &xpc_partitions[partid];
if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->
remote_vars_version)) {
/* just in case it was left set by an earlier XPC */
- xpc_clear_partition_engaged(1UL << partid);
+ xpc_clear_partition_engaged(partid);
continue;
}
- if (xpc_partition_engaged(1UL << partid) ||
- part->act_state != XPC_P_INACTIVE) {
+ if (xpc_partition_engaged(partid) ||
+ part->act_state != XPC_P_AS_INACTIVE) {
xpc_request_partition_disengage(part);
xpc_mark_partition_disengaged(part);
xpc_IPI_send_disengage(part);
@@ -1134,23 +1135,23 @@ xpc_die_disengage(void)
time = rtc_time();
printmsg_time = time +
- (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
+ (XPC_DISENGAGE_PRINTMSG_INTERVAL * xp_rtc_cycles_per_second);
disengage_request_timeout = time +
- (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);
+ (xpc_disengage_request_timelimit * xp_rtc_cycles_per_second);
/* wait for all other partitions to disengage from us */
while (1) {
- engaged = xpc_partition_engaged(-1UL);
- if (!engaged) {
+ if (!xpc_any_partition_engaged()) {
dev_info(xpc_part, "all partitions have disengaged\n");
break;
}
time = rtc_time();
if (time >= disengage_request_timeout) {
- for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
- if (engaged & (1UL << partid)) {
+ for (partid = XP_MIN_PARTID; partid <= XP_MAX_PARTID;
+ partid++) {
+ if (xpc_partition_engaged(partid)) {
dev_info(xpc_part, "disengage from "
"remote partition %d timed "
"out\n", partid);
@@ -1163,13 +1164,14 @@ xpc_die_disengage(void)
dev_info(xpc_part, "waiting for remote partitions to "
"disengage, timeout in %ld seconds\n",
(disengage_request_timeout - time) /
- sn_rtc_cycles_per_second);
+ xp_rtc_cycles_per_second);
printmsg_time = time +
(XPC_DISENGAGE_PRINTMSG_INTERVAL *
- sn_rtc_cycles_per_second);
+ xp_rtc_cycles_per_second);
}
}
}
+#endif /* CONFIG_IA64 */
/*
@@ -1183,6 +1185,7 @@ xpc_die_disengage(void)
static int
xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
{
+#ifdef CONFIG_IA64 /* >>> will deal with notify_die events on X86_64 shortly */
switch (event) {
case DIE_MACHINE_RESTART:
case DIE_MACHINE_HALT:
@@ -1213,7 +1216,7 @@ xpc_system_die(struct notifier_block *nb
xpc_vars->heartbeat_offline = 0;
break;
}
-
+#endif /* CONFIG_IA64 */
return NOTIFY_DONE;
}
@@ -1222,23 +1225,21 @@ int __init
xpc_init(void)
{
int ret;
- partid_t partid;
+ short partid;
struct xpc_partition *part;
pid_t pid;
size_t buf_size;
-
- if (!ia64_platform_is("sn2")) {
+ if (is_shub()) {
+ /*
+ * The ia64-sn architecture supports at most 64 partitions, and
+ * the inability to unregister remote AMOs prevents us from ever
+ * supporting more than that on this architecture.
+ */
+ if (XP_NPARTITIONS != 64)
+ return -EINVAL;
+ } else if (!is_uv())
return -ENODEV;
- }
-
-
- buf_size = max(XPC_RP_VARS_SIZE,
- XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
- xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
- GFP_KERNEL, &xpc_remote_copy_buffer_base);
- if (xpc_remote_copy_buffer == NULL)
- return -ENOMEM;
snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");
@@ -1253,14 +1254,14 @@ xpc_init(void)
* ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
* PARTITION HAS BEEN ACTIVATED.
*/
- for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+ for (partid = XP_MIN_PARTID; partid <= XP_MAX_PARTID; partid++) {
part = &xpc_partitions[partid];
DBUG_ON((u64) part != L1_CACHE_ALIGN((u64) part));
part->act_IRQ_rcvd = 0;
- spin_lock_init(&part->act_lock);
- part->act_state = XPC_P_INACTIVE;
+ spin_lock_init(&part->lock);
+ part->act_state = XPC_P_AS_INACTIVE;
XPC_SET_REASON(part, 0, 0);
init_timer(&part->disengage_request_timer);
@@ -1268,7 +1269,7 @@ xpc_init(void)
xpc_timeout_partition_disengage_request;
part->disengage_request_timer.data = (unsigned long) part;
- part->setup_state = XPC_P_UNSET;
+ part->setup_state = XPC_P_SS_UNSET;
init_waitqueue_head(&part->teardown_wq);
atomic_set(&part->references, 0);
}
@@ -1277,7 +1278,8 @@ xpc_init(void)
* Open up protections for IPI operations (and AMO operations on
* Shub 1.1 systems).
*/
- xpc_allow_IPI_ops();
+ xp_allow_IPI_ops();
+ xp_change_memprotect_shub_wars_1_1(XP_MEMPROT_ALLOW_ALL);
/*
* Interrupts being processed will increment this atomic variable and
@@ -1297,13 +1299,13 @@ xpc_init(void)
dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
"errno=%d\n", -ret);
- xpc_restrict_IPI_ops();
+ xp_disallow_IPI_ops();
+ xp_change_memprotect_shub_wars_1_1(XP_MEMPROT_DISALLOW_ALL);
if (xpc_sysctl) {
unregister_sysctl_table(xpc_sysctl);
}
- kfree(xpc_remote_copy_buffer_base);
return -EBUSY;
}
@@ -1317,16 +1319,36 @@ xpc_init(void)
dev_err(xpc_part, "could not setup our reserved page\n");
free_irq(SGI_XPC_ACTIVATE, NULL);
- xpc_restrict_IPI_ops();
+ xp_disallow_IPI_ops();
+ xp_change_memprotect_shub_wars_1_1(XP_MEMPROT_DISALLOW_ALL);
if (xpc_sysctl) {
unregister_sysctl_table(xpc_sysctl);
}
- kfree(xpc_remote_copy_buffer_base);
return -EBUSY;
}
+ buf_size = max(XPC_RP_VARS_SIZE,
+ XPC_RP_HEADER_SIZE + xp_sizeof_nasid_mask);
+ xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
+ GFP_KERNEL, &xpc_remote_copy_buffer_base);
+ if (xpc_remote_copy_buffer == NULL) {
+ dev_err(xpc_part, "could not allocate remote copy buffer\n");
+
+ /* indicate to others that our reserved page is uninitialized */
+ xpc_rsvd_page->vars_pa = 0;
+
+ free_irq(SGI_XPC_ACTIVATE, NULL);
+ xp_disallow_IPI_ops();
+ xp_change_memprotect_shub_wars_1_1(XP_MEMPROT_DISALLOW_ALL);
+
+ if (xpc_sysctl) {
+ unregister_sysctl_table(xpc_sysctl);
+ }
+ return -ENOMEM;
+ }
+
/* add ourselves to the reboot_notifier_list */
ret = register_reboot_notifier(&xpc_reboot_notifier);
@@ -1362,7 +1384,8 @@ xpc_init(void)
del_timer_sync(&xpc_hb_timer);
free_irq(SGI_XPC_ACTIVATE, NULL);
- xpc_restrict_IPI_ops();
+ xp_disallow_IPI_ops();
+ xp_change_memprotect_shub_wars_1_1(XP_MEMPROT_DISALLOW_ALL);
if (xpc_sysctl) {
unregister_sysctl_table(xpc_sysctl);
@@ -1385,7 +1408,7 @@ xpc_init(void)
/* mark this new thread as a non-starter */
complete(&xpc_discovery_exited);
- xpc_do_exit(xpcUnloading);
+ xpc_do_exit(xpUnloading);
return -EBUSY;
}
@@ -1404,7 +1427,7 @@ module_init(xpc_init);
void __exit
xpc_exit(void)
{
- xpc_do_exit(xpcUnloading);
+ xpc_do_exit(xpUnloading);
}
module_exit(xpc_exit);
Index: linux-2.6/drivers/misc/xp/xpc_partition.c
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xpc_partition.c 2008-03-25 13:47:59.962929996 -0500
+++ linux-2.6/drivers/misc/xp/xpc_partition.c 2008-03-25 13:51:08.910458712 -0500
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
@@ -22,31 +22,21 @@
#include <linux/cache.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>
-#include <asm/uncached.h>
-#include <asm/sn/bte.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/xpc.h>
+#include "xpc.h"
+
+#if defined(CONFIG_IA64)
+#define xp_pa(_a) ia64_tpa(_a)
+#elif defined(CONFIG_X86_64)
+#define xp_pa(_a) __pa(_a)
+#else
+#error architecture is NOT supported
+#endif
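
For illustration, both forms of xp_pa() resolve a kernel virtual address to the physical address that remote partitions will target, e.g. when publishing the AMO page later in this file:

	/* usage sketch; amos_page as used in xpc_rsvd_page_init() below */
	u64 amos_page_pa = xp_pa((u64)amos_page);

The ia64 variant uses ia64_tpa() rather than __pa() presumably because addresses such as the uncached AMO page fall outside the cached identity-mapped region that __pa() assumes on ia64.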
/* XPC is exiting flag */
int xpc_exiting;
-/* SH_IPI_ACCESS shub register value on startup */
-static u64 xpc_sh1_IPI_access;
-static u64 xpc_sh2_IPI_access0;
-static u64 xpc_sh2_IPI_access1;
-static u64 xpc_sh2_IPI_access2;
-static u64 xpc_sh2_IPI_access3;
-
-
-/* original protection values for each node */
-u64 xpc_prot_vec[MAX_NUMNODES];
-
-
/* this partition's reserved page pointers */
struct xpc_rsvd_page *xpc_rsvd_page;
static u64 *xpc_part_nasids;
@@ -54,9 +44,6 @@ static u64 *xpc_mach_nasids;
struct xpc_vars *xpc_vars;
struct xpc_vars_part *xpc_vars_part;
-static int xp_nasid_mask_bytes; /* actual size in bytes of nasid mask */
-static int xp_nasid_mask_words; /* actual size in words of nasid mask */
-
/*
* For performance reasons, each entry of xpc_partitions[] is cacheline
@@ -64,7 +51,7 @@ static int xp_nasid_mask_words; /* actua
* end so that the last legitimate entry doesn't share its cacheline with
* another variable.
*/
-struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
+struct xpc_partition xpc_partitions[XP_NPARTITIONS + 1];
/*
@@ -108,57 +95,54 @@ xpc_kmalloc_cacheline_aligned(size_t siz
static u64
xpc_get_rsvd_page_pa(int nasid)
{
- bte_result_t bte_res;
- s64 status;
- u64 cookie = 0;
u64 rp_pa = nasid; /* seed with nasid */
- u64 len = 0;
+ enum xp_retval ret;
+ u64 cookie = 0;
+ size_t len = 0;
u64 buf = buf;
- u64 buf_len = 0;
+ size_t buf_len = 0;
void *buf_base = NULL;
while (1) {
- status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa,
- &len);
+ ret = xp_get_partition_rsvd_page_pa(buf, &cookie, &rp_pa, &len);
- dev_dbg(xpc_part, "SAL returned with status=%li, cookie="
- "0x%016lx, address=0x%016lx, len=0x%016lx\n",
- status, cookie, rp_pa, len);
+ dev_dbg(xpc_part, "SAL returned ret=%d cookie=0x%016" U64_ELL
+ "x, address=0x%016" U64_ELL "x len=0x%016lx\n", ret,
+ cookie, rp_pa, len);
- if (status != SALRET_MORE_PASSES) {
+ if (ret != xpNeedMoreInfo) {
break;
}
if (L1_CACHE_ALIGN(len) > buf_len) {
kfree(buf_base);
buf_len = L1_CACHE_ALIGN(len);
- buf = (u64) xpc_kmalloc_cacheline_aligned(buf_len,
+ buf = (u64)xpc_kmalloc_cacheline_aligned(buf_len,
GFP_KERNEL, &buf_base);
if (buf_base == NULL) {
dev_err(xpc_part, "unable to kmalloc "
"len=0x%016lx\n", buf_len);
- status = SALRET_ERROR;
+ ret = xpNoMemory;
break;
}
}
- bte_res = xp_bte_copy(rp_pa, buf, buf_len,
- (BTE_NOTIFY | BTE_WACQUIRE), NULL);
- if (bte_res != BTE_SUCCESS) {
- dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
- status = SALRET_ERROR;
+ ret = xp_remote_memcpy((void *)buf, (void *)rp_pa, buf_len);
+ if (ret != xpSuccess) {
+ dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
break;
}
}
kfree(buf_base);
- if (status != SALRET_OK) {
+ if (ret != xpSuccess) {
rp_pa = 0;
}
- dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
+ dev_dbg(xpc_part, "reserved page at phys address 0x%016" U64_ELL "x\n",
+ rp_pa);
return rp_pa;
}
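
U64_ELL is not defined in this excerpt; presumably xp.h supplies the printk length modifier for u64, which differs between the two supported architectures. A hypothetical sketch of such a definition:

	/* hypothetical; the real definition is assumed to live in xp.h */
	#if defined(CONFIG_IA64)
	#define U64_ELL	"l"		/* u64 is unsigned long */
	#else
	#define U64_ELL	"ll"		/* u64 is unsigned long long */
	#endif

so that, e.g., "0x%016" U64_ELL "x" expands to a format string valid for a u64 argument on either architecture.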
@@ -172,15 +156,21 @@ struct xpc_rsvd_page *
xpc_rsvd_page_init(void)
{
struct xpc_rsvd_page *rp;
- AMO_t *amos_page;
- u64 rp_pa, nasid_array = 0;
- int i, ret;
+ int n_amos;
+ u64 *amos_page;
+ u64 rp_pa;
+ int i;
+ u64 nasid_array = 0;
+ int activate_irq_amos;
+ int engaged_partitions_amos;
+ int disengage_request_amos;
+ int ret;
/* get the local reserved page's address */
preempt_disable();
- rp_pa = xpc_get_rsvd_page_pa(cpuid_to_nasid(smp_processor_id()));
+ rp_pa = xpc_get_rsvd_page_pa(xp_cpu_to_nasid(smp_processor_id()));
preempt_enable();
if (rp_pa == 0) {
dev_err(xpc_part, "SAL failed to locate the reserved page\n");
@@ -188,21 +178,14 @@ xpc_rsvd_page_init(void)
}
rp = (struct xpc_rsvd_page *) __va(rp_pa);
- if (rp->partid != sn_partition_id) {
- dev_err(xpc_part, "the reserved page's partid of %d should be "
- "%d\n", rp->partid, sn_partition_id);
- return NULL;
- }
-
rp->version = XPC_RP_VERSION;
/* establish the actual sizes of the nasid masks */
if (rp->SAL_version == 1) {
- /* SAL_version 1 didn't set the nasids_size field */
- rp->nasids_size = 128;
+ /* SAL_version 1 didn't set the SAL_nasids_size field */
+ rp->SAL_nasids_size = 128;
}
- xp_nasid_mask_bytes = rp->nasids_size;
- xp_nasid_mask_words = xp_nasid_mask_bytes / 8;
+ xp_sizeof_nasid_mask = rp->SAL_nasids_size;
/* setup the pointers to the various items in the reserved page */
xpc_part_nasids = XPC_RP_PART_NASIDS(rp);
@@ -211,78 +194,70 @@ xpc_rsvd_page_init(void)
xpc_vars_part = XPC_RP_VARS_PART(rp);
/*
- * Before clearing xpc_vars, see if a page of AMOs had been previously
- * allocated. If not we'll need to allocate one and set permissions
- * so that cross-partition AMOs are allowed.
+ * Before clearing xpc_vars, see if a page (or pages) of AMOs had been
+ * previously allocated. If not, we'll need to allocate one (or more)
+ * and set permissions so that cross-partition AMOs are allowed.
*
- * The allocated AMO page needs MCA reporting to remain disabled after
+ * The allocated AMO page(s) need MCA reporting to remain disabled after
* XPC has unloaded. To make this work, we keep a copy of the pointer
- * to this page (i.e., amos_page) in the struct xpc_vars structure,
- * which is pointed to by the reserved page, and re-use that saved copy
- * on subsequent loads of XPC. This AMO page is never freed, and its
- * memory protections are never restricted.
+ * to this page (or pages) in the struct xpc_vars structure (i.e.,
+ * amos_page), which is pointed to by the reserved page, and re-use
+ * that saved copy on subsequent loads of XPC. This AMO page is never
+ * freed, and its memory protections are never restricted.
*/
if ((amos_page = xpc_vars->amos_page) == NULL) {
- amos_page = (AMO_t *) TO_AMO(uncached_alloc_page(0));
+ n_amos = xpc_number_of_amos(XP_NPARTITIONS);
+ amos_page = xp_alloc_amos(n_amos);
if (amos_page == NULL) {
dev_err(xpc_part, "can't allocate page of AMOs\n");
return NULL;
}
/*
- * Open up AMO-R/W to cpu. This is done for Shub 1.1 systems
- * when xpc_allow_IPI_ops() is called via xpc_hb_init().
- */
- if (!enable_shub_wars_1_1()) {
- ret = sn_change_memprotect(ia64_tpa((u64) amos_page),
- PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1,
- &nasid_array);
- if (ret != 0) {
- dev_err(xpc_part, "can't change memory "
- "protections\n");
- uncached_free_page(__IA64_UNCACHED_OFFSET |
- TO_PHYS((u64) amos_page));
- return NULL;
- }
- }
- } else if (!IS_AMO_ADDRESS((u64) amos_page)) {
- /*
- * EFI's XPBOOT can also set amos_page in the reserved page,
- * but it happens to leave it as an uncached physical address
- * and we need it to be an uncached virtual, so we'll have to
- * convert it.
+ * Open up AMO-R/W to cpu. This is done for Shub 1.1 systems
+ * when xp_allow_IPI_ops() is called via xpc_init().
*/
- if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) {
- dev_err(xpc_part, "previously used amos_page address "
- "is bad = 0x%p\n", (void *) amos_page);
+ ret = xp_change_memprotect(xp_pa((u64)amos_page),
+ n_amos * xp_sizeof_amo,
+ XP_MEMPROT_ALLOW_CPU_AMO,
+ &nasid_array);
+ if (ret != xpSuccess) {
+ dev_err(xpc_part, "can't change memory protections\n");
+ xp_free_amos(amos_page, n_amos);
return NULL;
}
- amos_page = (AMO_t *) TO_AMO((u64) amos_page);
}
/* clear xpc_vars */
memset(xpc_vars, 0, sizeof(struct xpc_vars));
xpc_vars->version = XPC_V_VERSION;
- xpc_vars->act_nasid = cpuid_to_nasid(0);
+ xpc_vars->partid = xp_partition_id;
+ xpc_vars->npartitions = XP_NPARTITIONS;
+ xpc_vars->act_nasid = xp_cpu_to_nasid(0);
xpc_vars->act_phys_cpuid = cpu_physical_id(0);
xpc_vars->vars_part_pa = __pa(xpc_vars_part);
- xpc_vars->amos_page_pa = ia64_tpa((u64) amos_page);
+ xpc_vars->amos_page_pa = xp_pa((u64)amos_page);
xpc_vars->amos_page = amos_page; /* save for next load of XPC */
/* clear xpc_vars_part */
- memset((u64 *) xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
- XP_MAX_PARTITIONS);
+ memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
+ XP_NPARTITIONS);
/* initialize the activate IRQ related AMO variables */
- for (i = 0; i < xp_nasid_mask_words; i++) {
- (void) xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);
+ activate_irq_amos = xpc_activate_irq_amos(XP_NPARTITIONS);
+ for (i = 0; i < xp_nasid_mask_words(); i++) {
+ (void)xpc_IPI_init(activate_irq_amos + i);
}
/* initialize the engaged remote partitions related AMO variables */
- (void) xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
- (void) xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);
+ engaged_partitions_amos = xpc_engaged_partitions_amos(XP_NPARTITIONS);
+ disengage_request_amos = xpc_disengage_request_amos(XP_NPARTITIONS);
+ for (i = 0; i < xp_partid_mask_words(XP_NPARTITIONS); i++) {
+ (void)xpc_IPI_init(engaged_partitions_amos + i);
+ (void)xpc_IPI_init(disengage_request_amos + i);
+ }
/* timestamp of when reserved page was setup by XPC */
rp->stamp = CURRENT_TIME;
@@ -298,118 +273,6 @@ xpc_rsvd_page_init(void)
/*
- * Change protections to allow IPI operations (and AMO operations on
- * Shub 1.1 systems).
- */
-void
-xpc_allow_IPI_ops(void)
-{
- int node;
- int nasid;
-
-
- // >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
-
- if (is_shub2()) {
- xpc_sh2_IPI_access0 =
- (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
- xpc_sh2_IPI_access1 =
- (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
- xpc_sh2_IPI_access2 =
- (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
- xpc_sh2_IPI_access3 =
- (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
-
- for_each_online_node(node) {
- nasid = cnodeid_to_nasid(node);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
- -1UL);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
- -1UL);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
- -1UL);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
- -1UL);
- }
-
- } else {
- xpc_sh1_IPI_access =
- (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
-
- for_each_online_node(node) {
- nasid = cnodeid_to_nasid(node);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
- -1UL);
-
- /*
- * Since the BIST collides with memory operations on
- * SHUB 1.1 sn_change_memprotect() cannot be used.
- */
- if (enable_shub_wars_1_1()) {
- /* open up everything */
- xpc_prot_vec[node] = (u64) HUB_L((u64 *)
- GLOBAL_MMR_ADDR(nasid,
- SH1_MD_DQLP_MMR_DIR_PRIVEC0));
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
- SH1_MD_DQLP_MMR_DIR_PRIVEC0),
- -1UL);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
- SH1_MD_DQRP_MMR_DIR_PRIVEC0),
- -1UL);
- }
- }
- }
-}
-
-
-/*
- * Restrict protections to disallow IPI operations (and AMO operations on
- * Shub 1.1 systems).
- */
-void
-xpc_restrict_IPI_ops(void)
-{
- int node;
- int nasid;
-
-
- // >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
-
- if (is_shub2()) {
-
- for_each_online_node(node) {
- nasid = cnodeid_to_nasid(node);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
- xpc_sh2_IPI_access0);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
- xpc_sh2_IPI_access1);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
- xpc_sh2_IPI_access2);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
- xpc_sh2_IPI_access3);
- }
-
- } else {
-
- for_each_online_node(node) {
- nasid = cnodeid_to_nasid(node);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
- xpc_sh1_IPI_access);
-
- if (enable_shub_wars_1_1()) {
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
- SH1_MD_DQLP_MMR_DIR_PRIVEC0),
- xpc_prot_vec[node]);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
- SH1_MD_DQRP_MMR_DIR_PRIVEC0),
- xpc_prot_vec[node]);
- }
- }
- }
-}
-
-
-/*
* At periodic intervals, scan through all active partitions and ensure
* their heartbeat is still active. If not, the partition is deactivated.
*/
@@ -418,51 +281,49 @@ xpc_check_remote_hb(void)
{
struct xpc_vars *remote_vars;
struct xpc_partition *part;
- partid_t partid;
- bte_result_t bres;
+ short partid;
+ enum xp_retval ret;
remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;
- for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+ for (partid = XP_MIN_PARTID; partid <= XP_MAX_PARTID; partid++) {
if (xpc_exiting) {
break;
}
- if (partid == sn_partition_id) {
+ if (partid == xp_partition_id) {
continue;
}
part = &xpc_partitions[partid];
- if (part->act_state == XPC_P_INACTIVE ||
- part->act_state == XPC_P_DEACTIVATING) {
+ if (part->act_state == XPC_P_AS_INACTIVE ||
+ part->act_state == XPC_P_AS_DEACTIVATING) {
continue;
}
/* pull the remote_hb cache line */
- bres = xp_bte_copy(part->remote_vars_pa,
- (u64) remote_vars,
- XPC_RP_VARS_SIZE,
- (BTE_NOTIFY | BTE_WACQUIRE), NULL);
- if (bres != BTE_SUCCESS) {
- XPC_DEACTIVATE_PARTITION(part,
- xpc_map_bte_errors(bres));
+ ret = xp_remote_memcpy(remote_vars,
+ (void *)part->remote_vars_pa,
+ XPC_RP_VARS_SIZE);
+ if (ret != xpSuccess) {
+ XPC_DEACTIVATE_PARTITION(part, ret);
continue;
}
- dev_dbg(xpc_part, "partid = %d, heartbeat = %ld, last_heartbeat"
- " = %ld, heartbeat_offline = %ld, HB_mask = 0x%lx\n",
- partid, remote_vars->heartbeat, part->last_heartbeat,
- remote_vars->heartbeat_offline,
- remote_vars->heartbeating_to_mask);
+ dev_dbg(xpc_part, "partid = %d, heartbeat = %" U64_ELL "d, "
+ "last_heartbeat = %" U64_ELL "d, heartbeat_offline = %"
+ U64_ELL "d\n", partid,
+ remote_vars->heartbeat, part->last_heartbeat,
+ remote_vars->heartbeat_offline);
if (((remote_vars->heartbeat == part->last_heartbeat) &&
(remote_vars->heartbeat_offline == 0)) ||
- !xpc_hb_allowed(sn_partition_id, remote_vars)) {
+ !xpc_hb_allowed(xp_partition_id, remote_vars)) {
- XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat);
+ XPC_DEACTIVATE_PARTITION(part, xpNoHeartbeat);
continue;
}
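
As a sketch, the deactivation test above reduces to a single predicate: the remote heartbeat counter has not advanced while the remote side has not declared itself intentionally offline, or it is no longer heartbeating to our partid at all:

	/* sketch of the staleness test performed above */
	static inline int example_hb_stale(struct xpc_vars *rv, u64 last_hb)
	{
		return (rv->heartbeat == last_hb &&
			rv->heartbeat_offline == 0) ||
		       !xpc_hb_allowed(xp_partition_id, rv);
	}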
@@ -478,27 +339,27 @@ xpc_check_remote_hb(void)
* is large enough to contain a copy of their reserved page header and
* part_nasids mask.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa)
{
- int bres, i;
+ int i;
+ enum xp_retval ret;
/* get the reserved page's physical address */
*remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
if (*remote_rp_pa == 0) {
- return xpcNoRsvdPageAddr;
+ return xpNoRsvdPageAddr;
}
/* pull over the reserved page header and part_nasids mask */
- bres = xp_bte_copy(*remote_rp_pa, (u64) remote_rp,
- XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
- (BTE_NOTIFY | BTE_WACQUIRE), NULL);
- if (bres != BTE_SUCCESS) {
- return xpc_map_bte_errors(bres);
+ ret = xp_remote_memcpy(remote_rp, (void *)*remote_rp_pa,
+ XPC_RP_HEADER_SIZE + xp_sizeof_nasid_mask);
+ if (ret != xpSuccess) {
+ return ret;
}
@@ -506,30 +367,17 @@ xpc_get_remote_rp(int nasid, u64 *discov
u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp);
- for (i = 0; i < xp_nasid_mask_words; i++) {
+ for (i = 0; i < xp_nasid_mask_words(); i++) {
discovered_nasids[i] |= remote_part_nasids[i];
}
}
-
- /* check that the partid is for another partition */
-
- if (remote_rp->partid < 1 ||
- remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
- return xpcInvalidPartid;
- }
-
- if (remote_rp->partid == sn_partition_id) {
- return xpcLocalPartid;
- }
-
-
if (XPC_VERSION_MAJOR(remote_rp->version) !=
XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
- return xpcBadVersion;
+ return xpBadVersion;
}
- return xpcSuccess;
+ return xpSuccess;
}
@@ -539,29 +387,35 @@ xpc_get_remote_rp(int nasid, u64 *discov
* remote_vars points to a buffer that is cacheline aligned for BTE copies and
* assumed to be of size XPC_RP_VARS_SIZE.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
{
- int bres;
-
+ enum xp_retval ret;
if (remote_vars_pa == 0) {
- return xpcVarsNotSet;
+ return xpVarsNotSet;
}
/* pull over the cross partition variables */
- bres = xp_bte_copy(remote_vars_pa, (u64) remote_vars, XPC_RP_VARS_SIZE,
- (BTE_NOTIFY | BTE_WACQUIRE), NULL);
- if (bres != BTE_SUCCESS) {
- return xpc_map_bte_errors(bres);
+ ret = xp_remote_memcpy(remote_vars, (void *)remote_vars_pa,
+ XPC_RP_VARS_SIZE);
+ if (ret != xpSuccess) {
+ return ret;
}
if (XPC_VERSION_MAJOR(remote_vars->version) !=
XPC_VERSION_MAJOR(XPC_V_VERSION)) {
- return xpcBadVersion;
+ return xpBadVersion;
}
- return xpcSuccess;
+ /* check that the partid is for another partition */
+ if (remote_vars->partid < XP_MIN_PARTID ||
+ remote_vars->partid > XP_MAX_PARTID)
+ return xpInvalidPartid;
+ if (remote_vars->partid == xp_partition_id)
+ return xpLocalPartid;
+
+ return xpSuccess;
}
@@ -582,18 +436,23 @@ xpc_update_partition_info(struct xpc_par
part->remote_rp_stamp.tv_sec, part->remote_rp_stamp.tv_nsec);
part->remote_rp_pa = remote_rp_pa;
- dev_dbg(xpc_part, " remote_rp_pa = 0x%016lx\n", part->remote_rp_pa);
+ dev_dbg(xpc_part, " remote_rp_pa = 0x%016" U64_ELL "x\n",
+ part->remote_rp_pa);
+
+ part->remote_npartitions = remote_vars->npartitions;
+ dev_dbg(xpc_part, " remote_npartitions = %d\n",
+ part->remote_npartitions);
part->remote_vars_pa = remote_vars_pa;
- dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n",
+ dev_dbg(xpc_part, " remote_vars_pa = 0x%016" U64_ELL "x\n",
part->remote_vars_pa);
part->last_heartbeat = remote_vars->heartbeat;
- dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n",
+ dev_dbg(xpc_part, " last_heartbeat = 0x%016" U64_ELL "x\n",
part->last_heartbeat);
part->remote_vars_part_pa = remote_vars->vars_part_pa;
- dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n",
+ dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016" U64_ELL "x\n",
part->remote_vars_part_pa);
part->remote_act_nasid = remote_vars->act_nasid;
@@ -605,7 +464,7 @@ xpc_update_partition_info(struct xpc_par
part->remote_act_phys_cpuid);
part->remote_amos_page_pa = remote_vars->amos_page_pa;
- dev_dbg(xpc_part, " remote_amos_page_pa = 0x%lx\n",
+ dev_dbg(xpc_part, " remote_amos_page_pa = 0x%" U64_ELL "x\n",
part->remote_amos_page_pa);
part->remote_vars_version = remote_vars->version;
@@ -639,9 +498,9 @@ xpc_identify_act_IRQ_req(int nasid)
int reactivate = 0;
int stamp_diff;
struct timespec remote_rp_stamp = { 0, 0 };
- partid_t partid;
+ short partid;
struct xpc_partition *part;
- enum xpc_retval ret;
+ enum xp_retval ret;
/* pull over the reserved page structure */
@@ -649,7 +508,7 @@ xpc_identify_act_IRQ_req(int nasid)
remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer;
ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
- if (ret != xpcSuccess) {
+ if (ret != xpSuccess) {
dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
"which sent interrupt, reason=%d\n", nasid, ret);
return;
@@ -660,40 +519,36 @@ xpc_identify_act_IRQ_req(int nasid)
if (XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
remote_rp_stamp = remote_rp->stamp;
}
- partid = remote_rp->partid;
- part = &xpc_partitions[partid];
-
/* pull over the cross partition variables */
remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;
ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
- if (ret != xpcSuccess) {
-
+ if (ret != xpSuccess) {
dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
"which sent interrupt, reason=%d\n", nasid, ret);
-
- XPC_DEACTIVATE_PARTITION(part, ret);
return;
}
+ partid = remote_vars->partid;
+ part = &xpc_partitions[partid];
part->act_IRQ_rcvd++;
dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
- "%ld:0x%lx\n", (int) nasid, (int) partid, part->act_IRQ_rcvd,
- remote_vars->heartbeat, remote_vars->heartbeating_to_mask);
+ "%" U64_ELL "d\n", (int) nasid, (int) partid,
+ part->act_IRQ_rcvd, remote_vars->heartbeat);
if (xpc_partition_disengaged(part) &&
- part->act_state == XPC_P_INACTIVE) {
+ part->act_state == XPC_P_AS_INACTIVE) {
xpc_update_partition_info(part, remote_rp_version,
&remote_rp_stamp, remote_rp_pa,
remote_vars_pa, remote_vars);
if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
- if (xpc_partition_disengage_requested(1UL << partid)) {
+ if (xpc_partition_disengage_requested(partid)) {
/*
* Other side is waiting on us to disengage,
* even though we already have.
@@ -702,7 +557,7 @@ xpc_identify_act_IRQ_req(int nasid)
}
} else {
/* other side doesn't support disengage requests */
- xpc_clear_partition_disengage_request(1UL << partid);
+ xpc_clear_partition_disengage_request(partid);
}
xpc_activate_partition(part);
@@ -722,7 +577,7 @@ xpc_identify_act_IRQ_req(int nasid)
/* see if the other side rebooted */
if (part->remote_amos_page_pa ==
remote_vars->amos_page_pa &&
- xpc_hb_allowed(sn_partition_id,
+ xpc_hb_allowed(xp_partition_id,
remote_vars)) {
/* doesn't look that way, so ignore the IPI */
return;
@@ -738,7 +593,7 @@ xpc_identify_act_IRQ_req(int nasid)
&remote_rp_stamp, remote_rp_pa,
remote_vars_pa, remote_vars);
part->reactivate_nasid = nasid;
- XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
+ XPC_DEACTIVATE_PARTITION(part, xpReactivating);
return;
}
@@ -752,8 +607,8 @@ xpc_identify_act_IRQ_req(int nasid)
* disengage request, but the new one doesn't.
*/
- xpc_clear_partition_engaged(1UL << partid);
- xpc_clear_partition_disengage_request(1UL << partid);
+ xpc_clear_partition_engaged(partid);
+ xpc_clear_partition_disengage_request(partid);
xpc_update_partition_info(part, remote_rp_version,
&remote_rp_stamp, remote_rp_pa,
@@ -773,9 +628,8 @@ xpc_identify_act_IRQ_req(int nasid)
* the disengage request, as does the new one.
*/
- DBUG_ON(xpc_partition_engaged(1UL << partid));
- DBUG_ON(xpc_partition_disengage_requested(1UL <<
- partid));
+ DBUG_ON(xpc_partition_engaged(partid));
+ DBUG_ON(xpc_partition_disengage_requested(partid));
xpc_update_partition_info(part, remote_rp_version,
&remote_rp_stamp, remote_rp_pa,
@@ -792,11 +646,11 @@ xpc_identify_act_IRQ_req(int nasid)
if (reactivate) {
part->reactivate_nasid = nasid;
- XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
+ XPC_DEACTIVATE_PARTITION(part, xpReactivating);
} else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) &&
- xpc_partition_disengage_requested(1UL << partid)) {
- XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown);
+ xpc_partition_disengage_requested(partid)) {
+ XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown);
}
}
@@ -811,50 +665,54 @@ xpc_identify_act_IRQ_req(int nasid)
int
xpc_identify_act_IRQ_sender(void)
{
- int word, bit;
+ enum xp_retval ret;
+ int w_index, b_index;
+ u64 *amo_va;
u64 nasid_mask;
u64 nasid; /* remote nasid */
int n_IRQs_detected = 0;
- AMO_t *act_amos;
-
- act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS;
+ amo_va = (u64 *)((u64)xpc_vars->amos_page +
+ xpc_activate_irq_amos(xpc_vars->npartitions) *
+ xp_sizeof_amo);
- /* scan through act AMO variable looking for non-zero entries */
- for (word = 0; word < xp_nasid_mask_words; word++) {
+ /* scan through activation AMO variables looking for non-zero entries */
+ for (w_index = 0; w_index < xp_nasid_mask_words(); w_index++) {
if (xpc_exiting) {
break;
}
- nasid_mask = xpc_IPI_receive(&act_amos[word]);
+ ret = xp_get_amo(amo_va, XP_AMO_CLEAR, &nasid_mask);
+ BUG_ON(ret != xpSuccess); /* should never happen */
+ amo_va = (u64 *)((u64)amo_va + xp_sizeof_amo); /* next amo */
if (nasid_mask == 0) {
/* no IRQs from nasids in this variable */
continue;
}
- dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word,
- nasid_mask);
+ dev_dbg(xpc_part, "AMO[%d] gave back 0x%" U64_ELL "x\n",
+ w_index, nasid_mask);
/*
- * If this nasid has been added to the machine since
- * our partition was reset, this will retain the
- * remote nasid in our reserved pages machine mask.
+ * If any nasid(s) in the mask have been added to the machine
+ * since our partition was reset, this will retain the
+ * remote nasid(s) in our reserved page's machine mask.
* This is used in the event of module reload.
*/
- xpc_mach_nasids[word] |= nasid_mask;
+ xpc_mach_nasids[w_index] |= nasid_mask;
/* locate the nasid(s) which sent interrupts */
- for (bit = 0; bit < (8 * sizeof(u64)); bit++) {
- if (nasid_mask & (1UL << bit)) {
+ for (b_index = 0; b_index < BITS_PER_LONG; b_index++) {
+ if (nasid_mask & (1UL << b_index)) {
n_IRQs_detected++;
- nasid = XPC_NASID_FROM_W_B(word, bit);
- dev_dbg(xpc_part, "interrupt from nasid %ld\n",
- nasid);
+ nasid = (w_index * BITS_PER_LONG + b_index) * 2;
+ dev_dbg(xpc_part, "interrupt from nasid %"
+ U64_ELL "d\n", nasid);
xpc_identify_act_IRQ_req(nasid);
}
}
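
A sketch of the word/bit to nasid mapping used above: each u64 AMO variable covers BITS_PER_LONG nasids, and because only even-numbered nasids exist on these systems the bit position is doubled:

	/* mirrors the nasid computation in the loop above */
	static inline u64 example_nasid_from_amo_bit(int w_index, int b_index)
	{
		return ((u64)w_index * BITS_PER_LONG + b_index) * 2;
	}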
@@ -870,11 +728,11 @@ xpc_identify_act_IRQ_sender(void)
int
xpc_partition_disengaged(struct xpc_partition *part)
{
- partid_t partid = XPC_PARTID(part);
+ short partid = XPC_PARTID(part);
int disengaged;
- disengaged = (xpc_partition_engaged(1UL << partid) == 0);
+ disengaged = (xpc_partition_engaged(partid) == 0);
if (part->disengage_request_timeout) {
if (!disengaged) {
if (jiffies < part->disengage_request_timeout) {
@@ -890,7 +748,7 @@ xpc_partition_disengaged(struct xpc_part
dev_info(xpc_part, "disengage from remote partition %d "
"timed out\n", partid);
xpc_disengage_request_timedout = 1;
- xpc_clear_partition_engaged(1UL << partid);
+ xpc_clear_partition_engaged(partid);
disengaged = 1;
}
part->disengage_request_timeout = 0;
@@ -901,9 +759,9 @@ xpc_partition_disengaged(struct xpc_part
disengage_request_timer);
}
- DBUG_ON(part->act_state != XPC_P_DEACTIVATING &&
- part->act_state != XPC_P_INACTIVE);
- if (part->act_state != XPC_P_INACTIVE) {
+ DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING &&
+ part->act_state != XPC_P_AS_INACTIVE);
+ if (part->act_state != XPC_P_AS_INACTIVE) {
xpc_wakeup_channel_mgr(part);
}
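
The deadline test above uses a raw comparison against jiffies; as a sketch, the equivalent wraparound-safe form would be:

	/* sketch; time_before() is the idiomatic jiffies comparison */
	static inline int
	example_disengage_timed_out(struct xpc_partition *part)
	{
		return part->disengage_request_timeout != 0 &&
		       !time_before(jiffies, part->disengage_request_timeout);
	}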
@@ -918,24 +776,24 @@ xpc_partition_disengaged(struct xpc_part
/*
* Mark specified partition as active.
*/
-enum xpc_retval
+enum xp_retval
xpc_mark_partition_active(struct xpc_partition *part)
{
unsigned long irq_flags;
- enum xpc_retval ret;
+ enum xp_retval ret;
dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
- spin_lock_irqsave(&part->act_lock, irq_flags);
- if (part->act_state == XPC_P_ACTIVATING) {
- part->act_state = XPC_P_ACTIVE;
- ret = xpcSuccess;
+ spin_lock_irqsave(&part->lock, irq_flags);
+ if (part->act_state == XPC_P_AS_ACTIVATING) {
+ part->act_state = XPC_P_AS_ACTIVE;
+ ret = xpSuccess;
} else {
- DBUG_ON(part->reason == xpcSuccess);
+ DBUG_ON(part->reason == xpSuccess);
ret = part->reason;
}
- spin_unlock_irqrestore(&part->act_lock, irq_flags);
+ spin_unlock_irqrestore(&part->lock, irq_flags);
return ret;
}
@@ -946,35 +804,35 @@ xpc_mark_partition_active(struct xpc_par
*/
void
xpc_deactivate_partition(const int line, struct xpc_partition *part,
- enum xpc_retval reason)
+ enum xp_retval reason)
{
unsigned long irq_flags;
- spin_lock_irqsave(&part->act_lock, irq_flags);
+ spin_lock_irqsave(&part->lock, irq_flags);
- if (part->act_state == XPC_P_INACTIVE) {
+ if (part->act_state == XPC_P_AS_INACTIVE) {
XPC_SET_REASON(part, reason, line);
- spin_unlock_irqrestore(&part->act_lock, irq_flags);
- if (reason == xpcReactivating) {
+ spin_unlock_irqrestore(&part->lock, irq_flags);
+ if (reason == xpReactivating) {
/* we interrupt ourselves to reactivate partition */
xpc_IPI_send_reactivate(part);
}
return;
}
- if (part->act_state == XPC_P_DEACTIVATING) {
- if ((part->reason == xpcUnloading && reason != xpcUnloading) ||
- reason == xpcReactivating) {
+ if (part->act_state == XPC_P_AS_DEACTIVATING) {
+ if ((part->reason == xpUnloading && reason != xpUnloading) ||
+ reason == xpReactivating) {
XPC_SET_REASON(part, reason, line);
}
- spin_unlock_irqrestore(&part->act_lock, irq_flags);
+ spin_unlock_irqrestore(&part->lock, irq_flags);
return;
}
- part->act_state = XPC_P_DEACTIVATING;
+ part->act_state = XPC_P_AS_DEACTIVATING;
XPC_SET_REASON(part, reason, line);
- spin_unlock_irqrestore(&part->act_lock, irq_flags);
+ spin_unlock_irqrestore(&part->lock, irq_flags);
if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
xpc_request_partition_disengage(part);
@@ -1007,14 +865,60 @@ xpc_mark_partition_inactive(struct xpc_p
dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
XPC_PARTID(part));
- spin_lock_irqsave(&part->act_lock, irq_flags);
- part->act_state = XPC_P_INACTIVE;
- spin_unlock_irqrestore(&part->act_lock, irq_flags);
+ spin_lock_irqsave(&part->lock, irq_flags);
+ part->act_state = XPC_P_AS_INACTIVE;
+ spin_unlock_irqrestore(&part->lock, irq_flags);
part->remote_rp_pa = 0;
}
/*
+ * Register the remote partition's AMOs so any errors within that address
+ * range can be handled and cleaned up should the remote partition go down.
+ */
+enum xp_retval
+xpc_register_remote_amos(struct xpc_partition *part)
+{
+ unsigned long irq_flags;
+ size_t len;
+ enum xp_retval ret;
+
+ if (part->flags & XPC_P_RAMOSREGISTERED)
+ return xpSuccess;
+
+ len = xpc_number_of_amos(part->remote_npartitions) * xp_sizeof_amo;
+ ret = xp_register_remote_amos(part->remote_amos_page_pa, len);
+ if (ret == xpSuccess) {
+ spin_lock_irqsave(&part->lock, irq_flags);
+ part->flags |= XPC_P_RAMOSREGISTERED;
+ spin_unlock_irqrestore(&part->lock, irq_flags);
+ }
+ return ret;
+}
+
+void
+xpc_unregister_remote_amos(struct xpc_partition *part)
+{
+ unsigned long irq_flags;
+ size_t len;
+ enum xp_retval ret;
+
+ if (!(part->flags & XPC_P_RAMOSREGISTERED))
+ return;
+
+ len = xpc_number_of_amos(part->remote_npartitions) * xp_sizeof_amo;
+ ret = xp_unregister_remote_amos(part->remote_amos_page_pa, len);
+ if (ret != xpSuccess)
+ dev_warn(xpc_part, "failed to unregister remote AMOs for "
+ "partition %d, ret=%d\n", XPC_PARTID(part), ret);
+
+ spin_lock_irqsave(&part->lock, irq_flags);
+ part->flags &= ~XPC_P_RAMOSREGISTERED;
+ spin_unlock_irqrestore(&part->lock, irq_flags);
+}
+
+
+/*
* SAL has provided a partition and machine mask. The partition mask
* contains a bit for each even nasid in our partition. The machine
* mask contains a bit for each even nasid in the entire machine.
@@ -1036,23 +940,23 @@ xpc_discovery(void)
int max_regions;
int nasid;
struct xpc_rsvd_page *rp;
- partid_t partid;
+ short partid;
struct xpc_partition *part;
u64 *discovered_nasids;
- enum xpc_retval ret;
+ enum xp_retval ret;
remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
- xp_nasid_mask_bytes,
- GFP_KERNEL, &remote_rp_base);
+ xp_sizeof_nasid_mask,
+ GFP_KERNEL, &remote_rp_base);
if (remote_rp == NULL) {
return;
}
remote_vars = (struct xpc_vars *) remote_rp;
- discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words,
- GFP_KERNEL);
+ discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words(),
+ GFP_KERNEL);
if (discovered_nasids == NULL) {
kfree(remote_rp_base);
return;
@@ -1066,7 +970,7 @@ xpc_discovery(void)
* protection is in regards to memory, IOI and IPI.
*/
max_regions = 64;
- region_size = sn_region_size;
+ region_size = xp_region_size;
switch (region_size) {
case 128:
@@ -1124,61 +1028,50 @@ xpc_discovery(void)
ret = xpc_get_remote_rp(nasid, discovered_nasids,
remote_rp, &remote_rp_pa);
- if (ret != xpcSuccess) {
+ if (ret != xpSuccess) {
dev_dbg(xpc_part, "unable to get reserved page "
"from nasid %d, reason=%d\n", nasid,
ret);
-
- if (ret == xpcLocalPartid) {
- break;
- }
continue;
}
remote_vars_pa = remote_rp->vars_pa;
- partid = remote_rp->partid;
- part = &xpc_partitions[partid];
-
-
/* pull over the cross partition variables */
ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
- if (ret != xpcSuccess) {
+ if (ret != xpSuccess) {
dev_dbg(xpc_part, "unable to get XPC variables "
"from nasid %d, reason=%d\n", nasid,
ret);
-
- XPC_DEACTIVATE_PARTITION(part, ret);
+ if (ret == xpLocalPartid)
+ break;
continue;
}
- if (part->act_state != XPC_P_INACTIVE) {
+ partid = remote_vars->partid;
+ part = &xpc_partitions[partid];
+
+ if (part->act_state != XPC_P_AS_INACTIVE) {
dev_dbg(xpc_part, "partition %d on nasid %d is "
"already activating\n", partid, nasid);
break;
}
/*
- * Register the remote partition's AMOs with SAL so it
- * can handle and cleanup errors within that address
- * range should the remote partition go down. We don't
- * unregister this range because it is difficult to
- * tell when outstanding writes to the remote partition
- * are finished and thus when it is thus safe to
- * unregister. This should not result in wasted space
- * in the SAL xp_addr_region table because we should
- * get the same page for remote_act_amos_pa after
- * module reloads and system reboots.
+ * Register the remote partition's AMOs so any errors
+ * within that address range can be handled and
+ * cleaned up should the remote partition go down.
*/
- if (sn_register_xp_addr_region(
- remote_vars->amos_page_pa,
- PAGE_SIZE, 1) < 0) {
- dev_dbg(xpc_part, "partition %d failed to "
- "register xp_addr region 0x%016lx\n",
- partid, remote_vars->amos_page_pa);
+ part->remote_npartitions = remote_vars->npartitions;
+ part->remote_amos_page_pa = remote_vars->amos_page_pa;
+ ret = xpc_register_remote_amos(part);
+ if (ret != xpSuccess) {
+ dev_warn(xpc_part, "xpc_discovery() failed to "
+ "register remote AMOs for partition %d,"
+ "ret=%d\n", partid, ret);
- XPC_SET_REASON(part, xpcPhysAddrRegFailed,
+ XPC_SET_REASON(part, xpPhysAddrRegFailed,
__LINE__);
break;
}
@@ -1188,8 +1081,8 @@ xpc_discovery(void)
* Send an interrupt to that nasid to notify
* it that we are ready to begin activation.
*/
- dev_dbg(xpc_part, "sending an interrupt to AMO 0x%lx, "
- "nasid %d, phys_cpuid 0x%x\n",
+ dev_dbg(xpc_part, "sending an interrupt to AMO 0x%"
+ U64_ELL "x, nasid %d, phys_cpuid 0x%x\n",
remote_vars->amos_page_pa,
remote_vars->act_nasid,
remote_vars->act_phys_cpuid);
@@ -1214,26 +1107,23 @@ xpc_discovery(void)
* Given a partid, get the nasids owned by that partition from the
* remote partition's reserved page.
*/
-enum xpc_retval
-xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
+enum xp_retval
+xpc_initiate_partid_to_nasids(short partid, void *nasid_mask)
{
struct xpc_partition *part;
u64 part_nasid_pa;
- int bte_res;
part = &xpc_partitions[partid];
if (part->remote_rp_pa == 0) {
- return xpcPartitionDown;
+ return xpPartitionDown;
}
- memset(nasid_mask, 0, XP_NASID_MASK_BYTES);
+ memset(nasid_mask, 0, xp_sizeof_nasid_mask);
part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa);
- bte_res = xp_bte_copy(part_nasid_pa, (u64) nasid_mask,
- xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
-
- return xpc_map_bte_errors(bte_res);
+ return xp_remote_memcpy(nasid_mask, (void *)part_nasid_pa,
+ xp_sizeof_nasid_mask);
}
Index: linux-2.6/drivers/misc/xp/xpnet.c
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xpnet.c 2008-03-25 13:47:59.962929996 -0500
+++ linux-2.6/drivers/misc/xp/xpnet.c 2008-03-25 13:51:08.930461203 -0500
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1999,2001-2008 Silicon Graphics, Inc. All rights reserved.
*/
@@ -33,12 +33,9 @@
#include <linux/mii.h>
#include <linux/smp.h>
#include <linux/string.h>
-#include <asm/sn/bte.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_sal.h>
#include <asm/types.h>
#include <asm/atomic.h>
-#include <asm/sn/xp.h>
+#include "xp.h"
/*
@@ -110,7 +107,6 @@ struct xpnet_message {
* then be released.
*/
struct xpnet_pending_msg {
- struct list_head free_list;
struct sk_buff *skb;
atomic_t use_count;
};
@@ -126,7 +122,7 @@ struct net_device *xpnet_device;
* When we are notified of other partitions activating, we add them to
* our bitmask of partitions to which we broadcast.
*/
-static u64 xpnet_broadcast_partitions;
+static u64 xpnet_broadcast_partitions[BITS_TO_LONGS(XP_NPARTITIONS)];
/* protect above */
static DEFINE_SPINLOCK(xpnet_broadcast_lock);
@@ -147,17 +143,14 @@ static DEFINE_SPINLOCK(xpnet_broadcast_l
/*
- * The partition id is encapsulated in the MAC address. The following
- * define locates the octet the partid is in.
+ * The partid is encapsulated in the MAC address beginning at the
+ * following octet.
*/
-#define XPNET_PARTID_OCTET 1
-#define XPNET_LICENSE_OCTET 2
+#define XPNET_PARTID_OCTET 2 /* consists of 2 octets total */
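
For illustration, a helper matching this layout (and the unpacking done later in xpnet_dev_hard_start_xmit()) would recombine the two octets big-endian style:

	/* illustrative only; mirrors the unpacking in the transmit path */
	static inline short example_partid_from_mac(const u8 *mac)
	{
		return (short)(mac[XPNET_PARTID_OCTET] << 8 |
			       mac[XPNET_PARTID_OCTET + 1]);
	}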
-/*
- * Define the XPNET debug device structure that is to be used with dev_dbg(),
- * dev_err(), dev_warn(), and dev_info().
- */
+/* Define the XPNET debug device structures to be used with dev_dbg() et al */
+
struct device_driver xpnet_dbg_name = {
.name = "xpnet"
};
@@ -173,10 +166,10 @@ struct device *xpnet = &xpnet_dbg_subnam
 * Packet was received by XPC and forwarded to us.
*/
static void
-xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
+xpnet_receive(short partid, int channel, struct xpnet_message *msg)
{
struct sk_buff *skb;
- bte_result_t bret;
+ enum xp_retval ret;
struct xpnet_dev_private *priv =
(struct xpnet_dev_private *) xpnet_device->priv;
@@ -191,8 +184,8 @@ xpnet_receive(partid_t partid, int chann
return;
}
- dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size,
- msg->leadin_ignore, msg->tailout_ignore);
+ dev_dbg(xpnet, "received 0x%" U64_ELL "x, %d, %d, %d\n", msg->buf_pa,
+ msg->size, msg->leadin_ignore, msg->tailout_ignore);
/* reserve an extra cache line */
@@ -239,19 +232,21 @@ xpnet_receive(partid_t partid, int chann
(void *)__pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)),
msg->size);
- bret = bte_copy(msg->buf_pa,
- __pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)),
- msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
-
- if (bret != BTE_SUCCESS) {
- // >>> Need better way of cleaning skb. Currently skb
- // >>> appears in_use and we can't just call
- // >>> dev_kfree_skb.
- dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned "
- "error=0x%x\n", (void *)msg->buf_pa,
+ ret = xp_remote_memcpy((void *)((u64)skb->data &
+ ~(L1_CACHE_BYTES - 1)),
+ (void *)msg->buf_pa, msg->size);
+
+ if (ret != xpSuccess) {
+ /*
+ * >>> Need better way of cleaning skb. Currently skb
+ * >>> appears in_use and we can't just call
+ * >>> dev_kfree_skb.
+ */
+ dev_err(xpnet, "xp_remote_memcpy(0x%p, 0x%p, 0x%hx) "
+ "returned error=0x%x\n",
(void *)__pa((u64)skb->data &
- ~(L1_CACHE_BYTES - 1)),
- msg->size, bret);
+ ~(L1_CACHE_BYTES - 1)),
+ (void *)msg->buf_pa, msg->size, ret);
xpc_received(partid, channel, (void *) msg);
@@ -290,50 +285,43 @@ xpnet_receive(partid_t partid, int chann
* state or message reception on a connection.
*/
static void
-xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
+xpnet_connection_activity(enum xp_retval reason, short partid, int channel,
void *data, void *key)
{
- long bp;
-
-
- DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+ DBUG_ON(partid < XP_MIN_PARTID || partid > XP_MAX_PARTID);
DBUG_ON(channel != XPC_NET_CHANNEL);
switch(reason) {
- case xpcMsgReceived: /* message received */
+ case xpMsgReceived: /* message received */
DBUG_ON(data == NULL);
xpnet_receive(partid, channel, (struct xpnet_message *) data);
break;
- case xpcConnected: /* connection completed to a partition */
+ case xpConnected: /* connection completed to a partition */
spin_lock_bh(&xpnet_broadcast_lock);
- xpnet_broadcast_partitions |= 1UL << (partid -1 );
- bp = xpnet_broadcast_partitions;
+ __set_bit(partid, xpnet_broadcast_partitions);
spin_unlock_bh(&xpnet_broadcast_lock);
netif_carrier_on(xpnet_device);
- dev_dbg(xpnet, "%s connection created to partition %d; "
- "xpnet_broadcast_partitions=0x%lx\n",
- xpnet_device->name, partid, bp);
+ dev_dbg(xpnet, "%s connected to partition %d\n",
+ xpnet_device->name, partid);
break;
default:
spin_lock_bh(&xpnet_broadcast_lock);
- xpnet_broadcast_partitions &= ~(1UL << (partid -1 ));
- bp = xpnet_broadcast_partitions;
+ __clear_bit(partid, xpnet_broadcast_partitions);
spin_unlock_bh(&xpnet_broadcast_lock);
- if (bp == 0) {
+ if (bitmap_empty((unsigned long *)xpnet_broadcast_partitions,
+ XP_NPARTITIONS)) {
netif_carrier_off(xpnet_device);
}
- dev_dbg(xpnet, "%s disconnected from partition %d; "
- "xpnet_broadcast_partitions=0x%lx\n",
- xpnet_device->name, partid, bp);
+ dev_dbg(xpnet, "%s disconnected from partition %d\n",
+ xpnet_device->name, partid);
break;
-
}
}
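
A minimal sketch of the bitmap bookkeeping above, which replaces the old single-u64 mask and so lifts the 64-partition ceiling:

	/* sketch; same locking as the connection callback above */
	static void example_set_connected(short partid, int connected)
	{
		spin_lock_bh(&xpnet_broadcast_lock);
		if (connected)
			__set_bit(partid,
				  (unsigned long *)xpnet_broadcast_partitions);
		else
			__clear_bit(partid,
				    (unsigned long *)xpnet_broadcast_partitions);
		spin_unlock_bh(&xpnet_broadcast_lock);
	}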
@@ -341,18 +329,18 @@ xpnet_connection_activity(enum xpc_retva
static int
xpnet_dev_open(struct net_device *dev)
{
- enum xpc_retval ret;
+ enum xp_retval ret;
- dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, "
- "%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity,
- XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS,
- XPNET_MAX_IDLE_KTHREADS);
+ dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %" U64_ELL "d, %"
+ U64_ELL "d, %" U64_ELL "d, %" U64_ELL "d)\n", XPC_NET_CHANNEL,
+ xpnet_connection_activity, XPNET_MSG_SIZE, XPNET_MSG_NENTRIES,
+ XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS);
ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL,
XPNET_MSG_SIZE, XPNET_MSG_NENTRIES,
XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS);
- if (ret != xpcSuccess) {
+ if (ret != xpSuccess) {
dev_err(xpnet, "ifconfig up of %s failed on XPC connect, "
"ret=%d\n", dev->name, ret);
@@ -425,7 +413,7 @@ xpnet_dev_get_stats(struct net_device *d
* release the skb and then release our pending message structure.
*/
static void
-xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel,
+xpnet_send_completed(enum xp_retval reason, short partid, int channel,
void *__qm)
{
struct xpnet_pending_msg *queued_msg =
@@ -447,30 +435,67 @@ xpnet_send_completed(enum xpc_retval rea
}
+static void
+xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg,
+ u64 start_addr, u64 end_addr, u16 embedded_bytes, int dest_partid)
+{
+ struct xpnet_message *msg;
+ enum xp_retval ret;
+
+ ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL, XPC_NOWAIT,
+ (void **)&msg);
+ if (unlikely(ret != xpSuccess))
+ return;
+
+ msg->embedded_bytes = embedded_bytes;
+ if (unlikely(embedded_bytes != 0)) {
+ msg->version = XPNET_VERSION_EMBED;
+ dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
+ &msg->data, skb->data, (size_t)embedded_bytes);
+ skb_copy_from_linear_data(skb, &msg->data,
+ (size_t)embedded_bytes);
+ } else {
+ msg->version = XPNET_VERSION;
+ }
+ msg->magic = XPNET_MAGIC;
+ msg->size = end_addr - start_addr;
+ msg->leadin_ignore = (u64)skb->data - start_addr;
+ msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
+ msg->buf_pa = __pa(start_addr);
+
+ dev_dbg(xpnet, "sending XPC message to %d:%d\n"
+ KERN_DEBUG "msg->buf_pa=0x%" U64_ELL "x, msg->size=%u, "
+ "msg->leadin_ignore=%u, msg->tailout_ignore=%u\n",
+ dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
+ msg->leadin_ignore, msg->tailout_ignore);
+
+ atomic_inc(&queued_msg->use_count);
+
+ ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg,
+ xpnet_send_completed, queued_msg);
+ if (unlikely(ret != xpSuccess))
+ atomic_dec(&queued_msg->use_count);
+}
+
+
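
The use_count handling above pairs with the completion callback: each successful xpc_send_notify() holds one reference until xpnet_send_completed() runs, and the transmit path's initial reference is dropped once all sends have been issued. A sketch of the release side, assuming the free logic described for xpnet_send_completed():

	/* sketch; the last reference frees the skb and tracking struct */
	static void example_put_queued_msg(struct xpnet_pending_msg *qm)
	{
		if (atomic_dec_return(&qm->use_count) == 0) {
			dev_kfree_skb_any(qm->skb);
			kfree(qm);
		}
	}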
/*
* Network layer has formatted a packet (skb) and is ready to place it
* "on the wire". Prepare and send an xpnet_message to all partitions
* which have connected with us and are targets of this packet.
*
* MAC-NOTE: For the XPNET driver, the MAC address contains the
- * destination partition_id. If the destination partition id word
- * is 0xff, this packet is to broadcast to all partitions.
+ * destination partid. If the destination partid octets are 0xffff,
+ * this packet is to be broadcast to all connected partitions.
*/
static int
xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xpnet_pending_msg *queued_msg;
- enum xpc_retval ret;
- struct xpnet_message *msg;
u64 start_addr, end_addr;
- long dp;
- u8 second_mac_octet;
- partid_t dest_partid;
- struct xpnet_dev_private *priv;
- u16 embedded_bytes;
-
-
- priv = (struct xpnet_dev_private *) dev->priv;
+ short dest_partid;
+ struct xpnet_dev_private *priv = (struct xpnet_dev_private *)dev->priv;
+ u16 embedded_bytes = 0;
dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
@@ -478,6 +503,11 @@ xpnet_dev_hard_start_xmit(struct sk_buff
(void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
skb->len);
+ /* 0x33 is the first octet of an IPv6 multicast MAC (33:33:xx:...);
+ * bringing the interface up generates these via IPv6
+ * autoconfiguration (MLD reports, neighbor solicitations), and XPNET
+ * simply drops them. */
+ if (skb->data[0] == 0x33) {
+ dev_kfree_skb(skb);
+ return 0; /* nothing more to do */
+ }
/*
* The xpnet_pending_msg tracks how many outstanding
@@ -500,7 +530,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff
end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb));
/* calculate how many bytes to embed in the XPC message */
- embedded_bytes = 0;
if (unlikely(skb->len <= XPNET_MSG_DATA_MAX)) {
/* skb->data does fit so embed */
embedded_bytes = skb->len;
@@ -517,89 +546,27 @@ xpnet_dev_hard_start_xmit(struct sk_buff
atomic_set(&queued_msg->use_count, 1);
queued_msg->skb = skb;
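+ /*
+ * use_count starts at one; each xpnet_send() takes an extra
+ * reference that xpnet_send_completed() later drops. The initial
+ * reference is dropped at the bottom of this function, and whichever
+ * decrement reaches zero frees the skb and this structure.
+ */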
-
- second_mac_octet = skb->data[XPNET_PARTID_OCTET];
- if (second_mac_octet == 0xff) {
+ if (skb->data[0] == 0xff) {
/* we are being asked to broadcast to all partitions */
- dp = xpnet_broadcast_partitions;
- } else if (second_mac_octet != 0) {
- dp = xpnet_broadcast_partitions &
- (1UL << (second_mac_octet - 1));
- } else {
- /* 0 is an invalid partid. Ignore */
- dp = 0;
- }
- dev_dbg(xpnet, "destination Partitions mask (dp) = 0x%lx\n", dp);
-
- /*
- * If we wanted to allow promiscuous mode to work like an
- * unswitched network, this would be a good point to OR in a
- * mask of partitions which should be receiving all packets.
- */
-
- /*
- * Main send loop.
- */
- for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS;
- dest_partid++) {
-
-
- if (!(dp & (1UL << (dest_partid - 1)))) {
- /* not destined for this partition */
- continue;
- }
-
- /* remove this partition from the destinations mask */
- dp &= ~(1UL << (dest_partid - 1));
-
-
- /* found a partition to send to */
-
- ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL,
- XPC_NOWAIT, (void **)&msg);
- if (unlikely(ret != xpcSuccess)) {
- continue;
+ for_each_bit(dest_partid,
+ (unsigned long *)xpnet_broadcast_partitions,
+ XP_NPARTITIONS) {
+ xpnet_send(skb, queued_msg, start_addr, end_addr,
+ embedded_bytes, dest_partid);
}
+ } else {
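+ /* the two partid octets are stored big-endian in the MAC */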
+ dest_partid = (short)skb->data[XPNET_PARTID_OCTET + 1];
+ dest_partid |= (short)skb->data[XPNET_PARTID_OCTET + 0] << 8;
- msg->embedded_bytes = embedded_bytes;
- if (unlikely(embedded_bytes != 0)) {
- msg->version = XPNET_VERSION_EMBED;
- dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
- &msg->data, skb->data, (size_t) embedded_bytes);
- skb_copy_from_linear_data(skb, &msg->data,
- (size_t)embedded_bytes);
- } else {
- msg->version = XPNET_VERSION;
- }
- msg->magic = XPNET_MAGIC;
- msg->size = end_addr - start_addr;
- msg->leadin_ignore = (u64) skb->data - start_addr;
- msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
- msg->buf_pa = __pa(start_addr);
-
- dev_dbg(xpnet, "sending XPC message to %d:%d\n"
- KERN_DEBUG "msg->buf_pa=0x%lx, msg->size=%u, "
- "msg->leadin_ignore=%u, msg->tailout_ignore=%u\n",
- dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
- msg->leadin_ignore, msg->tailout_ignore);
-
-
- atomic_inc(&queued_msg->use_count);
-
- ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg,
- xpnet_send_completed, queued_msg);
- if (unlikely(ret != xpcSuccess)) {
- atomic_dec(&queued_msg->use_count);
- continue;
+ if (dest_partid >= XP_MIN_PARTID &&
+ dest_partid <= XP_MAX_PARTID &&
+ test_bit(dest_partid, xpnet_broadcast_partitions) != 0) {
+ xpnet_send(skb, queued_msg, start_addr, end_addr,
+ embedded_bytes, dest_partid);
}
-
}
if (atomic_dec_return(&queued_msg->use_count) == 0) {
- dev_dbg(xpnet, "no partitions to receive packet destined for "
- "%d\n", dest_partid);
-
-
dev_kfree_skb(skb);
kfree(queued_msg);
}
@@ -615,7 +582,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff
* Deal with transmit timeouts coming from the network layer.
*/
static void
-xpnet_dev_tx_timeout (struct net_device *dev)
+xpnet_dev_tx_timeout(struct net_device *dev)
{
struct xpnet_dev_private *priv;
@@ -630,12 +597,11 @@ xpnet_dev_tx_timeout (struct net_device
static int __init
xpnet_init(void)
{
- int i;
- u32 license_num;
+ short partid;
int result = -ENOMEM;
- if (!ia64_platform_is("sn2")) {
+ if (!is_shub() && !is_uv()) {
return -ENODEV;
}
@@ -667,14 +633,12 @@ xpnet_init(void)
* MAC addresses. We chose the first octet of the MAC to be unlikely
* to collide with any vendor's officially issued MAC.
*/
- xpnet_device->dev_addr[0] = 0xfe;
- xpnet_device->dev_addr[XPNET_PARTID_OCTET] = sn_partition_id;
- license_num = sn_partition_serial_number_val();
- for (i = 3; i >= 0; i--) {
- xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] =
- license_num & 0xff;
- license_num = license_num >> 8;
- }
+ xpnet_device->dev_addr[0] = 0x02; /* locally administered, no OUI */
+
+ partid = xp_partition_id;
+
+ xpnet_device->dev_addr[XPNET_PARTID_OCTET + 1] = partid & 0xff;
+ xpnet_device->dev_addr[XPNET_PARTID_OCTET + 0] |= (partid >> 8) & 0xff;
/*
* ether_setup() sets this to a multicast device. We are
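
As an aside on the MAC layout above: the two-octet partid encoding can
be exercised outside the kernel. The following is a minimal userspace
sketch, not part of the patch; the XPNET_PARTID_OCTET value is assumed,
and it uses '=' where the driver uses '|=' for the high octet:

	#include <stdio.h>

	#define XPNET_PARTID_OCTET 1	/* assumed index of the high octet */

	static void pack_partid(unsigned char *mac, short partid)
	{
		mac[XPNET_PARTID_OCTET + 0] = (partid >> 8) & 0xff;
		mac[XPNET_PARTID_OCTET + 1] = partid & 0xff;
	}

	static short unpack_partid(const unsigned char *mac)
	{
		return (short)((mac[XPNET_PARTID_OCTET + 0] << 8) |
			       mac[XPNET_PARTID_OCTET + 1]);
	}

	int main(void)
	{
		unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 0 };

		pack_partid(mac, 300);
		printf("partid=%d\n", unpack_partid(mac));	/* partid=300 */
		return 0;
	}

Round-tripping a partid such as 300 (0x012c) shows why both octets are
needed once the number of partitions can exceed 256.
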
Index: linux-2.6/drivers/misc/xp/Makefile
===================================================================
--- linux-2.6.orig/drivers/misc/xp/Makefile 2008-03-25 13:47:59.962929996 -0500
+++ linux-2.6/drivers/misc/xp/Makefile 2008-03-25 13:51:08.950463694 -0500
@@ -3,7 +3,10 @@
#
obj-$(CONFIG_SGI_XP) += xp.o
-xp-y := xp_main.o xp_nofault.o
+xp-y := xp_main.o xp_uv.o
+xp-$(CONFIG_IA64) += xp_sn2.o xp_nofault.o
+
obj-$(CONFIG_SGI_XP) += xpc.o
xpc-y := xpc_main.o xpc_channel.o xpc_partition.o
+
obj-$(CONFIG_SGI_XP) += xpnet.o
Index: linux-2.6/drivers/misc/xp/xp_nofault.S
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xp_nofault.S 2008-03-25 13:47:59.962929996 -0500
+++ linux-2.6/drivers/misc/xp/xp_nofault.S 2008-03-25 13:51:08.970466184 -0500
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
Index: linux-2.6/include/asm-ia64/sn/bte.h
===================================================================
--- linux-2.6.orig/include/asm-ia64/sn/bte.h 2008-03-25 13:47:59.966930495 -0500
+++ linux-2.6/include/asm-ia64/sn/bte.h 2008-03-25 13:51:08.998469671 -0500
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
@@ -177,9 +177,6 @@ typedef enum {
#define BTE_GET_ERROR_STATUS(_status) \
(BTE_SHUB2_ERROR(_status) & ~IBLS_ERROR)
-#define BTE_VALID_SH2_ERROR(value) \
- ((value >= BTEFAIL_SH2_RESP_SHORT) && (value <= BTEFAIL_SH2_ALL))
-
/*
* Structure defining a bte. An instance of this
* structure is created in the nodepda for each
Index: linux-2.6/drivers/misc/xp/xp_uv.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6/drivers/misc/xp/xp_uv.c 2008-03-25 13:51:09.018472162 -0500
@@ -0,0 +1,194 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * Cross Partition (XP) uv-based functions.
+ *
+ * Architecture-specific implementation of common functions.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/device.h>
+#include "xp.h"
+
+extern struct device *xp;
+
+static enum xp_retval
+xp_register_nofault_code_uv(void)
+{
+ return xpSuccess;
+}
+
+static void
+xp_unregister_nofault_code_uv(void)
+{
+}
+
+static enum xp_retval
+xp_remote_memcpy_uv(void *vdst, const void *psrc, size_t len)
+{
+ /* >>> this function needs fleshing out */
+ return xpUnsupported;
+}
+
+static enum xp_retval
+xp_register_remote_amos_uv(u64 paddr, size_t len)
+{
+ /* >>> this function needs fleshing out */
+ return xpUnsupported;
+}
+
+static enum xp_retval
+xp_unregister_remote_amos_uv(u64 paddr, size_t len)
+{
+ /* >>> this function needs fleshing out */
+ return xpUnsupported;
+}
+
+/*
+ * Allocate the required number of contiguous physical pages to hold the
+ * specified number of AMOs.
+ */
+static u64 *
+xp_alloc_amos_uv(int n_amos)
+{
+ size_t n_bytes = roundup(n_amos * xp_sizeof_amo, PAGE_SIZE);
+ struct page *page;
+ u64 *amos_page = NULL;
+
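+ /*
+ * GFP_THISNODE keeps the allocation on the requested node rather
+ * than falling back to another one; get_order() converts the byte
+ * count to a power-of-two page order.
+ */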
+ page = alloc_pages_node(0, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ get_order(n_bytes));
+ if (page)
+ amos_page = (u64 *)page_address(page);
+
+ return amos_page;
+}
+
+static void
+xp_free_amos_uv(u64 *amos_page, int n_amos)
+{
+ size_t n_bytes = roundup(n_amos * xp_sizeof_amo, PAGE_SIZE);
+
+ free_pages((u64)amos_page, get_order(n_bytes));
+}
+
+static enum xp_retval
+xp_set_amo_uv(u64 *amo_va, int op, u64 operand, int remote)
+{
+ /* >>> this function needs fleshing out */
+ return xpUnsupported;
+}
+
+static enum xp_retval
+xp_set_amo_with_interrupt_uv(u64 *amo_va, int op, u64 operand, int remote,
+ int nasid, int phys_cpuid, int vector)
+{
+ /* >>> this function needs fleshing out */
+ return xpUnsupported;
+}
+
+static enum xp_retval
+xp_get_amo_uv(u64 *amo_va, int op, u64 *amo_value_addr)
+{
+ /* >>> this function needs fleshing out */
+ return xpUnsupported;
+}
+
+static enum xp_retval
+xp_get_partition_rsvd_page_pa_uv(u64 buf, u64 *cookie, u64 *paddr, size_t *len)
+{
+ /* >>> this function needs fleshing out */
+ return xpUnsupported;
+}
+
+static enum xp_retval
+xp_change_memprotect_uv(u64 paddr, size_t len, int request, u64 *nasid_array)
+{
+ /* >>> this function needs fleshing out */
+ return xpUnsupported;
+}
+
+static void
+xp_change_memprotect_shub_wars_1_1_uv(int request)
+{
+ return;
+}
+
+static void
+xp_allow_IPI_ops_uv(void)
+{
+ /* >>> this function needs fleshing out */
+ return;
+}
+
+static void
+xp_disallow_IPI_ops_uv(void)
+{
+ /* >>> this function needs fleshing out */
+ return;
+}
+
+static int
+xp_cpu_to_nasid_uv(int cpuid)
+{
+ /* >>> this function needs fleshing out */
+ return -1;
+}
+
+static int
+xp_node_to_nasid_uv(int nid)
+{
+ /* >>> this function needs fleshing out */
+ return -1;
+}
+
+enum xp_retval
+xp_init_uv(void)
+{
+ BUG_ON(!is_uv());
+
+ xp_partition_id = 0; /* >>> not correct value */
+ xp_region_size = 0; /* >>> not correct value */
+ xp_rtc_cycles_per_second = 0; /* >>> not correct value */
+
+ xp_remote_memcpy = xp_remote_memcpy_uv;
+
+ xp_register_remote_amos = xp_register_remote_amos_uv;
+ xp_unregister_remote_amos = xp_unregister_remote_amos_uv;
+
+ xp_sizeof_amo = sizeof(u64);
+ xp_alloc_amos = xp_alloc_amos_uv;
+ xp_free_amos = xp_free_amos_uv;
+ xp_set_amo = xp_set_amo_uv;
+ xp_set_amo_with_interrupt = xp_set_amo_with_interrupt_uv;
+ xp_get_amo = xp_get_amo_uv;
+
+ xp_get_partition_rsvd_page_pa = xp_get_partition_rsvd_page_pa_uv;
+
+ xp_change_memprotect = xp_change_memprotect_uv;
+ xp_change_memprotect_shub_wars_1_1 =
+ xp_change_memprotect_shub_wars_1_1_uv;
+ xp_allow_IPI_ops = xp_allow_IPI_ops_uv;
+ xp_disallow_IPI_ops = xp_disallow_IPI_ops_uv;
+
+ xp_cpu_to_nasid = xp_cpu_to_nasid_uv;
+ xp_node_to_nasid = xp_node_to_nasid_uv;
+
+ return xp_register_nofault_code_uv();
+}
+
+void
+xp_exit_uv(void)
+{
+ BUG_ON(!is_uv());
+
+ xp_unregister_nofault_code_uv();
+}
+
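
The shape of xp_init_uv() above, filling a table of global function
pointers with _uv variants, is the dispatch mechanism shared by both
backends. A minimal standalone sketch of the same idea, with made-up
names and mappings:

	#include <stdio.h>

	/* one global "op" pointer per operation, bound once at init time */
	static int (*arch_cpu_to_nasid)(int cpuid);

	static int cpu_to_nasid_sn2(int cpuid)
	{
		return cpuid * 2;	/* made-up mapping for illustration */
	}

	static int cpu_to_nasid_uv(int cpuid)
	{
		return -1;		/* unimplemented, like the >>> stubs */
	}

	static void arch_init(int on_uv)
	{
		/* mirrors the is_shub()/is_uv() selection at module init */
		arch_cpu_to_nasid = on_uv ? cpu_to_nasid_uv : cpu_to_nasid_sn2;
	}

	int main(void)
	{
		arch_init(0);
		printf("nasid=%d\n", arch_cpu_to_nasid(4));	/* nasid=8 */
		return 0;
	}

The cost is one indirect call per operation; in exchange, common code
such as xp_main.c never needs #ifdefs for sn2 versus uv.
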
Index: linux-2.6/arch/ia64/sn/kernel/setup.c
===================================================================
--- linux-2.6.orig/arch/ia64/sn/kernel/setup.c 2008-03-25 13:47:59.966930495 -0500
+++ linux-2.6/arch/ia64/sn/kernel/setup.c 2008-03-25 13:51:09.038474652 -0500
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1999,2001-2006 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1999,2001-2008 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -93,7 +93,7 @@ u8 sn_region_size;
EXPORT_SYMBOL(sn_region_size);
int sn_prom_type; /* 0=hardware, 1=medusa/realprom, 2=medusa/fakeprom */
-short physical_node_map[MAX_NUMALINK_NODES];
+short physical_node_map[SN_MAX_NUMALINK_NODES];
static unsigned long sn_prom_features[MAX_PROM_FEATURE_SETS];
EXPORT_SYMBOL(physical_node_map);
Index: linux-2.6/include/asm-ia64/sn/arch.h
===================================================================
--- linux-2.6.orig/include/asm-ia64/sn/arch.h 2008-03-25 13:47:59.966930495 -0500
+++ linux-2.6/include/asm-ia64/sn/arch.h 2008-03-25 13:51:09.066478139 -0500
@@ -5,7 +5,7 @@
*
* SGI specific setup.
*
- * Copyright (C) 1995-1997,1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1995-1997,1999,2001-2005,2008 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
*/
#ifndef _ASM_IA64_SN_ARCH_H
@@ -42,7 +42,7 @@
* This value is also the value of the maximum number of NASIDs in the numalink
* fabric.
*/
-#define MAX_NUMALINK_NODES 16384
+#define SN_MAX_NUMALINK_NODES 16384
/*
* The following defines attributes of the HUB chip. These attributes are
@@ -60,6 +60,7 @@ DECLARE_PER_CPU(struct sn_hub_info_s, __
#define sn_hub_info (&__get_cpu_var(__sn_hub_info))
#define is_shub2() (sn_hub_info->shub2)
#define is_shub1() (sn_hub_info->shub2 == 0)
+#define is_shub() (is_shub1() || is_shub2())
/*
* Use this macro to test if shub 1.1 wars should be enabled
Index: linux-2.6/drivers/misc/xp/xp_sn2.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6/drivers/misc/xp/xp_sn2.c 2008-03-25 13:51:09.090481128 -0500
@@ -0,0 +1,487 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * Cross Partition (XP) sn2-based functions.
+ *
+ * Architecture-specific implementation of common functions.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <asm/uncached.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/bte.h>
+#include <asm/sn/clksupport.h>
+#include <asm/sn/mspec.h>
+#include <asm/sn/sn_sal.h>
+#include "xp.h"
+
+extern struct device *xp;
+
+/*
+ * Register a nofault code region which performs a cross-partition PIO read.
+ * If the PIO read times out, the MCA handler will consume the error and
+ * return to a kernel-provided instruction to indicate an error. This PIO read
+ * exists because it is guaranteed to time out if the destination is down
+ * (AMO operations do not time out on at least some CPUs on Shubs <= v1.2,
+ * which unfortunately we have to work around).
+ */
+static enum xp_retval
+xp_register_nofault_code_sn2(void)
+{
+ int ret;
+ u64 func_addr;
+ u64 err_func_addr;
+
+ func_addr = *(u64 *)xp_nofault_PIOR;
+ err_func_addr = *(u64 *)xp_error_PIOR;
+ ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr,
+ 1, 1);
+ if (ret != 0) {
+ dev_err(xp, "can't register nofault code, error=%d\n", ret);
+ return xpSalError;
+ }
+ /*
+ * Setup the nofault PIO read target. (There is no special reason why
+ * SH_IPI_ACCESS was selected.)
+ */
+ if (is_shub1())
+ xp_nofault_PIOR_target = SH1_IPI_ACCESS;
+ else if (is_shub2())
+ xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
+
+ return xpSuccess;
+}
+
+void
+xp_unregister_nofault_code_sn2(void)
+{
+ u64 func_addr = *(u64 *)xp_nofault_PIOR;
+ u64 err_func_addr = *(u64 *)xp_error_PIOR;
+
+ /* unregister the PIO read nofault code region */
+ (void)sn_register_nofault_code(func_addr, err_func_addr,
+ err_func_addr, 1, 0);
+}
+
+/*
+ * Wrapper for bte_copy().
+ *
+ * vdst - virtual address of the destination of the transfer.
+ * psrc - physical address of the source of the transfer.
+ * len - number of bytes to transfer from source to destination.
+ *
+ * Note: xp_remote_memcpy_sn2() should never be called while holding a spinlock.
+ */
+static enum xp_retval
+xp_remote_memcpy_sn2(void *vdst, const void *psrc, size_t len)
+{
+ bte_result_t ret;
+ u64 pdst = ia64_tpa(vdst);
+ /* >>> What are the rules governing the src and dst addresses passed in?
+ * >>> Currently we're assuming that dst is a virtual address and src
+ * >>> is a physical address, is this appropriate? Can we allow them to
+ * >>> be whatever and we make the change here without damaging the
+ * >>> addresses?
+ */
+
+ /*
+ * Ensure that the physically mapped memory is contiguous.
+ *
+ * We do this by ensuring that the memory is from region 7 only.
+ * If the need should arise to use memory from one of the other
+ * regions, then modify the BUG_ON() statement to ensure that the
+ * memory from that region is always physically contiguous.
+ */
+ BUG_ON(REGION_NUMBER(vdst) != RGN_KERNEL);
+
+ ret = bte_copy((u64)psrc, pdst, len, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
+ if (ret == BTE_SUCCESS)
+ return xpSuccess;
+
+ if (is_shub2())
+ dev_err(xp, "bte_copy() on shub2 failed, error=0x%x\n", ret);
+ else
+ dev_err(xp, "bte_copy() failed, error=%d\n", ret);
+
+ return xpBteCopyError;
+}
+
+/*
+ * Register the remote partition's AMOs with SAL so it can handle and cleanup
+ * errors within that address range should the remote partition go down. We
+ * don't unregister this range because it is difficult to tell when outstanding
+ * writes to the remote partition are finished and thus when it is safe to
+ * unregister. This should not result in wasted space in the SAL xp_addr_region
+ * table because we should get the same page for remote_amos_page_pa after
+ * module reloads and system reboots.
+ */
+static enum xp_retval
+xp_register_remote_amos_sn2(u64 paddr, size_t len)
+{
+ enum xp_retval ret = xpSuccess;
+
+ if (sn_register_xp_addr_region(paddr, len, 1) < 0)
+ ret = xpSalError;
+ return ret;
+}
+
+static enum xp_retval
+xp_unregister_remote_amos_sn2(u64 paddr, size_t len)
+{
+ return xpSuccess; /* we don't unregister AMOs on sn2 */
+}
+
+/*
+ * Allocate the required number of contiguous physical pages to hold the
+ * specified number of AMOs.
+ */
+static u64 *
+xp_alloc_amos_sn2(int n_amos)
+{
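+ /* round the AMO bytes up to whole pages; uncached_alloc_page()
+ * takes a count of contiguous uncached pages */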
+ int n_pages = DIV_ROUND_UP(n_amos * xp_sizeof_amo, PAGE_SIZE);
+
+ return (u64 *)TO_AMO(uncached_alloc_page(0, n_pages));
+}
+
+static void
+xp_free_amos_sn2(u64 *amos_page, int n_amos)
+{
+ int n_pages = DIV_ROUND_UP(n_amos * xp_sizeof_amo, PAGE_SIZE);
+
+ uncached_free_page(__IA64_UNCACHED_OFFSET | TO_PHYS((u64) amos_page),
+ n_pages);
+}
+
+
+static enum xp_retval
+xp_set_amo_sn2(u64 *amo_va, int op, u64 operand, int remote)
+{
+ unsigned long irq_flags = irq_flags; /* eliminate compiler warning */
+ int ret = xpSuccess;
+ /* >>> eliminate remote arg and xp_nofault_PIOR() call */
+
+ if (op == XP_AMO_AND)
+ op = FETCHOP_AND;
+ else if (op == XP_AMO_OR)
+ op = FETCHOP_OR;
+ else
+ BUG();
+
+ if (remote)
+ local_irq_save(irq_flags);
+
+ FETCHOP_STORE_OP(TO_AMO((u64)amo_va), op, operand);
+
+ if (remote) {
+ /*
+ * We must always use the nofault function regardless of
+ * whether we are on a Shub 1.1 system or a Shub 1.2 slice
+ * 0xc processor. If we didn't, we'd never know that the other
+ * partition is down and would keep sending IPIs and AMOs to
+ * it until the heartbeat times out.
+ */
+ if (xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(amo_va),
+ xp_nofault_PIOR_target)) != 0)
+ ret = xpPioReadError;
+
+ local_irq_restore(irq_flags);
+ }
+
+ return ret;
+}
+
+static enum xp_retval
+xp_set_amo_with_interrupt_sn2(u64 *amo_va, int op, u64 operand, int remote,
+ int nasid, int phys_cpuid, int vector)
+{
+ unsigned long irq_flags = irq_flags; /* eliminate compiler warning */
+ int ret = xpSuccess;
+
+ if (op == XP_AMO_AND)
+ op = FETCHOP_AND;
+ else if (op == XP_AMO_OR)
+ op = FETCHOP_OR;
+ else
+ BUG();
+
+ if (remote)
+ local_irq_save(irq_flags);
+
+ FETCHOP_STORE_OP(TO_AMO((u64)amo_va), op, operand);
+ sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
+
+ if (remote) {
+ /*
+ * We must always use the nofault function regardless of
+ * whether we are on a Shub 1.1 system or a Shub 1.2 slice
+ * 0xc processor. If we didn't, we'd never know that the other
+ * partition is down and would keep sending IPIs and AMOs to
+ * it until the heartbeat times out.
+ */
+ if (xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(amo_va),
+ xp_nofault_PIOR_target)) != 0)
+ ret = xpPioReadError;
+
+ local_irq_restore(irq_flags);
+ }
+
+ return ret;
+}
+
+static enum xp_retval
+xp_get_amo_sn2(u64 *amo_va, int op, u64 *amo_value_addr)
+{
+ u64 amo_value;
+
+ if (op == XP_AMO_LOAD)
+ op = FETCHOP_LOAD;
+ else if (op == XP_AMO_CLEAR)
+ op = FETCHOP_CLEAR;
+ else
+ BUG();
+
+ amo_value = FETCHOP_LOAD_OP(TO_AMO((u64)amo_va), op);
+ if (amo_value_addr != NULL)
+ *amo_value_addr = amo_value;
+ return xpSuccess;
+}
+
+static enum xp_retval
+xp_get_partition_rsvd_page_pa_sn2(u64 buf, u64 *cookie, u64 *paddr, size_t *len)
+{
+ s64 status;
+ enum xp_retval ret;
+
+ status = sn_partition_reserved_page_pa(buf, cookie, paddr, len);
+ if (status == SALRET_OK)
+ ret = xpSuccess;
+ else if (status == SALRET_MORE_PASSES)
+ ret = xpNeedMoreInfo;
+ else
+ ret = xpSalError;
+
+ return ret;
+}
+
+static enum xp_retval
+xp_change_memprotect_sn2(u64 paddr, size_t len, int request, u64 *nasid_array)
+{
+ u64 perms;
+ int status;
+
+ /*
+ * Since the BIST collides with memory operations on
+ * SHUB 1.1, sn_change_memprotect() cannot be used. See
+ * xp_change_memprotect_shub_wars_1_1() for WAR.
+ */
+ if (enable_shub_wars_1_1())
+ return xpSuccess;
+
+ if (request == XP_MEMPROT_DISALLOW_ALL)
+ perms = SN_MEMPROT_ACCESS_CLASS_0;
+ else if (request == XP_MEMPROT_ALLOW_CPU_AMO)
+ perms = SN_MEMPROT_ACCESS_CLASS_1;
+ else if (request == XP_MEMPROT_ALLOW_CPU_MEM)
+ perms = SN_MEMPROT_ACCESS_CLASS_2;
+ else
+ BUG();
+
+ status = sn_change_memprotect(paddr, len, perms, nasid_array);
+ return (status == 0) ? xpSuccess : xpSalError;
+}
+
+/* original protection values for each node */
+static u64 xpc_prot_vec[MAX_NUMNODES];
+
+/*
+ * Change protections to allow/disallow all operations on Shub 1.1 systems.
+ */
+static void
+xp_change_memprotect_shub_wars_1_1_sn2(int request)
+{
+ int node;
+ int nasid;
+
+ /*
+ * Since the BIST collides with memory operations on SHUB 1.1,
+ * sn_change_memprotect() cannot be used.
+ */
+ if (!enable_shub_wars_1_1())
+ return;
+
+ if (request == XP_MEMPROT_ALLOW_ALL) {
+ for_each_online_node(node) {
+ nasid = cnodeid_to_nasid(node);
+ /* save current protection values */
+ xpc_prot_vec[node] =
+ (u64)HUB_L((u64 *)GLOBAL_MMR_ADDR(nasid,
+ SH1_MD_DQLP_MMR_DIR_PRIVEC0));
+ /* open up everything */
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
+ SH1_MD_DQLP_MMR_DIR_PRIVEC0), -1UL);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
+ SH1_MD_DQRP_MMR_DIR_PRIVEC0), -1UL);
+ }
+ } else if (request == XP_MEMPROT_DISALLOW_ALL) {
+ for_each_online_node(node) {
+ nasid = cnodeid_to_nasid(node);
+ /* restore original protection values */
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
+ SH1_MD_DQLP_MMR_DIR_PRIVEC0), xpc_prot_vec[node]);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
+ SH1_MD_DQRP_MMR_DIR_PRIVEC0), xpc_prot_vec[node]);
+ }
+ } else
+ BUG();
+}
+
+/* SH_IPI_ACCESS shub register value on startup */
+static u64 xpc_sh1_IPI_access;
+static u64 xpc_sh2_IPI_access0;
+static u64 xpc_sh2_IPI_access1;
+static u64 xpc_sh2_IPI_access2;
+static u64 xpc_sh2_IPI_access3;
+
+/*
+ * Change protections to allow IPI operations.
+ */
+static void
+xp_allow_IPI_ops_sn2(void)
+{
+ int node;
+ int nasid;
+
+ /* >>> The following should get moved into SAL. */
+ if (is_shub2()) {
+ xpc_sh2_IPI_access0 =
+ (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
+ xpc_sh2_IPI_access1 =
+ (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
+ xpc_sh2_IPI_access2 =
+ (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
+ xpc_sh2_IPI_access3 =
+ (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
+
+ for_each_online_node(node) {
+ nasid = cnodeid_to_nasid(node);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
+ -1UL);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
+ -1UL);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
+ -1UL);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
+ -1UL);
+ }
+ } else {
+ xpc_sh1_IPI_access =
+ (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
+
+ for_each_online_node(node) {
+ nasid = cnodeid_to_nasid(node);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
+ -1UL);
+ }
+ }
+}
+
+/*
+ * Restrict protections to disallow IPI operations.
+ */
+static void
+xp_disallow_IPI_ops_sn2(void)
+{
+ int node;
+ int nasid;
+
+ /* >>> The following should get moved into SAL. */
+ if (is_shub2()) {
+ for_each_online_node(node) {
+ nasid = cnodeid_to_nasid(node);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
+ xpc_sh2_IPI_access0);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
+ xpc_sh2_IPI_access1);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
+ xpc_sh2_IPI_access2);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
+ xpc_sh2_IPI_access3);
+ }
+ } else {
+ for_each_online_node(node) {
+ nasid = cnodeid_to_nasid(node);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
+ xpc_sh1_IPI_access);
+ }
+ }
+}
+
+static int
+xp_cpu_to_nasid_sn2(int cpuid)
+{
+ return cpuid_to_nasid(cpuid);
+}
+
+static int
+xp_node_to_nasid_sn2(int nid)
+{
+ return cnodeid_to_nasid(nid);
+}
+
+enum xp_retval
+xp_init_sn2(void)
+{
+ BUG_ON(!is_shub());
+
+ xp_partition_id = sn_partition_id;
+ xp_region_size = sn_region_size;
+ xp_rtc_cycles_per_second = sn_rtc_cycles_per_second;
+
+ xp_remote_memcpy = xp_remote_memcpy_sn2;
+
+ xp_register_remote_amos = xp_register_remote_amos_sn2;
+ xp_unregister_remote_amos = xp_unregister_remote_amos_sn2;
+
+ /*
+	 * MSPEC-based AMOs are assumed to have the important bits in only
+	 * the first 64 bits. The remainder is ignored, except that
+	 * xp_sizeof_amo must reflect its existence.
+ */
+ BUG_ON(offsetof(AMO_t, variable) != 0);
+ BUG_ON(sizeof(((AMO_t *)NULL)->variable) != sizeof(u64));
+ xp_sizeof_amo = sizeof(AMO_t);
+ xp_alloc_amos = xp_alloc_amos_sn2;
+ xp_free_amos = xp_free_amos_sn2;
+ xp_set_amo = xp_set_amo_sn2;
+ xp_set_amo_with_interrupt = xp_set_amo_with_interrupt_sn2;
+ xp_get_amo = xp_get_amo_sn2;
+
+ xp_get_partition_rsvd_page_pa = xp_get_partition_rsvd_page_pa_sn2;
+
+ xp_change_memprotect = xp_change_memprotect_sn2;
+ xp_change_memprotect_shub_wars_1_1 =
+ xp_change_memprotect_shub_wars_1_1_sn2;
+ xp_allow_IPI_ops = xp_allow_IPI_ops_sn2;
+ xp_disallow_IPI_ops = xp_disallow_IPI_ops_sn2;
+
+ xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
+ xp_node_to_nasid = xp_node_to_nasid_sn2;
+
+ return xp_register_nofault_code_sn2();
+}
+
+void
+xp_exit_sn2(void)
+{
+ BUG_ON(!is_shub());
+
+ xp_unregister_nofault_code_sn2();
+}
+
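
The page arithmetic in xp_alloc_amos_sn2() above is easy to check in
isolation. A standalone sketch; the PAGE_SIZE and AMO size below are
assumed values for illustration only:

	#include <stdio.h>

	#define PAGE_SIZE 16384		/* assumed 16K pages */
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		int xp_sizeof_amo = 64;	/* assumed sizeof(AMO_t) */
		int n_amos = 320;

		/* 320 * 64 = 20480 bytes -> 2 pages, not 1 */
		printf("pages=%d\n",
		       DIV_ROUND_UP(n_amos * xp_sizeof_amo, PAGE_SIZE));
		return 0;
	}

Rounding up rather than truncating guarantees the tail AMOs still land
inside allocated uncached memory.
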
Index: linux-2.6/drivers/misc/Kconfig
===================================================================
--- linux-2.6.orig/drivers/misc/Kconfig 2008-03-25 13:50:58.000000000 -0500
+++ linux-2.6/drivers/misc/Kconfig 2008-03-25 13:51:36.301869309 -0500
@@ -329,7 +329,7 @@ config ENCLOSURE_SERVICES
config SGI_XP
tristate "Support communication between SGI SSIs"
- depends on IA64_GENERIC || IA64_SGI_SN2
+ depends on IA64_GENERIC || IA64_SGI_SN2 || X86_64
select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
---help---
--
^ permalink raw reply [flat|nested] 11+ messages in thread
* [Patch 4/5] run drivers/misc/xp through scripts/Lindent
2008-03-25 19:25 [Patch 0/5] prepare XPC and XPNET to support SGI UV dcn
2008-03-25 19:25 ` [Patch 1/5] add multi-page allocation to the uncached allocator dcn
2008-03-25 19:25 ` [Patch 3/5] prepare XPC and XPNET for future support of SGIs UV architecture dcn
@ 2008-03-25 19:25 ` dcn
2008-03-25 19:25 ` [Patch 5/5] run drivers/misc/xp through scripts/checkpatch.pl dcn
2008-03-25 20:14 ` [Patch 0/5] prepare XPC and XPNET to support SGI UV Dean Nelson
4 siblings, 0 replies; 11+ messages in thread
From: dcn @ 2008-03-25 19:25 UTC (permalink / raw)
To: jes, tony.luck, linux-kernel, linux-ia64
[-- Attachment #1: Lindent --]
[-- Type: text/plain, Size: 163773 bytes --]
Ran the drivers/misc/xp source files through scripts/Lindent.
Signed-off-by: Dean Nelson <dcn@sgi.com>
---
drivers/misc/xp/xp.h | 115 ++++------
drivers/misc/xp/xp_main.c | 109 ++++------
drivers/misc/xp/xp_sn2.c | 52 ++--
drivers/misc/xp/xp_uv.c | 7
drivers/misc/xp/xpc.h | 368 ++++++++++++++--------------------
drivers/misc/xp/xpc_channel.c | 379 +++++++++++++-----------------------
drivers/misc/xp/xpc_main.c | 329 +++++++++++++------------------
drivers/misc/xp/xpc_partition.c | 168 +++++----------
drivers/misc/xp/xpnet.c | 95 +++------
9 files changed, 661 insertions(+), 961 deletions(-)
Index: linux-2.6/drivers/misc/xp/xp.h
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xp.h 2008-03-21 13:30:52.030174502 -0500
+++ linux-2.6/drivers/misc/xp/xp.h 2008-03-23 08:43:19.628746640 -0500
@@ -6,16 +6,13 @@
* Copyright (C) 2004-2008 Silicon Graphics, Inc. All rights reserved.
*/
-
/*
* External Cross Partition (XP) structures and defines.
*/
-
#ifndef _DRIVERS_MISC_XP_XP_H
#define _DRIVERS_MISC_XP_XP_H
-
#include <linux/cache.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
@@ -35,14 +32,12 @@
#error architecture is NOT supported
#endif
-
#ifdef USE_DBUG_ON
#define DBUG_ON(condition) BUG_ON(condition)
#else
#define DBUG_ON(condition)
#endif
-
#ifndef is_shub1
#define is_shub1() 0
#endif
@@ -59,7 +54,6 @@
#define is_uv() 0
#endif
-
/*
* Define the maximum number of logically defined partitions the system
* can support. It is constrained by the maximum number of hardware
@@ -78,10 +72,9 @@
#error XP_NPARTITIONS exceeds MAXIMUM possible.
#endif
-#define XP_MIN_PARTID 1 /* inclusive */
+#define XP_MIN_PARTID 1 /* inclusive */
#define XP_MAX_PARTID (XP_NPARTITIONS - 1) /* inclusive */
-
/*
* XPC establishes channel connections between the local partition and any
* other partition that is currently up. Over these channels, kernel-level
@@ -107,7 +100,6 @@
#error XPC_NCHANNELS exceeds MAXIMUM possible.
#endif
-
/*
* The format of an XPC message is as follows:
*
@@ -145,12 +137,10 @@ struct xpc_msg {
u64 payload; /* user defined portion of message */
};
-
#define XPC_MSG_PAYLOAD_OFFSET (u64) (&((struct xpc_msg *)0)->payload)
#define XPC_MSG_SIZE(_payload_size) \
L1_CACHE_ALIGN(XPC_MSG_PAYLOAD_OFFSET + (_payload_size))
-
/*
* Define the return values and values passed to user's callout functions.
* (It is important to add new value codes at the end just preceding
@@ -237,7 +227,6 @@ enum xp_retval {
xpUnknownReason /* 56: unknown reason (must be last in list) */
};
-
/* the following are valid xp_set_amo() ops */
#define XP_AMO_OR 1 /* set variable to (variable | operand) */
#define XP_AMO_AND 2 /* set variable to (variable & operand) */
@@ -252,7 +241,6 @@ enum xp_retval {
#define XP_MEMPROT_ALLOW_CPU_MEM 2
#define XP_MEMPROT_ALLOW_ALL 3 /* Shub 1.1 only */
-
/*
* Define the callout function types used by XPC to update the user on
* connection activity and state changes (via the user function registered by
@@ -357,12 +345,11 @@ enum xp_retval {
* =====================+================================+=====================
*/
-typedef void (*xpc_channel_func)(enum xp_retval reason, short partid,
- int ch_number, void *data, void *key);
-
-typedef void (*xpc_notify_func)(enum xp_retval reason, short partid,
- int ch_number, void *key);
+typedef void (*xpc_channel_func) (enum xp_retval reason, short partid,
+ int ch_number, void *data, void *key);
+typedef void (*xpc_notify_func) (enum xp_retval reason, short partid,
+ int ch_number, void *key);
/*
* The following is a registration entry. There is a global array of these,
@@ -380,50 +367,45 @@ typedef void (*xpc_notify_func)(enum xp_
*/
struct xpc_registration {
struct mutex mutex;
- xpc_channel_func func; /* function to call */
- void *key; /* pointer to user's key */
- u16 nentries; /* #of msg entries in local msg queue */
- u16 msg_size; /* message queue's message size */
- u32 assigned_limit; /* limit on #of assigned kthreads */
- u32 idle_limit; /* limit on #of idle kthreads */
+ xpc_channel_func func; /* function to call */
+ void *key; /* pointer to user's key */
+ u16 nentries; /* #of msg entries in local msg queue */
+ u16 msg_size; /* message queue's message size */
+ u32 assigned_limit; /* limit on #of assigned kthreads */
+ u32 idle_limit; /* limit on #of idle kthreads */
} ____cacheline_aligned;
-
#define XPC_CHANNEL_REGISTERED(_c) (xpc_registrations[_c].func != NULL)
-
/* the following are valid xpc_allocate() flags */
-#define XPC_WAIT 0 /* wait flag */
-#define XPC_NOWAIT 1 /* no wait flag */
-
+#define XPC_WAIT 0 /* wait flag */
+#define XPC_NOWAIT 1 /* no wait flag */
struct xpc_interface {
- void (*connect)(int);
- void (*disconnect)(int);
- enum xp_retval (*allocate)(short, int, u32, void **);
- enum xp_retval (*send)(short, int, void *);
- enum xp_retval (*send_notify)(short, int, void *,
- xpc_notify_func, void *);
- void (*received)(short, int, void *);
- enum xp_retval (*partid_to_nasids)(short, void *);
+ void (*connect) (int);
+ void (*disconnect) (int);
+ enum xp_retval (*allocate) (short, int, u32, void **);
+ enum xp_retval (*send) (short, int, void *);
+ enum xp_retval (*send_notify) (short, int, void *,
+ xpc_notify_func, void *);
+ void (*received) (short, int, void *);
+ enum xp_retval (*partid_to_nasids) (short, void *);
};
-
extern struct xpc_interface xpc_interface;
extern void xpc_set_interface(void (*)(int),
- void (*)(int),
- enum xp_retval (*)(short, int, u32, void **),
- enum xp_retval (*)(short, int, void *),
- enum xp_retval (*)(short, int, void *, xpc_notify_func,
- void *),
- void (*)(short, int, void *),
- enum xp_retval (*)(short, void *));
+ void (*)(int),
+ enum xp_retval (*)(short, int, u32, void **),
+ enum xp_retval (*)(short, int, void *),
+ enum xp_retval (*)(short, int, void *,
+ xpc_notify_func, void *),
+ void (*)(short, int, void *),
+ enum xp_retval (*)(short, void *));
extern void xpc_clear_interface(void);
-
extern enum xp_retval xpc_connect(int, xpc_channel_func, void *, u16,
- u16, u32, u32);
+ u16, u32, u32);
extern void xpc_disconnect(int);
static inline enum xp_retval
@@ -440,7 +422,7 @@ xpc_send(short partid, int ch_number, vo
static inline enum xp_retval
xpc_send_notify(short partid, int ch_number, void *payload,
- xpc_notify_func func, void *key)
+ xpc_notify_func func, void *key)
{
return xpc_interface.send_notify(partid, ch_number, payload, func, key);
}
@@ -460,31 +442,36 @@ xpc_partid_to_nasids(short partid, void
extern short xp_partition_id;
extern u8 xp_region_size;
extern unsigned long xp_rtc_cycles_per_second;
-extern enum xp_retval (*xp_remote_memcpy)(void *, const void *, size_t);
-extern enum xp_retval (*xp_register_remote_amos)(u64, size_t);
-extern enum xp_retval (*xp_unregister_remote_amos)(u64, size_t);
+extern enum xp_retval (*xp_remote_memcpy) (void *, const void *, size_t);
+extern enum xp_retval (*xp_register_remote_amos) (u64, size_t);
+extern enum xp_retval (*xp_unregister_remote_amos) (u64, size_t);
extern int xp_sizeof_nasid_mask;
extern int xp_sizeof_amo;
-extern u64 *(*xp_alloc_amos)(int);
-extern void (*xp_free_amos)(u64 *, int);
-extern enum xp_retval (*xp_set_amo)(u64 *, int, u64, int);
-extern enum xp_retval (*xp_set_amo_with_interrupt)(u64 *, int, u64, int, int,
+extern u64 *(*xp_alloc_amos) (int);
+extern void (*xp_free_amos) (u64 *, int);
+extern enum xp_retval (*xp_set_amo) (u64 *, int, u64, int);
+extern enum xp_retval (*xp_set_amo_with_interrupt) (u64 *, int, u64, int, int,
int, int);
-extern enum xp_retval (*xp_get_amo)(u64 *, int, u64 *);
-extern enum xp_retval (*xp_get_partition_rsvd_page_pa)(u64, u64 *, u64 *,
+extern enum xp_retval (*xp_get_amo) (u64 *, int, u64 *);
+extern enum xp_retval (*xp_get_partition_rsvd_page_pa) (u64, u64 *, u64 *,
size_t *);
-extern enum xp_retval (*xp_change_memprotect)(u64, size_t, int, u64 *);
-extern void (*xp_change_memprotect_shub_wars_1_1)(int);
-extern void (*xp_allow_IPI_ops)(void);
-extern void (*xp_disallow_IPI_ops)(void);
+extern enum xp_retval (*xp_change_memprotect) (u64, size_t, int, u64 *);
+extern void (*xp_change_memprotect_shub_wars_1_1) (int);
+extern void (*xp_allow_IPI_ops) (void);
+extern void (*xp_disallow_IPI_ops) (void);
-extern int (*xp_cpu_to_nasid)(int);
-extern int (*xp_node_to_nasid)(int);
+extern int (*xp_cpu_to_nasid) (int);
+extern int (*xp_node_to_nasid) (int);
extern u64 xp_nofault_PIOR_target;
extern int xp_nofault_PIOR(void *);
extern int xp_error_PIOR(void);
+extern struct device *xp;
+extern enum xp_retval xp_init_sn2(void);
+extern enum xp_retval xp_init_uv(void);
+extern void xp_exit_sn2(void);
+extern void xp_exit_uv(void);
static inline int
xp_partid_mask_words(int npartitions)
@@ -498,6 +485,4 @@ xp_nasid_mask_words(void)
return DIV_ROUND_UP(xp_sizeof_nasid_mask, BYTES_PER_WORD);
}
-
#endif /* _DRIVERS_MISC_XP_XP_H */
-
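
The XPC_MSG_PAYLOAD_OFFSET/XPC_MSG_SIZE() macros touched above compute
a cache-line-aligned message size from the payload offset. A standalone
sketch, with the cache-line size and struct layout assumed purely for
illustration:

	#include <stdio.h>
	#include <stddef.h>

	#define L1_CACHE_BYTES 128	/* assumed 128-byte lines */
	#define L1_CACHE_ALIGN(x) \
		(((x) + (L1_CACHE_BYTES - 1)) & ~(size_t)(L1_CACHE_BYTES - 1))

	/* layout assumed; only the placement of 'payload' matters here */
	struct xpc_msg {
		unsigned char flags;
		unsigned char number;
		unsigned short reserved;
		unsigned int pad;
		unsigned long size;
		unsigned long reserved2;
		unsigned long payload;	/* user-defined portion */
	};

	int main(void)
	{
		size_t off = offsetof(struct xpc_msg, payload);

		/* a 100-byte payload rounds up to whole cache lines */
		printf("offset=%zu size=%zu\n", off,
		       (size_t)L1_CACHE_ALIGN(off + 100));
		return 0;
	}

Keeping every message a cache-line multiple matters because messages
are pulled across partitions in cache-line-granular block transfers.
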
Index: linux-2.6/drivers/misc/xp/xp_main.c
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xp_main.c 2008-03-21 13:30:52.030174502 -0500
+++ linux-2.6/drivers/misc/xp/xp_main.c 2008-03-24 19:31:37.154185684 -0500
@@ -6,7 +6,6 @@
* Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
-
/*
* Cross Partition (XP) base.
*
@@ -15,7 +14,6 @@
*
*/
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
@@ -28,7 +26,7 @@ struct device_driver xp_dbg_name = {
};
struct device xp_dbg_subname = {
- .bus_id = {0}, /* set to "" */
+ .bus_id = {0}, /* set to "" */
.driver = &xp_dbg_name
};
@@ -43,66 +41,68 @@ short xp_partition_id;
u8 xp_region_size;
unsigned long xp_rtc_cycles_per_second;
-enum xp_retval (*xp_remote_memcpy)(void *dst, const void *src, size_t len);
+enum xp_retval (*xp_remote_memcpy) (void *dst, const void *src, size_t len);
-enum xp_retval (*xp_register_remote_amos)(u64 paddr, size_t len);
-enum xp_retval (*xp_unregister_remote_amos)(u64 paddr, size_t len);
+enum xp_retval (*xp_register_remote_amos) (u64 paddr, size_t len);
+enum xp_retval (*xp_unregister_remote_amos) (u64 paddr, size_t len);
int xp_sizeof_nasid_mask;
int xp_sizeof_amo;
-u64 *(*xp_alloc_amos)(int n_amos);
-void (*xp_free_amos)(u64 *amos_page, int n_amos);
+u64 *(*xp_alloc_amos) (int n_amos);
+void (*xp_free_amos) (u64 *amos_page, int n_amos);
-enum xp_retval (*xp_set_amo)(u64 *amo_va, int op, u64 operand, int remote);
-enum xp_retval (*xp_set_amo_with_interrupt)(u64 *amo_va, int op, u64 operand,
+enum xp_retval (*xp_set_amo) (u64 *amo_va, int op, u64 operand, int remote);
+enum xp_retval (*xp_set_amo_with_interrupt) (u64 *amo_va, int op, u64 operand,
int remote, int nasid,
int phys_cpuid, int vector);
-enum xp_retval (*xp_get_amo)(u64 *amo_va, int op, u64 *amo_value_addr);
+enum xp_retval (*xp_get_amo) (u64 *amo_va, int op, u64 *amo_value_addr);
-enum xp_retval (*xp_get_partition_rsvd_page_pa)(u64 buf, u64 *cookie,
+enum xp_retval (*xp_get_partition_rsvd_page_pa) (u64 buf, u64 *cookie,
u64 *paddr, size_t *len);
-enum xp_retval (*xp_change_memprotect)(u64 paddr, size_t len, int request,
+enum xp_retval (*xp_change_memprotect) (u64 paddr, size_t len, int request,
u64 *nasid_array);
-void (*xp_change_memprotect_shub_wars_1_1)(int request);
-void (*xp_allow_IPI_ops)(void);
-void (*xp_disallow_IPI_ops)(void);
-
-int (*xp_cpu_to_nasid)(int cpuid);
-int (*xp_node_to_nasid)(int nid);
+void (*xp_change_memprotect_shub_wars_1_1) (int request);
+void (*xp_allow_IPI_ops) (void);
+void (*xp_disallow_IPI_ops) (void);
+int (*xp_cpu_to_nasid) (int cpuid);
+int (*xp_node_to_nasid) (int nid);
/*
* Initialize the XPC interface to indicate that XPC isn't loaded.
*/
-static enum xp_retval xpc_notloaded(void) { return xpNotLoaded; }
+static enum xp_retval
+xpc_notloaded(void)
+{
+ return xpNotLoaded;
+}
struct xpc_interface xpc_interface = {
- (void (*)(int)) xpc_notloaded,
- (void (*)(int)) xpc_notloaded,
- (enum xp_retval (*)(short, int, u32, void **)) xpc_notloaded,
- (enum xp_retval (*)(short, int, void *)) xpc_notloaded,
- (enum xp_retval (*)(short, int, void *, xpc_notify_func, void *))
- xpc_notloaded,
- (void (*)(short, int, void *)) xpc_notloaded,
- (enum xp_retval (*)(short, void *)) xpc_notloaded
+ (void (*)(int))xpc_notloaded,
+ (void (*)(int))xpc_notloaded,
+ (enum xp_retval(*)(short, int, u32, void **))xpc_notloaded,
+ (enum xp_retval(*)(short, int, void *))xpc_notloaded,
+ (enum xp_retval(*)(short, int, void *, xpc_notify_func, void *))
+ xpc_notloaded,
+ (void (*)(short, int, void *))xpc_notloaded,
+ (enum xp_retval(*)(short, void *))xpc_notloaded
};
-
/*
* XPC calls this when it (the XPC module) has been loaded.
*/
void
-xpc_set_interface(void (*connect)(int),
- void (*disconnect)(int),
- enum xp_retval (*allocate)(short, int, u32, void **),
- enum xp_retval (*send)(short, int, void *),
- enum xp_retval (*send_notify)(short, int, void *,
- xpc_notify_func, void *),
- void (*received)(short, int, void *),
- enum xp_retval (*partid_to_nasids)(short, void *))
+xpc_set_interface(void (*connect) (int),
+ void (*disconnect) (int),
+ enum xp_retval (*allocate) (short, int, u32, void **),
+ enum xp_retval (*send) (short, int, void *),
+ enum xp_retval (*send_notify) (short, int, void *,
+ xpc_notify_func, void *),
+ void (*received) (short, int, void *),
+ enum xp_retval (*partid_to_nasids) (short, void *))
{
xpc_interface.connect = connect;
xpc_interface.disconnect = disconnect;
@@ -113,35 +113,33 @@ xpc_set_interface(void (*connect)(int),
xpc_interface.partid_to_nasids = partid_to_nasids;
}
-
/*
* XPC calls this when it (the XPC module) is being unloaded.
*/
void
xpc_clear_interface(void)
{
- xpc_interface.connect = (void (*)(int)) xpc_notloaded;
- xpc_interface.disconnect = (void (*)(int)) xpc_notloaded;
- xpc_interface.allocate = (enum xp_retval (*)(short, int, u32,
- void **)) xpc_notloaded;
- xpc_interface.send = (enum xp_retval (*)(short, int, void *))
- xpc_notloaded;
- xpc_interface.send_notify = (enum xp_retval (*)(short, int, void *,
- xpc_notify_func, void *)) xpc_notloaded;
+ xpc_interface.connect = (void (*)(int))xpc_notloaded;
+ xpc_interface.disconnect = (void (*)(int))xpc_notloaded;
+ xpc_interface.allocate = (enum xp_retval(*)(short, int, u32,
+ void **))xpc_notloaded;
+ xpc_interface.send = (enum xp_retval(*)(short, int, void *))
+ xpc_notloaded;
+ xpc_interface.send_notify = (enum xp_retval(*)(short, int, void *,
+ xpc_notify_func,
+ void *))xpc_notloaded;
xpc_interface.received = (void (*)(short, int, void *))
- xpc_notloaded;
- xpc_interface.partid_to_nasids = (enum xp_retval (*)(short, void *))
- xpc_notloaded;
+ xpc_notloaded;
+ xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *))
+ xpc_notloaded;
}
-
/*
* xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
* users of XPC.
*/
struct xpc_registration xpc_registrations[XPC_NCHANNELS];
-
/*
* Register for automatic establishment of a channel connection whenever
* a partition comes up.
@@ -168,11 +166,10 @@ struct xpc_registration xpc_registration
*/
enum xp_retval
xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
- u16 nentries, u32 assigned_limit, u32 idle_limit)
+ u16 nentries, u32 assigned_limit, u32 idle_limit)
{
struct xpc_registration *registration;
-
DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
DBUG_ON(payload_size == 0 || nentries == 0);
DBUG_ON(func == NULL);
@@ -205,7 +202,6 @@ xpc_connect(int ch_number, xpc_channel_f
return xpSuccess;
}
-
/*
* Remove the registration for automatic connection of the specified channel
* when a partition comes up.
@@ -224,7 +220,6 @@ xpc_disconnect(int ch_number)
{
struct xpc_registration *registration;
-
DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
registration = &xpc_registrations[ch_number];
@@ -284,6 +279,7 @@ xp_init(void)
return 0;
}
+
module_init(xp_init);
extern void xp_exit_sn2(void);
@@ -297,8 +293,8 @@ xp_exit(void)
else if (is_uv())
xp_exit_uv();
}
-module_exit(xp_exit);
+module_exit(xp_exit);
MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition (XP) base");
@@ -330,4 +326,3 @@ EXPORT_SYMBOL(xpc_clear_interface);
EXPORT_SYMBOL(xpc_set_interface);
EXPORT_SYMBOL(xpc_connect);
EXPORT_SYMBOL(xpc_disconnect);
-
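
The xpc_interface reindenting above is all about the "stub until
loaded" pattern: every operation points at xpc_notloaded() until XPC
registers real handlers via xpc_set_interface(). A minimal standalone
sketch of the pattern, with invented names:

	#include <stdio.h>

	enum retval { rvSuccess, rvNotLoaded };

	/* a single stub stands in for every op until the module loads */
	static enum retval not_loaded(void)
	{
		return rvNotLoaded;
	}

	struct iface {
		enum retval (*send)(void);
	};

	static struct iface iface = { .send = not_loaded };

	static enum retval real_send(void)
	{
		return rvSuccess;
	}

	int main(void)
	{
		printf("before: %d\n", iface.send());	/* 1 (rvNotLoaded) */
		iface.send = real_send;		/* like xpc_set_interface() */
		printf("after: %d\n", iface.send());	/* 0 (rvSuccess) */
		return 0;
	}

Callers such as xpnet never have to test whether XPC is loaded; they
simply get xpNotLoaded back. The real driver casts one stub across
several signatures, which this sketch avoids by keeping types matched.
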
Index: linux-2.6/drivers/misc/xp/xp_sn2.c
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xp_sn2.c 2008-03-21 13:30:52.030174502 -0500
+++ linux-2.6/drivers/misc/xp/xp_sn2.c 2008-03-24 19:31:37.154185684 -0500
@@ -67,7 +67,7 @@ xp_unregister_nofault_code_sn2(void)
/* unregister the PIO read nofault code region */
(void)sn_register_nofault_code(func_addr, err_func_addr,
- err_func_addr, 1, 0);
+ err_func_addr, 1, 0);
}
/*
@@ -155,15 +155,14 @@ xp_free_amos_sn2(u64 *amos_page, int n_a
{
int n_pages = DIV_ROUND_UP(n_amos * xp_sizeof_amo, PAGE_SIZE);
- uncached_free_page(__IA64_UNCACHED_OFFSET | TO_PHYS((u64) amos_page),
+ uncached_free_page(__IA64_UNCACHED_OFFSET | TO_PHYS((u64)amos_page),
n_pages);
}
-
static enum xp_retval
xp_set_amo_sn2(u64 *amo_va, int op, u64 operand, int remote)
{
- unsigned long irq_flags = irq_flags; /* eliminate compiler warning */
+ unsigned long irq_flags = irq_flags; /* eliminate compiler warning */
int ret = xpSuccess;
/* >>> eliminate remote arg and xp_nofault_PIOR() call */
@@ -188,7 +187,8 @@ xp_set_amo_sn2(u64 *amo_va, int op, u64
* it until the heartbeat times out.
*/
if (xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(amo_va),
- xp_nofault_PIOR_target)) != 0)
+ xp_nofault_PIOR_target))
+ != 0)
ret = xpPioReadError;
local_irq_restore(irq_flags);
@@ -201,7 +201,7 @@ static enum xp_retval
xp_set_amo_with_interrupt_sn2(u64 *amo_va, int op, u64 operand, int remote,
int nasid, int phys_cpuid, int vector)
{
- unsigned long irq_flags = irq_flags; /* eliminate compiler warning */
+ unsigned long irq_flags = irq_flags; /* eliminate compiler warning */
int ret = xpSuccess;
if (op == XP_AMO_AND)
@@ -226,7 +226,8 @@ xp_set_amo_with_interrupt_sn2(u64 *amo_v
* it until the heartbeat times out.
*/
if (xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(amo_va),
- xp_nofault_PIOR_target)) != 0)
+ xp_nofault_PIOR_target))
+ != 0)
ret = xpPioReadError;
local_irq_restore(irq_flags);
@@ -321,22 +322,28 @@ xp_change_memprotect_shub_wars_1_1_sn2(i
nasid = cnodeid_to_nasid(node);
/* save current protection values */
xpc_prot_vec[node] =
- (u64)HUB_L((u64 *)GLOBAL_MMR_ADDR(nasid,
- SH1_MD_DQLP_MMR_DIR_PRIVEC0));
+ (u64)HUB_L((u64 *)GLOBAL_MMR_ADDR(nasid,
+ SH1_MD_DQLP_MMR_DIR_PRIVEC0));
/* open up everything */
HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
- SH1_MD_DQLP_MMR_DIR_PRIVEC0), -1UL);
- HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
- SH1_MD_DQRP_MMR_DIR_PRIVEC0), -1UL);
+ SH1_MD_DQLP_MMR_DIR_PRIVEC0),
+ -1UL);
+ HUB_S((u64 *)
+ GLOBAL_MMR_ADDR(nasid,
+ SH1_MD_DQRP_MMR_DIR_PRIVEC0),
+ -1UL);
}
} else if (request == XP_MEMPROT_DISALLOW_ALL) {
for_each_online_node(node) {
nasid = cnodeid_to_nasid(node);
/* restore original protection values */
HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
- SH1_MD_DQLP_MMR_DIR_PRIVEC0), xpc_prot_vec[node]);
- HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
- SH1_MD_DQRP_MMR_DIR_PRIVEC0), xpc_prot_vec[node]);
+ SH1_MD_DQLP_MMR_DIR_PRIVEC0),
+ xpc_prot_vec[node]);
+ HUB_S((u64 *)
+ GLOBAL_MMR_ADDR(nasid,
+ SH1_MD_DQRP_MMR_DIR_PRIVEC0),
+ xpc_prot_vec[node]);
}
} else
BUG();
@@ -361,13 +368,13 @@ xp_allow_IPI_ops_sn2(void)
/* >>> The following should get moved into SAL. */
if (is_shub2()) {
xpc_sh2_IPI_access0 =
- (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
+ (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
xpc_sh2_IPI_access1 =
- (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
+ (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
xpc_sh2_IPI_access2 =
- (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
+ (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
xpc_sh2_IPI_access3 =
- (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
+ (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
for_each_online_node(node) {
nasid = cnodeid_to_nasid(node);
@@ -382,7 +389,7 @@ xp_allow_IPI_ops_sn2(void)
}
} else {
xpc_sh1_IPI_access =
- (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
+ (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
for_each_online_node(node) {
nasid = cnodeid_to_nasid(node);
@@ -455,7 +462,7 @@ xp_init_sn2(void)
* reflect its existence.
*/
BUG_ON(offsetof(AMO_t, variable) != 0);
- BUG_ON(sizeof(((AMO_t *)NULL)->variable) != sizeof(u64));
+ BUG_ON(sizeof(((AMO_t *) NULL)->variable) != sizeof(u64));
xp_sizeof_amo = sizeof(AMO_t);
xp_alloc_amos = xp_alloc_amos_sn2;
xp_free_amos = xp_free_amos_sn2;
@@ -467,7 +474,7 @@ xp_init_sn2(void)
xp_change_memprotect = xp_change_memprotect_sn2;
xp_change_memprotect_shub_wars_1_1 =
- xp_change_memprotect_shub_wars_1_1_sn2;
+ xp_change_memprotect_shub_wars_1_1_sn2;
xp_allow_IPI_ops = xp_allow_IPI_ops_sn2;
xp_disallow_IPI_ops = xp_disallow_IPI_ops_sn2;
@@ -484,4 +491,3 @@ xp_exit_sn2(void)
xp_unregister_nofault_code_sn2();
}
-
Index: linux-2.6/drivers/misc/xp/xp_uv.c
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xp_uv.c 2008-03-21 13:30:52.030174502 -0500
+++ linux-2.6/drivers/misc/xp/xp_uv.c 2008-03-24 19:31:37.154185684 -0500
@@ -154,8 +154,8 @@ xp_init_uv(void)
{
BUG_ON(!is_uv());
- xp_partition_id = 0; /* >>> not correct value */
- xp_region_size = 0; /* >>> not correct value */
+ xp_partition_id = 0; /* >>> not correct value */
+ xp_region_size = 0; /* >>> not correct value */
xp_rtc_cycles_per_second = 0; /* >>> not correct value */
xp_remote_memcpy = xp_remote_memcpy_uv;
@@ -174,7 +174,7 @@ xp_init_uv(void)
xp_change_memprotect = xp_change_memprotect_uv;
xp_change_memprotect_shub_wars_1_1 =
- xp_change_memprotect_shub_wars_1_1_uv;
+ xp_change_memprotect_shub_wars_1_1_uv;
xp_allow_IPI_ops = xp_allow_IPI_ops_uv;
xp_disallow_IPI_ops = xp_disallow_IPI_ops_uv;
@@ -191,4 +191,3 @@ xp_exit_uv(void)
xp_unregister_nofault_code_uv();
}
-
Index: linux-2.6/drivers/misc/xp/xpc.h
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xpc.h 2008-03-21 13:30:52.030174502 -0500
+++ linux-2.6/drivers/misc/xp/xpc.h 2008-03-24 19:31:37.154185684 -0500
@@ -6,7 +6,6 @@
* Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
-
/*
* Cross Partition Communication (XPC) structures and macros.
*/
@@ -14,7 +13,6 @@
#ifndef _DRIVERS_MISC_XP_XPC_H
#define _DRIVERS_MISC_XP_XPC_H
-
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/device.h>
@@ -32,7 +30,6 @@
#endif
#include "xp.h"
-
/*
* XPC Version numbers consist of a major and minor number. XPC can always
* talk to versions with same major #, and never talk to versions with a
@@ -42,7 +39,6 @@
#define XPC_VERSION_MAJOR(_v) ((_v) >> 4)
#define XPC_VERSION_MINOR(_v) ((_v) & 0xf)
-
/*
* The next macros define word or bit representations for a given
* C-brick nasid in either the SAL-provided bit array representing
@@ -66,7 +62,6 @@
/* define the process name of the discovery thread */
#define XPC_DISCOVERY_THREAD_NAME "xpc_discovery"
-
/*
* the reserved page
*
@@ -120,7 +115,7 @@ struct xpc_rsvd_page {
u64 SAL_nasids_size; /* SAL: size of each nasid mask in bytes */
};
-#define XPC_RP_VERSION _XPC_VERSION(2,0) /* version 2.0 of the reserved page */
+#define XPC_RP_VERSION _XPC_VERSION(2,0) /* version 2.0 of the reserved page */
#define XPC_SUPPORTS_RP_STAMP(_version) \
(_version >= _XPC_VERSION(1,1))
@@ -137,14 +132,12 @@ xpc_compare_stamps(struct timespec *stam
{
int ret;
-
if ((ret = stamp1->tv_sec - stamp2->tv_sec) == 0) {
ret = stamp1->tv_nsec - stamp2->tv_nsec;
}
return ret;
}
-
/*
* Define the structures by which XPC variables can be exported to other
* partitions. (There are two: struct xpc_vars and struct xpc_vars_part)
@@ -173,12 +166,11 @@ struct xpc_vars {
u64 heartbeating_to_mask[BITS_TO_LONGS(XP_MAX_NPARTITIONS)];
};
-#define XPC_V_VERSION _XPC_VERSION(4,0) /* version 4.0 of the cross vars */
+#define XPC_V_VERSION _XPC_VERSION(4,0) /* version 4.0 of the cross vars */
#define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
(_version >= _XPC_VERSION(3,1))
-
static inline int
xpc_hb_allowed(short partid, struct xpc_vars *vars)
{
@@ -214,7 +206,6 @@ xpc_disallow_all_hbs(struct xpc_vars *va
vars->heartbeating_to_mask[i] = 0;
}
-
/*
* The AMOs page(s) consists of a number of AMO variables which are divided into
* four groups. The first group consists of one AMO per partition, each of which
@@ -254,7 +245,7 @@ static inline int
xpc_disengage_request_amos(int npartitions)
{
return xpc_engaged_partitions_amos(npartitions) +
- xp_partid_mask_words(npartitions);
+ xp_partid_mask_words(npartitions);
}
/* get total number of AMOs */
@@ -262,10 +253,9 @@ static inline int
xpc_number_of_amos(int npartitions)
{
return xpc_disengage_request_amos(npartitions) +
- xp_partid_mask_words(npartitions);
+ xp_partid_mask_words(npartitions);
}
-
/*
* The following structure describes the per partition specific variables.
*
@@ -300,9 +290,8 @@ struct xpc_vars_part {
* MAGIC2 indicates that this partition has pulled the remote partition's
* per-partition variables that pertain to this partition.
*/
-#define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */
-#define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */
-
+#define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */
+#define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */
/* the reserved page sizes and offsets */
@@ -316,7 +305,6 @@ struct xpc_vars_part {
xp_nasid_mask_words())
#define XPC_RP_VARS_PART(_rp) (struct xpc_vars_part *)((u8 *)XPC_RP_VARS(_rp) + XPC_RP_VARS_SIZE)
-
/*
* Functions registered by add_timer() or called by kernel_thread() only
* allow for a single 64-bit argument. The following macros can be used to
@@ -330,8 +318,6 @@ struct xpc_vars_part {
#define XPC_UNPACK_ARG1(_args) (((u64) _args) & 0xffffffff)
#define XPC_UNPACK_ARG2(_args) ((((u64) _args) >> 32) & 0xffffffff)
-
-
/*
* Define a Get/Put value pair (pointers) used with a message queue.
*/
@@ -343,8 +329,6 @@ struct xpc_gp {
#define XPC_GP_SIZE \
L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS)
-
-
/*
* Define a structure that contains arguments associated with opening and
* closing a channel.
@@ -360,20 +344,15 @@ struct xpc_openclose_args {
#define XPC_OPENCLOSE_ARGS_SIZE \
L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS)
-
-
/* struct xpc_msg flags */
#define XPC_M_DONE 0x01 /* msg has been received/consumed */
#define XPC_M_READY 0x02 /* msg is ready to be sent */
#define XPC_M_INTERRUPT 0x04 /* send interrupt when msg consumed */
-
#define XPC_MSG_ADDRESS(_payload) \
((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET))
-
-
/*
* Defines notify entry.
*
@@ -381,19 +360,17 @@ struct xpc_openclose_args {
* and consumed by the intended recipient.
*/
struct xpc_notify {
- volatile u8 type; /* type of notification */
+ volatile u8 type; /* type of notification */
/* the following two fields are only used if type == XPC_N_CALL */
- xpc_notify_func func; /* user's notify function */
- void *key; /* pointer to user's key */
+ xpc_notify_func func; /* user's notify function */
+ void *key; /* pointer to user's key */
};
/* struct xpc_notify type of notification */
#define XPC_N_CALL 0x01 /* notify function provided by user */
-
-
/*
* Define the structure that manages all the stuff required by a channel. In
* particular, they are used to manage the messages sent across the channel.
@@ -473,48 +450,48 @@ struct xpc_notify {
* messages.
*/
struct xpc_channel {
- short partid; /* ID of remote partition connected */
- spinlock_t lock; /* lock for updating this structure */
- u32 flags; /* general flags */
-
- enum xp_retval reason; /* reason why channel is disconnect'g */
- int reason_line; /* line# disconnect initiated from */
-
- u16 number; /* channel # */
-
- u16 msg_size; /* sizeof each msg entry */
- u16 local_nentries; /* #of msg entries in local msg queue */
- u16 remote_nentries; /* #of msg entries in remote msg queue*/
+ short partid; /* ID of remote partition connected */
+ spinlock_t lock; /* lock for updating this structure */
+ u32 flags; /* general flags */
+
+ enum xp_retval reason; /* reason why channel is disconnect'g */
+ int reason_line; /* line# disconnect initiated from */
+
+ u16 number; /* channel # */
+
+ u16 msg_size; /* sizeof each msg entry */
+ u16 local_nentries; /* #of msg entries in local msg queue */
+ u16 remote_nentries; /* #of msg entries in remote msg queue */
void *local_msgqueue_base; /* base address of kmalloc'd space */
struct xpc_msg *local_msgqueue; /* local message queue */
void *remote_msgqueue_base; /* base address of kmalloc'd space */
- struct xpc_msg *remote_msgqueue;/* cached copy of remote partition's */
- /* local message queue */
- u64 remote_msgqueue_pa; /* phys addr of remote partition's */
- /* local message queue */
+ struct xpc_msg *remote_msgqueue; /* cached copy of remote partition's */
+ /* local message queue */
+ u64 remote_msgqueue_pa; /* phys addr of remote partition's */
+ /* local message queue */
- atomic_t references; /* #of external references to queues */
+ atomic_t references; /* #of external references to queues */
- atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */
- wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */
+ atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */
+ wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */
- u8 delayed_IPI_flags; /* IPI flags received, but delayed */
- /* action until channel disconnected */
+ u8 delayed_IPI_flags; /* IPI flags received, but delayed */
+ /* action until channel disconnected */
/* queue of msg senders who want to be notified when msg received */
- atomic_t n_to_notify; /* #of msg senders to notify */
- struct xpc_notify *notify_queue;/* notify queue for messages sent */
+ atomic_t n_to_notify; /* #of msg senders to notify */
+ struct xpc_notify *notify_queue; /* notify queue for messages sent */
- xpc_channel_func func; /* user's channel function */
- void *key; /* pointer to user's key */
+ xpc_channel_func func; /* user's channel function */
+ void *key; /* pointer to user's key */
struct mutex msg_to_pull_mutex; /* next msg to pull serialization */
- struct completion wdisconnect_wait; /* wait for channel disconnect */
+ struct completion wdisconnect_wait; /* wait for channel disconnect */
- struct xpc_openclose_args *local_openclose_args; /* args passed on */
- /* opening or closing of channel */
+ struct xpc_openclose_args *local_openclose_args; /* args passed on */
+ /* opening or closing of channel */
/* various flavors of local and remote Get/Put values */
@@ -522,13 +499,13 @@ struct xpc_channel {
struct xpc_gp remote_GP; /* remote Get/Put values */
struct xpc_gp w_local_GP; /* working local Get/Put values */
struct xpc_gp w_remote_GP; /* working remote Get/Put values */
- s64 next_msg_to_pull; /* Put value of next msg to pull */
+ s64 next_msg_to_pull; /* Put value of next msg to pull */
/* kthread management related fields */
atomic_t kthreads_assigned; /* #of kthreads assigned to channel */
- u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */
- atomic_t kthreads_idle; /* #of kthreads idle waiting for work */
+ u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */
+ atomic_t kthreads_idle; /* #of kthreads idle waiting for work */
u32 kthreads_idle_limit; /* limit on #of kthreads idle */
atomic_t kthreads_active; /* #of kthreads actively working */
@@ -536,37 +513,34 @@ struct xpc_channel {
} ____cacheline_aligned;
-
/* struct xpc_channel flags */
-#define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */
+#define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */
-#define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */
-#define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */
-#define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */
-#define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */
+#define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */
+#define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */
+#define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */
+#define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */
-#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */
-#define XPC_C_CONNECTEDCALLOUT 0x00000040 /* connected callout initiated */
+#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */
+#define XPC_C_CONNECTEDCALLOUT 0x00000040 /* connected callout initiated */
#define XPC_C_CONNECTEDCALLOUT_MADE \
- 0x00000080 /* connected callout completed */
-#define XPC_C_CONNECTED 0x00000100 /* local channel is connected */
-#define XPC_C_CONNECTING 0x00000200 /* channel is being connected */
-
-#define XPC_C_RCLOSEREPLY 0x00000400 /* remote close channel reply */
-#define XPC_C_CLOSEREPLY 0x00000800 /* local close channel reply */
-#define XPC_C_RCLOSEREQUEST 0x00001000 /* remote close channel request */
-#define XPC_C_CLOSEREQUEST 0x00002000 /* local close channel request */
+ 0x00000080 /* connected callout completed */
+#define XPC_C_CONNECTED 0x00000100 /* local channel is connected */
+#define XPC_C_CONNECTING 0x00000200 /* channel is being connected */
+
+#define XPC_C_RCLOSEREPLY 0x00000400 /* remote close channel reply */
+#define XPC_C_CLOSEREPLY 0x00000800 /* local close channel reply */
+#define XPC_C_RCLOSEREQUEST 0x00001000 /* remote close channel request */
+#define XPC_C_CLOSEREQUEST 0x00002000 /* local close channel request */
-#define XPC_C_DISCONNECTED 0x00004000 /* channel is disconnected */
-#define XPC_C_DISCONNECTING 0x00008000 /* channel is being disconnected */
+#define XPC_C_DISCONNECTED 0x00004000 /* channel is disconnected */
+#define XPC_C_DISCONNECTING 0x00008000 /* channel is being disconnected */
#define XPC_C_DISCONNECTINGCALLOUT \
- 0x00010000 /* disconnecting callout initiated */
+ 0x00010000 /* disconnecting callout initiated */
#define XPC_C_DISCONNECTINGCALLOUT_MADE \
- 0x00020000 /* disconnecting callout completed */
-#define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */
-
-
+ 0x00020000 /* disconnecting callout completed */
+#define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */
/*
* Manages channels on a partition basis. There is one of these structures
@@ -577,36 +551,34 @@ struct xpc_partition {
/* XPC HB infrastructure */
- u8 remote_rp_version; /* version# of partition's rsvd pg */
+ u8 remote_rp_version; /* version# of partition's rsvd pg */
short remote_npartitions; /* value of XPC_NPARTITIONS */
- u32 flags; /* general flags */
- struct timespec remote_rp_stamp;/* time when rsvd pg was initialized */
- u64 remote_rp_pa; /* phys addr of partition's rsvd pg */
- u64 remote_vars_pa; /* phys addr of partition's vars */
+ u32 flags; /* general flags */
+ struct timespec remote_rp_stamp; /* time when rsvd pg was initialized */
+ u64 remote_rp_pa; /* phys addr of partition's rsvd pg */
+ u64 remote_vars_pa; /* phys addr of partition's vars */
u64 remote_vars_part_pa; /* phys addr of partition's vars part */
- u64 last_heartbeat; /* HB at last read */
+ u64 last_heartbeat; /* HB at last read */
u64 remote_amos_page_pa; /* phys addr of partition's amos page */
- int remote_act_nasid; /* active part's act/deact nasid */
+ int remote_act_nasid; /* active part's act/deact nasid */
int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */
- u32 act_IRQ_rcvd; /* IRQs since activation */
- spinlock_t lock; /* protect updating of act_state and */
- /* the general flags */
- u8 act_state; /* from XPC HB viewpoint */
- u8 remote_vars_version; /* version# of partition's vars */
- enum xp_retval reason; /* reason partition is deactivating */
- int reason_line; /* line# deactivation initiated from */
- int reactivate_nasid; /* nasid in partition to reactivate */
+ u32 act_IRQ_rcvd; /* IRQs since activation */
+ spinlock_t lock; /* protect updating of act_state and */
+ /* the general flags */
+ u8 act_state; /* from XPC HB viewpoint */
+ u8 remote_vars_version; /* version# of partition's vars */
+ enum xp_retval reason; /* reason partition is deactivating */
+ int reason_line; /* line# deactivation initiated from */
+ int reactivate_nasid; /* nasid in partition to reactivate */
- unsigned long disengage_request_timeout; /* timeout in jiffies */
+ unsigned long disengage_request_timeout; /* timeout in jiffies */
struct timer_list disengage_request_timer;
-
/* XPC infrastructure referencing and teardown control */
volatile u8 setup_state; /* infrastructure setup state */
wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */
- atomic_t references; /* #of references to infrastructure */
-
+ atomic_t references; /* #of references to infrastructure */
/*
* NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN
@@ -615,55 +587,51 @@ struct xpc_partition {
* 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.)
*/
-
- u8 nchannels; /* #of defined channels supported */
- atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
- atomic_t nchannels_engaged;/* #of channels engaged with remote part */
- struct xpc_channel *channels;/* array of channel structures */
-
- void *local_GPs_base; /* base address of kmalloc'd space */
- struct xpc_gp *local_GPs; /* local Get/Put values */
- void *remote_GPs_base; /* base address of kmalloc'd space */
- struct xpc_gp *remote_GPs;/* copy of remote partition's local Get/Put */
- /* values */
- u64 remote_GPs_pa; /* phys address of remote partition's local */
- /* Get/Put values */
-
+ u8 nchannels; /* #of defined channels supported */
+ atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
+ atomic_t nchannels_engaged; /* #of channels engaged with remote part */
+ struct xpc_channel *channels; /* array of channel structures */
+
+ void *local_GPs_base; /* base address of kmalloc'd space */
+ struct xpc_gp *local_GPs; /* local Get/Put values */
+ void *remote_GPs_base; /* base address of kmalloc'd space */
+ struct xpc_gp *remote_GPs; /* copy of remote partition's local Get/Put */
+ /* values */
+ u64 remote_GPs_pa; /* phys address of remote partition's local */
+ /* Get/Put values */
/* fields used to pass args when opening or closing a channel */
- void *local_openclose_args_base; /* base address of kmalloc'd space */
- struct xpc_openclose_args *local_openclose_args; /* local's args */
- void *remote_openclose_args_base; /* base address of kmalloc'd space */
- struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
- /* args */
- u64 remote_openclose_args_pa; /* phys addr of remote's args */
-
+ void *local_openclose_args_base; /* base address of kmalloc'd space */
+ struct xpc_openclose_args *local_openclose_args; /* local's args */
+ void *remote_openclose_args_base; /* base address of kmalloc'd space */
+ struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
+ /* args */
+ u64 remote_openclose_args_pa; /* phys addr of remote's args */
/* IPI sending, receiving and handling related fields */
- int remote_IPI_nasid; /* nasid of where to send IPIs */
- int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */
- u64 *remote_IPI_amo_va; /* address of remote IPI AMO variable */
-
- u64 *local_IPI_amo_va; /* address of IPI AMO variable */
- u64 local_IPI_amo; /* IPI amo flags yet to be handled */
- char IPI_owner[8]; /* IPI owner's name */
- struct timer_list dropped_IPI_timer; /* dropped IPI timer */
-
- spinlock_t IPI_lock; /* IPI handler lock */
+ int remote_IPI_nasid; /* nasid of where to send IPIs */
+ int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */
+ u64 *remote_IPI_amo_va; /* address of remote IPI AMO variable */
+
+ u64 *local_IPI_amo_va; /* address of IPI AMO variable */
+ u64 local_IPI_amo; /* IPI amo flags yet to be handled */
+ char IPI_owner[8]; /* IPI owner's name */
+ struct timer_list dropped_IPI_timer; /* dropped IPI timer */
+ spinlock_t IPI_lock; /* IPI handler lock */
/* channel manager related fields */
atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */
- wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */
+ wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */
} ____cacheline_aligned;
/* struct xpc_partition flags */
-#define XPC_P_RAMOSREGISTERED 0x00000001 /* remote AMOs were registered */
+#define XPC_P_RAMOSREGISTERED 0x00000001 /* remote AMOs were registered */
/* struct xpc_partition act_state values (for XPC HB) */
@@ -673,11 +641,9 @@ struct xpc_partition {
#define XPC_P_AS_ACTIVE 0x03 /* xpc_partition_up() was called */
#define XPC_P_AS_DEACTIVATING 0x04 /* partition deactivation initiated */
-
#define XPC_DEACTIVATE_PARTITION(_p, _reason) \
xpc_deactivate_partition(__LINE__, (_p), (_reason))
-
/* struct xpc_partition setup_state values */
#define XPC_P_SS_UNSET 0x00 /* infrastructure was never setup */
@@ -685,8 +651,6 @@ struct xpc_partition {
#define XPC_P_SS_WTEARDOWN 0x02 /* waiting to teardown infrastructure */
#define XPC_P_SS_TORNDOWN 0x03 /* infrastructure is torn down */
-
-
/*
* struct xpc_partition IPI_timer #of seconds to wait before checking for
* dropped IPIs. These occur whenever an IPI amo write doesn't complete until
@@ -694,22 +658,17 @@ struct xpc_partition {
*/
#define XPC_DROPPED_IPI_WAIT_INTERVAL (0.25 * HZ)
-
/* number of seconds to wait for other partitions to disengage */
#define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT 90
/* interval in seconds to print 'waiting disengagement' messages */
#define XPC_DISENGAGE_PRINTMSG_INTERVAL 10
-
#define XPC_PARTID(_p) ((short) ((_p) - &xpc_partitions[0]))
-
-
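XPC_PARTID() is plain pointer subtraction: given a pointer to an element of the xpc_partitions[] array, subtracting the array base yields the index, which is the partition ID. A standalone illustration with a dummy element type:

	#include <stdio.h>

	struct part { int unused; };
	static struct part parts[8];	/* stand-in for xpc_partitions[] */

	#define PARTID(_p) ((short)((_p) - &parts[0]))	/* as in XPC_PARTID() */

	int main(void)
	{
		struct part *p = &parts[5];

		/* element-pointer minus array base gives the element index */
		printf("partid=%d\n", PARTID(p));	/* prints 5 */
		return 0;
	}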
/* found in xp_main.c */
extern struct xpc_registration xpc_registrations[];
-
/* found in xpc_main.c */
extern struct device *xpc_part;
extern struct device *xpc_chan;
@@ -722,7 +681,6 @@ extern void xpc_activate_kthreads(struct
extern void xpc_create_kthreads(struct xpc_channel *, int, int);
extern void xpc_disconnect_wait(int);
-
/* found in xpc_partition.c */
extern int xpc_exiting;
extern struct xpc_vars *xpc_vars;
@@ -737,7 +695,7 @@ extern int xpc_identify_act_IRQ_sender(v
extern int xpc_partition_disengaged(struct xpc_partition *);
extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *);
extern void xpc_deactivate_partition(const int, struct xpc_partition *,
- enum xp_retval);
+ enum xp_retval);
extern void xpc_mark_partition_inactive(struct xpc_partition *);
extern enum xp_retval xpc_register_remote_amos(struct xpc_partition *);
extern void xpc_unregister_remote_amos(struct xpc_partition *);
@@ -745,14 +703,13 @@ extern void xpc_discovery(void);
extern void xpc_check_remote_hb(void);
extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *);
-
/* found in xpc_channel.c */
extern void xpc_initiate_connect(int);
extern void xpc_initiate_disconnect(int);
extern enum xp_retval xpc_initiate_allocate(short, int, u32, void **);
extern enum xp_retval xpc_initiate_send(short, int, void *);
extern enum xp_retval xpc_initiate_send_notify(short, int, void *,
- xpc_notify_func, void *);
+ xpc_notify_func, void *);
extern void xpc_initiate_received(short, int, void *);
extern enum xp_retval xpc_setup_infrastructure(struct xpc_partition *);
extern enum xp_retval xpc_pull_remote_vars_part(struct xpc_partition *);
@@ -760,13 +717,11 @@ extern void xpc_process_channel_activity
extern void xpc_connected_callout(struct xpc_channel *);
extern void xpc_deliver_msg(struct xpc_channel *);
extern void xpc_disconnect_channel(const int, struct xpc_channel *,
- enum xp_retval, unsigned long *);
+ enum xp_retval, unsigned long *);
extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval);
extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval);
extern void xpc_teardown_infrastructure(struct xpc_partition *);
-
-
static inline void
xpc_wakeup_channel_mgr(struct xpc_partition *part)
{
@@ -775,8 +730,6 @@ xpc_wakeup_channel_mgr(struct xpc_partit
}
}
-
-
/*
* These next two inlines are used to keep us from tearing down a channel's
* msg queues while a thread may be referencing them.
@@ -798,12 +751,9 @@ xpc_msgqueue_deref(struct xpc_channel *c
}
}
-
-
#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \
xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs)
-
/*
* These two inlines are used to keep us from tearing down a partition's
* setup infrastructure while a thread may be referencing it.
@@ -813,7 +763,6 @@ xpc_part_deref(struct xpc_partition *par
{
s32 refs = atomic_dec_return(&part->references);
-
DBUG_ON(refs < 0);
if (refs == 0 && part->setup_state == XPC_P_SS_WTEARDOWN) {
wake_up(&part->teardown_wq);
@@ -825,7 +774,6 @@ xpc_part_ref(struct xpc_partition *part)
{
int setup;
-
atomic_inc(&part->references);
setup = (part->setup_state == XPC_P_SS_SETUP);
if (!setup) {
@@ -834,8 +782,6 @@ xpc_part_ref(struct xpc_partition *part)
return setup;
}
-
-
/*
* The following macro is to be used for the setting of the reason and
* reason_line fields in both the struct xpc_channel and struct xpc_partition
@@ -847,8 +793,6 @@ xpc_part_ref(struct xpc_partition *part)
(_p)->reason_line = _line; \
}
-
-
/*
* This next set of inlines are used to keep track of when a partition is
* potentially engaged in accessing memory belonging to another partition.
@@ -858,8 +802,9 @@ static inline void
xpc_mark_partition_engaged(struct xpc_partition *part)
{
u64 *amo_va = __va(part->remote_amos_page_pa +
- (xpc_engaged_partitions_amos(part->remote_npartitions) +
- BIT_WORD(xp_partition_id)) * xp_sizeof_amo);
+ (xpc_engaged_partitions_amos
+ (part->remote_npartitions) +
+ BIT_WORD(xp_partition_id)) * xp_sizeof_amo);
/* set bit corresponding to our partid in remote partition's AMO */
(void)xp_set_amo(amo_va, XP_AMO_OR, BIT_MASK(xp_partition_id), 1);
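Each AMO variable holds 64 bits, one per partition, so a partition id is split into a word index (which AMO in the array, via BIT_WORD()) and a bit position within that word (via BIT_MASK()). A userspace sketch of just the indexing arithmetic, with ordinary memory standing in for AMOs and the xp_sizeof_amo stride omitted:

	#include <stdio.h>
	#include <stdint.h>

	#define BITS_PER_WORD	64
	#define BIT_WORD(nr)	((nr) / BITS_PER_WORD)
	#define BIT_MASK(nr)	(1ULL << ((nr) % BITS_PER_WORD))

	int main(void)
	{
		uint64_t amos[4] = { 0 };	/* engaged-partitions bit array */
		int partid = 70;		/* an id larger than 63 */

		/* word 1, bit 6: same split as xpc_mark_partition_engaged() */
		amos[BIT_WORD(partid)] |= BIT_MASK(partid);

		printf("word=%d mask=0x%llx\n", (int)BIT_WORD(partid),
		       (unsigned long long)BIT_MASK(partid));
		return 0;
	}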
@@ -869,8 +814,9 @@ static inline void
xpc_mark_partition_disengaged(struct xpc_partition *part)
{
u64 *amo_va = __va(part->remote_amos_page_pa +
- (xpc_engaged_partitions_amos(part->remote_npartitions) +
- BIT_WORD(xp_partition_id)) * xp_sizeof_amo);
+ (xpc_engaged_partitions_amos
+ (part->remote_npartitions) +
+ BIT_WORD(xp_partition_id)) * xp_sizeof_amo);
/* clear bit corresponding to our partid in remote partition's AMO */
(void)xp_set_amo(amo_va, XP_AMO_AND, ~BIT_MASK(xp_partition_id), 1);
@@ -880,8 +826,8 @@ static inline void
xpc_request_partition_disengage(struct xpc_partition *part)
{
u64 *amo_va = __va(part->remote_amos_page_pa +
- (xpc_disengage_request_amos(part->remote_npartitions) +
- BIT_WORD(xp_partition_id)) * xp_sizeof_amo);
+ (xpc_disengage_request_amos(part->remote_npartitions)
+ + BIT_WORD(xp_partition_id)) * xp_sizeof_amo);
/* set bit corresponding to our partid in remote partition's AMO */
(void)xp_set_amo(amo_va, XP_AMO_OR, BIT_MASK(xp_partition_id), 1);
@@ -891,8 +837,8 @@ static inline void
xpc_cancel_partition_disengage_request(struct xpc_partition *part)
{
u64 *amo_va = __va(part->remote_amos_page_pa +
- (xpc_disengage_request_amos(part->remote_npartitions) +
- BIT_WORD(xp_partition_id)) * xp_sizeof_amo);
+ (xpc_disengage_request_amos(part->remote_npartitions)
+ + BIT_WORD(xp_partition_id)) * xp_sizeof_amo);
/* clear bit corresponding to our partid in remote partition's AMO */
(void)xp_set_amo(amo_va, XP_AMO_AND, ~BIT_MASK(xp_partition_id), 1);
@@ -904,14 +850,15 @@ xpc_any_partition_engaged(void)
enum xp_retval ret;
int w_index;
u64 *amo_va = (u64 *)((u64)xpc_vars->amos_page +
- xpc_engaged_partitions_amos(xpc_vars->npartitions) *
- xp_sizeof_amo);
+ xpc_engaged_partitions_amos(xpc_vars->
+ npartitions) *
+ xp_sizeof_amo);
u64 amo;
for (w_index = 0; w_index < xp_partid_mask_words(xpc_vars->npartitions);
w_index++) {
ret = xp_get_amo(amo_va, XP_AMO_LOAD, &amo);
- BUG_ON(ret != xpSuccess); /* should never happen */
+ BUG_ON(ret != xpSuccess); /* should never happen */
if (amo != 0)
return 1;
@@ -925,13 +872,14 @@ xpc_partition_engaged(short partid)
{
enum xp_retval ret;
u64 *amo_va = (u64 *)((u64)xpc_vars->amos_page +
- (xpc_engaged_partitions_amos(xpc_vars->npartitions) +
- BIT_WORD(partid)) * xp_sizeof_amo);
+ (xpc_engaged_partitions_amos
+ (xpc_vars->npartitions) +
+ BIT_WORD(partid)) * xp_sizeof_amo);
u64 amo;
/* return our partition's AMO variable ANDed with partid mask */
ret = xp_get_amo(amo_va, XP_AMO_LOAD, &amo);
- BUG_ON(ret != xpSuccess); /* should never happen */
+ BUG_ON(ret != xpSuccess); /* should never happen */
return (amo & BIT_MASK(partid));
}
@@ -940,13 +888,14 @@ xpc_partition_disengage_requested(short
{
enum xp_retval ret;
u64 *amo_va = (u64 *)((u64)xpc_vars->amos_page +
- (xpc_disengage_request_amos(xpc_vars->npartitions) +
- BIT_WORD(partid)) * xp_sizeof_amo);
+ (xpc_disengage_request_amos
+ (xpc_vars->npartitions) +
+ BIT_WORD(partid)) * xp_sizeof_amo);
u64 amo;
/* return our partition's AMO variable ANDed with partid mask */
ret = xp_get_amo(amo_va, XP_AMO_LOAD, &amo);
- BUG_ON(ret != xpSuccess); /* should never happen */
+ BUG_ON(ret != xpSuccess); /* should never happen */
return (amo & BIT_MASK(partid));
}
@@ -955,12 +904,13 @@ xpc_clear_partition_engaged(short partid
{
enum xp_retval ret;
u64 *amo_va = (u64 *)((u64)xpc_vars->amos_page +
- (xpc_engaged_partitions_amos(xpc_vars->npartitions) +
- BIT_WORD(partid)) * xp_sizeof_amo);
+ (xpc_engaged_partitions_amos
+ (xpc_vars->npartitions) +
+ BIT_WORD(partid)) * xp_sizeof_amo);
/* clear bit corresponding to partid in our partition's AMO */
ret = xp_set_amo(amo_va, XP_AMO_AND, ~BIT_MASK(partid), 0);
- BUG_ON(ret != xpSuccess); /* should never happen */
+ BUG_ON(ret != xpSuccess); /* should never happen */
}
static inline void
@@ -968,16 +918,15 @@ xpc_clear_partition_disengage_request(sh
{
enum xp_retval ret;
u64 *amo_va = (u64 *)((u64)xpc_vars->amos_page +
- (xpc_disengage_request_amos(xpc_vars->npartitions) +
- BIT_WORD(partid)) * xp_sizeof_amo);
+ (xpc_disengage_request_amos
+ (xpc_vars->npartitions) +
+ BIT_WORD(partid)) * xp_sizeof_amo);
/* clear bit corresponding to partid in our partition's AMO */
ret = xp_set_amo(amo_va, XP_AMO_AND, ~BIT_MASK(partid), 0);
- BUG_ON(ret != xpSuccess); /* should never happen */
+ BUG_ON(ret != xpSuccess); /* should never happen */
}
-
-
/*
* The following set of macros and inlines are used for the sending and
* receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
@@ -1000,13 +949,13 @@ xpc_activate_IRQ_send(u64 amos_page_pa,
/* SN nodes are always even numbered nasids */
u64 *amo_va = (u64 *)__va(amos_page_pa +
(xpc_activate_irq_amos(npartitions) +
- BIT_WORD(from_nasid/2)) * xp_sizeof_amo);
+ BIT_WORD(from_nasid / 2)) * xp_sizeof_amo);
ret = xp_set_amo_with_interrupt(amo_va, XP_AMO_OR,
- BIT_MASK(from_nasid/2),
+ BIT_MASK(from_nasid / 2),
remote_amo, to_nasid,
to_phys_cpuid, SGI_XPC_ACTIVATE);
- BUG_ON(!remote_amo && ret != xpSuccess); /* should never happen*/
+ BUG_ON(!remote_amo && ret != xpSuccess); /* should never happen */
}
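Because SN nasids are always even, from_nasid / 2 gives a dense bit index, letting one 64-bit AMO cover 128 nasids; the receiver reverses the mapping to learn which node raised the activate IRQ. A standalone sketch of the mapping:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t amo = 0;
		int from_nasid = 10;	/* SN nasids are always even */

		/* nasid 10 -> bit 5, as in BIT_MASK(from_nasid / 2) */
		amo |= 1ULL << (from_nasid / 2);

		/* receiver side: scan the AMO and recover the nasid */
		for (int bit = 0; bit < 64; bit++)
			if (amo & (1ULL << bit))
				printf("IRQ raised by nasid %d\n", bit * 2);
		return 0;
	}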
static inline void
@@ -1043,7 +992,6 @@ xpc_IPI_send_disengage(struct xpc_partit
part->remote_npartitions);
}
-
/*
* IPIs associated with SGI_XPC_NOTIFY IRQ.
*/
@@ -1057,12 +1005,11 @@ xpc_IPI_send_disengage(struct xpc_partit
static inline void
xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
- unsigned long *irq_flags)
+ unsigned long *irq_flags)
{
struct xpc_partition *part = &xpc_partitions[ch->partid];
enum xp_retval ret;
-
if (unlikely(part->act_state == XPC_P_AS_DEACTIVATING))
return;
@@ -1082,7 +1029,6 @@ xpc_notify_IRQ_send(struct xpc_channel *
}
}
-
/*
* Make it look like the remote partition, which is associated with the
* specified channel, sent us an IPI. This faked IPI will be handled
@@ -1093,21 +1039,21 @@ xpc_notify_IRQ_send(struct xpc_channel *
static inline void
xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
- char *ipi_flag_string)
+ char *ipi_flag_string)
{
enum xp_retval ret;
u64 *amo_va = xpc_partitions[ch->partid].local_IPI_amo_va;
/* set IPI flag corresponding to channel in partition's local AMO */
- ret = xp_set_amo(amo_va, XP_AMO_OR, ((u64)ipi_flag << (ch->number * 8)),
- 0);
- BUG_ON(ret != xpSuccess); /* should never happen */
+ ret =
+ xp_set_amo(amo_va, XP_AMO_OR, ((u64)ipi_flag << (ch->number * 8)),
+ 0);
+ BUG_ON(ret != xpSuccess); /* should never happen */
dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
ipi_flag_string, ch->partid, ch->number);
}
-
/*
* The sending and receiving of IPIs includes the setting of an AMO variable
* to indicate the reason the IPI was sent. The 64-bit variable is divided
@@ -1122,7 +1068,6 @@ xpc_notify_IRQ_send_local(struct xpc_cha
#define XPC_IPI_OPENREPLY 0x08
#define XPC_IPI_MSGREQUEST 0x10
-
/* given an AMO variable and a channel#, get its associated IPI flags */
#define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff))
#define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8))
@@ -1130,13 +1075,11 @@ xpc_notify_IRQ_send_local(struct xpc_cha
#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0fUL)
#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010UL)
-
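Each channel owns one byte of the 64-bit AMO, so up to eight channels fit, and the two masks above test every channel's open/close or message-request bits in a single AND. A standalone sketch of the per-channel packing:

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t u64;
	typedef uint8_t u8;

	#define GET_IPI_FLAGS(_amo, _c)	((u8)(((_amo) >> ((_c) * 8)) & 0xff))
	#define SET_IPI_FLAGS(_amo, _c, _f) ((_amo) |= ((u64)(_f) << ((_c) * 8)))

	#define IPI_MSGREQUEST	0x10

	int main(void)
	{
		u64 amo = 0;

		/* flag a message request on channel 3 (byte 3 of the AMO) */
		SET_IPI_FLAGS(amo, 3, IPI_MSGREQUEST);

		printf("channel 3 flags=0x%02x\n", GET_IPI_FLAGS(amo, 3));
		/* the 0x1010... mask selects every channel's MSGREQUEST bit */
		printf("any msg request pending: %s\n",
		       (amo & 0x1010101010101010ULL) ? "yes" : "no");
		return 0;
	}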
static inline void
xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags)
{
struct xpc_openclose_args *args = ch->local_openclose_args;
-
args->reason = ch->reason;
XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags);
@@ -1153,7 +1096,6 @@ xpc_IPI_send_openrequest(struct xpc_chan
{
struct xpc_openclose_args *args = ch->local_openclose_args;
-
args->msg_size = ch->msg_size;
args->local_nentries = ch->local_nentries;
@@ -1165,7 +1107,6 @@ xpc_IPI_send_openreply(struct xpc_channe
{
struct xpc_openclose_args *args = ch->local_openclose_args;
-
args->remote_nentries = ch->remote_nentries;
args->local_nentries = ch->local_nentries;
args->local_msgqueue_pa = __pa(ch->local_msgqueue);
@@ -1185,21 +1126,17 @@ xpc_IPI_send_local_msgrequest(struct xpc
XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST);
}
-
static inline u64 *
xpc_IPI_init(int index)
{
enum xp_retval ret;
- u64 *amo_va = (u64 *)((u64)xpc_vars->amos_page + index *
- xp_sizeof_amo);
+ u64 *amo_va = (u64 *)((u64)xpc_vars->amos_page + index * xp_sizeof_amo);
ret = xp_get_amo(amo_va, XP_AMO_CLEAR, NULL);
- BUG_ON(ret != xpSuccess); /* should never happen */
+ BUG_ON(ret != xpSuccess); /* should never happen */
return amo_va;
}
-
-
/*
* Check to see if there is any channel activity to/from the specified
* partition.
@@ -1211,9 +1148,8 @@ xpc_check_for_channel_activity(struct xp
u64 IPI_amo;
unsigned long irq_flags;
-
ret = xp_get_amo(part->local_IPI_amo_va, XP_AMO_CLEAR, &IPI_amo);
- BUG_ON(ret != xpSuccess); /* should never happen */
+ BUG_ON(ret != xpSuccess); /* should never happen */
if (IPI_amo == 0) {
return;
}
@@ -1228,6 +1164,4 @@ xpc_check_for_channel_activity(struct xp
xpc_wakeup_channel_mgr(part);
}
-
#endif /* _DRIVERS_MISC_XP_XPC_H */
-
Index: linux-2.6/drivers/misc/xp/xpc_channel.c
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xpc_channel.c 2008-03-21 13:30:52.034175000 -0500
+++ linux-2.6/drivers/misc/xp/xpc_channel.c 2008-03-24 19:31:37.154185684 -0500
@@ -6,7 +6,6 @@
* Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
-
/*
* Cross Partition Communication (XPC) channel support.
*
@@ -15,7 +14,6 @@
*
*/
-
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
@@ -29,7 +27,6 @@
#define cmpxchg_rel(ptr,o,n) cmpxchg(ptr,o,n)
#endif
-
/*
* Guarantee that the kzalloc'd memory is cacheline aligned.
*/
@@ -41,7 +38,7 @@ xpc_kzalloc_cacheline_aligned(size_t siz
if (*base == NULL) {
return NULL;
}
- if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
+ if ((u64)*base == L1_CACHE_ALIGN((u64)*base)) {
return *base;
}
kfree(*base);
@@ -51,10 +48,9 @@ xpc_kzalloc_cacheline_aligned(size_t siz
if (*base == NULL) {
return NULL;
}
- return (void *) L1_CACHE_ALIGN((u64) *base);
+ return (void *)L1_CACHE_ALIGN((u64)*base);
}
-
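The function above gambles that kzalloc() happens to return an aligned block, and only over-allocates by a cacheline on the retry; the caller must hold on to *base for the eventual kfree(). A userspace analogue of the same scheme, with calloc() standing in for kzalloc():

	#include <stdio.h>
	#include <stdint.h>
	#include <stdlib.h>

	#define CACHE_BYTES	128	/* stand-in for L1_CACHE_BYTES */
	#define CACHE_ALIGN(x) \
		(((x) + CACHE_BYTES - 1) & ~((uintptr_t)CACHE_BYTES - 1))

	static void *zalloc_cacheline_aligned(size_t size, void **base)
	{
		*base = calloc(1, size);
		if (*base == NULL)
			return NULL;
		if ((uintptr_t)*base == CACHE_ALIGN((uintptr_t)*base))
			return *base;		/* already aligned: done */
		free(*base);

		/* over-allocate so an aligned block of 'size' bytes must fit */
		*base = calloc(1, size + CACHE_BYTES);
		if (*base == NULL)
			return NULL;
		return (void *)CACHE_ALIGN((uintptr_t)*base);
	}

	int main(void)
	{
		void *base;
		void *p = zalloc_cacheline_aligned(1000, &base);

		printf("aligned: %d\n", p && (uintptr_t)p % CACHE_BYTES == 0);
		free(base);	/* always free via 'base', never via 'p' */
		return 0;
	}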
/*
* Set up the initial values for the XPartition Communication channels.
*/
@@ -64,7 +60,6 @@ xpc_initialize_channels(struct xpc_parti
int ch_number;
struct xpc_channel *ch;
-
for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
ch = &part->channels[ch_number];
@@ -74,7 +69,7 @@ xpc_initialize_channels(struct xpc_parti
ch->local_GP = &part->local_GPs[ch_number];
ch->local_openclose_args =
- &part->local_openclose_args[ch_number];
+ &part->local_openclose_args[ch_number];
atomic_set(&ch->kthreads_assigned, 0);
atomic_set(&ch->kthreads_idle, 0);
@@ -93,7 +88,6 @@ xpc_initialize_channels(struct xpc_parti
}
}
-
/*
* Setup the infrastructure necessary to support XPartition Communication
* between the specified remote partition and the local one.
@@ -105,7 +99,6 @@ xpc_setup_infrastructure(struct xpc_part
struct timer_list *timer;
short partid = XPC_PARTID(part);
-
/*
* Zero out MOST of the entry for this partition. Only the fields
* starting with `nchannels' will be zeroed. The preceding fields must
@@ -113,14 +106,14 @@ xpc_setup_infrastructure(struct xpc_part
* referenced during this memset() operation.
*/
memset(&part->nchannels, 0, sizeof(struct xpc_partition) -
- offsetof(struct xpc_partition, nchannels));
+ offsetof(struct xpc_partition, nchannels));
/*
* Allocate all of the channel structures as a contiguous chunk of
* memory.
*/
part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
- GFP_KERNEL);
+ GFP_KERNEL);
if (part->channels == NULL) {
dev_err(xpc_chan, "can't get memory for channels\n");
return xpNoMemory;
@@ -128,11 +121,11 @@ xpc_setup_infrastructure(struct xpc_part
part->nchannels = XPC_NCHANNELS;
-
/* allocate all the required GET/PUT values */
part->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
- GFP_KERNEL, &part->local_GPs_base);
+ GFP_KERNEL,
+ &part->local_GPs_base);
if (part->local_GPs == NULL) {
kfree(part->channels);
part->channels = NULL;
@@ -142,7 +135,9 @@ xpc_setup_infrastructure(struct xpc_part
}
part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
- GFP_KERNEL, &part->remote_GPs_base);
+ GFP_KERNEL,
+ &part->
+ remote_GPs_base);
if (part->remote_GPs == NULL) {
dev_err(xpc_chan, "can't get memory for remote get/put "
"values\n");
@@ -153,12 +148,11 @@ xpc_setup_infrastructure(struct xpc_part
return xpNoMemory;
}
-
/* allocate all the required open and close args */
- part->local_openclose_args = xpc_kzalloc_cacheline_aligned(
- XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
- &part->local_openclose_args_base);
+ part->local_openclose_args =
+ xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
+ &part->local_openclose_args_base);
if (part->local_openclose_args == NULL) {
dev_err(xpc_chan, "can't get memory for local connect args\n");
kfree(part->remote_GPs_base);
@@ -170,9 +164,9 @@ xpc_setup_infrastructure(struct xpc_part
return xpNoMemory;
}
- part->remote_openclose_args = xpc_kzalloc_cacheline_aligned(
- XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
- &part->remote_openclose_args_base);
+ part->remote_openclose_args =
+ xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
+ &part->remote_openclose_args_base);
if (part->remote_openclose_args == NULL) {
dev_err(xpc_chan, "can't get memory for remote connect args\n");
kfree(part->local_openclose_args_base);
@@ -186,13 +180,11 @@ xpc_setup_infrastructure(struct xpc_part
return xpNoMemory;
}
-
xpc_initialize_channels(part, partid);
atomic_set(&part->nchannels_active, 0);
atomic_set(&part->nchannels_engaged, 0);
-
/* local_IPI_amo was set to 0 by an earlier memset() */
/* Initialize this partition's AMO structure */
@@ -205,7 +197,7 @@ xpc_setup_infrastructure(struct xpc_part
sprintf(part->IPI_owner, "xpc%02d", partid);
ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, IRQF_SHARED,
- part->IPI_owner, (void *) (u64) partid);
+ part->IPI_owner, (void *)(u64)partid);
if (ret != 0) {
dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
"errno=%d\n", -ret);
@@ -225,8 +217,8 @@ xpc_setup_infrastructure(struct xpc_part
/* Setup a timer to check for dropped IPIs */
timer = &part->dropped_IPI_timer;
init_timer(timer);
- timer->function = (void (*)(unsigned long)) xpc_dropped_IPI_check;
- timer->data = (unsigned long) part;
+ timer->function = (void (*)(unsigned long))xpc_dropped_IPI_check;
+ timer->data = (unsigned long)part;
timer->expires = jiffies + XPC_DROPPED_IPI_WAIT_INTERVAL;
add_timer(timer);
@@ -236,7 +228,6 @@ xpc_setup_infrastructure(struct xpc_part
*/
part->setup_state = XPC_P_SS_SETUP;
-
/*
* Setup the per partition specific variables required by the
* remote partition to establish channel connections with us.
@@ -246,7 +237,7 @@ xpc_setup_infrastructure(struct xpc_part
*/
xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs);
xpc_vars_part[partid].openclose_args_pa =
- __pa(part->local_openclose_args);
+ __pa(part->local_openclose_args);
xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va);
cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */
xpc_vars_part[partid].IPI_nasid = xp_cpu_to_nasid(cpuid);
@@ -257,7 +248,6 @@ xpc_setup_infrastructure(struct xpc_part
return xpSuccess;
}
-
/*
* Create a wrapper that hides the underlying mechanism for pulling a cacheline
* (or multiple cachelines) from a remote partition.
@@ -268,13 +258,12 @@ xpc_setup_infrastructure(struct xpc_part
*/
static enum xp_retval
xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
- const void *src, size_t cnt)
+ const void *src, size_t cnt)
{
enum xp_retval ret;
-
- DBUG_ON((u64) src != L1_CACHE_ALIGN((u64) src));
- DBUG_ON((u64) dst != L1_CACHE_ALIGN((u64) dst));
+ DBUG_ON((u64)src != L1_CACHE_ALIGN((u64)src));
+ DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst));
DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
if (part->act_state == XPC_P_AS_DEACTIVATING) {
@@ -290,7 +279,6 @@ xpc_pull_remote_cachelines(struct xpc_pa
return ret;
}
-
/*
* Pull the remote per partition specific variables from the specified
* partition.
@@ -300,41 +288,40 @@ xpc_pull_remote_vars_part(struct xpc_par
{
u8 buffer[L1_CACHE_BYTES * 2];
struct xpc_vars_part *pulled_entry_cacheline =
- (struct xpc_vars_part *) L1_CACHE_ALIGN((u64) buffer);
+ (struct xpc_vars_part *)L1_CACHE_ALIGN((u64)buffer);
struct xpc_vars_part *pulled_entry;
u64 remote_entry_cacheline_pa, remote_entry_pa;
short partid = XPC_PARTID(part);
enum xp_retval ret;
-
/* pull the cacheline that contains the variables we're interested in */
DBUG_ON(part->remote_vars_part_pa !=
- L1_CACHE_ALIGN(part->remote_vars_part_pa));
+ L1_CACHE_ALIGN(part->remote_vars_part_pa));
DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2);
remote_entry_pa = part->remote_vars_part_pa +
- xp_partition_id * sizeof(struct xpc_vars_part);
+ xp_partition_id * sizeof(struct xpc_vars_part);
remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));
- pulled_entry = (struct xpc_vars_part *) ((u64) pulled_entry_cacheline +
- (remote_entry_pa & (L1_CACHE_BYTES - 1)));
+ pulled_entry = (struct xpc_vars_part *)((u64)pulled_entry_cacheline +
+ (remote_entry_pa &
+ (L1_CACHE_BYTES - 1)));
ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline,
- (void *) remote_entry_cacheline_pa,
- L1_CACHE_BYTES);
+ (void *)remote_entry_cacheline_pa,
+ L1_CACHE_BYTES);
if (ret != xpSuccess) {
dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
"partition %d, ret=%d\n", partid, ret);
return ret;
}
-
/* see if they've been set up yet */
if (pulled_entry->magic != XPC_VP_MAGIC1 &&
- pulled_entry->magic != XPC_VP_MAGIC2) {
+ pulled_entry->magic != XPC_VP_MAGIC2) {
if (pulled_entry->magic != 0) {
dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
@@ -353,8 +340,8 @@ xpc_pull_remote_vars_part(struct xpc_par
/* validate the variables */
if (pulled_entry->GPs_pa == 0 ||
- pulled_entry->openclose_args_pa == 0 ||
- pulled_entry->IPI_amo_pa == 0) {
+ pulled_entry->openclose_args_pa == 0 ||
+ pulled_entry->IPI_amo_pa == 0) {
dev_err(xpc_chan, "partition %d's XPC vars_part for "
"partition %d are not valid\n", partid,
@@ -366,7 +353,7 @@ xpc_pull_remote_vars_part(struct xpc_par
part->remote_GPs_pa = pulled_entry->GPs_pa;
part->remote_openclose_args_pa =
- pulled_entry->openclose_args_pa;
+ pulled_entry->openclose_args_pa;
part->remote_IPI_amo_va = __va(pulled_entry->IPI_amo_pa);
part->remote_IPI_nasid = pulled_entry->IPI_nasid;
part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;
@@ -387,7 +374,6 @@ xpc_pull_remote_vars_part(struct xpc_par
return xpSuccess;
}
-
/*
* Get the IPI flags and pull the openclose args and/or remote GPs as needed.
*/
@@ -398,7 +384,6 @@ xpc_get_IPI_flags(struct xpc_partition *
u64 IPI_amo;
enum xp_retval ret;
-
/*
* See if there are any IPI flags to be handled.
*/
@@ -409,12 +394,12 @@ xpc_get_IPI_flags(struct xpc_partition *
}
spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
-
if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
ret = xpc_pull_remote_cachelines(part,
- part->remote_openclose_args,
- (void *) part->remote_openclose_args_pa,
- XPC_OPENCLOSE_ARGS_SIZE);
+ part->remote_openclose_args,
+ (void *)part->
+ remote_openclose_args_pa,
+ XPC_OPENCLOSE_ARGS_SIZE);
if (ret != xpSuccess) {
XPC_DEACTIVATE_PARTITION(part, ret);
@@ -429,8 +414,8 @@ xpc_get_IPI_flags(struct xpc_partition *
if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) {
ret = xpc_pull_remote_cachelines(part, part->remote_GPs,
- (void *) part->remote_GPs_pa,
- XPC_GP_SIZE);
+ (void *)part->remote_GPs_pa,
+ XPC_GP_SIZE);
if (ret != xpSuccess) {
XPC_DEACTIVATE_PARTITION(part, ret);
@@ -445,7 +430,6 @@ xpc_get_IPI_flags(struct xpc_partition *
return IPI_amo;
}
-
/*
* Allocate the local message queue and the notify queue.
*/
@@ -460,8 +444,9 @@ xpc_allocate_local_msgqueue(struct xpc_c
nbytes = nentries * ch->msg_size;
ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
- GFP_KERNEL,
- &ch->local_msgqueue_base);
+ GFP_KERNEL,
+ &ch->
+ local_msgqueue_base);
if (ch->local_msgqueue == NULL) {
continue;
}
@@ -491,7 +476,6 @@ xpc_allocate_local_msgqueue(struct xpc_c
return xpNoMemory;
}
-
/*
* Allocate the cached remote message queue.
*/
@@ -502,15 +486,15 @@ xpc_allocate_remote_msgqueue(struct xpc_
int nentries;
size_t nbytes;
-
DBUG_ON(ch->remote_nentries <= 0);
for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
nbytes = nentries * ch->msg_size;
ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
- GFP_KERNEL,
- &ch->remote_msgqueue_base);
+ GFP_KERNEL,
+ &ch->
+ remote_msgqueue_base);
if (ch->remote_msgqueue == NULL) {
continue;
}
@@ -532,7 +516,6 @@ xpc_allocate_remote_msgqueue(struct xpc_
return xpNoMemory;
}
-
/*
* Allocate message queues and other stuff associated with a channel.
*
@@ -544,7 +527,6 @@ xpc_allocate_msgqueues(struct xpc_channe
unsigned long irq_flags;
enum xp_retval ret;
-
DBUG_ON(ch->flags & XPC_C_SETUP);
if ((ret = xpc_allocate_local_msgqueue(ch)) != xpSuccess) {
@@ -566,7 +548,6 @@ xpc_allocate_msgqueues(struct xpc_channe
return xpSuccess;
}
-
/*
* Process a connect message from a remote partition.
*
@@ -578,11 +559,10 @@ xpc_process_connect(struct xpc_channel *
{
enum xp_retval ret;
-
DBUG_ON(!spin_is_locked(&ch->lock));
if (!(ch->flags & XPC_C_OPENREQUEST) ||
- !(ch->flags & XPC_C_ROPENREQUEST)) {
+ !(ch->flags & XPC_C_ROPENREQUEST)) {
/* nothing more to do for now */
return;
}
@@ -619,14 +599,13 @@ xpc_process_connect(struct xpc_channel *
ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */
dev_info(xpc_chan, "channel %d to partition %d connected\n",
- ch->number, ch->partid);
+ ch->number, ch->partid);
spin_unlock_irqrestore(&ch->lock, *irq_flags);
xpc_create_kthreads(ch, 1, 0);
spin_lock_irqsave(&ch->lock, *irq_flags);
}
-
/*
* Notify those who wanted to be notified upon delivery of their message.
*/
@@ -637,7 +616,6 @@ xpc_notify_senders(struct xpc_channel *c
u8 notify_type;
s64 get = ch->w_remote_GP.get - 1;
-
while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
notify = &ch->notify_queue[get % ch->local_nentries];
@@ -650,8 +628,7 @@ xpc_notify_senders(struct xpc_channel *c
*/
notify_type = notify->type;
if (notify_type == 0 ||
- cmpxchg(¬ify->type, notify_type, 0) !=
- notify_type) {
+ cmpxchg(¬ify->type, notify_type, 0) != notify_type) {
continue;
}
@@ -662,21 +639,20 @@ xpc_notify_senders(struct xpc_channel *c
if (notify->func != NULL) {
dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
"msg_number=%" U64_ELL "d, partid=%d, "
- "channel=%d\n", (void *) notify, get,
+ "channel=%d\n", (void *)notify, get,
ch->partid, ch->number);
notify->func(reason, ch->partid, ch->number,
- notify->key);
+ notify->key);
dev_dbg(xpc_chan, "notify->func() returned, "
"notify=0x%p, msg_number=%" U64_ELL "d, "
- "partid=%d, channel=%d\n", (void *) notify,
+ "partid=%d, channel=%d\n", (void *)notify,
get, ch->partid, ch->number);
}
}
}
-
/*
* Free up message queues and other stuff that were allocated for the specified
* channel.
@@ -724,7 +700,6 @@ xpc_free_msgqueues(struct xpc_channel *c
}
}
-
/*
* spin_lock_irqsave() is expected to be held on entry.
*/
@@ -734,7 +709,6 @@ xpc_process_disconnect(struct xpc_channe
struct xpc_partition *part = &xpc_partitions[ch->partid];
u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);
-
DBUG_ON(!spin_is_locked(&ch->lock));
if (!(ch->flags & XPC_C_DISCONNECTING)) {
@@ -746,11 +720,11 @@ xpc_process_disconnect(struct xpc_channe
/* make sure all activity has settled down first */
if (atomic_read(&ch->kthreads_assigned) > 0 ||
- atomic_read(&ch->references) > 0) {
+ atomic_read(&ch->references) > 0) {
return;
}
DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
- !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
+ !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
if (part->act_state == XPC_P_AS_DEACTIVATING) {
/* can't proceed until the other side disengages from us */
@@ -800,7 +774,7 @@ xpc_process_disconnect(struct xpc_channe
if (channel_was_connected) {
dev_info(xpc_chan, "channel %d to partition %d disconnected, "
- "reason=%d\n", ch->number, ch->partid, ch->reason);
+ "reason=%d\n", ch->number, ch->partid, ch->reason);
}
if (ch->flags & XPC_C_WDISCONNECT) {
@@ -811,35 +785,31 @@ xpc_process_disconnect(struct xpc_channe
/* time to take action on any delayed IPI flags */
spin_lock(&part->IPI_lock);
XPC_SET_IPI_FLAGS(part->local_IPI_amo, ch->number,
- ch->delayed_IPI_flags);
+ ch->delayed_IPI_flags);
spin_unlock(&part->IPI_lock);
}
ch->delayed_IPI_flags = 0;
}
}
-
/*
* Process a change in the channel's remote connection state.
*/
static void
xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
- u8 IPI_flags)
+ u8 IPI_flags)
{
unsigned long irq_flags;
struct xpc_openclose_args *args =
- &part->remote_openclose_args[ch_number];
+ &part->remote_openclose_args[ch_number];
struct xpc_channel *ch = &part->channels[ch_number];
enum xp_retval reason;
-
-
spin_lock_irqsave(&ch->lock, irq_flags);
-again:
+ again:
- if ((ch->flags & XPC_C_DISCONNECTED) &&
- (ch->flags & XPC_C_WDISCONNECT)) {
+ if ((ch->flags & XPC_C_DISCONNECTED) && (ch->flags & XPC_C_WDISCONNECT)) {
/*
* Delay processing IPI flags until thread waiting disconnect
* has had a chance to see that the channel is disconnected.
@@ -849,7 +819,6 @@ again:
return;
}
-
if (IPI_flags & XPC_IPI_CLOSEREQUEST) {
dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received "
@@ -881,13 +850,14 @@ again:
if (ch->flags & XPC_C_DISCONNECTED) {
if (!(IPI_flags & XPC_IPI_OPENREQUEST)) {
if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo,
- ch_number) & XPC_IPI_OPENREQUEST)) {
+ ch_number) &
+ XPC_IPI_OPENREQUEST)) {
DBUG_ON(ch->delayed_IPI_flags != 0);
spin_lock(&part->IPI_lock);
XPC_SET_IPI_FLAGS(part->local_IPI_amo,
- ch_number,
- XPC_IPI_CLOSEREQUEST);
+ ch_number,
+ XPC_IPI_CLOSEREQUEST);
spin_unlock(&part->IPI_lock);
}
spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -928,7 +898,6 @@ again:
xpc_process_disconnect(ch, &irq_flags);
}
-
if (IPI_flags & XPC_IPI_CLOSEREPLY) {
dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d,"
@@ -944,12 +913,13 @@ again:
if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, ch_number)
- & XPC_IPI_CLOSEREQUEST)) {
+ & XPC_IPI_CLOSEREQUEST)) {
DBUG_ON(ch->delayed_IPI_flags != 0);
spin_lock(&part->IPI_lock);
XPC_SET_IPI_FLAGS(part->local_IPI_amo,
- ch_number, XPC_IPI_CLOSEREPLY);
+ ch_number,
+ XPC_IPI_CLOSEREPLY);
spin_unlock(&part->IPI_lock);
}
spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -964,7 +934,6 @@ again:
}
}
-
if (IPI_flags & XPC_IPI_OPENREQUEST) {
dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, "
@@ -973,7 +942,7 @@ again:
ch->partid, ch->number);
if (part->act_state == XPC_P_AS_DEACTIVATING ||
- (ch->flags & XPC_C_ROPENREQUEST)) {
+ (ch->flags & XPC_C_ROPENREQUEST)) {
spin_unlock_irqrestore(&ch->lock, irq_flags);
return;
}
@@ -984,9 +953,9 @@ again:
return;
}
DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
- XPC_C_OPENREQUEST)));
+ XPC_C_OPENREQUEST)));
DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
- XPC_C_OPENREPLY | XPC_C_CONNECTED));
+ XPC_C_OPENREPLY | XPC_C_CONNECTED));
/*
* The meaningful OPENREQUEST connection state fields are:
@@ -1002,11 +971,10 @@ again:
ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
ch->remote_nentries = args->local_nentries;
-
if (ch->flags & XPC_C_OPENREQUEST) {
if (args->msg_size != ch->msg_size) {
XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
- &irq_flags);
+ &irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
return;
}
@@ -1022,7 +990,6 @@ again:
xpc_process_connect(ch, &irq_flags);
}
-
if (IPI_flags & XPC_IPI_OPENREPLY) {
dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%"
@@ -1037,7 +1004,7 @@ again:
}
if (!(ch->flags & XPC_C_OPENREQUEST)) {
XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
- &irq_flags);
+ &irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
return;
}
@@ -1048,7 +1015,7 @@ again:
/*
* The meaningful OPENREPLY connection state fields are:
* local_msgqueue_pa = physical address of remote
- * partition's local_msgqueue
+ * partition's local_msgqueue
* local_nentries = remote partition's local_nentries
* remote_nentries = remote partition's remote_nentries
*/
@@ -1084,7 +1051,6 @@ again:
spin_unlock_irqrestore(&ch->lock, irq_flags);
}
-
/*
* Attempt to establish a channel connection to a remote partition.
*/
@@ -1094,7 +1060,6 @@ xpc_connect_channel(struct xpc_channel *
unsigned long irq_flags;
struct xpc_registration *registration = &xpc_registrations[ch->number];
-
if (mutex_trylock(®istration->mutex) == 0) {
return xpRetry;
}
@@ -1115,7 +1080,6 @@ xpc_connect_channel(struct xpc_channel *
return ch->reason;
}
-
/* add info from the channel connect registration to the channel */
ch->kthreads_assigned_limit = registration->assigned_limit;
@@ -1145,7 +1109,7 @@ xpc_connect_channel(struct xpc_channel *
*/
mutex_unlock(®istration->mutex);
XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
- &irq_flags);
+ &irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
return xpUnequalMsgSizes;
}
@@ -1160,7 +1124,6 @@ xpc_connect_channel(struct xpc_channel *
mutex_unlock(®istration->mutex);
-
/* initiate the connection */
ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
@@ -1173,7 +1136,6 @@ xpc_connect_channel(struct xpc_channel *
return xpSuccess;
}
-
/*
* Clear some of the msg flags in the local message queue.
*/
@@ -1183,16 +1145,15 @@ xpc_clear_local_msgqueue_flags(struct xp
struct xpc_msg *msg;
s64 get;
-
get = ch->w_remote_GP.get;
do {
- msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
- (get % ch->local_nentries) * ch->msg_size);
+ msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
+ (get % ch->local_nentries) *
+ ch->msg_size);
msg->flags = 0;
- } while (++get < (volatile s64) ch->remote_GP.get);
+ } while (++get < (volatile s64)ch->remote_GP.get);
}
-
/*
* Clear some of the msg flags in the remote message queue.
*/
@@ -1202,43 +1163,39 @@ xpc_clear_remote_msgqueue_flags(struct x
struct xpc_msg *msg;
s64 put;
-
put = ch->w_remote_GP.put;
do {
- msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
- (put % ch->remote_nentries) * ch->msg_size);
+ msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
+ (put % ch->remote_nentries) *
+ ch->msg_size);
msg->flags = 0;
- } while (++put < (volatile s64) ch->remote_GP.put);
+ } while (++put < (volatile s64)ch->remote_GP.put);
}
-
static void
xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
{
struct xpc_channel *ch = &part->channels[ch_number];
int nmsgs_sent;
-
ch->remote_GP = part->remote_GPs[ch_number];
-
/* See what, if anything, has changed for each connected channel */
xpc_msgqueue_ref(ch);
if (ch->w_remote_GP.get == ch->remote_GP.get &&
- ch->w_remote_GP.put == ch->remote_GP.put) {
+ ch->w_remote_GP.put == ch->remote_GP.put) {
/* nothing changed since GPs were last pulled */
xpc_msgqueue_deref(ch);
return;
}
- if (!(ch->flags & XPC_C_CONNECTED)){
+ if (!(ch->flags & XPC_C_CONNECTED)) {
xpc_msgqueue_deref(ch);
return;
}
-
/*
* First check to see if messages recently sent by us have been
* received by the other side. (The remote GET value will have
@@ -1260,7 +1217,7 @@ xpc_process_msg_IPI(struct xpc_partition
* received and delivered by the other side.
*/
xpc_notify_senders(ch, xpMsgDelivered,
- ch->remote_GP.get);
+ ch->remote_GP.get);
}
/*
@@ -1284,7 +1241,6 @@ xpc_process_msg_IPI(struct xpc_partition
}
}
-
/*
* Now check for newly sent messages by the other side. (The remote
* PUT value will have changed since we last looked at it.)
@@ -1318,7 +1274,6 @@ xpc_process_msg_IPI(struct xpc_partition
xpc_msgqueue_deref(ch);
}
-
void
xpc_process_channel_activity(struct xpc_partition *part)
{
@@ -1328,7 +1283,6 @@ xpc_process_channel_activity(struct xpc_
int ch_number;
u32 ch_flags;
-
IPI_amo = xpc_get_IPI_flags(part);
/*
@@ -1341,7 +1295,6 @@ xpc_process_channel_activity(struct xpc_
for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
ch = &part->channels[ch_number];
-
/*
* Process any open or close related IPI flags, and then deal
* with connecting or disconnecting the channel as required.
@@ -1369,7 +1322,7 @@ xpc_process_channel_activity(struct xpc_
if (!(ch_flags & XPC_C_CONNECTED)) {
if (!(ch_flags & XPC_C_OPENREQUEST)) {
DBUG_ON(ch_flags & XPC_C_SETUP);
- (void) xpc_connect_channel(ch);
+ (void)xpc_connect_channel(ch);
} else {
spin_lock_irqsave(&ch->lock, irq_flags);
xpc_process_connect(ch, &irq_flags);
@@ -1378,7 +1331,6 @@ xpc_process_channel_activity(struct xpc_
continue;
}
-
/*
* Process any message related IPI flags, this may involve the
* activation of kthreads to deliver any pending messages sent
@@ -1391,7 +1343,6 @@ xpc_process_channel_activity(struct xpc_
}
}
-
/*
* XPC's heartbeat code calls this function to inform XPC that a partition is
* going down. XPC responds by tearing down the XPartition Communication
@@ -1408,7 +1359,6 @@ xpc_partition_going_down(struct xpc_part
int ch_number;
struct xpc_channel *ch;
-
dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
XPC_PARTID(part), reason);
@@ -1417,7 +1367,6 @@ xpc_partition_going_down(struct xpc_part
return;
}
-
/* disconnect channels associated with the partition going down */
for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
@@ -1437,7 +1386,6 @@ xpc_partition_going_down(struct xpc_part
xpc_part_deref(part);
}
-
/*
* Teardown the infrastructure necessary to support XPartition Communication
* between the specified remote partition and the local one.
@@ -1447,7 +1395,6 @@ xpc_teardown_infrastructure(struct xpc_p
{
short partid = XPC_PARTID(part);
-
/*
* We start off by making this partition inaccessible to local
* processes by marking it as no longer setup. Then we make it
@@ -1464,9 +1411,7 @@ xpc_teardown_infrastructure(struct xpc_p
xpc_vars_part[partid].magic = 0;
-
- free_irq(SGI_XPC_NOTIFY, (void *) (u64) partid);
-
+ free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);
/*
* Before proceeding with the teardown we have to wait until all
@@ -1474,7 +1419,6 @@ xpc_teardown_infrastructure(struct xpc_p
*/
wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
-
/* now we can begin tearing down the infrastructure */
part->setup_state = XPC_P_SS_TORNDOWN;
@@ -1495,7 +1439,6 @@ xpc_teardown_infrastructure(struct xpc_p
part->local_IPI_amo_va = NULL;
}
-
/*
* Called by XP at the time of channel connection registration to cause
* XPC to establish connections to all currently active partitions.
@@ -1507,7 +1450,6 @@ xpc_initiate_connect(int ch_number)
struct xpc_partition *part;
struct xpc_channel *ch;
-
DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
for (partid = XP_MIN_PARTID; partid <= XP_MAX_PARTID; partid++) {
@@ -1526,7 +1468,6 @@ xpc_initiate_connect(int ch_number)
}
}
-
void
xpc_connected_callout(struct xpc_channel *ch)
{
@@ -1537,14 +1478,13 @@ xpc_connected_callout(struct xpc_channel
"partid=%d, channel=%d\n", ch->partid, ch->number);
ch->func(xpConnected, ch->partid, ch->number,
- (void *) (u64) ch->local_nentries, ch->key);
+ (void *)(u64)ch->local_nentries, ch->key);
dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, "
"partid=%d, channel=%d\n", ch->partid, ch->number);
}
}
-
/*
* Called by XP at the time of channel connection unregistration to cause
* XPC to teardown all current connections for the specified channel.
@@ -1566,7 +1506,6 @@ xpc_initiate_disconnect(int ch_number)
struct xpc_partition *part;
struct xpc_channel *ch;
-
DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
/* initiate the channel disconnect for every active partition */
@@ -1583,7 +1522,7 @@ xpc_initiate_disconnect(int ch_number)
ch->flags |= XPC_C_WDISCONNECT;
XPC_DISCONNECT_CHANNEL(ch, xpUnregistering,
- &irq_flags);
+ &irq_flags);
}
spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -1596,7 +1535,6 @@ xpc_initiate_disconnect(int ch_number)
xpc_disconnect_wait(ch_number);
}
-
/*
* To disconnect a channel, and reflect it back to all who may be waiting.
*
@@ -1608,11 +1546,10 @@ xpc_initiate_disconnect(int ch_number)
*/
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
- enum xp_retval reason, unsigned long *irq_flags)
+ enum xp_retval reason, unsigned long *irq_flags)
{
u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);
-
DBUG_ON(!spin_is_locked(&ch->lock));
if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
@@ -1628,8 +1565,8 @@ xpc_disconnect_channel(const int line, s
ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
/* some of these may not have been set */
ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
- XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
- XPC_C_CONNECTING | XPC_C_CONNECTED);
+ XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
+ XPC_C_CONNECTING | XPC_C_CONNECTED);
xpc_IPI_send_closerequest(ch, irq_flags);
@@ -1644,7 +1581,7 @@ xpc_disconnect_channel(const int line, s
wake_up_all(&ch->idle_wq);
} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
- !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
+ !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
/* start a kthread that will do the xpDisconnecting callout */
xpc_create_kthreads(ch, 1, 1);
}
@@ -1657,7 +1594,6 @@ xpc_disconnect_channel(const int line, s
spin_lock_irqsave(&ch->lock, *irq_flags);
}
-
void
xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason)
{
@@ -1678,7 +1614,6 @@ xpc_disconnect_callout(struct xpc_channe
}
}
-
/*
* Wait for a message entry to become available for the specified channel,
* but don't wait any longer than 1 jiffy.
@@ -1688,7 +1623,6 @@ xpc_allocate_msg_wait(struct xpc_channel
{
enum xp_retval ret;
-
if (ch->flags & XPC_C_DISCONNECTING) {
DBUG_ON(ch->reason == xpInterrupted);
return ch->reason;
@@ -1710,20 +1644,18 @@ xpc_allocate_msg_wait(struct xpc_channel
return ret;
}
-
/*
* Allocate an entry for a message from the message queue associated with the
* specified channel.
*/
static enum xp_retval
xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
- struct xpc_msg **address_of_msg)
+ struct xpc_msg **address_of_msg)
{
struct xpc_msg *msg;
enum xp_retval ret;
s64 put;
-
/* this reference will be dropped in xpc_send_msg() */
xpc_msgqueue_ref(ch);
@@ -1736,7 +1668,6 @@ xpc_allocate_msg(struct xpc_channel *ch,
return xpNotConnected;
}
-
/*
* Get the next available message entry from the local message queue.
* If none are available, we'll make sure that we grab the latest
@@ -1746,25 +1677,23 @@ xpc_allocate_msg(struct xpc_channel *ch,
while (1) {
- put = (volatile s64) ch->w_local_GP.put;
- if (put - (volatile s64) ch->w_remote_GP.get <
- ch->local_nentries) {
+ put = (volatile s64)ch->w_local_GP.put;
+ if (put - (volatile s64)ch->w_remote_GP.get <
+ ch->local_nentries) {
/* There are available message entries. We need to try
* to secure one for ourselves. We'll do this by trying
* to increment w_local_GP.put as long as someone else
* doesn't beat us to it. If they do, we'll have to
* try again.
- */
- if (cmpxchg(&ch->w_local_GP.put, put, put + 1) ==
- put) {
+ */
+ if (cmpxchg(&ch->w_local_GP.put, put, put + 1) == put) {
/* we got the entry referenced by put */
break;
}
continue; /* try again */
}
-
/*
* There aren't any available msg entries at this time.
*
@@ -1790,25 +1719,22 @@ xpc_allocate_msg(struct xpc_channel *ch,
}
}
-
/* get the message's address and initialize it */
- msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
- (put % ch->local_nentries) * ch->msg_size);
-
+ msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
+ (put % ch->local_nentries) * ch->msg_size);
DBUG_ON(msg->flags != 0);
msg->number = put;
dev_dbg(xpc_chan, "w_local_GP.put changed to %" U64_ELL "d; msg=0x%p, "
"msg_number=%" U64_ELL "d, partid=%d, channel=%d\n", put + 1,
- (void *) msg, msg->number, ch->partid, ch->number);
+ (void *)msg, msg->number, ch->partid, ch->number);
*address_of_msg = msg;
return xpSuccess;
}
-
/*
* Allocate an entry for a message from the message queue associated with the
* specified channel. NOTE that this routine can sleep waiting for a message
@@ -1829,7 +1755,6 @@ xpc_initiate_allocate(short partid, int
enum xp_retval ret = xpUnknownReason;
struct xpc_msg *msg = NULL;
-
DBUG_ON(partid < XP_MIN_PARTID || partid > XP_MAX_PARTID);
DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
@@ -1847,7 +1772,6 @@ xpc_initiate_allocate(short partid, int
return ret;
}
-
/*
* Now we actually send the messages that are ready to be sent by advancing
* the local message queue's Put value and then send an IPI to the recipient
@@ -1860,16 +1784,16 @@ xpc_send_msgs(struct xpc_channel *ch, s6
s64 put = initial_put + 1;
int send_IPI = 0;
-
while (1) {
while (1) {
- if (put == (volatile s64) ch->w_local_GP.put) {
+ if (put == (volatile s64)ch->w_local_GP.put) {
break;
}
- msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
- (put % ch->local_nentries) * ch->msg_size);
+ msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
+ (put % ch->local_nentries) *
+ ch->msg_size);
if (!(msg->flags & XPC_M_READY)) {
break;
@@ -1884,9 +1808,9 @@ xpc_send_msgs(struct xpc_channel *ch, s6
}
if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) !=
- initial_put) {
+ initial_put) {
/* someone else beat us to it */
- DBUG_ON((volatile s64) ch->local_GP->put < initial_put);
+ DBUG_ON((volatile s64)ch->local_GP->put < initial_put);
break;
}
@@ -1910,7 +1834,6 @@ xpc_send_msgs(struct xpc_channel *ch, s6
}
}
-
/*
* Common code that does the actual sending of the message by advancing the
* local message queue's Put value and sends an IPI to the partition the
@@ -1918,16 +1841,15 @@ xpc_send_msgs(struct xpc_channel *ch, s6
*/
static enum xp_retval
xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
- xpc_notify_func func, void *key)
+ xpc_notify_func func, void *key)
{
enum xp_retval ret = xpSuccess;
struct xpc_notify *notify = notify;
s64 put, msg_number = msg->number;
-
DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
- DBUG_ON((((u64) msg - (u64) ch->local_msgqueue) / ch->msg_size) !=
- msg_number % ch->local_nentries);
+ DBUG_ON((((u64)msg - (u64)ch->local_msgqueue) / ch->msg_size) !=
+ msg_number % ch->local_nentries);
DBUG_ON(msg->flags & XPC_M_READY);
if (ch->flags & XPC_C_DISCONNECTING) {
@@ -1961,7 +1883,7 @@ xpc_send_msg(struct xpc_channel *ch, str
* the notify entry.
*/
if (cmpxchg(&notify->type, notify_type, 0) ==
- notify_type) {
+ notify_type) {
atomic_dec(&ch->n_to_notify);
ret = ch->reason;
}
@@ -1992,7 +1914,6 @@ xpc_send_msg(struct xpc_channel *ch, str
return ret;
}
-
/*
* Send a message previously allocated using xpc_initiate_allocate() on the
* specified channel connected to the specified partition.
@@ -2020,8 +1941,7 @@ xpc_initiate_send(short partid, int ch_n
struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
enum xp_retval ret;
-
- dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
+ dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
partid, ch_number);
DBUG_ON(partid < XP_MIN_PARTID || partid > XP_MAX_PARTID);
@@ -2033,7 +1953,6 @@ xpc_initiate_send(short partid, int ch_n
return ret;
}
-
/*
* Send a message previously allocated using xpc_initiate_allocate on the
* specified channel connected to the specified partition.
@@ -2066,14 +1985,13 @@ xpc_initiate_send(short partid, int ch_n
*/
enum xp_retval
xpc_initiate_send_notify(short partid, int ch_number, void *payload,
- xpc_notify_func func, void *key)
+ xpc_notify_func func, void *key)
{
struct xpc_partition *part = &xpc_partitions[partid];
struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
enum xp_retval ret;
-
- dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
+ dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
partid, ch_number);
DBUG_ON(partid < XP_MIN_PARTID || partid > XP_MAX_PARTID);
@@ -2082,11 +2000,10 @@ xpc_initiate_send_notify(short partid, i
DBUG_ON(func == NULL);
ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL,
- func, key);
+ func, key);
return ret;
}
-
static struct xpc_msg *
xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
{
@@ -2096,7 +2013,6 @@ xpc_pull_remote_msg(struct xpc_channel *
u64 msg_offset;
enum xp_retval ret;
-
if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
/* we were interrupted by a signal */
return NULL;
@@ -2109,22 +2025,22 @@ xpc_pull_remote_msg(struct xpc_channel *
msg_index = ch->next_msg_to_pull % ch->remote_nentries;
DBUG_ON(ch->next_msg_to_pull >=
- (volatile s64) ch->w_remote_GP.put);
- nmsgs = (volatile s64) ch->w_remote_GP.put -
- ch->next_msg_to_pull;
+ (volatile s64)ch->w_remote_GP.put);
+ nmsgs = (volatile s64)ch->w_remote_GP.put -
+ ch->next_msg_to_pull;
if (msg_index + nmsgs > ch->remote_nentries) {
/* ignore the ones that wrap the msg queue for now */
nmsgs = ch->remote_nentries - msg_index;
}
msg_offset = msg_index * ch->msg_size;
- msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
- msg_offset);
- remote_msg = (struct xpc_msg *) (ch->remote_msgqueue_pa +
- msg_offset);
+ msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
+ remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa +
+ msg_offset);
if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
- nmsgs * ch->msg_size)) != xpSuccess) {
+ nmsgs * ch->msg_size)) !=
+ xpSuccess) {
dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
" msg %" U64_ELL "d from partition %d, "
@@ -2138,7 +2054,7 @@ xpc_pull_remote_msg(struct xpc_channel *
return NULL;
}
- mb(); /* >>> this may not be needed, we're not sure */
+ mb(); /* >>> this may not be needed, we're not sure */
ch->next_msg_to_pull += nmsgs;
}
@@ -2147,12 +2063,11 @@ xpc_pull_remote_msg(struct xpc_channel *
/* return the message we were looking for */
msg_offset = (get % ch->remote_nentries) * ch->msg_size;
- msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + msg_offset);
+ msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
return msg;
}
-
/*
* Get a message to be delivered.
*/
@@ -2162,14 +2077,13 @@ xpc_get_deliverable_msg(struct xpc_chann
struct xpc_msg *msg = NULL;
s64 get;
-
do {
- if ((volatile u32) ch->flags & XPC_C_DISCONNECTING) {
+ if ((volatile u32)ch->flags & XPC_C_DISCONNECTING) {
break;
}
- get = (volatile s64) ch->w_local_GP.get;
- if (get == (volatile s64) ch->w_remote_GP.put) {
+ get = (volatile s64)ch->w_local_GP.get;
+ if (get == (volatile s64)ch->w_remote_GP.put) {
break;
}
@@ -2178,7 +2092,7 @@ xpc_get_deliverable_msg(struct xpc_chann
* by trying to increment w_local_GP.get and hope that no one
* else beats us to it. If they do, we'll simply have
* to try again for the next one.
- */
+ */
if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) {
/* we got the entry referenced by get */
@@ -2203,7 +2117,6 @@ xpc_get_deliverable_msg(struct xpc_chann
return msg;
}
-
/*
* Deliver a message to its intended recipient.
*/
@@ -2212,7 +2125,6 @@ xpc_deliver_msg(struct xpc_channel *ch)
{
struct xpc_msg *msg;
-
if ((msg = xpc_get_deliverable_msg(ch)) != NULL) {
/*
@@ -2227,16 +2139,16 @@ xpc_deliver_msg(struct xpc_channel *ch)
if (ch->func != NULL) {
dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, "
"msg_number=%" U64_ELL "d, partid=%d, "
- "channel=%d\n", (void *) msg, msg->number,
+ "channel=%d\n", (void *)msg, msg->number,
ch->partid, ch->number);
/* deliver the message to its intended recipient */
ch->func(xpMsgReceived, ch->partid, ch->number,
- &msg->payload, ch->key);
+ &msg->payload, ch->key);
dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, "
"msg_number=%" U64_ELL "d, partid=%d, "
- "channel=%d\n", (void *) msg, msg->number,
+ "channel=%d\n", (void *)msg, msg->number,
ch->partid, ch->number);
}
@@ -2244,7 +2156,6 @@ xpc_deliver_msg(struct xpc_channel *ch)
}
}
-
/*
* Now we actually acknowledge the messages that have been delivered and ack'd
* by advancing the cached remote message queue's Get value and if requested
@@ -2257,16 +2168,16 @@ xpc_acknowledge_msgs(struct xpc_channel
s64 get = initial_get + 1;
int send_IPI = 0;
-
while (1) {
while (1) {
- if (get == (volatile s64) ch->w_local_GP.get) {
+ if (get == (volatile s64)ch->w_local_GP.get) {
break;
}
- msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
- (get % ch->remote_nentries) * ch->msg_size);
+ msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
+ (get % ch->remote_nentries) *
+ ch->msg_size);
if (!(msg->flags & XPC_M_DONE)) {
break;
@@ -2282,10 +2193,9 @@ xpc_acknowledge_msgs(struct xpc_channel
}
if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) !=
- initial_get) {
+ initial_get) {
/* someone else beat us to it */
- DBUG_ON((volatile s64) ch->local_GP->get <=
- initial_get);
+ DBUG_ON((volatile s64)ch->local_GP->get <= initial_get);
break;
}
@@ -2309,7 +2219,6 @@ xpc_acknowledge_msgs(struct xpc_channel
}
}
-
/*
* Acknowledge receipt of a delivered message.
*
@@ -2335,18 +2244,17 @@ xpc_initiate_received(short partid, int
struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
s64 get, msg_number = msg->number;
-
DBUG_ON(partid < XP_MIN_PARTID || partid > XP_MAX_PARTID);
DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
ch = &part->channels[ch_number];
dev_dbg(xpc_chan, "msg=0x%p, msg_number=%" U64_ELL "d, partid=%d, "
- "channel=%d\n", (void *) msg, msg_number, ch->partid,
+ "channel=%d\n", (void *)msg, msg_number, ch->partid,
ch->number);
- DBUG_ON((((u64) msg - (u64) ch->remote_msgqueue) / ch->msg_size) !=
- msg_number % ch->remote_nentries);
+ DBUG_ON((((u64)msg - (u64)ch->remote_msgqueue) / ch->msg_size) !=
+ msg_number % ch->remote_nentries);
DBUG_ON(msg->flags & XPC_M_DONE);
msg->flags |= XPC_M_DONE;
@@ -2369,4 +2277,3 @@ xpc_initiate_received(short partid, int
/* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */
xpc_msgqueue_deref(ch);
}
-
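
The hunks above are mostly Lindent whitespace churn, but the logic being
reindented deserves a plain statement: xpc_allocate_msg() reserves a slot
in a circular message queue without taking a lock, by advancing
w_local_GP.put with cmpxchg() and retrying if another producer got there
first. Below is a minimal, self-contained sketch of that reservation
pattern in C11 (the struct and function names are illustrative, not
XPC's; the kernel version refreshes the remote Get value and retries
when the queue looks full, rather than failing):

	#include <stdatomic.h>
	#include <stdint.h>

	struct ring {
		_Atomic int64_t put;	/* next slot to produce (w_local_GP.put)  */
		_Atomic int64_t get;	/* last slot consumed   (w_remote_GP.get) */
		int64_t nentries;	/* queue depth          (local_nentries)  */
	};

	/* Try to reserve one slot, mirroring the cmpxchg() loop in
	 * xpc_allocate_msg().  Returns the slot index, or -1 if full. */
	static int64_t slot_reserve(struct ring *r)
	{
		int64_t put;

		for (;;) {
			put = atomic_load(&r->put);
			if (put - atomic_load(&r->get) >= r->nentries)
				return -1;	/* queue full */
			/* claim the entry unless someone beats us to it */
			if (atomic_compare_exchange_weak(&r->put, &put,
							 put + 1))
				return put % r->nentries;
		}
	}

The weak compare-exchange may fail spuriously, which is harmless here:
the loop simply re-reads put and tries again, just as the kernel code
loops on cmpxchg().
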
Index: linux-2.6/drivers/misc/xp/xpc_main.c
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xpc_main.c 2008-03-21 13:30:52.034175000 -0500
+++ linux-2.6/drivers/misc/xp/xpc_main.c 2008-03-24 19:31:37.154185684 -0500
@@ -6,7 +6,6 @@
* Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
-
/*
* Cross Partition Communication (XPC) support - standard version.
*
@@ -44,7 +43,6 @@
*
*/
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -67,7 +65,6 @@
#include <asm/uaccess.h>
#include "xpc.h"
-
/* define two XPC debug device structures to be used with dev_dbg() et al */
struct device_driver xpc_dbg_name = {
@@ -87,10 +84,8 @@ struct device xpc_chan_dbg_subname = {
struct device *xpc_part = &xpc_part_dbg_subname;
struct device *xpc_chan = &xpc_chan_dbg_subname;
-
static int xpc_kdebug_ignore;
-
/* systune related variables for /proc/sys directories */
static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
@@ -107,56 +102,51 @@ static int xpc_disengage_request_max_tim
static ctl_table xpc_sys_xpc_hb_dir[] = {
{
- .ctl_name = CTL_UNNUMBERED,
- .procname = "hb_interval",
- .data = &xpc_hb_interval,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
- .extra1 = &xpc_hb_min_interval,
- .extra2 = &xpc_hb_max_interval
- },
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "hb_interval",
+ .data = &xpc_hb_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &xpc_hb_min_interval,
+ .extra2 = &xpc_hb_max_interval},
{
- .ctl_name = CTL_UNNUMBERED,
- .procname = "hb_check_interval",
- .data = &xpc_hb_check_interval,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
- .extra1 = &xpc_hb_check_min_interval,
- .extra2 = &xpc_hb_check_max_interval
- },
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "hb_check_interval",
+ .data = &xpc_hb_check_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &xpc_hb_check_min_interval,
+ .extra2 = &xpc_hb_check_max_interval},
{}
};
static ctl_table xpc_sys_xpc_dir[] = {
{
- .ctl_name = CTL_UNNUMBERED,
- .procname = "hb",
- .mode = 0555,
- .child = xpc_sys_xpc_hb_dir
- },
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "hb",
+ .mode = 0555,
+ .child = xpc_sys_xpc_hb_dir},
{
- .ctl_name = CTL_UNNUMBERED,
- .procname = "disengage_request_timelimit",
- .data = &xpc_disengage_request_timelimit,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
- .extra1 = &xpc_disengage_request_min_timelimit,
- .extra2 = &xpc_disengage_request_max_timelimit
- },
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "disengage_request_timelimit",
+ .data = &xpc_disengage_request_timelimit,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &xpc_disengage_request_min_timelimit,
+ .extra2 = &xpc_disengage_request_max_timelimit},
{}
};
static ctl_table xpc_sys_dir[] = {
{
- .ctl_name = CTL_UNNUMBERED,
- .procname = "xpc",
- .mode = 0555,
- .child = xpc_sys_xpc_dir
- },
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "xpc",
+ .mode = 0555,
+ .child = xpc_sys_xpc_dir},
{}
};
static struct ctl_table_header *xpc_sysctl;
@@ -178,13 +168,10 @@ static DECLARE_COMPLETION(xpc_hb_checker
/* notification that the xpc_discovery thread has exited */
static DECLARE_COMPLETION(xpc_discovery_exited);
-
static struct timer_list xpc_hb_timer;
-
static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);
-
static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_reboot_notifier = {
.notifier_call = xpc_system_reboot,
@@ -195,25 +182,22 @@ static struct notifier_block xpc_die_not
.notifier_call = xpc_system_die,
};
-
/*
* Timer function to enforce the timelimit on the partition disengage request.
*/
static void
xpc_timeout_partition_disengage_request(unsigned long data)
{
- struct xpc_partition *part = (struct xpc_partition *) data;
-
+ struct xpc_partition *part = (struct xpc_partition *)data;
DBUG_ON(jiffies < part->disengage_request_timeout);
- (void) xpc_partition_disengaged(part);
+ (void)xpc_partition_disengaged(part);
DBUG_ON(part->disengage_request_timeout != 0);
DBUG_ON(xpc_partition_engaged(XPC_PARTID(part)) != 0);
}
-
/*
* Notify the heartbeat check thread that an IRQ has been received.
*/
@@ -225,7 +209,6 @@ xpc_act_IRQ_handler(int irq, void *dev_i
return IRQ_HANDLED;
}
-
/*
* Timer to produce the heartbeat. The timer structure's function is
* already set when this is initially called. A tunable is used to
@@ -244,7 +227,6 @@ xpc_hb_beater(unsigned long dummy)
add_timer(&xpc_hb_timer);
}
-
/*
* This thread is responsible for nearly all of the partition
* activation/deactivation.
@@ -254,8 +236,7 @@ xpc_hb_checker(void *ignore)
{
int last_IRQ_count = 0;
int new_IRQ_count;
- int force_IRQ=0;
-
+ int force_IRQ = 0;
/* this thread was marked active by xpc_hb_init() */
@@ -267,14 +248,13 @@ xpc_hb_checker(void *ignore)
xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
xpc_hb_beater(0);
- while (!(volatile int) xpc_exiting) {
+ while (!(volatile int)xpc_exiting) {
dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
"been received\n",
- (int) (xpc_hb_check_timeout - jiffies),
+ (int)(xpc_hb_check_timeout - jiffies),
atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count);
-
/* checking of remote heartbeats is skewed by IRQ handling */
if (jiffies >= xpc_hb_check_timeout) {
dev_dbg(xpc_part, "checking remote heartbeats\n");
@@ -288,7 +268,6 @@ xpc_hb_checker(void *ignore)
force_IRQ = 1;
}
-
/* check for outstanding IRQs */
new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
@@ -300,30 +279,30 @@ xpc_hb_checker(void *ignore)
last_IRQ_count += xpc_identify_act_IRQ_sender();
if (last_IRQ_count < new_IRQ_count) {
/* retry once to help avoid missing AMO */
- (void) xpc_identify_act_IRQ_sender();
+ (void)xpc_identify_act_IRQ_sender();
}
last_IRQ_count = new_IRQ_count;
xpc_hb_check_timeout = jiffies +
- (xpc_hb_check_interval * HZ);
+ (xpc_hb_check_interval * HZ);
}
/* wait for IRQ or timeout */
- (void) wait_event_interruptible(xpc_act_IRQ_wq,
- (last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) ||
- jiffies >= xpc_hb_check_timeout ||
- (volatile int) xpc_exiting));
+ (void)wait_event_interruptible(xpc_act_IRQ_wq,
+ (last_IRQ_count <
+ atomic_read(&xpc_act_IRQ_rcvd)
+ || jiffies >=
+ xpc_hb_check_timeout
+ || (volatile int)xpc_exiting));
}
dev_dbg(xpc_part, "heartbeat checker is exiting\n");
-
/* mark this thread as having exited */
complete(&xpc_hb_checker_exited);
return 0;
}
-
/*
* This thread will attempt to discover other partitions to activate
* based on info provided by SAL. This new thread is short lived and
@@ -343,7 +322,6 @@ xpc_initiate_discovery(void *ignore)
return 0;
}
-
/*
* Establish first contact with the remote partition. This involves pulling
* the XPC per partition variables from the remote partition and waiting for
@@ -354,7 +332,6 @@ xpc_make_first_contact(struct xpc_partit
{
enum xp_retval ret;
-
while ((ret = xpc_pull_remote_vars_part(part)) != xpSuccess) {
if (ret != xpRetry) {
XPC_DEACTIVATE_PARTITION(part, ret);
@@ -365,7 +342,7 @@ xpc_make_first_contact(struct xpc_partit
"partition %d\n", XPC_PARTID(part));
/* wait a 1/4 of a second or so */
- (void) msleep_interruptible(250);
+ (void)msleep_interruptible(250);
if (part->act_state == XPC_P_AS_DEACTIVATING) {
return part->reason;
@@ -375,7 +352,6 @@ xpc_make_first_contact(struct xpc_partit
return xpc_mark_partition_active(part);
}
-
/*
* The first kthread assigned to a newly activated partition is the one
* created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to
@@ -392,12 +368,11 @@ static void
xpc_channel_mgr(struct xpc_partition *part)
{
while (part->act_state != XPC_P_AS_DEACTIVATING ||
- atomic_read(&part->nchannels_active) > 0 ||
- !xpc_partition_disengaged(part)) {
+ atomic_read(&part->nchannels_active) > 0 ||
+ !xpc_partition_disengaged(part)) {
xpc_process_channel_activity(part);
-
/*
* Wait until we've been requested to activate kthreads or
* all of the channel's message queues have been torn down or
@@ -412,18 +387,25 @@ xpc_channel_mgr(struct xpc_partition *pa
* wake him up.
*/
atomic_dec(&part->channel_mgr_requests);
- (void) wait_event_interruptible(part->channel_mgr_wq,
- (atomic_read(&part->channel_mgr_requests) > 0 ||
- (volatile u64) part->local_IPI_amo != 0 ||
- ((volatile u8) part->act_state ==
- XPC_P_AS_DEACTIVATING &&
- atomic_read(&part->nchannels_active) == 0 &&
- xpc_partition_disengaged(part))));
+ (void)wait_event_interruptible(part->channel_mgr_wq,
+ (atomic_read
+ (&part->channel_mgr_requests) >
+ 0
+ || (volatile u64)part->
+ local_IPI_amo != 0
+ || ((volatile u8)part->
+ act_state ==
+ XPC_P_AS_DEACTIVATING
+ && atomic_read(&part->
+ nchannels_active)
+ == 0
+ &&
+ xpc_partition_disengaged
+ (part))));
atomic_set(&part->channel_mgr_requests, 1);
}
}
-
/*
* When XPC HB determines that a partition has come up, it will create a new
* kthread and that kthread will call this function to attempt to set up the
@@ -457,7 +439,7 @@ xpc_partition_up(struct xpc_partition *p
* has been dismantled.
*/
- (void) xpc_part_ref(part); /* this will always succeed */
+ (void)xpc_part_ref(part); /* this will always succeed */
if (xpc_make_first_contact(part) == xpSuccess) {
xpc_channel_mgr(part);
@@ -468,17 +450,15 @@ xpc_partition_up(struct xpc_partition *p
xpc_teardown_infrastructure(part);
}
-
static int
xpc_activating(void *__partid)
{
- short partid = (u64) __partid;
+ short partid = (u64)__partid;
struct xpc_partition *part = &xpc_partitions[partid];
unsigned long irq_flags;
- struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+ struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1 };
int ret;
-
DBUG_ON(partid < XP_MIN_PARTID || partid > XP_MAX_PARTID);
spin_lock_irqsave(&part->lock, irq_flags);
@@ -508,7 +488,7 @@ xpc_activating(void *__partid)
ret = sched_setscheduler(current, SCHED_FIFO, ¶m);
if (ret != 0) {
dev_warn(xpc_part, "unable to set pid %d to a realtime "
- "priority, ret=%d\n", current->pid, ret);
+ "priority, ret=%d\n", current->pid, ret);
}
/* allow this thread and its children to run on any CPU */
@@ -522,8 +502,7 @@ xpc_activating(void *__partid)
ret = xpc_register_remote_amos(part);
if (ret != xpSuccess) {
dev_warn(xpc_part, "xpc_activating() failed to register remote "
- "AMOs for partition %d, ret=%d\n", partid,
- ret);
+ "AMOs for partition %d, ret=%d\n", partid, ret);
spin_lock_irqsave(&part->lock, irq_flags);
part->act_state = XPC_P_AS_INACTIVE;
@@ -536,7 +515,6 @@ xpc_activating(void *__partid)
xpc_allow_hb(partid, xpc_vars);
xpc_IPI_send_activated(part);
-
/*
* xpc_partition_up() holds this thread and marks this partition as
* XPC_P_AS_ACTIVE by calling xpc_hb_mark_active().
@@ -556,7 +534,6 @@ xpc_activating(void *__partid)
return 0;
}
-
void
xpc_activate_partition(struct xpc_partition *part)
{
@@ -564,7 +541,6 @@ xpc_activate_partition(struct xpc_partit
unsigned long irq_flags;
pid_t pid;
-
spin_lock_irqsave(&part->lock, irq_flags);
DBUG_ON(part->act_state != XPC_P_AS_INACTIVE);
@@ -574,7 +550,7 @@ xpc_activate_partition(struct xpc_partit
spin_unlock_irqrestore(&part->lock, irq_flags);
- pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0);
+ pid = kernel_thread(xpc_activating, (void *)((u64)partid), 0);
if (unlikely(pid <= 0)) {
spin_lock_irqsave(&part->lock, irq_flags);
@@ -584,7 +560,6 @@ xpc_activate_partition(struct xpc_partit
}
}
-
/*
* Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
* partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
@@ -604,10 +579,9 @@ xpc_activate_partition(struct xpc_partit
irqreturn_t
xpc_notify_IRQ_handler(int irq, void *dev_id)
{
- short partid = (short) (u64) dev_id;
+ short partid = (short)(u64)dev_id;
struct xpc_partition *part = &xpc_partitions[partid];
-
DBUG_ON(partid < XP_MIN_PARTID || partid > XP_MAX_PARTID);
if (xpc_part_ref(part)) {
@@ -618,7 +592,6 @@ xpc_notify_IRQ_handler(int irq, void *de
return IRQ_HANDLED;
}
-
/*
* Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
* because the write to their associated IPI amo completed after the IRQ/IPI
@@ -631,13 +604,12 @@ xpc_dropped_IPI_check(struct xpc_partiti
xpc_check_for_channel_activity(part);
part->dropped_IPI_timer.expires = jiffies +
- XPC_DROPPED_IPI_WAIT_INTERVAL;
+ XPC_DROPPED_IPI_WAIT_INTERVAL;
add_timer(&part->dropped_IPI_timer);
xpc_part_deref(part);
}
}
-
void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
@@ -645,7 +617,6 @@ xpc_activate_kthreads(struct xpc_channel
int assigned = atomic_read(&ch->kthreads_assigned);
int wakeup;
-
DBUG_ON(needed <= 0);
if (idle > 0) {
@@ -676,7 +647,6 @@ xpc_activate_kthreads(struct xpc_channel
xpc_create_kthreads(ch, needed, 0);
}
-
/*
* This function is where XPC's kthreads wait for messages to deliver.
*/
@@ -686,15 +656,14 @@ xpc_kthread_waitmsgs(struct xpc_partitio
do {
/* deliver messages to their intended recipients */
- while ((volatile s64) ch->w_local_GP.get <
- (volatile s64) ch->w_remote_GP.put &&
- !((volatile u32) ch->flags &
- XPC_C_DISCONNECTING)) {
+ while ((volatile s64)ch->w_local_GP.get <
+ (volatile s64)ch->w_remote_GP.put &&
+ !((volatile u32)ch->flags & XPC_C_DISCONNECTING)) {
xpc_deliver_msg(ch);
}
if (atomic_inc_return(&ch->kthreads_idle) >
- ch->kthreads_idle_limit) {
+ ch->kthreads_idle_limit) {
/* too many idle kthreads on this channel */
atomic_dec(&ch->kthreads_idle);
break;
@@ -703,18 +672,20 @@ xpc_kthread_waitmsgs(struct xpc_partitio
dev_dbg(xpc_chan, "idle kthread calling "
"wait_event_interruptible_exclusive()\n");
- (void) wait_event_interruptible_exclusive(ch->idle_wq,
- ((volatile s64) ch->w_local_GP.get <
- (volatile s64) ch->w_remote_GP.put ||
- ((volatile u32) ch->flags &
- XPC_C_DISCONNECTING)));
+ (void)wait_event_interruptible_exclusive(ch->idle_wq,
+ ((volatile s64)ch->
+ w_local_GP.get <
+ (volatile s64)ch->
+ w_remote_GP.put
+ || ((volatile u32)ch->
+ flags &
+ XPC_C_DISCONNECTING)));
atomic_dec(&ch->kthreads_idle);
- } while (!((volatile u32) ch->flags & XPC_C_DISCONNECTING));
+ } while (!((volatile u32)ch->flags & XPC_C_DISCONNECTING));
}
-
static int
xpc_daemonize_kthread(void *args)
{
@@ -725,7 +696,6 @@ xpc_daemonize_kthread(void *args)
int n_needed;
unsigned long irq_flags;
-
daemonize("xpc%02dc%d", partid, ch_number);
dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
@@ -756,8 +726,7 @@ xpc_daemonize_kthread(void *args)
* need one less than total #of messages to deliver.
*/
n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
- if (n_needed > 0 &&
- !(ch->flags & XPC_C_DISCONNECTING)) {
+ if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING)) {
xpc_activate_kthreads(ch, n_needed);
}
} else {
@@ -771,7 +740,7 @@ xpc_daemonize_kthread(void *args)
spin_lock_irqsave(&ch->lock, irq_flags);
if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
- !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
+ !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -798,7 +767,6 @@ xpc_daemonize_kthread(void *args)
return 0;
}
-
/*
* For each partition that XPC has established communications with, there is
* a minimum of one kernel thread assigned to perform any operation that
@@ -813,14 +781,13 @@ xpc_daemonize_kthread(void *args)
*/
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
- int ignore_disconnecting)
+ int ignore_disconnecting)
{
unsigned long irq_flags;
pid_t pid;
u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
struct xpc_partition *part = &xpc_partitions[ch->partid];
-
while (needed-- > 0) {
/*
@@ -832,7 +799,7 @@ xpc_create_kthreads(struct xpc_channel *
if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
/* kthreads assigned had gone to zero */
BUG_ON(!(ch->flags &
- XPC_C_DISCONNECTINGCALLOUT_MADE));
+ XPC_C_DISCONNECTINGCALLOUT_MADE));
break;
}
@@ -843,10 +810,10 @@ xpc_create_kthreads(struct xpc_channel *
if (atomic_inc_return(&part->nchannels_engaged) == 1)
xpc_mark_partition_engaged(part);
}
- (void) xpc_part_ref(part);
+ (void)xpc_part_ref(part);
xpc_msgqueue_ref(ch);
- pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0);
+ pid = kernel_thread(xpc_daemonize_kthread, (void *)args, 0);
if (pid < 0) {
/* the fork failed */
@@ -869,7 +836,7 @@ xpc_create_kthreads(struct xpc_channel *
xpc_part_deref(part);
if (atomic_read(&ch->kthreads_assigned) <
- ch->kthreads_idle_limit) {
+ ch->kthreads_idle_limit) {
/*
* Flag this as an error only if we have an
* insufficient #of kthreads for the channel
@@ -877,7 +844,7 @@ xpc_create_kthreads(struct xpc_channel *
*/
spin_lock_irqsave(&ch->lock, irq_flags);
XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
- &irq_flags);
+ &irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
}
break;
@@ -885,7 +852,6 @@ xpc_create_kthreads(struct xpc_channel *
}
}
-
void
xpc_disconnect_wait(int ch_number)
{
@@ -895,7 +861,6 @@ xpc_disconnect_wait(int ch_number)
struct xpc_channel *ch;
int wakeup_channel_mgr;
-
/* now wait for all callouts to the caller's function to cease */
for (partid = XP_MIN_PARTID; partid <= XP_MAX_PARTID; partid++) {
part = &xpc_partitions[partid];
@@ -921,7 +886,8 @@ xpc_disconnect_wait(int ch_number)
if (part->act_state != XPC_P_AS_DEACTIVATING) {
spin_lock(&part->IPI_lock);
XPC_SET_IPI_FLAGS(part->local_IPI_amo,
- ch->number, ch->delayed_IPI_flags);
+ ch->number,
+ ch->delayed_IPI_flags);
spin_unlock(&part->IPI_lock);
wakeup_channel_mgr = 1;
}
@@ -939,7 +905,6 @@ xpc_disconnect_wait(int ch_number)
}
}
-
static void
xpc_do_exit(enum xp_retval reason)
{
@@ -948,7 +913,6 @@ xpc_do_exit(enum xp_retval reason)
struct xpc_partition *part;
unsigned long printmsg_time, disengage_request_timeout = 0;
-
/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
DBUG_ON(xpc_exiting == 1);
@@ -969,10 +933,8 @@ xpc_do_exit(enum xp_retval reason)
/* wait for the heartbeat checker thread to exit */
wait_for_completion(&xpc_hb_checker_exited);
-
/* sleep for a 1/3 of a second or so */
- (void) msleep_interruptible(300);
-
+ (void)msleep_interruptible(300);
/* wait for all partitions to become inactive */
@@ -982,12 +944,11 @@ xpc_do_exit(enum xp_retval reason)
do {
active_part_count = 0;
- for (partid = XP_MIN_PARTID; partid <= XP_MAX_PARTID;
- partid++) {
+ for (partid = XP_MIN_PARTID; partid <= XP_MAX_PARTID; partid++) {
part = &xpc_partitions[partid];
if (xpc_partition_disengaged(part) &&
- part->act_state == XPC_P_AS_INACTIVE) {
+ part->act_state == XPC_P_AS_INACTIVE) {
xpc_unregister_remote_amos(part);
continue;
}
@@ -997,47 +958,46 @@ xpc_do_exit(enum xp_retval reason)
XPC_DEACTIVATE_PARTITION(part, reason);
if (part->disengage_request_timeout >
- disengage_request_timeout) {
+ disengage_request_timeout) {
disengage_request_timeout =
- part->disengage_request_timeout;
+ part->disengage_request_timeout;
}
}
if (xpc_any_partition_engaged()) {
if (time_after(jiffies, printmsg_time)) {
dev_info(xpc_part, "waiting for remote "
- "partitions to disengage, timeout in "
- "%ld seconds\n",
- (disengage_request_timeout - jiffies)
- / HZ);
+ "partitions to disengage, timeout in "
+ "%ld seconds\n",
+ (disengage_request_timeout - jiffies)
+ / HZ);
printmsg_time = jiffies +
- (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
+ (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
printed_waiting_msg = 1;
}
} else if (active_part_count > 0) {
if (printed_waiting_msg) {
dev_info(xpc_part, "waiting for local partition"
- " to disengage\n");
+ " to disengage\n");
printed_waiting_msg = 0;
}
} else {
if (!xpc_disengage_request_timedout) {
dev_info(xpc_part, "all partitions have "
- "disengaged\n");
+ "disengaged\n");
}
break;
}
/* sleep for a 1/3 of a second or so */
- (void) msleep_interruptible(300);
+ (void)msleep_interruptible(300);
} while (1);
DBUG_ON(xpc_any_partition_engaged());
-
/* indicate to others that our reserved page is uninitialized */
xpc_rsvd_page->vars_pa = 0;
@@ -1047,17 +1007,16 @@ xpc_do_exit(enum xp_retval reason)
if (reason == xpUnloading) {
/* take ourselves off of the reboot_notifier_list */
- (void) unregister_reboot_notifier(&xpc_reboot_notifier);
+ (void)unregister_reboot_notifier(&xpc_reboot_notifier);
/* take ourselves off of the die_notifier list */
- (void) unregister_die_notifier(&xpc_die_notifier);
+ (void)unregister_die_notifier(&xpc_die_notifier);
}
/* close down protections for IPI operations */
xp_disallow_IPI_ops();
xp_change_memprotect_shub_wars_1_1(XP_MEMPROT_DISALLOW_ALL);
-
/* clear the interface to XPC's functions */
xpc_clear_interface();
@@ -1068,7 +1027,6 @@ xpc_do_exit(enum xp_retval reason)
kfree(xpc_remote_copy_buffer_base);
}
-
/*
* This function is called when the system is being rebooted.
*/
@@ -1077,7 +1035,6 @@ xpc_system_reboot(struct notifier_block
{
enum xp_retval reason;
-
switch (event) {
case SYS_RESTART:
reason = xpSystemReboot;
@@ -1096,7 +1053,6 @@ xpc_system_reboot(struct notifier_block
return NOTIFY_DONE;
}
-
#ifdef CONFIG_IA64
/*
* Notify other partitions to disengage from all references to our memory.
@@ -1108,17 +1064,15 @@ xpc_die_disengage(void)
short partid;
long time, printmsg_time, disengage_request_timeout;
-
/* keep xpc_hb_checker thread from doing anything (just in case) */
xpc_exiting = 1;
- xpc_disallow_all_hbs(xpc_vars); /* indicate we're deactivated */
+ xpc_disallow_all_hbs(xpc_vars); /* indicate we're deactivated */
for (partid = XP_MIN_PARTID; partid <= XP_MAX_PARTID; partid++) {
part = &xpc_partitions[partid];
- if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->
- remote_vars_version)) {
+ if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
/* just in case it was left set by an earlier XPC */
xpc_clear_partition_engaged(partid);
@@ -1126,7 +1080,7 @@ xpc_die_disengage(void)
}
if (xpc_partition_engaged(partid) ||
- part->act_state != XPC_P_AS_INACTIVE) {
+ part->act_state != XPC_P_AS_INACTIVE) {
xpc_request_partition_disengage(part);
xpc_mark_partition_disengaged(part);
xpc_IPI_send_disengage(part);
@@ -1135,9 +1089,9 @@ xpc_die_disengage(void)
time = rtc_time();
printmsg_time = time +
- (XPC_DISENGAGE_PRINTMSG_INTERVAL * xp_rtc_cycles_per_second);
+ (XPC_DISENGAGE_PRINTMSG_INTERVAL * xp_rtc_cycles_per_second);
disengage_request_timeout = time +
- (xpc_disengage_request_timelimit * xp_rtc_cycles_per_second);
+ (xpc_disengage_request_timelimit * xp_rtc_cycles_per_second);
/* wait for all other partitions to disengage from us */
@@ -1153,8 +1107,8 @@ xpc_die_disengage(void)
partid++) {
if (xpc_partition_engaged(partid)) {
dev_info(xpc_part, "disengage from "
- "remote partition %d timed "
- "out\n", partid);
+ "remote partition %d timed "
+ "out\n", partid);
}
}
break;
@@ -1162,18 +1116,17 @@ xpc_die_disengage(void)
if (time >= printmsg_time) {
dev_info(xpc_part, "waiting for remote partitions to "
- "disengage, timeout in %ld seconds\n",
- (disengage_request_timeout - time) /
- xp_rtc_cycles_per_second);
+ "disengage, timeout in %ld seconds\n",
+ (disengage_request_timeout - time) /
+ xp_rtc_cycles_per_second);
printmsg_time = time +
- (XPC_DISENGAGE_PRINTMSG_INTERVAL *
- xp_rtc_cycles_per_second);
+ (XPC_DISENGAGE_PRINTMSG_INTERVAL *
+ xp_rtc_cycles_per_second);
}
}
}
#endif /* CONFIG_IA64 */
-
/*
* This function is called when the system is being restarted or halted due
* to some sort of system failure. If this is the case we need to notify the
@@ -1185,7 +1138,7 @@ xpc_die_disengage(void)
static int
xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
{
-#ifdef CONFIG_IA64 /* >>> will deal with notify_die events on X86_64 shortly */
+#ifdef CONFIG_IA64 /* >>> will deal with notify_die events on X86_64 shortly */
switch (event) {
case DIE_MACHINE_RESTART:
case DIE_MACHINE_HALT:
@@ -1220,7 +1173,6 @@ xpc_system_die(struct notifier_block *nb
return NOTIFY_DONE;
}
-
int __init
xpc_init(void)
{
@@ -1257,7 +1209,7 @@ xpc_init(void)
for (partid = XP_MIN_PARTID; partid <= XP_MAX_PARTID; partid++) {
part = &xpc_partitions[partid];
- DBUG_ON((u64) part != L1_CACHE_ALIGN((u64) part));
+ DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));
part->act_IRQ_rcvd = 0;
spin_lock_init(&part->lock);
@@ -1266,8 +1218,8 @@ xpc_init(void)
init_timer(&part->disengage_request_timer);
part->disengage_request_timer.function =
- xpc_timeout_partition_disengage_request;
- part->disengage_request_timer.data = (unsigned long) part;
+ xpc_timeout_partition_disengage_request;
+ part->disengage_request_timer.data = (unsigned long)part;
part->setup_state = XPC_P_SS_UNSET;
init_waitqueue_head(&part->teardown_wq);
@@ -1294,7 +1246,7 @@ xpc_init(void)
* but rather immediately process the interrupt.
*/
ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
- "xpc hb", NULL);
+ "xpc hb", NULL);
if (ret != 0) {
dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
"errno=%d\n", -ret);
@@ -1332,7 +1284,8 @@ xpc_init(void)
buf_size = max(XPC_RP_VARS_SIZE,
XPC_RP_HEADER_SIZE + xp_sizeof_nasid_mask);
xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
- GFP_KERNEL, &xpc_remote_copy_buffer_base);
+ GFP_KERNEL,
+ &xpc_remote_copy_buffer_base);
if (xpc_remote_copy_buffer == NULL) {
dev_err(xpc_part, "could not allocate remote copy buffer\n");
@@ -1349,7 +1302,6 @@ xpc_init(void)
return -ENOMEM;
}
-
/* add ourselves to the reboot_notifier_list */
ret = register_reboot_notifier(&xpc_reboot_notifier);
if (ret != 0) {
@@ -1377,10 +1329,10 @@ xpc_init(void)
xpc_rsvd_page->vars_pa = 0;
/* take ourselves off of the reboot_notifier_list */
- (void) unregister_reboot_notifier(&xpc_reboot_notifier);
+ (void)unregister_reboot_notifier(&xpc_reboot_notifier);
/* take ourselves off of the die_notifier list */
- (void) unregister_die_notifier(&xpc_die_notifier);
+ (void)unregister_die_notifier(&xpc_die_notifier);
del_timer_sync(&xpc_hb_timer);
free_irq(SGI_XPC_ACTIVATE, NULL);
@@ -1395,7 +1347,6 @@ xpc_init(void)
return -EBUSY;
}
-
/*
* Start up a thread that will attempt to discover other partitions to
* activate based on info provided by SAL. This new thread is short
@@ -1412,7 +1363,6 @@ xpc_init(void)
return -EBUSY;
}
-
/* set the interface to point at XPC's functions */
xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
xpc_initiate_allocate, xpc_initiate_send,
@@ -1421,16 +1371,16 @@ xpc_init(void)
return 0;
}
-module_init(xpc_init);
+module_init(xpc_init);
void __exit
xpc_exit(void)
{
xpc_do_exit(xpUnloading);
}
-module_exit(xpc_exit);
+module_exit(xpc_exit);
MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
@@ -1438,17 +1388,16 @@ MODULE_LICENSE("GPL");
module_param(xpc_hb_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
- "heartbeat increments.");
+ "heartbeat increments.");
module_param(xpc_hb_check_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
- "heartbeat checks.");
+ "heartbeat checks.");
module_param(xpc_disengage_request_timelimit, int, 0);
MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
- "for disengage request to complete.");
+ "for disengage request to complete.");
module_param(xpc_kdebug_ignore, int, 0);
MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
- "other partitions when dropping into kdebug.");
-
+ "other partitions when dropping into kdebug.");
Index: linux-2.6/drivers/misc/xp/xpc_partition.c
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xpc_partition.c 2008-03-21 13:30:52.034175000 -0500
+++ linux-2.6/drivers/misc/xp/xpc_partition.c 2008-03-24 19:31:37.154185684 -0500
@@ -6,7 +6,6 @@
* Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
-
/*
* Cross Partition Communication (XPC) partition support.
*
@@ -16,7 +15,6 @@
*
*/
-
#include <linux/kernel.h>
#include <linux/sysctl.h>
#include <linux/cache.h>
@@ -32,11 +30,9 @@
#error architecture is NOT supported
#endif
-
/* XPC is exiting flag */
int xpc_exiting;
-
/* this partition's reserved page pointers */
struct xpc_rsvd_page *xpc_rsvd_page;
static u64 *xpc_part_nasids;
@@ -44,7 +40,6 @@ static u64 *xpc_mach_nasids;
struct xpc_vars *xpc_vars;
struct xpc_vars_part *xpc_vars_part;
-
/*
* For performance reasons, each entry of xpc_partitions[] is cacheline
* aligned. And xpc_partitions[] is padded with an additional entry at the
@@ -53,7 +48,6 @@ struct xpc_vars_part *xpc_vars_part;
*/
struct xpc_partition xpc_partitions[XP_NPARTITIONS + 1];
-
/*
* Generic buffer used to store a local copy of portions of a remote
* partition's reserved page (either its header and part_nasids mask,
@@ -62,7 +56,6 @@ struct xpc_partition xpc_partitions[XP_N
char *xpc_remote_copy_buffer;
void *xpc_remote_copy_buffer_base;
-
/*
* Guarantee that the kmalloc'd memory is cacheline aligned.
*/
@@ -74,7 +67,7 @@ xpc_kmalloc_cacheline_aligned(size_t siz
if (*base == NULL) {
return NULL;
}
- if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
+ if ((u64)*base == L1_CACHE_ALIGN((u64)*base)) {
return *base;
}
kfree(*base);
@@ -84,10 +77,9 @@ xpc_kmalloc_cacheline_aligned(size_t siz
if (*base == NULL) {
return NULL;
}
- return (void *) L1_CACHE_ALIGN((u64) *base);
+ return (void *)L1_CACHE_ALIGN((u64)*base);
}
-
/*
* Given a nasid, get the physical address of the partition's reserved page
* for that nasid. This function returns 0 on any error.
@@ -103,7 +95,6 @@ xpc_get_rsvd_page_pa(int nasid)
size_t buf_len = 0;
void *buf_base = NULL;
-
while (1) {
ret = xp_get_partition_rsvd_page_pa(buf, &cookie, &rp_pa, &len);
@@ -120,7 +111,8 @@ xpc_get_rsvd_page_pa(int nasid)
kfree(buf_base);
buf_len = L1_CACHE_ALIGN(len);
buf = (u64)xpc_kmalloc_cacheline_aligned(buf_len,
- GFP_KERNEL, &buf_base);
+ GFP_KERNEL,
+ &buf_base);
if (buf_base == NULL) {
dev_err(xpc_part, "unable to kmalloc "
"len=0x%016lx\n", buf_len);
@@ -146,7 +138,6 @@ xpc_get_rsvd_page_pa(int nasid)
return rp_pa;
}
-
/*
* Fill the partition reserved page with the information needed by
* other partitions to discover we are alive and establish initial
@@ -166,7 +157,6 @@ xpc_rsvd_page_init(void)
int disengage_request_amos;
int ret;
-
/* get the local reserved page's address */
preempt_disable();
@@ -176,7 +166,7 @@ xpc_rsvd_page_init(void)
dev_err(xpc_part, "SAL failed to locate the reserved page\n");
return NULL;
}
- rp = (struct xpc_rsvd_page *) __va(rp_pa);
+ rp = (struct xpc_rsvd_page *)__va(rp_pa);
rp->version = XPC_RP_VERSION;
@@ -238,12 +228,11 @@ xpc_rsvd_page_init(void)
xpc_vars->act_phys_cpuid = cpu_physical_id(0);
xpc_vars->vars_part_pa = __pa(xpc_vars_part);
xpc_vars->amos_page_pa = xp_pa((u64)amos_page);
- xpc_vars->amos_page = amos_page; /* save for next load of XPC */
-
+ xpc_vars->amos_page = amos_page; /* save for next load of XPC */
/* clear xpc_vars_part */
memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
- XP_NPARTITIONS);
+ XP_NPARTITIONS);
/* initialize the activate IRQ related AMO variables */
activate_irq_amos = xpc_activate_irq_amos(XP_NPARTITIONS);
@@ -271,7 +260,6 @@ xpc_rsvd_page_init(void)
return rp;
}
-
/*
* At periodic intervals, scan through all active partitions and ensure
* their heartbeat is still active. If not, the partition is deactivated.
@@ -284,8 +272,7 @@ xpc_check_remote_hb(void)
short partid;
enum xp_retval ret;
-
- remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;
+ remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;
for (partid = XP_MIN_PARTID; partid <= XP_MAX_PARTID; partid++) {
@@ -300,7 +287,7 @@ xpc_check_remote_hb(void)
part = &xpc_partitions[partid];
if (part->act_state == XPC_P_AS_INACTIVE ||
- part->act_state == XPC_P_AS_DEACTIVATING) {
+ part->act_state == XPC_P_AS_DEACTIVATING) {
continue;
}
@@ -320,8 +307,8 @@ xpc_check_remote_hb(void)
remote_vars->heartbeat_offline);
if (((remote_vars->heartbeat == part->last_heartbeat) &&
- (remote_vars->heartbeat_offline == 0)) ||
- !xpc_hb_allowed(xp_partition_id, remote_vars)) {
+ (remote_vars->heartbeat_offline == 0)) ||
+ !xpc_hb_allowed(xp_partition_id, remote_vars)) {
XPC_DEACTIVATE_PARTITION(part, xpNoHeartbeat);
continue;
@@ -331,7 +318,6 @@ xpc_check_remote_hb(void)
}
}
-
/*
* Get a copy of a portion of the remote partition's rsvd page.
*
@@ -341,12 +327,11 @@ xpc_check_remote_hb(void)
*/
static enum xp_retval
xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
- struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa)
+ struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa)
{
int i;
enum xp_retval ret;
-
/* get the reserved page's physical address */
*remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
@@ -354,7 +339,6 @@ xpc_get_remote_rp(int nasid, u64 *discov
return xpNoRsvdPageAddr;
}
-
/* pull over the reserved page header and part_nasids mask */
ret = xp_remote_memcpy(remote_rp, (void *)*remote_rp_pa,
XPC_RP_HEADER_SIZE + xp_sizeof_nasid_mask);
@@ -362,25 +346,22 @@ xpc_get_remote_rp(int nasid, u64 *discov
return ret;
}
-
if (discovered_nasids != NULL) {
u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp);
-
for (i = 0; i < xp_nasid_mask_words(); i++) {
discovered_nasids[i] |= remote_part_nasids[i];
}
}
if (XPC_VERSION_MAJOR(remote_rp->version) !=
- XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
+ XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
return xpBadVersion;
}
return xpSuccess;
}
-
/*
* Get a copy of the remote partition's XPC variables from the reserved page.
*
@@ -404,7 +385,7 @@ xpc_get_remote_vars(u64 remote_vars_pa,
}
if (XPC_VERSION_MAJOR(remote_vars->version) !=
- XPC_VERSION_MAJOR(XPC_V_VERSION)) {
+ XPC_VERSION_MAJOR(XPC_V_VERSION)) {
return xpBadVersion;
}
@@ -418,14 +399,13 @@ xpc_get_remote_vars(u64 remote_vars_pa,
return xpSuccess;
}
-
/*
* Update the remote partition's info.
*/
static void
xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
- struct timespec *remote_rp_stamp, u64 remote_rp_pa,
- u64 remote_vars_pa, struct xpc_vars *remote_vars)
+ struct timespec *remote_rp_stamp, u64 remote_rp_pa,
+ u64 remote_vars_pa, struct xpc_vars *remote_vars)
{
part->remote_rp_version = remote_rp_version;
dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n",
@@ -472,7 +452,6 @@ xpc_update_partition_info(struct xpc_par
part->remote_vars_version);
}
-
/*
* Prior code has determined the nasid which generated an IPI. Inspect
* that nasid to determine if its partition needs to be activated or
@@ -502,15 +481,14 @@ xpc_identify_act_IRQ_req(int nasid)
struct xpc_partition *part;
enum xp_retval ret;
-
/* pull over the reserved page structure */
- remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer;
+ remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer;
ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
if (ret != xpSuccess) {
dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
- "which sent interrupt, reason=%d\n", nasid, ret);
+ "which sent interrupt, reason=%d\n", nasid, ret);
return;
}
@@ -522,12 +500,12 @@ xpc_identify_act_IRQ_req(int nasid)
/* pull over the cross partition variables */
- remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;
+ remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;
ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
if (ret != xpSuccess) {
dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
- "which sent interrupt, reason=%d\n", nasid, ret);
+ "which sent interrupt, reason=%d\n", nasid, ret);
return;
}
@@ -537,15 +515,15 @@ xpc_identify_act_IRQ_req(int nasid)
part->act_IRQ_rcvd++;
dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
- "%" U64_ELL "d\n", (int) nasid, (int) partid,
+ "%" U64_ELL "d\n", (int)nasid, (int)partid,
part->act_IRQ_rcvd, remote_vars->heartbeat);
if (xpc_partition_disengaged(part) &&
- part->act_state == XPC_P_AS_INACTIVE) {
+ part->act_state == XPC_P_AS_INACTIVE) {
xpc_update_partition_info(part, remote_rp_version,
- &remote_rp_stamp, remote_rp_pa,
- remote_vars_pa, remote_vars);
+ &remote_rp_stamp, remote_rp_pa,
+ remote_vars_pa, remote_vars);
if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
if (xpc_partition_disengage_requested(partid)) {
@@ -569,16 +547,15 @@ xpc_identify_act_IRQ_req(int nasid)
if (!XPC_SUPPORTS_RP_STAMP(part->remote_rp_version)) {
DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(part->
- remote_vars_version));
+ remote_vars_version));
if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
- version));
+ version));
/* see if the other side rebooted */
if (part->remote_amos_page_pa ==
- remote_vars->amos_page_pa &&
- xpc_hb_allowed(xp_partition_id,
- remote_vars)) {
+ remote_vars->amos_page_pa &&
+ xpc_hb_allowed(xp_partition_id, remote_vars)) {
/* doesn't look that way, so ignore the IPI */
return;
}
@@ -590,8 +567,8 @@ xpc_identify_act_IRQ_req(int nasid)
*/
xpc_update_partition_info(part, remote_rp_version,
- &remote_rp_stamp, remote_rp_pa,
- remote_vars_pa, remote_vars);
+ &remote_rp_stamp, remote_rp_pa,
+ remote_vars_pa, remote_vars);
part->reactivate_nasid = nasid;
XPC_DEACTIVATE_PARTITION(part, xpReactivating);
return;
@@ -611,15 +588,15 @@ xpc_identify_act_IRQ_req(int nasid)
xpc_clear_partition_disengage_request(partid);
xpc_update_partition_info(part, remote_rp_version,
- &remote_rp_stamp, remote_rp_pa,
- remote_vars_pa, remote_vars);
+ &remote_rp_stamp, remote_rp_pa,
+ remote_vars_pa, remote_vars);
reactivate = 1;
} else {
DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version));
stamp_diff = xpc_compare_stamps(&part->remote_rp_stamp,
- &remote_rp_stamp);
+ &remote_rp_stamp);
if (stamp_diff != 0) {
DBUG_ON(stamp_diff >= 0);
@@ -632,14 +609,15 @@ xpc_identify_act_IRQ_req(int nasid)
DBUG_ON(xpc_partition_disengage_requested(partid));
xpc_update_partition_info(part, remote_rp_version,
- &remote_rp_stamp, remote_rp_pa,
- remote_vars_pa, remote_vars);
+ &remote_rp_stamp,
+ remote_rp_pa, remote_vars_pa,
+ remote_vars);
reactivate = 1;
}
}
if (part->disengage_request_timeout > 0 &&
- !xpc_partition_disengaged(part)) {
+ !xpc_partition_disengaged(part)) {
/* still waiting on other side to disengage from us */
return;
}
@@ -649,12 +627,11 @@ xpc_identify_act_IRQ_req(int nasid)
XPC_DEACTIVATE_PARTITION(part, xpReactivating);
} else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) &&
- xpc_partition_disengage_requested(partid)) {
+ xpc_partition_disengage_requested(partid)) {
XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown);
}
}
-
/*
* Loop through the activation AMO variables and process any bits
* which are set. Each bit indicates a nasid sending a partition
@@ -669,13 +646,12 @@ xpc_identify_act_IRQ_sender(void)
int w_index, b_index;
u64 *amo_va;
u64 nasid_mask;
- u64 nasid; /* remote nasid */
+ u64 nasid; /* remote nasid */
int n_IRQs_detected = 0;
amo_va = (u64 *)((u64)xpc_vars->amos_page +
- xpc_activate_irq_amos(xpc_vars->npartitions) *
- xp_sizeof_amo);
-
+ xpc_activate_irq_amos(xpc_vars->npartitions) *
+ xp_sizeof_amo);
/* scan through activation AMO variables looking for non-zero entries */
for (w_index = 0; w_index < xp_nasid_mask_words(); w_index++) {
@@ -685,8 +661,8 @@ xpc_identify_act_IRQ_sender(void)
}
ret = xp_get_amo(amo_va, XP_AMO_CLEAR, &nasid_mask);
- BUG_ON(ret != xpSuccess); /* should never happen */
- amo_va = (u64 *)((u64)amo_va + xp_sizeof_amo); /* next amo */
+ BUG_ON(ret != xpSuccess); /* should never happen */
+ amo_va = (u64 *)((u64)amo_va + xp_sizeof_amo); /* next amo */
if (nasid_mask == 0) {
/* no IRQs from nasids in this variable */
continue;
@@ -695,7 +671,6 @@ xpc_identify_act_IRQ_sender(void)
dev_dbg(xpc_part, "AMO[%d] gave back 0x%" U64_ELL "x\n",
w_index, nasid_mask);
-
/*
* If any nasid(s) in mask have been added to the machine
* since our partition was reset, this will retain the
@@ -704,7 +679,6 @@ xpc_identify_act_IRQ_sender(void)
*/
xpc_mach_nasids[w_index] |= nasid_mask;
-
/* locate the nasid(s) which sent interrupts */
for (b_index = 0; b_index < BITS_PER_LONG; b_index++) {
@@ -720,7 +694,6 @@ xpc_identify_act_IRQ_sender(void)
return n_IRQs_detected;
}
-
/*
* See if the other side has responded to a partition disengage request
* from us.
@@ -731,7 +704,6 @@ xpc_partition_disengaged(struct xpc_part
short partid = XPC_PARTID(part);
int disengaged;
-
disengaged = (xpc_partition_engaged(partid) == 0);
if (part->disengage_request_timeout) {
if (!disengaged) {
@@ -746,7 +718,7 @@ xpc_partition_disengaged(struct xpc_part
*/
dev_info(xpc_part, "disengage from remote partition %d "
- "timed out\n", partid);
+ "timed out\n", partid);
xpc_disengage_request_timedout = 1;
xpc_clear_partition_engaged(partid);
disengaged = 1;
@@ -756,11 +728,11 @@ xpc_partition_disengaged(struct xpc_part
/* cancel the timer function, provided it's not us */
if (!in_interrupt()) {
del_singleshot_timer_sync(&part->
- disengage_request_timer);
+ disengage_request_timer);
}
DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING &&
- part->act_state != XPC_P_AS_INACTIVE);
+ part->act_state != XPC_P_AS_INACTIVE);
if (part->act_state != XPC_P_AS_INACTIVE) {
xpc_wakeup_channel_mgr(part);
}
@@ -772,7 +744,6 @@ xpc_partition_disengaged(struct xpc_part
return disengaged;
}
-
/*
* Mark specified partition as active.
*/
@@ -782,7 +753,6 @@ xpc_mark_partition_active(struct xpc_par
unsigned long irq_flags;
enum xp_retval ret;
-
dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
spin_lock_irqsave(&part->lock, irq_flags);
@@ -798,17 +768,15 @@ xpc_mark_partition_active(struct xpc_par
return ret;
}
-
/*
* Notify XPC that the partition is down.
*/
void
xpc_deactivate_partition(const int line, struct xpc_partition *part,
- enum xp_retval reason)
+ enum xp_retval reason)
{
unsigned long irq_flags;
-
spin_lock_irqsave(&part->lock, irq_flags);
if (part->act_state == XPC_P_AS_INACTIVE) {
@@ -822,7 +790,7 @@ xpc_deactivate_partition(const int line,
}
if (part->act_state == XPC_P_AS_DEACTIVATING) {
if ((part->reason == xpUnloading && reason != xpUnloading) ||
- reason == xpReactivating) {
+ reason == xpReactivating) {
XPC_SET_REASON(part, reason, line);
}
spin_unlock_irqrestore(&part->lock, irq_flags);
@@ -840,9 +808,9 @@ xpc_deactivate_partition(const int line,
/* set a timelimit on the disengage request */
part->disengage_request_timeout = jiffies +
- (xpc_disengage_request_timelimit * HZ);
+ (xpc_disengage_request_timelimit * HZ);
part->disengage_request_timer.expires =
- part->disengage_request_timeout;
+ part->disengage_request_timeout;
add_timer(&part->disengage_request_timer);
}
@@ -852,7 +820,6 @@ xpc_deactivate_partition(const int line,
xpc_partition_going_down(part, reason);
}
-
/*
* Mark specified partition as inactive.
*/
@@ -861,7 +828,6 @@ xpc_mark_partition_inactive(struct xpc_p
{
unsigned long irq_flags;
-
dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
XPC_PARTID(part));
@@ -871,7 +837,6 @@ xpc_mark_partition_inactive(struct xpc_p
part->remote_rp_pa = 0;
}
-
/*
* Register the remote partition's AMOs so any errors within that address
* range can be handled and cleaned up should the remote partition go down.
@@ -917,7 +882,6 @@ xpc_unregister_remote_amos(struct xpc_pa
spin_unlock_irqrestore(&part->lock, irq_flags);
}
-
/*
* SAL has provided a partition and machine mask. The partition mask
* contains a bit for each even nasid in our partition. The machine
@@ -945,15 +909,13 @@ xpc_discovery(void)
u64 *discovered_nasids;
enum xp_retval ret;
-
remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
xp_sizeof_nasid_mask,
GFP_KERNEL, &remote_rp_base);
if (remote_rp == NULL) {
return;
}
- remote_vars = (struct xpc_vars *) remote_rp;
-
+ remote_vars = (struct xpc_vars *)remote_rp;
discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words(),
GFP_KERNEL);
@@ -962,7 +924,7 @@ xpc_discovery(void)
return;
}
- rp = (struct xpc_rsvd_page *) xpc_rsvd_page;
+ rp = (struct xpc_rsvd_page *)xpc_rsvd_page;
/*
* The term 'region' in this context refers to the minimum number of
@@ -985,23 +947,21 @@ xpc_discovery(void)
for (region = 0; region < max_regions; region++) {
- if ((volatile int) xpc_exiting) {
+ if ((volatile int)xpc_exiting) {
break;
}
dev_dbg(xpc_part, "searching region %d\n", region);
for (nasid = (region * region_size * 2);
- nasid < ((region + 1) * region_size * 2);
- nasid += 2) {
+ nasid < ((region + 1) * region_size * 2); nasid += 2) {
- if ((volatile int) xpc_exiting) {
+ if ((volatile int)xpc_exiting) {
break;
}
dev_dbg(xpc_part, "checking nasid %d\n", nasid);
-
if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) {
dev_dbg(xpc_part, "PROM indicates Nasid %d is "
"part of the local partition; skipping "
@@ -1023,11 +983,10 @@ xpc_discovery(void)
continue;
}
-
/* pull over the reserved page structure */
ret = xpc_get_remote_rp(nasid, discovered_nasids,
- remote_rp, &remote_rp_pa);
+ remote_rp, &remote_rp_pa);
if (ret != xpSuccess) {
dev_dbg(xpc_part, "unable to get reserved page "
"from nasid %d, reason=%d\n", nasid,
@@ -1068,11 +1027,11 @@ xpc_discovery(void)
ret = xpc_register_remote_amos(part);
if (ret != xpSuccess) {
dev_warn(xpc_part, "xpc_discovery() failed to "
- "register remote AMOs for partition %d,"
- "ret=%d\n", partid, ret);
+ "register remote AMOs for partition %d,"
+ "ret=%d\n", partid, ret);
XPC_SET_REASON(part, xpPhysAddrRegFailed,
- __LINE__);
+ __LINE__);
break;
}
@@ -1088,9 +1047,9 @@ xpc_discovery(void)
remote_vars->act_phys_cpuid);
if (XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
- version)) {
+ version)) {
part->remote_amos_page_pa =
- remote_vars->amos_page_pa;
+ remote_vars->amos_page_pa;
xpc_mark_partition_disengaged(part);
xpc_cancel_partition_disengage_request(part);
}
@@ -1102,7 +1061,6 @@ xpc_discovery(void)
kfree(remote_rp_base);
}
-
/*
* Given a partid, get the nasids owned by that partition from the
* remote partition's reserved page.
@@ -1113,7 +1071,6 @@ xpc_initiate_partid_to_nasids(short part
struct xpc_partition *part;
u64 part_nasid_pa;
-
part = &xpc_partitions[partid];
if (part->remote_rp_pa == 0) {
return xpPartitionDown;
@@ -1121,9 +1078,8 @@ xpc_initiate_partid_to_nasids(short part
memset(nasid_mask, 0, xp_sizeof_nasid_mask);
- part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa);
+ part_nasid_pa = (u64)XPC_RP_PART_NASIDS(part->remote_rp_pa);
return xp_remote_memcpy(nasid_mask, (void *)part_nasid_pa,
xp_sizeof_nasid_mask);
}
-
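
xpc_kmalloc_cacheline_aligned(), touched by several hunks above, uses a
simple two-step trick: take the first allocation if it already happens
to be cacheline aligned, otherwise over-allocate by one cacheline and
round the returned pointer up, keeping the raw pointer in *base for the
eventual kfree(). A userspace sketch of the same idea follows
(malloc()/free() standing in for kmalloc()/kfree(), uintptr_t for the
kernel's (u64) casts, and the cacheline size is illustrative):

	#include <stdint.h>
	#include <stdlib.h>

	#define L1_CACHE_BYTES	128UL	/* stand-in for the arch value */
	#define L1_CACHE_ALIGN(x) \
		(((x) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))

	/* Return a cacheline-aligned buffer of at least 'size' bytes;
	 * *base receives the raw pointer to pass to free() later. */
	static void *kmalloc_cacheline_aligned(size_t size, void **base)
	{
		*base = malloc(size);
		if (*base == NULL)
			return NULL;
		if ((uintptr_t)*base == L1_CACHE_ALIGN((uintptr_t)*base))
			return *base;

		free(*base);
		*base = malloc(size + L1_CACHE_BYTES);
		if (*base == NULL)
			return NULL;
		return (void *)L1_CACHE_ALIGN((uintptr_t)*base);
	}
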
Index: linux-2.6/drivers/misc/xp/xpnet.c
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xpnet.c 2008-03-21 13:30:52.034175000 -0500
+++ linux-2.6/drivers/misc/xp/xpnet.c 2008-03-24 19:31:37.154185684 -0500
@@ -6,7 +6,6 @@
* Copyright (C) 1999,2001-2008 Silicon Graphics, Inc. All rights reserved.
*/
-
/*
* Cross Partition Network Interface (XPNET) support
*
@@ -21,7 +20,6 @@
*
*/
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -37,7 +35,6 @@
#include <asm/atomic.h>
#include "xp.h"
-
/*
* The message payload transferred by XPC.
*
@@ -76,7 +73,6 @@ struct xpnet_message {
#define XPNET_MSG_ALIGNED_SIZE (L1_CACHE_ALIGN(XPNET_MSG_SIZE))
#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE)
-
#define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1)
#define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1)
@@ -88,9 +84,9 @@ struct xpnet_message {
#define XPNET_VERSION_MAJOR(_v) ((_v) >> 4)
#define XPNET_VERSION_MINOR(_v) ((_v) & 0xf)
-#define XPNET_VERSION _XPNET_VERSION(1,0) /* version 1.0 */
-#define XPNET_VERSION_EMBED _XPNET_VERSION(1,1) /* version 1.1 */
-#define XPNET_MAGIC 0x88786984 /* "XNET" */
+#define XPNET_VERSION _XPNET_VERSION(1,0) /* version 1.0 */
+#define XPNET_VERSION_EMBED _XPNET_VERSION(1,1) /* version 1.1 */
+#define XPNET_MAGIC 0x88786984 /* "XNET" */
#define XPNET_VALID_MSG(_m) \
((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \
@@ -98,7 +94,6 @@ struct xpnet_message {
#define XPNET_DEVICE_NAME "xp0"
-
/*
* When messages are queued with xpc_send_notify, a kmalloc'd buffer
* of the following type is passed as a notification cookie. When the
@@ -141,14 +136,12 @@ static DEFINE_SPINLOCK(xpnet_broadcast_l
/* 32KB has been determined to be the ideal */
#define XPNET_DEF_MTU (0x8000UL)
-
/*
* The partid is encapsulated in the MAC address beginning in the following
* octet.
*/
#define XPNET_PARTID_OCTET 2 /* consists of 2 octets total */
-
/* Define the XPNET debug device structures to be used with dev_dbg() et al */
struct device_driver xpnet_dbg_name = {
@@ -156,7 +149,7 @@ struct device_driver xpnet_dbg_name = {
};
struct device xpnet_dbg_subname = {
- .bus_id = {0}, /* set to "" */
+ .bus_id = {0}, /* set to "" */
.driver = &xpnet_dbg_name
};
@@ -171,14 +164,13 @@ xpnet_receive(short partid, int channel,
struct sk_buff *skb;
enum xp_retval ret;
struct xpnet_dev_private *priv =
- (struct xpnet_dev_private *) xpnet_device->priv;
-
+ (struct xpnet_dev_private *)xpnet_device->priv;
if (!XPNET_VALID_MSG(msg)) {
/*
* Packet with a different XPC version. Ignore.
*/
- xpc_received(partid, channel, (void *) msg);
+ xpc_received(partid, channel, (void *)msg);
priv->stats.rx_errors++;
@@ -187,14 +179,13 @@ xpnet_receive(short partid, int channel,
dev_dbg(xpnet, "received 0x%" U64_ELL "x, %d, %d, %d\n", msg->buf_pa,
msg->size, msg->leadin_ignore, msg->tailout_ignore);
-
/* reserve an extra cache line */
skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES);
if (!skb) {
dev_err(xpnet, "failed on dev_alloc_skb(%d)\n",
msg->size + L1_CACHE_BYTES);
- xpc_received(partid, channel, (void *) msg);
+ xpc_received(partid, channel, (void *)msg);
priv->stats.rx_errors++;
@@ -220,12 +211,13 @@ xpnet_receive(short partid, int channel,
* Move the data over from the other side.
*/
if ((XPNET_VERSION_MINOR(msg->version) == 1) &&
- (msg->embedded_bytes != 0)) {
+ (msg->embedded_bytes != 0)) {
dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, "
"%lu)\n", skb->data, &msg->data,
- (size_t) msg->embedded_bytes);
+ (size_t)msg->embedded_bytes);
- skb_copy_to_linear_data(skb, &msg->data, (size_t)msg->embedded_bytes);
+ skb_copy_to_linear_data(skb, &msg->data,
+ (size_t)msg->embedded_bytes);
} else {
dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
"bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa,
@@ -233,8 +225,8 @@ xpnet_receive(short partid, int channel,
msg->size);
ret = xp_remote_memcpy((void *)((u64)skb->data &
- ~(L1_CACHE_BYTES - 1)),
- (void *)msg->buf_pa, msg->size);
+ ~(L1_CACHE_BYTES - 1)),
+ (void *)msg->buf_pa, msg->size);
if (ret != xpSuccess) {
/*
@@ -245,10 +237,10 @@ xpnet_receive(short partid, int channel,
dev_err(xpnet, "xp_remote_memcpy(0x%p, 0x%p, 0x%hx) "
"returned error=0x%x\n",
(void *)__pa((u64)skb->data &
- ~(L1_CACHE_BYTES - 1)),
+ ~(L1_CACHE_BYTES - 1)),
(void *)msg->buf_pa, msg->size, ret);
- xpc_received(partid, channel, (void *) msg);
+ xpc_received(partid, channel, (void *)msg);
priv->stats.rx_errors++;
@@ -257,7 +249,7 @@ xpnet_receive(short partid, int channel,
}
dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
- "skb->end=0x%p skb->len=%d\n", (void *) skb->head,
+ "skb->end=0x%p skb->len=%d\n", (void *)skb->head,
(void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
skb->len);
@@ -270,16 +262,14 @@ xpnet_receive(short partid, int channel,
(void *)skb->head, (void *)skb->data, skb_tail_pointer(skb),
skb_end_pointer(skb), skb->len);
-
xpnet_device->last_rx = jiffies;
priv->stats.rx_packets++;
priv->stats.rx_bytes += skb->len + ETH_HLEN;
netif_rx_ni(skb);
- xpc_received(partid, channel, (void *) msg);
+ xpc_received(partid, channel, (void *)msg);
}
-
/*
* This is the handler which XPC calls during any sort of change in
* state or message reception on a connection.
@@ -291,11 +281,11 @@ xpnet_connection_activity(enum xp_retval
DBUG_ON(partid < XP_MIN_PARTID || partid > XP_MAX_PARTID);
DBUG_ON(channel != XPC_NET_CHANNEL);
- switch(reason) {
+ switch (reason) {
case xpMsgReceived: /* message received */
DBUG_ON(data == NULL);
- xpnet_receive(partid, channel, (struct xpnet_message *) data);
+ xpnet_receive(partid, channel, (struct xpnet_message *)data);
break;
case xpConnected: /* connection completed to a partition */
@@ -325,13 +315,11 @@ xpnet_connection_activity(enum xp_retval
}
}
-
static int
xpnet_dev_open(struct net_device *dev)
{
enum xp_retval ret;
-
dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %" U64_ELL "d, %"
U64_ELL "d, %" U64_ELL "d, %" U64_ELL "d)\n", XPC_NET_CHANNEL,
xpnet_connection_activity, XPNET_MSG_SIZE, XPNET_MSG_NENTRIES,
@@ -352,7 +340,6 @@ xpnet_dev_open(struct net_device *dev)
return 0;
}
-
static int
xpnet_dev_stop(struct net_device *dev)
{
@@ -363,7 +350,6 @@ xpnet_dev_stop(struct net_device *dev)
return 0;
}
-
static int
xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
{
@@ -380,7 +366,6 @@ xpnet_dev_change_mtu(struct net_device *
return 0;
}
-
/*
* Required for the net_device structure.
*/
@@ -390,7 +375,6 @@ xpnet_dev_set_config(struct net_device *
return 0;
}
-
/*
* Return statistics to the caller.
*/
@@ -399,13 +383,11 @@ xpnet_dev_get_stats(struct net_device *d
{
struct xpnet_dev_private *priv;
-
- priv = (struct xpnet_dev_private *) dev->priv;
+ priv = (struct xpnet_dev_private *)dev->priv;
return &priv->stats;
}
-
/*
* Notification that the other end has received the message and
* DMA'd the skb information. At this point, they are done with
@@ -414,11 +396,9 @@ xpnet_dev_get_stats(struct net_device *d
*/
static void
xpnet_send_completed(enum xp_retval reason, short partid, int channel,
- void *__qm)
+ void *__qm)
{
- struct xpnet_pending_msg *queued_msg =
- (struct xpnet_pending_msg *) __qm;
-
+ struct xpnet_pending_msg *queued_msg = (struct xpnet_pending_msg *)__qm;
DBUG_ON(queued_msg == NULL);
@@ -427,14 +407,13 @@ xpnet_send_completed(enum xp_retval reas
if (atomic_dec_return(&queued_msg->use_count) == 0) {
dev_dbg(xpnet, "all acks for skb->head=-x%p\n",
- (void *) queued_msg->skb->head);
+ (void *)queued_msg->skb->head);
dev_kfree_skb_any(queued_msg->skb);
kfree(queued_msg);
}
}
-
static void
xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg,
u64 start_addr, u64 end_addr, u16 embedded_bytes, int dest_partid)
@@ -442,7 +421,6 @@ xpnet_send(struct sk_buff *skb, struct x
struct xpnet_message *msg;
enum xp_retval ret;
-
ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL, XPC_NOWAIT,
(void **)&msg);
if (unlikely(ret != xpSuccess))
@@ -478,7 +456,6 @@ xpnet_send(struct sk_buff *skb, struct x
atomic_dec(&queued_msg->use_count);
}
-
/*
* Network layer has formatted a packet (skb) and is ready to place it
* "on the wire". Prepare and send an xpnet_message to all partitions
@@ -497,9 +474,8 @@ xpnet_dev_hard_start_xmit(struct sk_buff
struct xpnet_dev_private *priv = (struct xpnet_dev_private *)dev->priv;
u16 embedded_bytes = 0;
-
dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
- "skb->end=0x%p skb->len=%d\n", (void *) skb->head,
+ "skb->end=0x%p skb->len=%d\n", (void *)skb->head,
(void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
skb->len);
@@ -517,16 +493,15 @@ xpnet_dev_hard_start_xmit(struct sk_buff
queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC);
if (queued_msg == NULL) {
dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping "
- "packet\n", sizeof(struct xpnet_pending_msg));
+ "packet\n", sizeof(struct xpnet_pending_msg));
priv->stats.tx_errors++;
return -ENOMEM;
}
-
/* get the beginning of the first cacheline and end of last */
- start_addr = ((u64) skb->data & ~(L1_CACHE_BYTES - 1));
+ start_addr = ((u64)skb->data & ~(L1_CACHE_BYTES - 1));
end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb));
/* calculate how many bytes to embed in the XPC message */
@@ -535,7 +510,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff
embedded_bytes = skb->len;
}
-
/*
* Since the send occurs asynchronously, we set the count to one
* and begin sending. Any sends that happen to complete before
@@ -550,7 +524,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff
/* we are being asked to broadcast to all partitions */
for_each_bit(dest_partid,
(unsigned long *)xpnet_broadcast_partitions,
- XP_NPARTITIONS) {
+ XP_NPARTITIONS) {
xpnet_send(skb, queued_msg, start_addr, end_addr,
embedded_bytes, dest_partid);
}
@@ -577,7 +551,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff
return 0;
}
-
/*
* Deal with transmit timeouts coming from the network layer.
*/
@@ -586,21 +559,18 @@ xpnet_dev_tx_timeout(struct net_device *
{
struct xpnet_dev_private *priv;
-
- priv = (struct xpnet_dev_private *) dev->priv;
+ priv = (struct xpnet_dev_private *)dev->priv;
priv->stats.tx_errors++;
return;
}
-
static int __init
xpnet_init(void)
{
short partid;
int result = -ENOMEM;
-
if (!is_shub() && !is_uv()) {
return -ENODEV;
}
@@ -633,7 +603,7 @@ xpnet_init(void)
* MAC addresses. We chose the first octet of the MAC to be unlikely
* to collide with any vendor's officially issued MAC.
*/
- xpnet_device->dev_addr[0] = 0x02; /* locally administered, no OUI */
+ xpnet_device->dev_addr[0] = 0x02; /* locally administered, no OUI */
partid = xp_partition_id;
@@ -660,23 +630,22 @@ xpnet_init(void)
return result;
}
-module_init(xpnet_init);
+module_init(xpnet_init);
static void __exit
xpnet_exit(void)
{
dev_info(xpnet, "unregistering network device %s\n",
- xpnet_device[0].name);
+ xpnet_device[0].name);
unregister_netdev(xpnet_device);
free_netdev(xpnet_device);
}
-module_exit(xpnet_exit);
+module_exit(xpnet_exit);
MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)");
MODULE_LICENSE("GPL");
-
--
^ permalink raw reply [flat|nested] 11+ messages in thread
* [Patch 5/5] run drivers/misc/xp through scripts/checkpatch.pl
2008-03-25 19:25 [Patch 0/5] prepare XPC and XPNET to support SGI UV dcn
` (2 preceding siblings ...)
2008-03-25 19:25 ` [Patch 4/5] run drivers/misc/xp through scripts/Lindent dcn
@ 2008-03-25 19:25 ` dcn
2008-03-25 20:05 ` Dean Nelson
2008-03-25 20:14 ` [Patch 0/5] prepare XPC and XPNET to support SGI UV Dean Nelson
4 siblings, 1 reply; 11+ messages in thread
From: dcn @ 2008-03-25 19:25 UTC (permalink / raw)
To: jes, tony.luck, linux-kernel, linux-ia64
[-- Attachment #1: checkpatch.pl --]
[-- Type: text/plain, Size: 49622 bytes --]
Addressed issues raised by scripts/checkpatch.pl. Removed unnecessary curly
braces. Eliminated uses of volatile and the calls to kernel_thread() and
daemonize().
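(The usual 2008-era replacement for a kernel_thread()/daemonize() pair is the
kthread API. The sketch below is illustrative only and is not lifted from
this patch; the function and thread names are hypothetical.)

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

/* hypothetical thread body; the old code would have called daemonize() here */
static int xp_worker(void *arg)
{
        while (!kthread_should_stop())
                schedule_timeout_interruptible(HZ);
        return 0;
}

static int xp_start_worker(void)
{
        struct task_struct *kt;

        /* replaces kernel_thread(xp_worker, NULL, 0) plus daemonize("...") */
        kt = kthread_run(xp_worker, NULL, "xp_worker");
        return IS_ERR(kt) ? PTR_ERR(kt) : 0;
}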
Signed-off-by: Dean Nelson <dcn@sgi.com>
---
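(Aside, not from the patch itself: the xpc_setup_infrastructure() rework
below converts repeated inline cleanup into the kernel's usual goto-based
unwind idiom. A condensed sketch of that shape, with hypothetical names:)

#include <linux/slab.h>

struct thing {
        int *a;
        int *b;
};

static int setup(struct thing *t)
{
        int ret;

        t->a = kzalloc(sizeof(*t->a), GFP_KERNEL);
        if (t->a == NULL)
                return -ENOMEM;

        t->b = kzalloc(sizeof(*t->b), GFP_KERNEL);
        if (t->b == NULL) {
                ret = -ENOMEM;
                goto out_free_a;        /* unwind in reverse order of setup */
        }
        return 0;

out_free_a:
        kfree(t->a);
        t->a = NULL;
        return ret;
}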
drivers/misc/xp/xp_main.c | 68 ++++------
drivers/misc/xp/xp_sn2.c | 23 +--
drivers/misc/xp/xp_uv.c | 2
drivers/misc/xp/xpc.h | 116 ++++++++---------
drivers/misc/xp/xpc_channel.c | 243 +++++++++++++++---------------------
drivers/misc/xp/xpc_main.c | 239 ++++++++++++-----------------------
drivers/misc/xp/xpc_partition.c | 78 ++++-------
drivers/misc/xp/xpnet.c | 15 --
8 files changed, 324 insertions(+), 460 deletions(-)
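(A recurring change in the xpc_channel.c hunks below replaces
volatile-qualified reads of the get/put ring values with plain reads ordered
by an explicit barrier. A minimal sketch of the pattern with illustrative
names; only rmb() and its ordering comment are taken from the patch itself.)

#include <linux/types.h>
#include <asm/system.h>         /* rmb(), at its 2008-era location */

struct ring_gp {
        s64 put;                /* advanced by the producer */
        s64 get;                /* advanced by the consumer */
};

static int ring_has_space(struct ring_gp *gp, int nentries)
{
        s64 put = gp->put;

        rmb();  /* guarantee that .put loads before .get */
        return put - gp->get < nentries;
}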
Index: linux-2.6/drivers/misc/xp/xpnet.c
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xpnet.c 2008-03-25 07:10:15.693148179 -0500
+++ linux-2.6/drivers/misc/xp/xpnet.c 2008-03-25 07:10:17.729402225 -0500
@@ -84,8 +84,8 @@ struct xpnet_message {
#define XPNET_VERSION_MAJOR(_v) ((_v) >> 4)
#define XPNET_VERSION_MINOR(_v) ((_v) & 0xf)
-#define XPNET_VERSION _XPNET_VERSION(1,0) /* version 1.0 */
-#define XPNET_VERSION_EMBED _XPNET_VERSION(1,1) /* version 1.1 */
+#define XPNET_VERSION _XPNET_VERSION(1, 0) /* version 1.0 */
+#define XPNET_VERSION_EMBED _XPNET_VERSION(1, 1) /* version 1.1 */
#define XPNET_MAGIC 0x88786984 /* "XNET" */
#define XPNET_VALID_MSG(_m) \
@@ -571,9 +571,8 @@ xpnet_init(void)
short partid;
int result = -ENOMEM;
- if (!is_shub() && !is_uv()) {
+ if (!is_shub() && !is_uv())
return -ENODEV;
- }
dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);
@@ -583,9 +582,8 @@ xpnet_init(void)
*/
xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private),
XPNET_DEVICE_NAME, ether_setup);
- if (xpnet_device == NULL) {
+ if (xpnet_device == NULL)
return -ENOMEM;
- }
netif_carrier_off(xpnet_device);
@@ -603,7 +601,7 @@ xpnet_init(void)
* MAC addresses. We chose the first octet of the MAC to be unlikely
* to collide with any vendor's officially issued MAC.
*/
- xpnet_device->dev_addr[0] = 0x02; /* locally administered, no OUI */
+ xpnet_device->dev_addr[0] = 0x02; /* locally administered, no OUI */
partid = xp_partition_id;
@@ -624,9 +622,8 @@ xpnet_init(void)
xpnet_device->features = NETIF_F_NO_CSUM;
result = register_netdev(xpnet_device);
- if (result != 0) {
+ if (result != 0)
free_netdev(xpnet_device);
- }
return result;
}
Index: linux-2.6/drivers/misc/xp/xpc_partition.c
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xpc_partition.c 2008-03-25 07:10:15.693148179 -0500
+++ linux-2.6/drivers/misc/xp/xpc_partition.c 2008-03-25 07:10:17.737403223 -0500
@@ -64,19 +64,19 @@ xpc_kmalloc_cacheline_aligned(size_t siz
{
/* see if kmalloc will give us cachline aligned memory by default */
*base = kmalloc(size, flags);
- if (*base == NULL) {
+ if (*base == NULL)
return NULL;
- }
- if ((u64)*base == L1_CACHE_ALIGN((u64)*base)) {
+
+ if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
return *base;
- }
+
kfree(*base);
/* nope, we'll have to do it ourselves */
*base = kmalloc(size + L1_CACHE_BYTES, flags);
- if (*base == NULL) {
+ if (*base == NULL)
return NULL;
- }
+
return (void *)L1_CACHE_ALIGN((u64)*base);
}
@@ -103,9 +103,8 @@ xpc_get_rsvd_page_pa(int nasid)
"x, address=0x%016" U64_ELL "x len=0x%016lx\n", ret,
cookie, rp_pa, len);
- if (ret != xpNeedMoreInfo) {
+ if (ret != xpNeedMoreInfo)
break;
- }
if (L1_CACHE_ALIGN(len) > buf_len) {
kfree(buf_base);
@@ -130,9 +129,9 @@ xpc_get_rsvd_page_pa(int nasid)
kfree(buf_base);
- if (ret != xpSuccess) {
+ if (ret != xpSuccess)
rp_pa = 0;
- }
+
dev_dbg(xpc_part, "reserved page at phys address 0x%016" U64_ELL "x\n",
rp_pa);
return rp_pa;
@@ -195,7 +194,8 @@ xpc_rsvd_page_init(void)
* that saved copy on subsequent loads of XPC. This AMO page is never
* freed, and its memory protections are never restricted.
*/
- if ((amos_page = xpc_vars->amos_page) == NULL) {
+ amos_page = xpc_vars->amos_page;
+ if (amos_page == NULL) {
n_amos = xpc_number_of_amos(XP_NPARTITIONS);
amos_page = xp_alloc_amos(n_amos);
if (amos_page == NULL) {
@@ -236,9 +236,8 @@ xpc_rsvd_page_init(void)
/* initialize the activate IRQ related AMO variables */
activate_irq_amos = xpc_activate_irq_amos(XP_NPARTITIONS);
- for (i = 0; i < xp_nasid_mask_words(); i++) {
+ for (i = 0; i < xp_nasid_mask_words(); i++)
(void)xpc_IPI_init(activate_irq_amos + i);
- }
/* initialize the engaged remote partitions related AMO variables */
engaged_partitions_amos = xpc_engaged_partitions_amos(XP_NPARTITIONS);
@@ -276,13 +275,11 @@ xpc_check_remote_hb(void)
for (partid = XP_MIN_PARTID; partid <= XP_MAX_PARTID; partid++) {
- if (xpc_exiting) {
+ if (xpc_exiting)
break;
- }
- if (partid == xp_partition_id) {
+ if (partid == xp_partition_id)
continue;
- }
part = &xpc_partitions[partid];
@@ -335,23 +332,20 @@ xpc_get_remote_rp(int nasid, u64 *discov
/* get the reserved page's physical address */
*remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
- if (*remote_rp_pa == 0) {
+ if (*remote_rp_pa == 0)
return xpNoRsvdPageAddr;
- }
/* pull over the reserved page header and part_nasids mask */
ret = xp_remote_memcpy(remote_rp, (void *)*remote_rp_pa,
XPC_RP_HEADER_SIZE + xp_sizeof_nasid_mask);
- if (ret != xpSuccess) {
+ if (ret != xpSuccess)
return ret;
- }
if (discovered_nasids != NULL) {
u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp);
- for (i = 0; i < xp_nasid_mask_words(); i++) {
+ for (i = 0; i < xp_nasid_mask_words(); i++)
discovered_nasids[i] |= remote_part_nasids[i];
- }
}
if (XPC_VERSION_MAJOR(remote_rp->version) !=
@@ -373,16 +367,14 @@ xpc_get_remote_vars(u64 remote_vars_pa,
{
enum xp_retval ret;
- if (remote_vars_pa == 0) {
+ if (remote_vars_pa == 0)
return xpVarsNotSet;
- }
/* pull over the cross partition variables */
ret = xp_remote_memcpy(remote_vars, (void *)remote_vars_pa,
XPC_RP_VARS_SIZE);
- if (ret != xpSuccess) {
+ if (ret != xpSuccess)
return ret;
- }
if (XPC_VERSION_MAJOR(remote_vars->version) !=
XPC_VERSION_MAJOR(XPC_V_VERSION)) {
@@ -391,8 +383,9 @@ xpc_get_remote_vars(u64 remote_vars_pa,
/* check that the partid is for another partition */
if (remote_vars->partid < XP_MIN_PARTID ||
- remote_vars->partid > XP_MAX_PARTID)
+ remote_vars->partid > XP_MAX_PARTID) {
return xpInvalidPartid;
+ }
if (remote_vars->partid == xp_partition_id)
return xpLocalPartid;
@@ -494,9 +487,8 @@ xpc_identify_act_IRQ_req(int nasid)
remote_vars_pa = remote_rp->vars_pa;
remote_rp_version = remote_rp->version;
- if (XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
+ if (XPC_SUPPORTS_RP_STAMP(remote_rp_version))
remote_rp_stamp = remote_rp->stamp;
- }
/* pull over the cross partition variables */
@@ -656,9 +648,8 @@ xpc_identify_act_IRQ_sender(void)
/* scan through activation AMO variables looking for non-zero entries */
for (w_index = 0; w_index < xp_nasid_mask_words(); w_index++) {
- if (xpc_exiting) {
+ if (xpc_exiting)
break;
- }
ret = xp_get_amo(amo_va, XP_AMO_CLEAR, &nasid_mask);
BUG_ON(ret != xpSuccess); /* should never happen */
@@ -733,13 +724,11 @@ xpc_partition_disengaged(struct xpc_part
DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING &&
part->act_state != XPC_P_AS_INACTIVE);
- if (part->act_state != XPC_P_AS_INACTIVE) {
+ if (part->act_state != XPC_P_AS_INACTIVE)
xpc_wakeup_channel_mgr(part);
- }
- if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
+ if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version))
xpc_cancel_partition_disengage_request(part);
- }
}
return disengaged;
}
@@ -912,9 +901,9 @@ xpc_discovery(void)
remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
xp_sizeof_nasid_mask,
GFP_KERNEL, &remote_rp_base);
- if (remote_rp == NULL) {
+ if (remote_rp == NULL)
return;
- }
+
remote_vars = (struct xpc_vars *)remote_rp;
discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words(),
@@ -947,18 +936,16 @@ xpc_discovery(void)
for (region = 0; region < max_regions; region++) {
- if ((volatile int)xpc_exiting) {
+ if (xpc_exiting)
break;
- }
dev_dbg(xpc_part, "searching region %d\n", region);
for (nasid = (region * region_size * 2);
nasid < ((region + 1) * region_size * 2); nasid += 2) {
- if ((volatile int)xpc_exiting) {
+ if (xpc_exiting)
break;
- }
dev_dbg(xpc_part, "checking nasid %d\n", nasid);
@@ -1027,8 +1014,8 @@ xpc_discovery(void)
ret = xpc_register_remote_amos(part);
if (ret != xpSuccess) {
dev_warn(xpc_part, "xpc_discovery() failed to "
- "register remote AMOs for partition %d,"
- "ret=%d\n", partid, ret);
+ "register remote AMOs for partition %d"
+ ", ret=%d\n", partid, ret);
XPC_SET_REASON(part, xpPhysAddrRegFailed,
__LINE__);
@@ -1072,9 +1059,8 @@ xpc_initiate_partid_to_nasids(short part
u64 part_nasid_pa;
part = &xpc_partitions[partid];
- if (part->remote_rp_pa == 0) {
+ if (part->remote_rp_pa == 0)
return xpPartitionDown;
- }
memset(nasid_mask, 0, xp_sizeof_nasid_mask);
Index: linux-2.6/drivers/misc/xp/xpc_channel.c
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xpc_channel.c 2008-03-25 07:10:15.693148179 -0500
+++ linux-2.6/drivers/misc/xp/xpc_channel.c 2008-03-25 13:34:50.777575812 -0500
@@ -24,7 +24,7 @@
#include "xpc.h"
#ifdef CONFIG_X86_64
-#define cmpxchg_rel(ptr,o,n) cmpxchg(ptr,o,n)
+#define cmpxchg_rel(ptr, o, n) cmpxchg(ptr, o, n)
#endif
/*
@@ -35,19 +35,19 @@ xpc_kzalloc_cacheline_aligned(size_t siz
{
/* see if kzalloc will give us cachline aligned memory by default */
*base = kzalloc(size, flags);
- if (*base == NULL) {
+ if (*base == NULL)
return NULL;
- }
- if ((u64)*base == L1_CACHE_ALIGN((u64)*base)) {
+
+ if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
return *base;
- }
+
kfree(*base);
/* nope, we'll have to do it ourselves */
*base = kzalloc(size + L1_CACHE_BYTES, flags);
- if (*base == NULL) {
+ if (*base == NULL)
return NULL;
- }
+
return (void *)L1_CACHE_ALIGN((u64)*base);
}
@@ -98,6 +98,7 @@ xpc_setup_infrastructure(struct xpc_part
int ret, cpuid;
struct timer_list *timer;
short partid = XPC_PARTID(part);
+ enum xp_retval retval;
/*
* Zero out MOST of the entry for this partition. Only the fields
@@ -127,11 +128,10 @@ xpc_setup_infrastructure(struct xpc_part
GFP_KERNEL,
&part->local_GPs_base);
if (part->local_GPs == NULL) {
- kfree(part->channels);
- part->channels = NULL;
dev_err(xpc_chan, "can't get memory for local get/put "
"values\n");
- return xpNoMemory;
+ retval = xpNoMemory;
+ goto out_1;
}
part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
@@ -141,11 +141,8 @@ xpc_setup_infrastructure(struct xpc_part
if (part->remote_GPs == NULL) {
dev_err(xpc_chan, "can't get memory for remote get/put "
"values\n");
- kfree(part->local_GPs_base);
- part->local_GPs = NULL;
- kfree(part->channels);
- part->channels = NULL;
- return xpNoMemory;
+ retval = xpNoMemory;
+ goto out_2;
}
/* allocate all the required open and close args */
@@ -155,13 +152,8 @@ xpc_setup_infrastructure(struct xpc_part
&part->local_openclose_args_base);
if (part->local_openclose_args == NULL) {
dev_err(xpc_chan, "can't get memory for local connect args\n");
- kfree(part->remote_GPs_base);
- part->remote_GPs = NULL;
- kfree(part->local_GPs_base);
- part->local_GPs = NULL;
- kfree(part->channels);
- part->channels = NULL;
- return xpNoMemory;
+ retval = xpNoMemory;
+ goto out_3;
}
part->remote_openclose_args =
@@ -169,15 +161,8 @@ xpc_setup_infrastructure(struct xpc_part
&part->remote_openclose_args_base);
if (part->remote_openclose_args == NULL) {
dev_err(xpc_chan, "can't get memory for remote connect args\n");
- kfree(part->local_openclose_args_base);
- part->local_openclose_args = NULL;
- kfree(part->remote_GPs_base);
- part->remote_GPs = NULL;
- kfree(part->local_GPs_base);
- part->local_GPs = NULL;
- kfree(part->channels);
- part->channels = NULL;
- return xpNoMemory;
+ retval = xpNoMemory;
+ goto out_4;
}
xpc_initialize_channels(part, partid);
@@ -201,17 +186,8 @@ xpc_setup_infrastructure(struct xpc_part
if (ret != 0) {
dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
"errno=%d\n", -ret);
- kfree(part->remote_openclose_args_base);
- part->remote_openclose_args = NULL;
- kfree(part->local_openclose_args_base);
- part->local_openclose_args = NULL;
- kfree(part->remote_GPs_base);
- part->remote_GPs = NULL;
- kfree(part->local_GPs_base);
- part->local_GPs = NULL;
- kfree(part->channels);
- part->channels = NULL;
- return xpLackOfResources;
+ retval = xpLackOfResources;
+ goto out_5;
}
/* Setup a timer to check for dropped IPIs */
@@ -246,6 +222,25 @@ xpc_setup_infrastructure(struct xpc_part
xpc_vars_part[partid].magic = XPC_VP_MAGIC1;
return xpSuccess;
+
+ /* setup of infrastructure failed */
+out_5:
+ kfree(part->remote_openclose_args_base);
+ part->remote_openclose_args = NULL;
+out_4:
+ kfree(part->local_openclose_args_base);
+ part->local_openclose_args = NULL;
+out_3:
+ kfree(part->remote_GPs_base);
+ part->remote_GPs = NULL;
+out_2:
+ kfree(part->local_GPs_base);
+ part->local_GPs = NULL;
+out_1:
+ kfree(part->channels);
+ part->channels = NULL;
+
+ return retval;
}
/*
@@ -266,9 +261,8 @@ xpc_pull_remote_cachelines(struct xpc_pa
DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst));
DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
- if (part->act_state == XPC_P_AS_DEACTIVATING) {
+ if (part->act_state == XPC_P_AS_DEACTIVATING)
return part->reason;
- }
ret = xp_remote_memcpy(dst, src, cnt);
if (ret != xpSuccess) {
@@ -358,18 +352,16 @@ xpc_pull_remote_vars_part(struct xpc_par
part->remote_IPI_nasid = pulled_entry->IPI_nasid;
part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;
- if (part->nchannels > pulled_entry->nchannels) {
+ if (part->nchannels > pulled_entry->nchannels)
part->nchannels = pulled_entry->nchannels;
- }
/* let the other side know that we've pulled their variables */
xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
}
- if (pulled_entry->magic == XPC_VP_MAGIC1) {
+ if (pulled_entry->magic == XPC_VP_MAGIC1)
return xpRetry;
- }
return xpSuccess;
}
@@ -389,9 +381,10 @@ xpc_get_IPI_flags(struct xpc_partition *
*/
spin_lock_irqsave(&part->IPI_lock, irq_flags);
- if ((IPI_amo = part->local_IPI_amo) != 0) {
+ IPI_amo = part->local_IPI_amo;
+ if (IPI_amo != 0)
part->local_IPI_amo = 0;
- }
+
spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
@@ -445,11 +438,9 @@ xpc_allocate_local_msgqueue(struct xpc_c
nbytes = nentries * ch->msg_size;
ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
GFP_KERNEL,
- &ch->
- local_msgqueue_base);
- if (ch->local_msgqueue == NULL) {
+ &ch->local_msgqueue_base);
+ if (ch->local_msgqueue == NULL)
continue;
- }
nbytes = nentries * sizeof(struct xpc_notify);
ch->notify_queue = kzalloc(nbytes, GFP_KERNEL);
@@ -493,11 +484,9 @@ xpc_allocate_remote_msgqueue(struct xpc_
nbytes = nentries * ch->msg_size;
ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
GFP_KERNEL,
- &ch->
- remote_msgqueue_base);
- if (ch->remote_msgqueue == NULL) {
+ &ch->remote_msgqueue_base);
+ if (ch->remote_msgqueue == NULL)
continue;
- }
spin_lock_irqsave(&ch->lock, irq_flags);
if (nentries < ch->remote_nentries) {
@@ -529,11 +518,12 @@ xpc_allocate_msgqueues(struct xpc_channe
DBUG_ON(ch->flags & XPC_C_SETUP);
- if ((ret = xpc_allocate_local_msgqueue(ch)) != xpSuccess) {
+ ret = xpc_allocate_local_msgqueue(ch);
+ if (ret != xpSuccess)
return ret;
- }
- if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpSuccess) {
+ ret = xpc_allocate_remote_msgqueue(ch);
+ if (ret != xpSuccess) {
kfree(ch->local_msgqueue_base);
ch->local_msgqueue = NULL;
kfree(ch->notify_queue);
@@ -573,12 +563,11 @@ xpc_process_connect(struct xpc_channel *
ret = xpc_allocate_msgqueues(ch);
spin_lock_irqsave(&ch->lock, *irq_flags);
- if (ret != xpSuccess) {
+ if (ret != xpSuccess)
XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
- }
- if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) {
+
+ if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
return;
- }
DBUG_ON(!(ch->flags & XPC_C_SETUP));
DBUG_ON(ch->local_msgqueue == NULL);
@@ -590,9 +579,8 @@ xpc_process_connect(struct xpc_channel *
xpc_IPI_send_openreply(ch, irq_flags);
}
- if (!(ch->flags & XPC_C_ROPENREPLY)) {
+ if (!(ch->flags & XPC_C_ROPENREPLY))
return;
- }
DBUG_ON(ch->remote_msgqueue_pa == 0);
@@ -711,9 +699,8 @@ xpc_process_disconnect(struct xpc_channe
DBUG_ON(!spin_is_locked(&ch->lock));
- if (!(ch->flags & XPC_C_DISCONNECTING)) {
+ if (!(ch->flags & XPC_C_DISCONNECTING))
return;
- }
DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
@@ -728,26 +715,23 @@ xpc_process_disconnect(struct xpc_channe
if (part->act_state == XPC_P_AS_DEACTIVATING) {
/* can't proceed until the other side disengages from us */
- if (xpc_partition_engaged(ch->partid)) {
+ if (xpc_partition_engaged(ch->partid))
return;
- }
} else {
/* as long as the other side is up do the full protocol */
- if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
+ if (!(ch->flags & XPC_C_RCLOSEREQUEST))
return;
- }
if (!(ch->flags & XPC_C_CLOSEREPLY)) {
ch->flags |= XPC_C_CLOSEREPLY;
xpc_IPI_send_closereply(ch, irq_flags);
}
- if (!(ch->flags & XPC_C_RCLOSEREPLY)) {
+ if (!(ch->flags & XPC_C_RCLOSEREPLY))
return;
- }
}
/* wake those waiting for notify completion */
@@ -807,9 +791,10 @@ xpc_process_openclose_IPI(struct xpc_par
spin_lock_irqsave(&ch->lock, irq_flags);
- again:
+again:
- if ((ch->flags & XPC_C_DISCONNECTED) && (ch->flags & XPC_C_WDISCONNECT)) {
+ if ((ch->flags & XPC_C_DISCONNECTED) &&
+ (ch->flags & XPC_C_WDISCONNECT)) {
/*
* Delay processing IPI flags until thread waiting disconnect
* has had a chance to see that the channel is disconnected.
@@ -882,11 +867,10 @@ xpc_process_openclose_IPI(struct xpc_par
if (!(ch->flags & XPC_C_DISCONNECTING)) {
reason = args->reason;
- if (reason <= xpSuccess || reason > xpUnknownReason) {
+ if (reason <= xpSuccess || reason > xpUnknownReason)
reason = xpUnknownReason;
- } else if (reason == xpUnregistering) {
+ else if (reason == xpUnregistering)
reason = xpOtherUnregistering;
- }
XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
@@ -1060,9 +1044,8 @@ xpc_connect_channel(struct xpc_channel *
unsigned long irq_flags;
struct xpc_registration *registration = &xpc_registrations[ch->number];
- if (mutex_trylock(®istration->mutex) == 0) {
+ if (mutex_trylock(®istration->mutex) == 0)
return xpRetry;
- }
if (!XPC_CHANNEL_REGISTERED(ch->number)) {
mutex_unlock(®istration->mutex);
@@ -1151,7 +1134,7 @@ xpc_clear_local_msgqueue_flags(struct xp
(get % ch->local_nentries) *
ch->msg_size);
msg->flags = 0;
- } while (++get < (volatile s64)ch->remote_GP.get);
+ } while (++get < ch->remote_GP.get);
}
/*
@@ -1169,7 +1152,7 @@ xpc_clear_remote_msgqueue_flags(struct x
(put % ch->remote_nentries) *
ch->msg_size);
msg->flags = 0;
- } while (++put < (volatile s64)ch->remote_GP.put);
+ } while (++put < ch->remote_GP.put);
}
static void
@@ -1236,9 +1219,8 @@ xpc_process_msg_IPI(struct xpc_partition
* If anyone was waiting for message queue entries to become
* available, wake them up.
*/
- if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
+ if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
wake_up(&ch->msg_allocate_wq);
- }
}
/*
@@ -1265,9 +1247,8 @@ xpc_process_msg_IPI(struct xpc_partition
"delivered=%d, partid=%d, channel=%d\n",
nmsgs_sent, ch->partid, ch->number);
- if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
+ if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
xpc_activate_kthreads(ch, nmsgs_sent);
- }
}
}
@@ -1302,9 +1283,8 @@ xpc_process_channel_activity(struct xpc_
IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number);
- if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) {
+ if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags))
xpc_process_openclose_IPI(part, ch_number, IPI_flags);
- }
ch_flags = ch->flags; /* need an atomic snapshot of flags */
@@ -1315,9 +1295,8 @@ xpc_process_channel_activity(struct xpc_
continue;
}
- if (part->act_state == XPC_P_AS_DEACTIVATING) {
+ if (part->act_state == XPC_P_AS_DEACTIVATING)
continue;
- }
if (!(ch_flags & XPC_C_CONNECTED)) {
if (!(ch_flags & XPC_C_OPENREQUEST)) {
@@ -1337,9 +1316,8 @@ xpc_process_channel_activity(struct xpc_
* from the other partition.
*/
- if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) {
+ if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags))
xpc_process_msg_IPI(part, ch_number);
- }
}
}
@@ -1552,9 +1530,9 @@ xpc_disconnect_channel(const int line, s
DBUG_ON(!spin_is_locked(&ch->lock));
- if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
+ if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
return;
- }
+
DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));
dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
@@ -1570,9 +1548,8 @@ xpc_disconnect_channel(const int line, s
xpc_IPI_send_closerequest(ch, irq_flags);
- if (channel_was_connected) {
+ if (channel_was_connected)
ch->flags |= XPC_C_WASCONNECTED;
- }
spin_unlock_irqrestore(&ch->lock, *irq_flags);
@@ -1587,9 +1564,8 @@ xpc_disconnect_channel(const int line, s
}
/* wake those waiting to allocate an entry from the local msg queue */
- if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
+ if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
wake_up(&ch->msg_allocate_wq);
- }
spin_lock_irqsave(&ch->lock, *irq_flags);
}
@@ -1677,9 +1653,9 @@ xpc_allocate_msg(struct xpc_channel *ch,
while (1) {
- put = (volatile s64)ch->w_local_GP.put;
- if (put - (volatile s64)ch->w_remote_GP.get <
- ch->local_nentries) {
+ put = ch->w_local_GP.put;
+ rmb(); /* guarantee that .put loads before .get */
+ if (put - ch->w_remote_GP.get < ch->local_nentries) {
/* There are available message entries. We need to try
* to secure one for ourselves. We'll do this by trying
@@ -1703,9 +1679,8 @@ xpc_allocate_msg(struct xpc_channel *ch,
* that will cause the IPI handler to fetch the latest
* GP values as if an IPI was sent by the other side.
*/
- if (ret == xpTimeout) {
+ if (ret == xpTimeout)
xpc_IPI_send_local_msgrequest(ch);
- }
if (flags & XPC_NOWAIT) {
xpc_msgqueue_deref(ch);
@@ -1764,9 +1739,8 @@ xpc_initiate_allocate(short partid, int
ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg);
xpc_part_deref(part);
- if (msg != NULL) {
+ if (msg != NULL)
*payload = &msg->payload;
- }
}
return ret;
@@ -1787,17 +1761,15 @@ xpc_send_msgs(struct xpc_channel *ch, s6
while (1) {
while (1) {
- if (put == (volatile s64)ch->w_local_GP.put) {
+ if (put == ch->w_local_GP.put)
break;
- }
msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
(put % ch->local_nentries) *
ch->msg_size);
- if (!(msg->flags & XPC_M_READY)) {
+ if (!(msg->flags & XPC_M_READY))
break;
- }
put++;
}
@@ -1810,7 +1782,7 @@ xpc_send_msgs(struct xpc_channel *ch, s6
if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) !=
initial_put) {
/* someone else beat us to it */
- DBUG_ON((volatile s64)ch->local_GP->put < initial_put);
+ DBUG_ON(ch->local_GP->put < initial_put);
break;
}
@@ -1829,9 +1801,8 @@ xpc_send_msgs(struct xpc_channel *ch, s6
initial_put = put;
}
- if (send_IPI) {
+ if (send_IPI)
xpc_IPI_send_msgrequest(ch);
- }
}
/*
@@ -1905,9 +1876,8 @@ xpc_send_msg(struct xpc_channel *ch, str
/* see if the message is next in line to be sent, if so send it */
put = ch->local_GP->put;
- if (put == msg_number) {
+ if (put == msg_number)
xpc_send_msgs(ch, put);
- }
/* drop the reference grabbed in xpc_allocate_msg() */
xpc_msgqueue_deref(ch);
@@ -2024,10 +1994,8 @@ xpc_pull_remote_msg(struct xpc_channel *
msg_index = ch->next_msg_to_pull % ch->remote_nentries;
- DBUG_ON(ch->next_msg_to_pull >=
- (volatile s64)ch->w_remote_GP.put);
- nmsgs = (volatile s64)ch->w_remote_GP.put -
- ch->next_msg_to_pull;
+ DBUG_ON(ch->next_msg_to_pull >= ch->w_remote_GP.put);
+ nmsgs = ch->w_remote_GP.put - ch->next_msg_to_pull;
if (msg_index + nmsgs > ch->remote_nentries) {
/* ignore the ones that wrap the msg queue for now */
nmsgs = ch->remote_nentries - msg_index;
@@ -2038,10 +2006,9 @@ xpc_pull_remote_msg(struct xpc_channel *
remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa +
msg_offset);
- if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
- nmsgs * ch->msg_size)) !=
- xpSuccess) {
-
+ ret = xpc_pull_remote_cachelines(part, msg, remote_msg, nmsgs *
+ ch->msg_size);
+ if (ret != xpSuccess) {
dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
" msg %" U64_ELL "d from partition %d, "
"channel=%d, ret=%d\n", nmsgs,
@@ -2054,8 +2021,6 @@ xpc_pull_remote_msg(struct xpc_channel *
return NULL;
}
- mb(); /* >>> this may not be needed, we're not sure */
-
ch->next_msg_to_pull += nmsgs;
}
@@ -2078,14 +2043,13 @@ xpc_get_deliverable_msg(struct xpc_chann
s64 get;
do {
- if ((volatile u32)ch->flags & XPC_C_DISCONNECTING) {
+ if (ch->flags & XPC_C_DISCONNECTING)
break;
- }
- get = (volatile s64)ch->w_local_GP.get;
- if (get == (volatile s64)ch->w_remote_GP.put) {
+ get = ch->w_local_GP.get;
+ rmb(); /* guarantee that .get loads before .put */
+ if (get == ch->w_remote_GP.put)
break;
- }
/* There are messages waiting to be pulled and delivered.
* We need to try to secure one for ourselves. We'll do this
@@ -2125,7 +2089,8 @@ xpc_deliver_msg(struct xpc_channel *ch)
{
struct xpc_msg *msg;
- if ((msg = xpc_get_deliverable_msg(ch)) != NULL) {
+ msg = xpc_get_deliverable_msg(ch);
+ if (msg != NULL) {
/*
* This ref is taken to protect the payload itself from being
@@ -2171,17 +2136,15 @@ xpc_acknowledge_msgs(struct xpc_channel
while (1) {
while (1) {
- if (get == (volatile s64)ch->w_local_GP.get) {
+ if (get == ch->w_local_GP.get)
break;
- }
msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
(get % ch->remote_nentries) *
ch->msg_size);
- if (!(msg->flags & XPC_M_DONE)) {
+ if (!(msg->flags & XPC_M_DONE))
break;
- }
msg_flags |= msg->flags;
get++;
@@ -2195,7 +2158,7 @@ xpc_acknowledge_msgs(struct xpc_channel
if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) !=
initial_get) {
/* someone else beat us to it */
- DBUG_ON((volatile s64)ch->local_GP->get <= initial_get);
+ DBUG_ON(ch->local_GP->get <= initial_get);
break;
}
@@ -2214,9 +2177,8 @@ xpc_acknowledge_msgs(struct xpc_channel
initial_get = get;
}
- if (send_IPI) {
+ if (send_IPI)
xpc_IPI_send_msgrequest(ch);
- }
}
/*
@@ -2270,9 +2232,8 @@ xpc_initiate_received(short partid, int
* been delivered.
*/
get = ch->local_GP->get;
- if (get == msg_number) {
+ if (get == msg_number)
xpc_acknowledge_msgs(ch, get, msg->flags);
- }
/* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */
xpc_msgqueue_deref(ch);
Index: linux-2.6/drivers/misc/xp/xpc.h
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xpc.h 2008-03-25 07:10:15.693148179 -0500
+++ linux-2.6/drivers/misc/xp/xpc.h 2008-03-25 07:10:17.809412207 -0500
@@ -109,16 +109,16 @@ struct xpc_rsvd_page {
u8 SAL_partid; /* SAL: partition ID */
u8 version;
u8 pad[6];
- volatile u64 vars_pa; /* physical address of struct xpc_vars */
+ u64 vars_pa; /* physical address of struct xpc_vars */
struct timespec stamp; /* time when reserved page was setup by XPC */
u64 pad2[9]; /* align to last u64 in cacheline */
u64 SAL_nasids_size; /* SAL: size of each nasid mask in bytes */
};
-#define XPC_RP_VERSION _XPC_VERSION(2,0) /* version 2.0 of the reserved page */
+#define XPC_RP_VERSION _XPC_VERSION(2, 0) /* version 2.0 of the reserved page */
#define XPC_SUPPORTS_RP_STAMP(_version) \
- (_version >= _XPC_VERSION(1,1))
+ (_version >= _XPC_VERSION(1, 1))
/*
* compare stamps - the return value is:
@@ -132,9 +132,10 @@ xpc_compare_stamps(struct timespec *stam
{
int ret;
- if ((ret = stamp1->tv_sec - stamp2->tv_sec) == 0) {
+ ret = stamp1->tv_sec - stamp2->tv_sec;
+ if (ret == 0)
ret = stamp1->tv_nsec - stamp2->tv_nsec;
- }
+
return ret;
}
@@ -166,10 +167,10 @@ struct xpc_vars {
u64 heartbeating_to_mask[BITS_TO_LONGS(XP_MAX_NPARTITIONS)];
};
-#define XPC_V_VERSION _XPC_VERSION(4,0) /* version 4.0 of the cross vars */
+#define XPC_V_VERSION _XPC_VERSION(4, 0) /* version 4.0 of the cross vars */
#define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
- (_version >= _XPC_VERSION(3,1))
+ (_version >= _XPC_VERSION(3, 1))
static inline int
xpc_hb_allowed(short partid, struct xpc_vars *vars)
@@ -267,7 +268,7 @@ xpc_number_of_amos(int npartitions)
* occupies half a cacheline.
*/
struct xpc_vars_part {
- volatile u64 magic;
+ u64 magic;
u64 openclose_args_pa; /* physical address of open and close args */
u64 GPs_pa; /* physical address of Get/Put values */
@@ -290,8 +291,8 @@ struct xpc_vars_part {
* MAGIC2 indicates that this partition has pulled the remote partititions
* per partition variables that pertain to this partition.
*/
-#define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */
-#define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */
+#define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */
+#define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */
/* the reserved page sizes and offsets */
@@ -301,9 +302,10 @@ struct xpc_vars_part {
#define XPC_RP_PART_NASIDS(_rp) (u64 *)((u8 *)(_rp) + XPC_RP_HEADER_SIZE)
#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + \
xp_nasid_mask_words())
-#define XPC_RP_VARS(_rp) (struct xpc_vars *)(XPC_RP_MACH_NASIDS(_rp) + \
- xp_nasid_mask_words())
-#define XPC_RP_VARS_PART(_rp) (struct xpc_vars_part *)((u8 *)XPC_RP_VARS(_rp) + XPC_RP_VARS_SIZE)
+#define XPC_RP_VARS(_rp) ((struct xpc_vars *)(XPC_RP_MACH_NASIDS(_rp) + \
+ xp_nasid_mask_words()))
+#define XPC_RP_VARS_PART(_rp) ((struct xpc_vars_part *) \
+ ((u8 *)XPC_RP_VARS(_rp) + XPC_RP_VARS_SIZE))
/*
* Functions registered by add_timer() or called by kernel_thread() only
@@ -322,8 +324,8 @@ struct xpc_vars_part {
* Define a Get/Put value pair (pointers) used with a message queue.
*/
struct xpc_gp {
- volatile s64 get; /* Get value */
- volatile s64 put; /* Put value */
+ s64 get; /* Get value */
+ s64 put; /* Put value */
};
#define XPC_GP_SIZE \
@@ -360,7 +362,7 @@ struct xpc_openclose_args {
* and consumed by the intended recipient.
*/
struct xpc_notify {
- volatile u8 type; /* type of notification */
+ u8 type; /* type of notification */
/* the following two fields are only used if type == XPC_N_CALL */
xpc_notify_func func; /* user's notify function */
@@ -466,10 +468,10 @@ struct xpc_channel {
void *local_msgqueue_base; /* base address of kmalloc'd space */
struct xpc_msg *local_msgqueue; /* local message queue */
void *remote_msgqueue_base; /* base address of kmalloc'd space */
- struct xpc_msg *remote_msgqueue; /* cached copy of remote partition's */
- /* local message queue */
+ struct xpc_msg *remote_msgqueue; /* cached copy of remote partition's */
+ /* local message queue */
u64 remote_msgqueue_pa; /* phys addr of remote partition's */
- /* local message queue */
+ /* local message queue */
atomic_t references; /* #of external references to queues */
@@ -477,21 +479,21 @@ struct xpc_channel {
wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */
u8 delayed_IPI_flags; /* IPI flags received, but delayed */
- /* action until channel disconnected */
+ /* action until channel disconnected */
/* queue of msg senders who want to be notified when msg received */
atomic_t n_to_notify; /* #of msg senders to notify */
- struct xpc_notify *notify_queue; /* notify queue for messages sent */
+ struct xpc_notify *notify_queue; /* notify queue for messages sent */
xpc_channel_func func; /* user's channel function */
void *key; /* pointer to user's key */
struct mutex msg_to_pull_mutex; /* next msg to pull serialization */
- struct completion wdisconnect_wait; /* wait for channel disconnect */
+ struct completion wdisconnect_wait; /* wait for channel disconnect */
- struct xpc_openclose_args *local_openclose_args; /* args passed on */
- /* opening or closing of channel */
+ struct xpc_openclose_args *local_openclose_args; /* args passed on */
+ /* opening or closing of channel */
/* various flavors of local and remote Get/Put values */
@@ -519,28 +521,28 @@ struct xpc_channel {
#define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */
#define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */
-#define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */
+#define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */
#define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */
-#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */
-#define XPC_C_CONNECTEDCALLOUT 0x00000040 /* connected callout initiated */
+#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */
+#define XPC_C_CONNECTEDCALLOUT 0x00000040 /* connected callout initiated */
#define XPC_C_CONNECTEDCALLOUT_MADE \
- 0x00000080 /* connected callout completed */
+ 0x00000080 /* connected callout completed */
#define XPC_C_CONNECTED 0x00000100 /* local channel is connected */
#define XPC_C_CONNECTING 0x00000200 /* channel is being connected */
#define XPC_C_RCLOSEREPLY 0x00000400 /* remote close channel reply */
#define XPC_C_CLOSEREPLY 0x00000800 /* local close channel reply */
-#define XPC_C_RCLOSEREQUEST 0x00001000 /* remote close channel request */
-#define XPC_C_CLOSEREQUEST 0x00002000 /* local close channel request */
+#define XPC_C_RCLOSEREQUEST 0x00001000 /* remote close channel request */
+#define XPC_C_CLOSEREQUEST 0x00002000 /* local close channel request */
#define XPC_C_DISCONNECTED 0x00004000 /* channel is disconnected */
-#define XPC_C_DISCONNECTING 0x00008000 /* channel is being disconnected */
+#define XPC_C_DISCONNECTING 0x00008000 /* channel is being disconnected */
#define XPC_C_DISCONNECTINGCALLOUT \
- 0x00010000 /* disconnecting callout initiated */
+ 0x00010000 /* disconnecting callout initiated */
#define XPC_C_DISCONNECTINGCALLOUT_MADE \
- 0x00020000 /* disconnecting callout completed */
-#define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */
+ 0x00020000 /* disconnecting callout completed */
+#define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */
/*
* Manages channels on a partition basis. There is one of these structures
@@ -554,7 +556,7 @@ struct xpc_partition {
u8 remote_rp_version; /* version# of partition's rsvd pg */
short remote_npartitions; /* value of XPC_NPARTITIONS */
u32 flags; /* general flags */
- struct timespec remote_rp_stamp; /* time when rsvd pg was initialized */
+ struct timespec remote_rp_stamp; /* time when rsvd pg was initialized */
u64 remote_rp_pa; /* phys addr of partition's rsvd pg */
u64 remote_vars_pa; /* phys addr of partition's vars */
u64 remote_vars_part_pa; /* phys addr of partition's vars part */
@@ -564,7 +566,7 @@ struct xpc_partition {
int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */
u32 act_IRQ_rcvd; /* IRQs since activation */
spinlock_t lock; /* protect updating of act_state and */
- /* the general flags */
+ /* the general flags */
u8 act_state; /* from XPC HB viewpoint */
u8 remote_vars_version; /* version# of partition's vars */
enum xp_retval reason; /* reason partition is deactivating */
@@ -576,7 +578,7 @@ struct xpc_partition {
/* XPC infrastructure referencing and teardown control */
- volatile u8 setup_state; /* infrastructure setup state */
+ u8 setup_state; /* infrastructure setup state */
wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */
atomic_t references; /* #of references to infrastructure */
@@ -588,25 +590,25 @@ struct xpc_partition {
*/
u8 nchannels; /* #of defined channels supported */
- atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
- atomic_t nchannels_engaged; /* #of channels engaged with remote part */
+ atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
+ atomic_t nchannels_engaged; /* #of channels engaged with remote part */
struct xpc_channel *channels; /* array of channel structures */
void *local_GPs_base; /* base address of kmalloc'd space */
struct xpc_gp *local_GPs; /* local Get/Put values */
void *remote_GPs_base; /* base address of kmalloc'd space */
- struct xpc_gp *remote_GPs; /* copy of remote partition's local Get/Put */
- /* values */
+ struct xpc_gp *remote_GPs;/* copy of remote partition's local Get/Put */
+ /* values */
u64 remote_GPs_pa; /* phys address of remote partition's local */
- /* Get/Put values */
+ /* Get/Put values */
/* fields used to pass args when opening or closing a channel */
- void *local_openclose_args_base; /* base address of kmalloc'd space */
- struct xpc_openclose_args *local_openclose_args; /* local's args */
- void *remote_openclose_args_base; /* base address of kmalloc'd space */
- struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
- /* args */
+ void *local_openclose_args_base; /* base address of kmalloc'd space */
+ struct xpc_openclose_args *local_openclose_args; /* local's args */
+ void *remote_openclose_args_base; /* base address of kmalloc'd space */
+ struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
+ /* args */
u64 remote_openclose_args_pa; /* phys addr of remote's args */
/* IPI sending, receiving and handling related fields */
@@ -631,7 +633,7 @@ struct xpc_partition {
/* struct xpc_partition flags */
-#define XPC_P_RAMOSREGISTERED 0x00000001 /* remote AMOs were registered */
+#define XPC_P_RAMOSREGISTERED 0x00000001 /* remote AMOs were registered */
/* struct xpc_partition act_state values (for XPC HB) */
@@ -725,9 +727,8 @@ extern void xpc_teardown_infrastructure(
static inline void
xpc_wakeup_channel_mgr(struct xpc_partition *part)
{
- if (atomic_inc_return(&part->channel_mgr_requests) == 1) {
+ if (atomic_inc_return(&part->channel_mgr_requests) == 1)
wake_up(&part->channel_mgr_wq);
- }
}
/*
@@ -746,9 +747,8 @@ xpc_msgqueue_deref(struct xpc_channel *c
s32 refs = atomic_dec_return(&ch->references);
DBUG_ON(refs < 0);
- if (refs == 0) {
+ if (refs == 0)
xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]);
- }
}
#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \
@@ -764,9 +764,8 @@ xpc_part_deref(struct xpc_partition *par
s32 refs = atomic_dec_return(&part->references);
DBUG_ON(refs < 0);
- if (refs == 0 && part->setup_state == XPC_P_SS_WTEARDOWN) {
+ if (refs == 0 && part->setup_state == XPC_P_SS_WTEARDOWN)
wake_up(&part->teardown_wq);
- }
}
static inline int
@@ -776,9 +775,9 @@ xpc_part_ref(struct xpc_partition *part)
atomic_inc(&part->references);
setup = (part->setup_state == XPC_P_SS_SETUP);
- if (!setup) {
+ if (!setup)
xpc_part_deref(part);
- }
+
return setup;
}
@@ -955,7 +954,7 @@ xpc_activate_IRQ_send(u64 amos_page_pa,
BIT_MASK(from_nasid / 2),
remote_amo, to_nasid,
to_phys_cpuid, SGI_XPC_ACTIVATE);
- BUG_ON(!remote_amo && ret != xpSuccess); /* should never happen */
+ BUG_ON(!remote_amo && ret != xpSuccess); /* should never happen */
}
static inline void
@@ -1150,9 +1149,8 @@ xpc_check_for_channel_activity(struct xp
ret = xp_get_amo(part->local_IPI_amo_va, XP_AMO_CLEAR, &IPI_amo);
BUG_ON(ret != xpSuccess); /* should never happen */
- if (IPI_amo == 0) {
+ if (IPI_amo == 0)
return;
- }
spin_lock_irqsave(&part->IPI_lock, irq_flags);
part->local_IPI_amo |= IPI_amo;
Index: linux-2.6/drivers/misc/xp/xp_sn2.c
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xp_sn2.c 2008-03-25 07:10:15.693148179 -0500
+++ linux-2.6/drivers/misc/xp/xp_sn2.c 2008-03-25 07:10:17.829414703 -0500
@@ -22,8 +22,6 @@
#include <asm/sn/sn_sal.h>
#include "xp.h"
-extern struct device *xp;
-
/*
* Register a nofault code region which performs a cross-partition PIO read.
* If the PIO read times out, the MCA handler will consume the error and
@@ -187,10 +185,10 @@ xp_set_amo_sn2(u64 *amo_va, int op, u64
* it until the heartbeat times out.
*/
if (xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(amo_va),
- xp_nofault_PIOR_target))
- != 0)
+ xp_nofault_PIOR_target))
+ != 0) {
ret = xpPioReadError;
-
+ }
local_irq_restore(irq_flags);
}
@@ -226,10 +224,10 @@ xp_set_amo_with_interrupt_sn2(u64 *amo_v
* it until the heartbeat times out.
*/
if (xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(amo_va),
- xp_nofault_PIOR_target))
- != 0)
+ xp_nofault_PIOR_target))
+ != 0) {
ret = xpPioReadError;
-
+ }
local_irq_restore(irq_flags);
}
@@ -323,10 +321,10 @@ xp_change_memprotect_shub_wars_1_1_sn2(i
/* save current protection values */
xpc_prot_vec[node] =
(u64)HUB_L((u64 *)GLOBAL_MMR_ADDR(nasid,
- SH1_MD_DQLP_MMR_DIR_PRIVEC0));
+ SH1_MD_DQLP_MMR_DIR_PRIVEC0));
/* open up everything */
HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
- SH1_MD_DQLP_MMR_DIR_PRIVEC0),
+ SH1_MD_DQLP_MMR_DIR_PRIVEC0),
-1UL);
HUB_S((u64 *)
GLOBAL_MMR_ADDR(nasid,
@@ -338,15 +336,16 @@ xp_change_memprotect_shub_wars_1_1_sn2(i
nasid = cnodeid_to_nasid(node);
/* restore original protection values */
HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
- SH1_MD_DQLP_MMR_DIR_PRIVEC0),
+ SH1_MD_DQLP_MMR_DIR_PRIVEC0),
xpc_prot_vec[node]);
HUB_S((u64 *)
GLOBAL_MMR_ADDR(nasid,
SH1_MD_DQRP_MMR_DIR_PRIVEC0),
xpc_prot_vec[node]);
}
- } else
+ } else {
BUG();
+ }
}
/* SH_IPI_ACCESS shub register value on startup */
Index: linux-2.6/drivers/misc/xp/xp_main.c
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xp_main.c 2008-03-25 07:10:15.697148678 -0500
+++ linux-2.6/drivers/misc/xp/xp_main.c 2008-03-25 07:10:17.845416699 -0500
@@ -38,38 +38,58 @@ struct device *xp = &xp_dbg_subname;
u64 xp_nofault_PIOR_target;
short xp_partition_id;
+EXPORT_SYMBOL_GPL(xp_partition_id);
u8 xp_region_size;
+EXPORT_SYMBOL_GPL(xp_region_size);
unsigned long xp_rtc_cycles_per_second;
+EXPORT_SYMBOL_GPL(xp_rtc_cycles_per_second);
enum xp_retval (*xp_remote_memcpy) (void *dst, const void *src, size_t len);
+EXPORT_SYMBOL_GPL(xp_remote_memcpy);
enum xp_retval (*xp_register_remote_amos) (u64 paddr, size_t len);
+EXPORT_SYMBOL_GPL(xp_register_remote_amos);
enum xp_retval (*xp_unregister_remote_amos) (u64 paddr, size_t len);
+EXPORT_SYMBOL_GPL(xp_unregister_remote_amos);
int xp_sizeof_nasid_mask;
+EXPORT_SYMBOL_GPL(xp_sizeof_nasid_mask);
int xp_sizeof_amo;
+EXPORT_SYMBOL_GPL(xp_sizeof_amo);
u64 *(*xp_alloc_amos) (int n_amos);
+EXPORT_SYMBOL_GPL(xp_alloc_amos);
void (*xp_free_amos) (u64 *amos_page, int n_amos);
+EXPORT_SYMBOL_GPL(xp_free_amos);
enum xp_retval (*xp_set_amo) (u64 *amo_va, int op, u64 operand, int remote);
+EXPORT_SYMBOL_GPL(xp_set_amo);
enum xp_retval (*xp_set_amo_with_interrupt) (u64 *amo_va, int op, u64 operand,
int remote, int nasid,
int phys_cpuid, int vector);
+EXPORT_SYMBOL_GPL(xp_set_amo_with_interrupt);
enum xp_retval (*xp_get_amo) (u64 *amo_va, int op, u64 *amo_value_addr);
+EXPORT_SYMBOL_GPL(xp_get_amo);
enum xp_retval (*xp_get_partition_rsvd_page_pa) (u64 buf, u64 *cookie,
u64 *paddr, size_t *len);
+EXPORT_SYMBOL_GPL(xp_get_partition_rsvd_page_pa);
enum xp_retval (*xp_change_memprotect) (u64 paddr, size_t len, int request,
u64 *nasid_array);
+EXPORT_SYMBOL_GPL(xp_change_memprotect);
void (*xp_change_memprotect_shub_wars_1_1) (int request);
+EXPORT_SYMBOL_GPL(xp_change_memprotect_shub_wars_1_1);
void (*xp_allow_IPI_ops) (void);
+EXPORT_SYMBOL_GPL(xp_allow_IPI_ops);
void (*xp_disallow_IPI_ops) (void);
+EXPORT_SYMBOL_GPL(xp_disallow_IPI_ops);
int (*xp_cpu_to_nasid) (int cpuid);
+EXPORT_SYMBOL_GPL(xp_cpu_to_nasid);
int (*xp_node_to_nasid) (int nid);
+EXPORT_SYMBOL_GPL(xp_node_to_nasid);
/*
* Initialize the XPC interface to indicate that XPC isn't loaded.
@@ -90,6 +110,7 @@ struct xpc_interface xpc_interface = {
(void (*)(short, int, void *))xpc_notloaded,
(enum xp_retval(*)(short, void *))xpc_notloaded
};
+EXPORT_SYMBOL_GPL(xpc_interface);
/*
* XPC calls this when it (the XPC module) has been loaded.
@@ -112,6 +133,7 @@ xpc_set_interface(void (*connect) (int),
xpc_interface.received = received;
xpc_interface.partid_to_nasids = partid_to_nasids;
}
+EXPORT_SYMBOL_GPL(xpc_set_interface);
/*
* XPC calls this when it (the XPC module) is being unloaded.
@@ -133,12 +155,14 @@ xpc_clear_interface(void)
xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *))
xpc_notloaded;
}
+EXPORT_SYMBOL_GPL(xpc_clear_interface);
/*
* xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
* users of XPC.
*/
struct xpc_registration xpc_registrations[XPC_NCHANNELS];
+EXPORT_SYMBOL_GPL(xpc_registrations);
/*
* Register for automatic establishment of a channel connection whenever
@@ -177,9 +201,8 @@ xpc_connect(int ch_number, xpc_channel_f
registration = &xpc_registrations[ch_number];
- if (mutex_lock_interruptible(®istration->mutex) != 0) {
+ if (mutex_lock_interruptible(®istration->mutex) != 0)
return xpInterrupted;
- }
/* if XPC_CHANNEL_REGISTERED(ch_number) */
if (registration->func != NULL) {
@@ -201,6 +224,7 @@ xpc_connect(int ch_number, xpc_channel_f
return xpSuccess;
}
+EXPORT_SYMBOL_GPL(xpc_connect);
/*
* Remove the registration for automatic connection of the specified channel
@@ -251,9 +275,7 @@ xpc_disconnect(int ch_number)
return;
}
-
-extern enum xp_retval xp_init_sn2(void);
-extern enum xp_retval xp_init_uv(void);
+EXPORT_SYMBOL_GPL(xpc_disconnect);
int __init
xp_init(void)
@@ -268,23 +290,18 @@ xp_init(void)
else
ret = xpUnsupported;
- if (ret != xpSuccess) {
+ if (ret != xpSuccess)
return -ENODEV;
- }
/* initialize the connection registration mutex */
- for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
+ for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++)
mutex_init(&xpc_registrations[ch_number].mutex);
- }
return 0;
}
module_init(xp_init);
-extern void xp_exit_sn2(void);
-extern void xp_exit_uv(void);
-
void __exit
xp_exit(void)
{
@@ -299,30 +316,3 @@ module_exit(xp_exit);
MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition (XP) base");
MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(xp_partition_id);
-EXPORT_SYMBOL(xp_region_size);
-EXPORT_SYMBOL(xp_rtc_cycles_per_second);
-EXPORT_SYMBOL(xp_remote_memcpy);
-EXPORT_SYMBOL(xp_register_remote_amos);
-EXPORT_SYMBOL(xp_unregister_remote_amos);
-EXPORT_SYMBOL(xp_sizeof_nasid_mask);
-EXPORT_SYMBOL(xp_sizeof_amo);
-EXPORT_SYMBOL(xp_alloc_amos);
-EXPORT_SYMBOL(xp_free_amos);
-EXPORT_SYMBOL(xp_set_amo);
-EXPORT_SYMBOL(xp_set_amo_with_interrupt);
-EXPORT_SYMBOL(xp_get_amo);
-EXPORT_SYMBOL(xp_get_partition_rsvd_page_pa);
-EXPORT_SYMBOL(xp_change_memprotect);
-EXPORT_SYMBOL(xp_change_memprotect_shub_wars_1_1);
-EXPORT_SYMBOL(xp_allow_IPI_ops);
-EXPORT_SYMBOL(xp_disallow_IPI_ops);
-EXPORT_SYMBOL(xp_cpu_to_nasid);
-EXPORT_SYMBOL(xp_node_to_nasid);
-EXPORT_SYMBOL(xpc_registrations);
-EXPORT_SYMBOL(xpc_interface);
-EXPORT_SYMBOL(xpc_clear_interface);
-EXPORT_SYMBOL(xpc_set_interface);
-EXPORT_SYMBOL(xpc_connect);
-EXPORT_SYMBOL(xpc_disconnect);
Index: linux-2.6/drivers/misc/xp/xp_uv.c
===================================================================
--- linux-2.6.orig/drivers/misc/xp/xp_uv.c 2008-03-25 07:10:15.697148678 -0500
+++ linux-2.6/drivers/misc/xp/xp_uv.c 2008-03-25 07:10:17.861418696 -0500
@@ -18,8 +18,6 @@
#include <linux/device.h>
#include "xp.h"
-extern struct device *xp;
-
static enum xp_retval
xp_register_nofault_code_uv(void)
{
--
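The xp_* symbols exported above are function pointers rather than functions:
the generic xp module declares them, and an arch-specific backend assigns
them at init time. A minimal sketch of that indirection, with a hypothetical
UV-side implementation (the names and bodies here are illustrative; the real
assignments live in xp_uv.c and its SN2 counterpart):

	/* hypothetical backend function; the body is illustrative only */
	static int
	xp_cpu_to_nasid_uv(int cpuid)
	{
		return 0;	/* a real backend would map cpuid to its NASID */
	}

	enum xp_retval
	xp_init_uv(void)
	{
		/* point the generic hook at this arch's implementation */
		xp_cpu_to_nasid = xp_cpu_to_nasid_uv;
		return xpSuccess;
	}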
* Re: [Patch 5/5] run drivers/misc/xp through scripts/checkpatch.pl
2008-03-25 19:25 ` [Patch 5/5] run drivers/misc/xp through scripts/checkpatch.pl dcn
@ 2008-03-25 20:05 ` Dean Nelson
2008-03-26 10:03 ` Andy Whitcroft
0 siblings, 1 reply; 11+ messages in thread
From: Dean Nelson @ 2008-03-25 20:05 UTC (permalink / raw)
To: apw, rdunlap, jschopp; +Cc: jes, linux-kernel, tony.luck, linux-ia64
On Tue, Mar 25, 2008 at 02:25:29PM -0500, dcn@sgi.com wrote:
>
> Addressed issues raised by scripts/checkpatch.pl. Removed unnecessary curly
> braces. Eliminated uses of volatiles and use of kernel_thread() and
> daemonize().
>
> Signed-off-by: Dean Nelson <dcn@sgi.com>
>
Forgot to mention that scripts/checkpatch.pl gave 15 false positives of
the following type against drivers/misc/xp/xp_main.c.
> WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable
> #48: FILE: misc/xp/xp_main.c:48:
> +EXPORT_SYMBOL_GPL(xp_remote_memcpy);
The fact is that EXPORT_SYMBOL_GPL(xp_remote_memcpy) does immediately follow
its variable, as follows.
enum xp_retval (*xp_remote_memcpy) (void *dst, const void *src, size_t len);
EXPORT_SYMBOL_GPL(xp_remote_memcpy);
* Re: [Patch 0/5] prepare XPC and XPNET to support SGI UV
2008-03-25 19:25 [Patch 0/5] prepare XPC and XPNET to support SGI UV dcn
` (3 preceding siblings ...)
2008-03-25 19:25 ` [Patch 5/5] run drivers/misc/xp through scripts/checkpatch.pl dcn
@ 2008-03-25 20:14 ` Dean Nelson
2008-03-25 22:49 ` Luck, Tony
4 siblings, 1 reply; 11+ messages in thread
From: Dean Nelson @ 2008-03-25 20:14 UTC (permalink / raw)
To: jes, tony.luck, linux-kernel, linux-ia64
On Tue, Mar 25, 2008 at 02:25:24PM -0500, dcn@sgi.com wrote:
> This set of five patches moves XPC and XPNET to drivers/misc/xp in preparation
> for enabling X86_64 support.
It looks like the 2nd patch of this set has been blocked from the mailing
list because of its size. At least that's my assumption. The sizes of the
five patches are as follows (listed in patch order):
-rw-r--r-- 1 dcn os1 4880 2008-03-25 14:05 uncached-pages
-rw-r--r-- 1 dcn os1 471050 2008-03-25 14:10 move-xp
-rw-r--r-- 1 dcn os1 203046 2008-03-25 14:14 generic-xp
-rw-r--r-- 1 dcn os1 163868 2008-03-25 14:17 Lindent
-rw-r--r-- 1 dcn os1 49723 2008-03-25 14:46 checkpatch.pl
The missing patch moved XPC and XPNET from arch/ia64/sn to drivers/misc/xp.
How should such a patch be submitted?
Thanks,
Dean
* RE: [Patch 0/5] prepare XPC and XPNET to support SGI UV
2008-03-25 20:14 ` [Patch 0/5] prepare XPC and XPNET to support SGI UV Dean Nelson
@ 2008-03-25 22:49 ` Luck, Tony
2008-03-25 23:04 ` Andreas Schwab
0 siblings, 1 reply; 11+ messages in thread
From: Luck, Tony @ 2008-03-25 22:49 UTC (permalink / raw)
To: Dean Nelson, jes, linux-kernel, linux-ia64
On Tue, Mar 25, 2008 at 02:25:24PM -0500, dcn@sgi.com wrote:
> This set of five patches moves XPC and XPNET to drivers/misc/xp in preparation
> for enabling X86_64 support.
Part 2 (which I got since you addressed it to me directly as well as
to the list) has a couple of "Space in indent is follwed by a tab"
complaints from GIT. Part 3 has one too. Apart from that all five
parts apply and the build is clean on all my configs. Boots on the
tiger (but this doesn't constitute a review ... just a "you didn't
break anything unrelated while making this change").
> The missing patch moved XPC and XPNET from arch/ia64/sn to drivers/misc/xp.
> How should such a patch be submitted?
I think git has an option to produce its own brand of diff output
that lists the renames without doing the full remove & add that
patch(1) needs in order to comprehend the change. A few minutes staring at
git documentation hasn't revealed what that option is though :-(
For purposes of getting someone to review ... you could hand edit
to show the renames as:
$ mv arch/ia64/sn/kernel/xpc_channel.c drivers/misc/xp/xpc_channel.c
... etc.
and just include the diffs for the Makefiles, Kconfig etc.
Or break part 2 into:
2a - remove driver from arch/ia64
2b - add driver to drivers/misc/xp
-Tony
* Re: [Patch 0/5] prepare XPC and XPNET to support SGI UV
2008-03-25 22:49 ` Luck, Tony
@ 2008-03-25 23:04 ` Andreas Schwab
0 siblings, 0 replies; 11+ messages in thread
From: Andreas Schwab @ 2008-03-25 23:04 UTC (permalink / raw)
To: Luck, Tony; +Cc: Dean Nelson, jes, linux-kernel, linux-ia64
"Luck, Tony" <tony.luck@intel.com> writes:
> I think git has an option to produce its own brand of diff output
> that lists the renames without doing a full remove & add which is
> needed for patch(1) comprehensibility. A few minutes staring at
> git documentation hasn't revealed what that option is though :-(
$ git diff -M
or "git config --global diff.renames true" to make it permanent.
Andreas.
--
Andreas Schwab, SuSE Labs, schwab@suse.de
SuSE Linux Products GmbH, Maxfeldstraße 5, 90409 Nürnberg, Germany
PGP key fingerprint = 58CA 54C7 6D53 942B 1756 01D3 44D5 214B 8276 4ED5
"And now for something completely different."
* Re: [Patch 5/5] run drivers/misc/xp through scripts/checkpatch.pl
2008-03-25 20:05 ` Dean Nelson
@ 2008-03-26 10:03 ` Andy Whitcroft
2008-03-26 16:58 ` Dean Nelson
0 siblings, 1 reply; 11+ messages in thread
From: Andy Whitcroft @ 2008-03-26 10:03 UTC (permalink / raw)
To: Dean Nelson; +Cc: rdunlap, jschopp, jes, linux-kernel, tony.luck, linux-ia64
On Tue, Mar 25, 2008 at 03:05:17PM -0500, Dean Nelson wrote:
> On Tue, Mar 25, 2008 at 02:25:29PM -0500, dcn@sgi.com wrote:
> >
> > Addressed issues raised by scripts/checkpatch.pl. Removed unnecessary curly
> > braces. Eliminated uses of volatiles and use of kernel_thread() and
> > daemonize().
> >
> > Signed-off-by: Dean Nelson <dcn@sgi.com>
> >
>
> Forgot to mention that scripts/checkpatch.pl gave 15 false positives of
> the following type against drivers/misc/xp/xp_main.c.
>
> > WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable
> > #48: FILE: misc/xp/xp_main.c:48:
> > +EXPORT_SYMBOL_GPL(xp_remote_memcpy);
>
> The fact is that the EXPORT_SYMBOL(xp_remote_memcpy) does immediately follow
> the function/variable as follows.
>
> enum xp_retval (*xp_remote_memcpy) (void *dst, const void *src, size_t len);
> EXPORT_SYMBOL_GPL(xp_remote_memcpy);
Gah, whoever came up with that syntax? Yep, that's a falsie. Should be
fixed in the latest version:
http://www.kernel.org/pub/linux/kernel/people/apw/checkpatch/checkpatch.pl-next
Thanks for the report.
-apw
* Re: [Patch 5/5] run drivers/misc/xp through scripts/checkpatch.pl
2008-03-26 10:03 ` Andy Whitcroft
@ 2008-03-26 16:58 ` Dean Nelson
0 siblings, 0 replies; 11+ messages in thread
From: Dean Nelson @ 2008-03-26 16:58 UTC (permalink / raw)
To: Andy Whitcroft; +Cc: rdunlap, jschopp, jes, linux-kernel, tony.luck, linux-ia64
On Wed, Mar 26, 2008 at 10:03:36AM +0000, Andy Whitcroft wrote:
> On Tue, Mar 25, 2008 at 03:05:17PM -0500, Dean Nelson wrote:
>
> Gah, whoever came up with that syntax? Yep, that's a falsie. Should be
> fixed in the latest version:
>
> http://www.kernel.org/pub/linux/kernel/people/apw/checkpatch/checkpatch.pl-next
>
> Thanks for the report.
You're welcome and thanks for the quick fix to checkpatch.pl.
I used the new version and it fixed the problem, except for declarations of
the following form:
enum xp_retval (*xp_set_amo_with_interrupt) (u64 *amo_va, int op, u64 operand,
int remote, int nasid,
int phys_cpuid, int vector);
EXPORT_SYMBOL_GPL(xp_set_amo_with_interrupt);
which still generated the false positive:
WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable
#70: FILE: misc/xp/xp_main.c:70:
+EXPORT_SYMBOL_GPL(xp_set_amo_with_interrupt);
Thanks,
Dean