vmci-only/0000755000000000000000000000000012025726725011501 5ustar rootroot
vmci-only/autoconf/0000755000000000000000000000000012025726724013316 5ustar rootroot
vmci-only/autoconf/geninclude.c0000444000000000000000000000226412025726724015601 0ustar rootroot
/*********************************************************
 * Copyright (C) 2003 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 *********************************************************/

#include <linux/autoconf.h>

#ifdef CONFIG_X86_VOYAGER
APATH/mach-voyager
#endif
#ifdef CONFIG_X86_VISWS
APATH/mach-visws
#endif
#ifdef CONFIG_X86_NUMAQ
APATH/mach-numaq
#endif
#ifdef CONFIG_X86_BIGSMP
APATH/mach-bigsmp
#endif
#ifdef CONFIG_X86_SUMMIT
APATH/mach-summit
#endif
#ifdef CONFIG_X86_GENERICARCH
APATH/mach-generic
#endif
APATH/mach-default
vmci-only/autoconf/epoll.c0000444000000000000000000000235512025726724014600 0ustar rootroot
/*********************************************************
 * Copyright (C) 2004 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 *********************************************************/

/*
 * Detect whether we have 'struct poll_wqueues'.
 * 2.6.x kernels always had this struct. Stock 2.4.x kernels
 * never had it, but some distros backported the epoll patch.
 */

#include <linux/autoconf.h>
#include <linux/version.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
#include <linux/poll.h>

void poll_test(void)
{
   struct poll_wqueues test;

   return poll_initwait(&test);
}
#endif
vmci-only/vmware.h0000444000000000000000000000354112025726724013153 0ustar rootroot
/*********************************************************
 * Copyright (C) 2003 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmware.h -- * * Standard include file for VMware source code. */ #ifndef _VMWARE_H_ #define _VMWARE_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMNIXMOD #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_DISTRIBUTE #include "includeCheck.h" #include "vm_basic_types.h" #include "vm_basic_defs.h" #include "vm_assert.h" /* * Global error codes. Currently used internally, but may be exported * to customers one day, like VM_E_XXX in vmcontrol_constants.h */ typedef enum VMwareStatus { VMWARE_STATUS_SUCCESS, /* success */ VMWARE_STATUS_ERROR, /* generic error */ VMWARE_STATUS_NOMEM, /* generic memory allocation error */ VMWARE_STATUS_INSUFFICIENT_RESOURCES, /* internal or system resource limit exceeded */ VMWARE_STATUS_INVALID_ARGS /* invalid arguments */ } VMwareStatus; #define VMWARE_SUCCESS(s) ((s) == VMWARE_STATUS_SUCCESS) #endif // ifndef _VMWARE_H_ vmci-only/vm_basic_types.h0000444000000000000000000005624212025726724014667 0ustar rootroot/********************************************************* * Copyright (C) 1998-2008 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * * vm_basic_types.h -- * * basic data types. */ #ifndef _VM_BASIC_TYPES_H_ #define _VM_BASIC_TYPES_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_VMMEXT #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMNIXMOD #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_VMKDRIVERS #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_VMIROM #include "includeCheck.h" /* STRICT ANSI means the Xserver build and X defines Bool differently. 
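 * (That is, Bool is only typedef'ed below when __STRICT_ANSI__ is not in
 * effect, with FreeBSD as the exception.)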
*/ #if !defined(__STRICT_ANSI__) || defined(__FreeBSD__) typedef char Bool; #endif #ifndef FALSE #define FALSE 0 #endif #ifndef TRUE #define TRUE 1 #endif #define IsBool(x) (((x) & ~1) == 0) #define IsBool2(x, y) ((((x) | (y)) & ~1) == 0) /* * Macros __i386__ and __ia64 are intrinsically defined by GCC */ #ifdef __i386__ #define VM_I386 #endif #ifdef _WIN64 #define __x86_64__ #endif #ifdef __x86_64__ #define VM_X86_64 #define VM_I386 #define vm_x86_64 (1) #else #define vm_x86_64 (0) #endif #ifdef _WIN32 /* safe assumption for a while */ #define VM_I386 #endif #ifdef _MSC_VER typedef unsigned __int64 uint64; typedef signed __int64 int64; #pragma warning (3 :4505) // unreferenced local function #pragma warning (disable :4018) // signed/unsigned mismatch #pragma warning (disable :4761) // integral size mismatch in argument; conversion supplied #pragma warning (disable :4305) // truncation from 'const int' to 'short' #pragma warning (disable :4244) // conversion from 'unsigned short' to 'unsigned char' #pragma warning (disable :4267) // truncation of 'size_t' #if !defined VMX86_DEVEL // XXX until we clean up all the code -- edward #pragma warning (disable :4133) // incompatible types - from 'struct VM *' to 'int *' #pragma warning (disable :4047) // differs in levels of indirection #endif #pragma warning (disable :4146) // unary minus operator applied to unsigned type, result still unsigned #pragma warning (disable :4142) // benign redefinition of type #elif __GNUC__ /* The Xserver source compiles with -ansi -pendantic */ #ifndef __STRICT_ANSI__ #if defined(VM_X86_64) typedef unsigned long uint64; typedef long int64; #else typedef unsigned long long uint64; typedef long long int64; #endif #elif __FreeBSD__ typedef unsigned long long uint64; typedef long long int64; #endif #else #error - Need compiler define for int64/uint64 #endif typedef unsigned int uint32; typedef unsigned short uint16; typedef unsigned char uint8; typedef int int32; typedef short int16; typedef char int8; /* * FreeBSD (for the tools build) unconditionally defines these in * sys/inttypes.h so don't redefine them if this file has already * been included. [greg] * * This applies to Solaris as well. */ /* * Before trying to do the includes based on OS defines, see if we can use * feature-based defines to get as much functionality as possible */ #ifdef HAVE_INTTYPES_H #include #endif #ifdef HAVE_SYS_TYPES_H #include #endif #ifdef HAVE_SYS_INTTYPES_H #include #endif #ifdef HAVE_STDINT_H #include #endif #ifdef HAVE_STDLIB_H #include #endif #if !defined(USING_AUTOCONF) # if defined(__FreeBSD__) || defined(sun) # ifdef KLD_MODULE # include # else # if (BSD_VERSION >= 50) # include # include # else # include # endif # endif # elif defined __APPLE__ # if KERNEL # include # include /* mostly for size_t */ # include # else # include # include # include # include # endif # else # if !defined(__intptr_t_defined) && !defined(intptr_t) # define __intptr_t_defined # define intptr_t intptr_t # ifdef VM_I386 # ifdef VM_X86_64 typedef int64 intptr_t; # else typedef int32 intptr_t; # endif # endif # endif # ifndef _STDINT_H # ifdef VM_I386 # ifdef VM_X86_64 typedef uint64 uintptr_t; # else typedef uint32 uintptr_t; # endif # endif # endif # endif #endif /* * Time * XXX These should be cleaned up. 
-- edward */ typedef int64 VmTimeType; /* Time in microseconds */ typedef int64 VmTimeRealClock; /* Real clock kept in microseconds */ typedef int64 VmTimeVirtualClock; /* Virtual Clock kept in CPU cycles */ /* * Printf format specifiers for size_t and 64-bit number. * Use them like this: * printf("%"FMT64"d\n", big); * * FMTH is for handles/fds. */ #ifdef _MSC_VER #define FMT64 "I64" #ifdef VM_X86_64 #define FMTSZ "I64" #define FMTPD "I64" #define FMTH "I64" #else #define FMTSZ "I" #define FMTPD "I" #define FMTH "I" #endif #elif __GNUC__ #define FMTH "" #if defined(N_PLAT_NLM) || defined(sun) || \ (defined(__FreeBSD__) && (__FreeBSD__ + 0) && ((__FreeBSD__ + 0) < 5)) /* * Why (__FreeBSD__ + 0)? See bug 141008. * Yes, we really need to test both (__FreeBSD__ + 0) and * ((__FreeBSD__ + 0) < 5). No, we can't remove "+ 0" from * ((__FreeBSD__ + 0) < 5). */ #ifdef VM_X86_64 #define FMTSZ "l" #define FMTPD "l" #else #define FMTSZ "" #define FMTPD "" #endif #elif defined(__linux__) \ || (defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE >= 200112L) \ || (defined(_POSIX_VERSION) && _POSIX_VERSION >= 200112L) \ || (defined(_POSIX2_VERSION) && _POSIX2_VERSION >= 200112L) /* BSD/Darwin, Linux */ #define FMTSZ "z" #define FMTPD "t" #else /* Systems with a pre-C99 libc */ #define FMTSZ "Z" #ifdef VM_X86_64 #define FMTPD "l" #else #define FMTPD "" #endif #endif #ifdef VM_X86_64 #define FMT64 "l" #elif defined(sun) || defined(__APPLE__) || defined(__FreeBSD__) #define FMT64 "ll" #else #define FMT64 "L" #endif #else #error - Need compiler define for FMT64 and FMTSZ #endif /* * Suffix for 64-bit constants. Use it like this: * CONST64(0x7fffffffffffffff) for signed or * CONST64U(0x7fffffffffffffff) for unsigned. * * 2004.08.30(thutt): * The vmcore/asm64/gen* programs are compiled as 32-bit * applications, but must handle 64 bit constants. If the * 64-bit-constant defining macros are already defined, the * definition will not be overwritten. */ #if !defined(CONST64) || !defined(CONST64U) #ifdef _MSC_VER #define CONST64(c) c##I64 #define CONST64U(c) c##uI64 #elif __GNUC__ #ifdef VM_X86_64 #define CONST64(c) c##L #define CONST64U(c) c##uL #else #define CONST64(c) c##LL #define CONST64U(c) c##uLL #endif #else #error - Need compiler define for CONST64 #endif #endif /* * Use CONST3264/CONST3264U if you want a constant to be * treated as a 32-bit number on 32-bit compiles and * a 64-bit number on 64-bit compiles. Useful in the case * of shifts, like (CONST3264U(1) << x), where x could be * more than 31 on a 64-bit compile. */ #ifdef VM_X86_64 #define CONST3264(a) CONST64(a) #define CONST3264U(a) CONST64U(a) #else #define CONST3264(a) (a) #define CONST3264U(a) (a) #endif #define MIN_INT32 ((int32)0x80000000) #define MAX_INT32 ((int32)0x7fffffff) #define MIN_UINT32 ((uint32)0) #define MAX_UINT32 ((uint32)0xffffffff) #define MIN_INT64 (CONST64(0x8000000000000000)) #define MAX_INT64 (CONST64(0x7fffffffffffffff)) #define MIN_UINT64 (CONST64U(0)) #define MAX_UINT64 (CONST64U(0xffffffffffffffff)) typedef uint8 *TCA; /* Pointer into TC (usually). 
*/ /* * Type big enough to hold an integer between 0..100 */ typedef uint8 Percent; #define AsPercent(v) ((Percent)(v)) #define CHOOSE_PERCENT AsPercent(101) typedef uintptr_t VA; typedef uintptr_t VPN; typedef uint64 PA; typedef uint32 PPN; typedef uint64 PhysMemOff; typedef uint64 PhysMemSize; /* The Xserver source compiles with -ansi -pendantic */ #ifndef __STRICT_ANSI__ typedef uint64 BA; #endif typedef uint32 BPN; typedef uint32 PageNum; typedef unsigned MemHandle; typedef int32 World_ID; #define INVALID_WORLD_ID ((World_ID)0) typedef World_ID User_CartelID; #define INVALID_CARTEL_ID INVALID_WORLD_ID typedef User_CartelID User_SessionID; #define INVALID_SESSION_ID INVALID_CARTEL_ID typedef User_CartelID User_CartelGroupID; #define INVALID_CARTELGROUP_ID INVALID_CARTEL_ID typedef uint32 Worldlet_ID; #define INVALID_WORLDLET_ID ((Worldlet_ID)0) /* world page number */ typedef uint32 WPN; /* The Xserver source compiles with -ansi -pendantic */ #ifndef __STRICT_ANSI__ typedef uint64 MA; typedef uint32 MPN; #endif /* * This type should be used for variables that contain sector * position/quantity. */ typedef uint64 SectorType; /* * Linear address */ typedef uintptr_t LA; typedef uintptr_t LPN; #define LA_2_LPN(_la) ((_la) >> PAGE_SHIFT) #define LPN_2_LA(_lpn) ((_lpn) << PAGE_SHIFT) #define LAST_LPN ((((LA) 1) << (8 * sizeof(LA) - PAGE_SHIFT)) - 1) #define LAST_LPN32 ((((LA32)1) << (8 * sizeof(LA32) - PAGE_SHIFT)) - 1) #define LAST_LPN64 ((((LA64)1) << (8 * sizeof(LA64) - PAGE_SHIFT)) - 1) /* Valid bits in a LPN. */ #define LPN_MASK LAST_LPN #define LPN_MASK32 LAST_LPN32 #define LPN_MASK64 LAST_LPN64 /* * On 64 bit platform, address and page number types default * to 64 bit. When we need to represent a 32 bit address, we use * types defined below. * * On 32 bit platform, the following types are the same as the * default types. */ typedef uint32 VA32; typedef uint32 VPN32; typedef uint32 LA32; typedef uint32 LPN32; typedef uint32 PA32; typedef uint32 PPN32; typedef uint32 MA32; typedef uint32 MPN32; /* * On 64 bit platform, the following types are the same as the * default types. */ typedef uint64 VA64; typedef uint64 VPN64; typedef uint64 LA64; typedef uint64 LPN64; typedef uint64 PA64; typedef uint64 PPN64; typedef uint64 MA64; typedef uint64 MPN64; /* * VA typedefs for user world apps. */ typedef VA32 UserVA32; typedef VA64 UserVA64; typedef UserVA32 UserVAConst; /* Userspace ptr to data that we may only read. */ typedef UserVA64 UserVA64Const; /* Used by 64-bit syscalls until conversion is finished. */ #ifdef VMKERNEL typedef UserVA32 UserVA; #else typedef void * UserVA; #endif /* * Maximal possible PPN value (errors too) that PhysMem can handle. * Must be at least as large as MAX_PPN which is the maximum PPN * for any region other than buserror. */ #define PHYSMEM_MAX_PPN ((PPN)0xffffffff) #define MAX_PPN ((PPN)0x1fffffff) /* Maximal observable PPN value. */ #define INVALID_PPN ((PPN)0xffffffff) #define INVALID_BPN ((BPN) 0x1fffffff) #define INVALID_MPN ((MPN)-1) #define MEMREF_MPN ((MPN)-2) #define RESERVED_MPN ((MPN) 0) /* Support 43 bits of address space. */ #define MAX_MPN ((MPN)0x7fffffff) #define INVALID_LPN ((LPN)-1) #define INVALID_VPN ((VPN)-1) #define INVALID_LPN64 ((LPN64)-1) #define INVALID_PAGENUM ((PageNum)-1) #define INVALID_WPN ((WPN) -1) /* * Format modifier for printing VA, LA, and VPN. 
* Use them like this: Log("%#"FMTLA"x\n", laddr) */ #if defined(VMM64) || defined(FROBOS64) || vm_x86_64 || defined __APPLE__ # define FMTLA "l" # define FMTVA "l" # define FMTVPN "l" #else # define FMTLA "" # define FMTVA "" # define FMTVPN "" #endif #define EXTERN extern #define CONST const #ifndef INLINE # ifdef _MSC_VER # define INLINE __inline # else # define INLINE inline # endif #endif /* * Annotation for data that may be exported into a DLL and used by other * apps that load that DLL and import the data. */ #if defined(_WIN32) && defined(VMX86_IMPORT_DLLDATA) # define VMX86_EXTERN_DATA extern __declspec(dllimport) #else // !_WIN32 # define VMX86_EXTERN_DATA extern #endif #if defined(_WIN32) && !defined(VMX86_NO_THREADS) #define THREADSPECIFIC __declspec(thread) #else #define THREADSPECIFIC #endif /* * Due to the wonderful "registry redirection" feature introduced in * 64-bit Windows, if you access any key under HKLM\Software in 64-bit * code, you need to open/create/delete that key with * VMKEY_WOW64_32KEY if you want a consistent view with 32-bit code. */ #ifdef _WIN32 #ifdef _WIN64 #define VMW_KEY_WOW64_32KEY KEY_WOW64_32KEY #else #define VMW_KEY_WOW64_32KEY 0x0 #endif #endif /* * Consider the following reasons functions are inlined: * * 1) inlined for performance reasons * 2) inlined because it's a single-use function * * Functions which meet only condition 2 should be marked with this * inline macro; It is not critical to be inlined (but there is a * code-space & runtime savings by doing so), so when other callers * are added the inline-ness should be removed. */ #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3) /* * Starting at version 3.3, gcc does not always inline functions marked * 'inline' (it depends on their size). To force gcc to do so, one must use the * extra __always_inline__ attribute. */ # define INLINE_SINGLE_CALLER INLINE __attribute__((__always_inline__)) # if defined(VMM) \ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 1)) # warning Verify INLINE_SINGLE_CALLER '__always_inline__' attribute (did \ monitor size change?) # endif #else # define INLINE_SINGLE_CALLER INLINE #endif /* * Used when a hard guaranteed of no inlining is needed. Very few * instances need this since the absence of INLINE is a good hint * that gcc will not do inlining. */ #if defined(__GNUC__) && defined(VMM) #define ABSOLUTELY_NOINLINE __attribute__((__noinline__)) #endif /* * Attributes placed on function declarations to tell the compiler * that the function never returns. */ #ifdef _MSC_VER #define NORETURN __declspec(noreturn) #elif __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 9) #define NORETURN __attribute__((__noreturn__)) #else #define NORETURN #endif /* * GCC 3.2 inline asm needs the + constraint for input/ouput memory operands. * Older GCCs don't know about it --hpreg */ #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 2) # define VM_ASM_PLUS 1 #else # define VM_ASM_PLUS 0 #endif /* * Branch prediction hints: * LIKELY(exp) - Expression exp is likely TRUE. * UNLIKELY(exp) - Expression exp is likely FALSE. * Usage example: * if (LIKELY(excCode == EXC_NONE)) { * or * if (UNLIKELY(REAL_MODE(vc))) { * * We know how to predict branches on gcc3 and later (hopefully), * all others we don't so we do nothing. */ #if (__GNUC__ >= 3) /* * gcc3 uses __builtin_expect() to inform the compiler of an expected value. * We use this to inform the static branch predictor. The '!!' in LIKELY * will convert any !=0 to a 1. 
*/ #define LIKELY(_exp) __builtin_expect(!!(_exp), 1) #define UNLIKELY(_exp) __builtin_expect((_exp), 0) #else #define LIKELY(_exp) (_exp) #define UNLIKELY(_exp) (_exp) #endif /* * GCC's argument checking for printf-like functions * This is conditional until we have replaced all `"%x", void *' * with `"0x%08x", (uint32) void *'. Note that %p prints different things * on different platforms. Argument checking is enabled for the * vmkernel, which has already been cleansed. * * fmtPos is the position of the format string argument, beginning at 1 * varPos is the position of the variable argument, beginning at 1 */ #if defined(__GNUC__) # define PRINTF_DECL(fmtPos, varPos) __attribute__((__format__(__printf__, fmtPos, varPos))) #else # define PRINTF_DECL(fmtPos, varPos) #endif #if defined(__GNUC__) # define SCANF_DECL(fmtPos, varPos) __attribute__((__format__(__scanf__, fmtPos, varPos))) #else # define SCANF_DECL(fmtPos, varPos) #endif /* * UNUSED_PARAM should surround the parameter name and type declaration, * e.g. "int MyFunction(int var1, UNUSED_PARAM(int var2))" * */ #ifndef UNUSED_PARAM # if defined(__GNUC__) # define UNUSED_PARAM(_parm) _parm __attribute__((__unused__)) # else # define UNUSED_PARAM(_parm) _parm # endif #endif /* * REGPARM defaults to REGPARM3, i.e., a requent that gcc * puts the first three arguments in registers. (It is fine * if the function has fewer than three args.) Gcc only. * Syntactically, put REGPARM where you'd put INLINE or NORETURN. */ #if defined(__GNUC__) # define REGPARM0 __attribute__((regparm(0))) # define REGPARM1 __attribute__((regparm(1))) # define REGPARM2 __attribute__((regparm(2))) # define REGPARM3 __attribute__((regparm(3))) # define REGPARM REGPARM3 #else # define REGPARM0 # define REGPARM1 # define REGPARM2 # define REGPARM3 # define REGPARM #endif /* * ALIGNED specifies minimum alignment in "n" bytes. */ #ifdef __GNUC__ #define ALIGNED(n) __attribute__((__aligned__(n))) #else #define ALIGNED(n) #endif /* *********************************************************************** * STRUCT_OFFSET_CHECK -- */ /** * * \brief Check if the actual offsef of a member in a structure * is what is expected * * * \param[in] STRUCT Structure the member is a part of. * \param[in] MEMBER Member to check the offset of. * \param[in] OFFSET Expected offset of MEMBER in STRUCTURE. * \param[in] DEBUG_EXTRA Additional bytes to be added to OFFSET to * compensate for extra info in debug builds. * *********************************************************************** */ #ifdef VMX86_DEBUG #define STRUCT_OFFSET_CHECK(STRUCT, MEMBER, OFFSET, DEBUG_EXTRA) \ ASSERT_ON_COMPILE(vmk_offsetof(STRUCT, MEMBER) == (OFFSET + DEBUG_EXTRA)) #else #define STRUCT_OFFSET_CHECK(STRUCT, MEMBER, OFFSET, DEBUG_EXTRA) \ ASSERT_ON_COMPILE(vmk_offsetof(STRUCT, MEMBER) == OFFSET) #endif /* *********************************************************************** * STRUCT_SIZE_CHECK -- */ /** * * \brief Check if the actual size of a structure is what is expected * * * \param[in] STRUCT Structure whose size is to be checked. * \param[in] SIZE Expected size of STRUCT. * \param[in] DEBUG_EXTRA Additional bytes to be added to SIZE to * compensate for extra info in debug builds. 
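 *
 * Example (an illustrative sketch, not from the original header;
 * VMCIHandle is two uint32 fields, i.e. 8 bytes, with no debug extra):
 *
 *    STRUCT_SIZE_CHECK(VMCIHandle, 8, 0);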
* *********************************************************************** */ #ifdef VMX86_DEBUG #define STRUCT_SIZE_CHECK(STRUCT, SIZE, DEBUG_EXTRA) \ ASSERT_ON_COMPILE(sizeof(STRUCT) == (SIZE + DEBUG_EXTRA)) #else #define STRUCT_SIZE_CHECK(STRUCT, SIZE, DEBUG_EXTRA) \ ASSERT_ON_COMPILE(sizeof(STRUCT) == SIZE) #endif /* * __func__ is a stringified function name that is part of the C99 standard. The block * below defines __func__ on older systems where the compiler does not support that * macro. */ #if defined(__GNUC__) \ && ((__GNUC__ == 2 && __GNUC_MINOR < 96) \ || (__GNUC__ < 2)) # define __func__ __FUNCTION__ #endif /* * Once upon a time, this was used to silence compiler warnings that * get generated when the compiler thinks that a function returns * when it is marked noreturn. Don't do it. Use NOT_REACHED(). */ #define INFINITE_LOOP() do { } while (1) /* * On FreeBSD (for the tools build), size_t is typedef'd if _BSD_SIZE_T_ * is defined. Use the same logic here so we don't define it twice. [greg] */ #ifdef __FreeBSD__ # ifdef _BSD_SIZE_T_ # undef _BSD_SIZE_T_ # ifdef VM_I386 # ifdef VM_X86_64 typedef uint64 size_t; # else typedef uint32 size_t; # endif # endif /* VM_I386 */ # endif # ifdef _BSD_SSIZE_T_ # undef _BSD_SSIZE_T_ # define _SSIZE_T # define __ssize_t_defined # define _SSIZE_T_DECLARED # ifdef VM_I386 # ifdef VM_X86_64 typedef int64 ssize_t; # else typedef int32 ssize_t; # endif # endif /* VM_I386 */ # endif #else # ifndef _SIZE_T # define _SIZE_T # ifdef VM_I386 # ifdef VM_X86_64 typedef uint64 size_t; # else typedef uint32 size_t; # endif # endif /* VM_I386 */ # endif # if !defined(FROBOS) && !defined(_SSIZE_T) && !defined(ssize_t) && \ !defined(__ssize_t_defined) && !defined(_SSIZE_T_DECLARED) # define _SSIZE_T # define __ssize_t_defined # define _SSIZE_T_DECLARED # ifdef VM_I386 # ifdef VM_X86_64 typedef int64 ssize_t; # else typedef int32 ssize_t; # endif # endif /* VM_I386 */ # endif #endif /* * Format modifier for printing pid_t. On sun the pid_t is a ulong, but on * Linux it's an int. * Use this like this: printf("The pid is %"FMTPID".\n", pid); */ #ifdef sun # ifdef VM_X86_64 # define FMTPID "d" # else # define FMTPID "lu" # endif #else # define FMTPID "d" #endif /* * Format modifier for printing uid_t. On sun the uid_t is a ulong, but on * Linux it's an int. * Use this like this: printf("The uid is %"FMTUID".\n", uid); */ #ifdef sun # ifdef VM_X86_64 # define FMTUID "u" # else # define FMTUID "lu" # endif #else # define FMTUID "u" #endif /* * Format modifier for printing mode_t. On sun the mode_t is a ulong, but on * Linux it's an int. * Use this like this: printf("The mode is %"FMTMODE".\n", mode); */ #ifdef sun # ifdef VM_X86_64 # define FMTMODE "o" # else # define FMTMODE "lo" # endif #else # define FMTMODE "o" #endif /* * Format modifier for printing time_t. Most platforms define a time_t to be * a long int, but on FreeBSD (as of 5.0, it seems), the time_t is a signed * size quantity. Refer to the definition of FMTSZ to see why we need silly * preprocessor arithmetic. * Use this like this: printf("The mode is %"FMTTIME".\n", time); */ #if defined(__FreeBSD__) && (__FreeBSD__ + 0) && ((__FreeBSD__ + 0) >= 5) # define FMTTIME FMTSZ"d" #else # define FMTTIME "ld" #endif /* * Define MXSemaHandle here so both vmmon and vmx see this definition. */ #ifdef _WIN32 typedef uintptr_t MXSemaHandle; #else typedef int MXSemaHandle; #endif /* * Define type for poll device handles. 
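 * (uintptr_t on Windows so a HANDLE-sized value fits; a plain int file
 * descriptor elsewhere, as the typedefs below show.)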
*/ #ifdef _WIN32 typedef uintptr_t PollDevHandle; #else typedef int PollDevHandle; #endif /* * Define the utf16_t type. */ #if defined(_WIN32) && defined(_NATIVE_WCHAR_T_DEFINED) typedef wchar_t utf16_t; #else typedef uint16 utf16_t; #endif #endif /* _VM_BASIC_TYPES_H_ */ vmci-only/vm_device_version.h0000444000000000000000000001611412025726724015360 0ustar rootroot/********************************************************* * Copyright (C) 1998 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef VM_DEVICE_VERSION_H #define VM_DEVICE_VERSION_H #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_VMMEXT #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_VMCORE #include "includeCheck.h" #ifdef _WIN32 #include "guiddef.h" #endif /* Our own PCI IDs * VMware SVGA II (Unified VGA) * VMware SVGA (PCI Accelerator) * VMware vmxnet (Idealized NIC) * VMware vmxscsi (Abortive idealized SCSI controller) * VMware chipset (Subsystem ID for our motherboards) * VMware e1000 (Subsystem ID) * VMware vmxnet3 (Uniform Pass Through NIC) */ #define PCI_VENDOR_ID_VMWARE 0x15AD #define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405 #define PCI_DEVICE_ID_VMWARE_SVGA 0x0710 #define PCI_DEVICE_ID_VMWARE_NET 0x0720 #define PCI_DEVICE_ID_VMWARE_SCSI 0x0730 #define PCI_DEVICE_ID_VMWARE_VMCI 0x0740 #define PCI_DEVICE_ID_VMWARE_CHIPSET 0x1976 #define PCI_DEVICE_ID_VMWARE_82545EM 0x0750 /* single port */ #define PCI_DEVICE_ID_VMWARE_82546EB 0x0760 /* dual port */ #define PCI_DEVICE_ID_VMWARE_EHCI 0x0770 #define PCI_DEVICE_ID_VMWARE_1394 0x0780 #define PCI_DEVICE_ID_VMWARE_BRIDGE 0x0790 #define PCI_DEVICE_ID_VMWARE_ROOTPORT 0x07A0 #define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07B0 #define PCI_DEVICE_ID_VMWARE_PVSCSI 0x07C0 /* The hypervisor device might grow. Please leave room * for 7 more subfunctions. 
*/ #define PCI_DEVICE_ID_VMWARE_HYPER 0x0800 #define PCI_DEVICE_ID_VMWARE_VMI 0x0801 #define PCI_DEVICE_VMI_CLASS 0x05 #define PCI_DEVICE_VMI_SUBCLASS 0x80 #define PCI_DEVICE_VMI_INTERFACE 0x00 #define PCI_DEVICE_VMI_REVISION 0x01 /* From linux/pci_ids.h: * AMD Lance Ethernet controller * BusLogic SCSI controller * Ensoniq ES1371 sound controller */ #define PCI_VENDOR_ID_AMD 0x1022 #define PCI_DEVICE_ID_AMD_VLANCE 0x2000 #define PCI_VENDOR_ID_BUSLOGIC 0x104B #define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140 #define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040 #define PCI_VENDOR_ID_ENSONIQ 0x1274 #define PCI_DEVICE_ID_ENSONIQ_ES1371 0x1371 /* From linux/pci_ids.h: * Intel 82439TX (430 HX North Bridge) * Intel 82371AB (PIIX4 South Bridge) * Intel 82443BX (440 BX North Bridge and AGP Bridge) * Intel 82545EM (e1000, server adapter, single port) * Intel 82546EB (e1000, server adapter, dual port) */ #define PCI_VENDOR_ID_INTEL 0x8086 #define PCI_DEVICE_ID_INTEL_82439TX 0x7100 #define PCI_DEVICE_ID_INTEL_82371AB_0 0x7110 #define PCI_DEVICE_ID_INTEL_82371AB_2 0x7112 #define PCI_DEVICE_ID_INTEL_82371AB_3 0x7113 #define PCI_DEVICE_ID_INTEL_82371AB 0x7111 #define PCI_DEVICE_ID_INTEL_82443BX 0x7190 #define PCI_DEVICE_ID_INTEL_82443BX_1 0x7191 #define PCI_DEVICE_ID_INTEL_82443BX_2 0x7192 /* Used when no AGP support */ #define PCI_DEVICE_ID_INTEL_82545EM 0x100f #define PCI_DEVICE_ID_INTEL_82546EB 0x1010 /************* Strings for IDE Identity Fields **************************/ #define VIDE_ID_SERIAL_STR "00000000000000000001" /* Must be 20 Bytes */ #define VIDE_ID_FIRMWARE_STR "00000001" /* Must be 8 Bytes */ /* No longer than 40 Bytes */ #define VIDE_ATA_MODEL_STR PRODUCT_GENERIC_NAME " Virtual IDE Hard Drive" #define VIDE_ATAPI_MODEL_STR PRODUCT_GENERIC_NAME " Virtual IDE CDROM Drive" #define ATAPI_VENDOR_ID "NECVMWar" /* Must be 8 Bytes */ #define ATAPI_PRODUCT_ID PRODUCT_GENERIC_NAME " IDE CDROM" /* Must be 16 Bytes */ #define ATAPI_REV_LEVEL "1.00" /* Must be 4 Bytes */ #define IDE_NUM_INTERFACES 2 /* support for two interfaces */ #define IDE_DRIVES_PER_IF 2 /************* Strings for SCSI Identity Fields **************************/ #define SCSI_DISK_MODEL_STR PRODUCT_GENERIC_NAME " Virtual SCSI Hard Drive" #define SCSI_DISK_VENDOR_NAME COMPANY_NAME #define SCSI_DISK_REV_LEVEL "1.0" #define SCSI_CDROM_MODEL_STR PRODUCT_GENERIC_NAME " Virtual SCSI CDROM Drive" #define SCSI_CDROM_VENDOR_NAME COMPANY_NAME #define SCSI_CDROM_REV_LEVEL "1.0" /************* SCSI implementation limits ********************************/ #define SCSI_MAX_CONTROLLERS 4 // Need more than 1 for MSCS clustering #define SCSI_MAX_DEVICES 16 // BT-958 emulates only 16 #define SCSI_IDE_CHANNEL SCSI_MAX_CONTROLLERS #define SCSI_IDE_HOSTED_CHANNEL (SCSI_MAX_CONTROLLERS + 1) #define SCSI_MAX_CHANNELS (SCSI_MAX_CONTROLLERS + 2) /************* Strings for the VESA BIOS Identity Fields *****************/ #define VBE_OEM_STRING COMPANY_NAME " SVGA" #define VBE_VENDOR_NAME COMPANY_NAME #define VBE_PRODUCT_NAME PRODUCT_GENERIC_NAME /************* PCI implementation limits ********************************/ #define PCI_MAX_BRIDGES 15 /************* Ethernet implementation limits ***************************/ #define MAX_ETHERNET_CARDS 10 /************* PCI Passthrough implementation limits ********************/ #define MAX_PCI_PASSTHRU_DEVICES 2 /************* USB implementation limits ********************************/ #define MAX_USB_DEVICES_PER_HOST_CONTROLLER 127 /************* Strings for Host USB Driver *******************************/ 
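
/*
 * Illustrative sketch (not part of the original header): how a Linux PCI
 * driver could match the VMCI device using PCI_VENDOR_ID_VMWARE and
 * PCI_DEVICE_ID_VMWARE_VMCI defined earlier in this file. Assumes
 * <linux/pci.h> for struct pci_device_id and the PCI_DEVICE() helper;
 * kept under #if 0 because this header is meant to define constants only.
 */
#if 0
#include <linux/pci.h>

static const struct pci_device_id vmciIdTable[] = {
   { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI) },
   { 0 },
};
#endif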
#ifdef _WIN32 /* * Globally unique ID for the VMware device interface. Define INITGUID before including * this header file to instantiate the variable. */ DEFINE_GUID(GUID_DEVICE_INTERFACE_VMWARE_USB_DEVICES, 0x2da1fe75, 0xaab3, 0x4d2c, 0xac, 0xdf, 0x39, 0x8, 0x8c, 0xad, 0xa6, 0x65); /* * Globally unique ID for the VMware device setup class. */ DEFINE_GUID(GUID_CLASS_VMWARE_USB_DEVICES, 0x3b3e62a5, 0x3556, 0x4d7e, 0xad, 0xad, 0xf5, 0xfa, 0x3a, 0x71, 0x2b, 0x56); /* * This string defines the device ID string of a VMware USB device. * The format is USB\Vid_XXXX&Pid_YYYY, where XXXX and YYYY are the * hexadecimal representations of the vendor and product ids, respectively. * * The official vendor ID for VMware, Inc. is 0x0E0F. * The product id for USB generic devices is 0x0001. */ #define USB_VMWARE_DEVICE_ID_WIDE L"USB\\Vid_0E0F&Pid_0001" #define USB_DEVICE_ID_LENGTH (sizeof(USB_VMWARE_DEVICE_ID_WIDE) / sizeof(WCHAR)) #ifdef UNICODE #define USB_PNP_SETUP_CLASS_NAME L"VMwareUSBDevices" #define USB_PNP_DRIVER_NAME L"vmusb" #else #define USB_PNP_SETUP_CLASS_NAME "VMwareUSBDevices" #define USB_PNP_DRIVER_NAME "vmusb" #endif #endif #endif /* VM_DEVICE_VERSION_H */ vmci-only/vm_basic_defs.h0000444000000000000000000003156512025726724014445 0ustar rootroot/********************************************************* * Copyright (C) 2003 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vm_basic_defs.h -- * * Standard macros for VMware source code. */ #ifndef _VM_BASIC_DEFS_H_ #define _VM_BASIC_DEFS_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_VMMEXT #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMNIXMOD #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_VMKDRIVERS #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_VMIROM #include "includeCheck.h" #include "vm_basic_types.h" // For INLINE. #if defined _WIN32 && defined USERLEVEL #include /* * We re-define offsetof macro from stddef, make * sure that its already defined before we do it */ #include // for Sleep() and LOWORD() etc. #endif /* * Simple macros */ #if defined __APPLE__ && !defined KERNEL # include #else // XXX the __cplusplus one matches that of VC++, to prevent redefinition warning // XXX the other one matches that of gcc3.3.3/glibc2.2.4 to prevent redefinition warnings #ifndef offsetof #ifdef __cplusplus #define offsetof(s,m) (size_t)&(((s *)0)->m) #else #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) #endif #endif #endif // __APPLE__ #ifndef ARRAYSIZE #define ARRAYSIZE(a) (sizeof (a) / sizeof *(a)) #endif #ifndef MIN #define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b)) #endif /* The Solaris 9 cross-compiler complains about these not being used */ #ifndef sun static INLINE int Min(int a, int b) { return a < b ? 
a : b; } #endif #ifndef MAX #define MAX(_a, _b) (((_a) > (_b)) ? (_a) : (_b)) #endif #ifndef sun static INLINE int Max(int a, int b) { return a > b ? a : b; } #endif #define ROUNDUP(x,y) (((x) + (y) - 1) / (y) * (y)) #define ROUNDDOWN(x,y) ((x) / (y) * (y)) #define ROUNDUPBITS(x, bits) (((uintptr_t) (x) + MASK(bits)) & ~MASK(bits)) #define ROUNDDOWNBITS(x, bits) ((uintptr_t) (x) & ~MASK(bits)) #define CEILING(x, y) (((x) + (y) - 1) / (y)) #if defined __APPLE__ #include #undef MASK #endif #define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */ #define DWORD_ALIGN(x) ((((x)+3) >> 2) << 2) #define QWORD_ALIGN(x) ((((x)+4) >> 3) << 3) #define IMPLIES(a,b) (!(a) || (b)) /* * Not everybody (e.g., the monitor) has NULL */ #ifndef NULL #ifdef __cplusplus #define NULL 0 #else #define NULL ((void *)0) #endif #endif /* * Token concatenation * * The C preprocessor doesn't prescan arguments when they are * concatenated or stringified. So we need extra levels of * indirection to convince the preprocessor to expand its * arguments. */ #define CONC(x, y) x##y #define XCONC(x, y) CONC(x, y) #define XXCONC(x, y) XCONC(x, y) #define MAKESTR(x) #x #define XSTR(x) MAKESTR(x) /* * Page operations * * It has been suggested that these definitions belong elsewhere * (like x86types.h). However, I deem them common enough * (since even regular user-level programs may want to do * page-based memory manipulation) to be here. * -- edward */ #ifndef PAGE_SHIFT // { #if defined VM_I386 #define PAGE_SHIFT 12 #elif defined __APPLE__ #define PAGE_SHIFT 12 #else #error #endif #endif // } #ifndef PAGE_SIZE #define PAGE_SIZE (1<> PAGE_SHIFT) #endif #ifndef BYTES_2_PAGES #define BYTES_2_PAGES(_nbytes) ((_nbytes) >> PAGE_SHIFT) #endif #ifndef PAGES_2_BYTES #define PAGES_2_BYTES(_npages) (((uint64)(_npages)) << PAGE_SHIFT) #endif #ifndef MBYTES_2_PAGES #define MBYTES_2_PAGES(_nbytes) ((_nbytes) << (20 - PAGE_SHIFT)) #endif #ifndef PAGES_2_MBYTES #define PAGES_2_MBYTES(_npages) ((_npages) >> (20 - PAGE_SHIFT)) #endif #ifndef VM_PAE_LARGE_PAGE_SHIFT #define VM_PAE_LARGE_PAGE_SHIFT 21 #endif #ifndef VM_PAE_LARGE_PAGE_SIZE #define VM_PAE_LARGE_PAGE_SIZE (1 << VM_PAE_LARGE_PAGE_SHIFT) #endif #ifndef VM_PAE_LARGE_PAGE_MASK #define VM_PAE_LARGE_PAGE_MASK (VM_PAE_LARGE_PAGE_SIZE - 1) #endif #ifndef VM_PAE_LARGE_2_SMALL_PAGES #define VM_PAE_LARGE_2_SMALL_PAGES (BYTES_2_PAGES(VM_PAE_LARGE_PAGE_SIZE)) #endif /* * Word operations */ #ifndef LOWORD #define LOWORD(_dw) ((_dw) & 0xffff) #endif #ifndef HIWORD #define HIWORD(_dw) (((_dw) >> 16) & 0xffff) #endif #ifndef LOBYTE #define LOBYTE(_w) ((_w) & 0xff) #endif #ifndef HIBYTE #define HIBYTE(_w) (((_w) >> 8) & 0xff) #endif #define HIDWORD(_qw) ((uint32)((_qw) >> 32)) #define LODWORD(_qw) ((uint32)(_qw)) #define QWORD(_hi, _lo) ((((uint64)(_hi)) << 32) | ((uint32)(_lo))) /* * Deposit a field _src at _pos bits from the right, * with a length of _len, into the integer _target. */ #define DEPOSIT_BITS(_src,_pos,_len,_target) { \ unsigned mask = ((1 << _len) - 1); \ unsigned shiftedmask = ((1 << _len) - 1) << _pos; \ _target = (_target & ~shiftedmask) | ((_src & mask) << _pos); \ } /* * Get return address. */ #ifdef _MSC_VER #ifdef __cplusplus extern "C" #endif void *_ReturnAddress(void); #pragma intrinsic(_ReturnAddress) #define GetReturnAddress() _ReturnAddress() #elif __GNUC__ #define GetReturnAddress() __builtin_return_address(0) #endif #ifdef __GNUC__ #ifndef sun /* * Get the frame pointer. 
We use this assembly hack instead of * __builtin_frame_address() due to a bug introduced in gcc 4.1.1 */ static INLINE_SINGLE_CALLER uintptr_t GetFrameAddr(void) { uintptr_t bp; #if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ == 0)) bp = (uintptr_t)__builtin_frame_address(0); #elif (__GNUC__ == 4 && __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 3) # if defined(VMM64) || defined(VM_X86_64) __asm__ __volatile__("movq %%rbp, %0\n" : "=g" (bp)); # else __asm__ __volatile__("movl %%ebp, %0\n" : "=g" (bp)); # endif #else __asm__ __volatile__( #ifdef __linux__ ".print \"This newer version of GCC may or may not have the " "__builtin_frame_address bug. Need to update this. " "See bug 147638.\"\n" ".abort" #else /* MacOS */ ".abort \"This newer version of GCC may or may not have the " "__builtin_frame_address bug. Need to update this. " "See bug 147638.\"\n" #endif : "=g" (bp) ); #endif return bp; } /* * Returns the frame pointer of the calling function. * Equivalent to __builtin_frame_address(1). */ static INLINE_SINGLE_CALLER uintptr_t GetCallerFrameAddr(void) { return *(uintptr_t*)GetFrameAddr(); } #endif // sun #endif // __GNUC__ /* * Data prefetch was added in gcc 3.1.1 * http://www.gnu.org/software/gcc/gcc-3.1/changes.html */ #ifdef __GNUC__ # if ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ > 1) || \ (__GNUC__ == 3 && __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 1)) # define PREFETCH_R(var) __builtin_prefetch((var), 0 /* read */, \ 3 /* high temporal locality */) # define PREFETCH_W(var) __builtin_prefetch((var), 1 /* write */, \ 3 /* high temporal locality */) # else # define PREFETCH_R(var) ((void)(var)) # define PREFETCH_W(var) ((void)(var)) # endif #endif /* __GNUC__ */ #ifdef USERLEVEL // { /* * Note this might be a problem on NT b/c while sched_yield guarantees it * moves you to the end of your priority list, Sleep(0) offers no such * guarantee. Bummer. --Jeremy. */ #if defined(N_PLAT_NLM) || defined(__FreeBSD__) /* We do not have YIELD() as we do not need it yet... */ #elif defined(_WIN32) # define YIELD() Sleep(0) #else # include // For sched_yield. Don't ask. --Jeremy. # define YIELD() sched_yield() #endif /* * Standardize some Posix names on Windows. */ #ifdef _WIN32 // { #define snprintf _snprintf #define vsnprintf _vsnprintf #define strtok_r strtok_s static INLINE void sleep(unsigned int sec) { Sleep(sec * 1000); } static INLINE void usleep(unsigned long usec) { Sleep(CEILING(usec, 1000)); } typedef int pid_t; #define F_OK 0 #define X_OK 1 #define W_OK 2 #define R_OK 4 #endif // } /* * Macro for username comparison. */ #ifdef _WIN32 // { #define USERCMP(x,y) Str_Strcasecmp(x,y) #else #define USERCMP(x,y) strcmp(x,y) #endif // } #endif // } #ifndef va_copy #ifdef _WIN32 /* * Windows needs va_copy. This works for both 32 and 64-bit Windows * based on inspection of how varags.h from the Visual C CRTL is * implemented. (Future versions of the RTL may break this). */ #define va_copy(dest, src) ((dest) = (src)) #elif defined(__APPLE__) && defined(KERNEL) /* * MacOS kernel-mode needs va_copy. Based on inspection of stdarg.h * from the MacOSX10.4u.sdk kernel framework, this should work. * (Future versions of the SDK may break this). */ #define va_copy(dest, src) ((dest) = (src)) #elif defined(__GNUC__) && (__GNUC__ < 3) /* * Old versions of gcc recognize __va_copy, but not va_copy. 
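 *
 * Typical use (an illustrative sketch; buf, fmt, and ap are assumed
 * locals): duplicate a va_list before consuming it twice, and release
 * the copy when done.
 *
 *    va_list ap2;
 *    va_copy(ap2, ap);
 *    vsnprintf(buf, sizeof buf, fmt, ap2);
 *    va_end(ap2);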
*/ #define va_copy(dest, src) __va_copy(dest, src) #endif // _WIN32 #endif // va_copy /* * This one is outside USERLEVEL because it's used by * files compiled into the Windows hgfs driver or the display * driver. */ #ifdef _WIN32 #define PATH_MAX 256 #ifndef strcasecmp #define strcasecmp(_s1,_s2) _stricmp((_s1),(_s2)) #endif #ifndef strncasecmp #define strncasecmp(_s1,_s2,_n) _strnicmp((_s1),(_s2),(_n)) #endif #endif /* * Convenience macro for COMMUNITY_SOURCE */ #undef EXCLUDE_COMMUNITY_SOURCE #ifdef COMMUNITY_SOURCE #define EXCLUDE_COMMUNITY_SOURCE(x) #else #define EXCLUDE_COMMUNITY_SOURCE(x) x #endif #undef COMMUNITY_SOURCE_INTEL_SECRET #if !defined(COMMUNITY_SOURCE) || defined(INTEL_SOURCE) /* * It's ok to include INTEL_SECRET source code for non-commsrc, * or for drops directed at Intel. */ #define COMMUNITY_SOURCE_INTEL_SECRET #endif /* * Convenience macros and definitions. Can often be used instead of #ifdef. */ #undef DEBUG_ONLY #undef SL_DEBUG_ONLY #undef VMX86_SL_DEBUG #ifdef VMX86_DEBUG #define vmx86_debug 1 #define DEBUG_ONLY(x) x /* * Be very, very, very careful with SL_DEBUG. Pls ask ganesh or min before * using it. */ #define VMX86_SL_DEBUG #define vmx86_sl_debug 1 #define SL_DEBUG_ONLY(x) x #else #define vmx86_debug 0 #define DEBUG_ONLY(x) #define vmx86_sl_debug 0 #define SL_DEBUG_ONLY(x) #endif #ifdef VMX86_STATS #define vmx86_stats 1 #define STATS_ONLY(x) x #else #define vmx86_stats 0 #define STATS_ONLY(x) #endif #ifdef VMX86_DEVEL #define vmx86_devel 1 #define DEVEL_ONLY(x) x #else #define vmx86_devel 0 #define DEVEL_ONLY(x) #endif #ifdef VMX86_LOG #define vmx86_log 1 #define LOG_ONLY(x) x #else #define vmx86_log 0 #define LOG_ONLY(x) #endif #ifdef VMX86_VMM_SERIAL_LOGGING #define vmx86_vmm_serial_log 1 #define VMM_SERIAL_LOG_ONLY(x) x #else #define vmx86_vmm_serial_log 0 #define VMM_SERIAL_LOG_ONLY(x) #endif #ifdef VMX86_SERVER #define vmx86_server 1 #define SERVER_ONLY(x) x #define HOSTED_ONLY(x) #else #define vmx86_server 0 #define SERVER_ONLY(x) #define HOSTED_ONLY(x) x #endif #ifdef VMX86_WGS #define vmx86_wgs 1 #define WGS_ONLY(x) x #else #define vmx86_wgs 0 #define WGS_ONLY(x) #endif #ifdef VMKERNEL #define vmkernel 1 #define VMKERNEL_ONLY(x) x #else #define vmkernel 0 #define VMKERNEL_ONLY(x) #endif #ifdef _WIN32 #define WIN32_ONLY(x) x #define POSIX_ONLY(x) #else #define WIN32_ONLY(x) #define POSIX_ONLY(x) x #endif #ifdef VMM #define VMM_ONLY(x) x #define USER_ONLY(x) #else #define VMM_ONLY(x) #define USER_ONLY(x) x #endif /* VMVISOR ifdef only allowed in the vmkernel */ #ifdef VMKERNEL #ifdef VMVISOR #define vmvisor 1 #define VMVISOR_ONLY(x) x #else #define vmvisor 0 #define VMVISOR_ONLY(x) #endif #endif #ifdef _WIN32 #define VMW_INVALID_HANDLE INVALID_HANDLE_VALUE #else #define VMW_INVALID_HANDLE -1 #endif #endif // ifndef _VM_BASIC_DEFS_H_ vmci-only/vm_assert.h0000444000000000000000000002422012025726724013652 0ustar rootroot/********************************************************* * Copyright (C) 1998-2004 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vm_assert.h -- * * The basic assertion facility for all VMware code. * * For proper use, see * http://vmweb.vmware.com/~mts/WebSite/guide/programming/asserts.html */ #ifndef _VM_ASSERT_H_ #define _VM_ASSERT_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_VMMEXT #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMNIXMOD #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_VMKDRIVERS #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_VMIROM #include "includeCheck.h" // XXX not necessary except some places include vm_assert.h improperly #include "vm_basic_types.h" #include "vm_basic_defs.h" /* * XXX old file code */ #ifdef FILECODEINT #error "Don't define FILECODEINT. It is obsolete." #endif #ifdef FILECODE #error "Don't define FILECODE. It is obsolete." #endif /* * Panic and log functions */ EXTERN void Log(const char *fmt, ...) PRINTF_DECL(1, 2); EXTERN void Warning(const char *fmt, ...) PRINTF_DECL(1, 2); EXTERN NORETURN void Panic(const char *fmt, ...) PRINTF_DECL(1, 2); EXTERN void LogThrottled(uint32 *count, const char *fmt, ...) PRINTF_DECL(2, 3); EXTERN void WarningThrottled(uint32 *count, const char *fmt, ...) PRINTF_DECL(2, 3); /* DB family: messages which are parsed by logfile database system */ #define WarningDB Warning #define LogDB Log #define WarningThrottledDB WarningThrottled #define LogThrottledDB LogThrottled /* * Stress testing: redefine ASSERT_IFNOT() to taste */ #ifndef ASSERT_IFNOT #ifdef __cplusplus #define ASSERT_IFNOT(cond, panic) (UNLIKELY(!(cond)) ? (panic) : (void)0) #else #define ASSERT_IFNOT(cond, panic) (UNLIKELY(!(cond)) ? (panic) : 0) #endif #endif /* * Assert, panic, and log macros * * Some of these are redefined below undef !VMX86_DEBUG. * ASSERT() is special cased because of interaction with Windows DDK. 
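 *
 * Typical use (an illustrative sketch, not from the original header):
 *
 *    ASSERT(entry != NULL);   // becomes ((void) 0) unless VMX86_DEBUG
 *                             // or ASSERT_ALWAYS_AVAILABLE is defined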
*/ #if defined VMX86_DEBUG || defined ASSERT_ALWAYS_AVAILABLE #undef ASSERT #define ASSERT(cond) \ ASSERT_IFNOT(cond, _ASSERT_PANIC(AssertAssert)) #endif #define ASSERT_BUG(bug, cond) \ ASSERT_IFNOT(cond, _ASSERT_PANIC_BUG(bug, AssertAssert)) #define ASSERT_BUG_DEBUGONLY(bug, cond) ASSERT_BUG(bug, cond) #define PANIC() _ASSERT_PANIC(AssertPanic) #define PANIC_BUG(bug) _ASSERT_PANIC_BUG(bug, AssertPanic) #define ASSERT_NOT_IMPLEMENTED(cond) \ ASSERT_IFNOT(cond, NOT_IMPLEMENTED()) #define ASSERT_NOT_IMPLEMENTED_BUG(bug, cond) \ ASSERT_IFNOT(cond, NOT_IMPLEMENTED_BUG(bug)) #define NOT_IMPLEMENTED() _ASSERT_PANIC(AssertNotImplemented) #define NOT_IMPLEMENTED_BUG(bug) _ASSERT_PANIC_BUG(bug, AssertNotImplemented) #define NOT_REACHED() _ASSERT_PANIC(AssertNotReached) #define NOT_REACHED_BUG(bug) _ASSERT_PANIC_BUG(bug, AssertNotReached) #define ASSERT_MEM_ALLOC(cond) \ ASSERT_IFNOT(cond, _ASSERT_PANIC(AssertMemAlloc)) #ifdef VMX86_DEVEL #define ASSERT_LENGTH(real, expected) \ ASSERT_IFNOT((real) == (expected), \ Panic(AssertLengthFmt, __FILE__, __LINE__, real, expected)) #else #define ASSERT_LENGTH(real, expected) ASSERT((real) == (expected)) #endif #ifdef VMX86_DEVEL #define ASSERT_DEVEL(cond) ASSERT(cond) #else #define ASSERT_DEVEL(cond) ((void) 0) #endif #define ASSERT_NO_INTERRUPTS() ASSERT(!INTERRUPTS_ENABLED()) #define ASSERT_HAS_INTERRUPTS() ASSERT(INTERRUPTS_ENABLED()) #define ASSERT_LOG_UNEXPECTED(bug, cond) \ (UNLIKELY(!(cond)) ? LOG_UNEXPECTED(bug) : 0) #ifdef VMX86_DEVEL #define LOG_UNEXPECTED(bug) \ Warning(AssertUnexpectedFmt, __FILE__, __LINE__, bug) #else #define LOG_UNEXPECTED(bug) \ Log(AssertUnexpectedFmt, __FILE__, __LINE__, bug) #endif #define ASSERT_NOT_TESTED(cond) (UNLIKELY(!(cond)) ? NOT_TESTED() : 0) #ifdef VMX86_DEVEL #define NOT_TESTED() Warning(AssertNotTestedFmt, __FILE__, __LINE__) #else #define NOT_TESTED() Log(AssertNotTestedFmt, __FILE__, __LINE__) #endif #define NOT_TESTED_ONCE() \ do { \ static Bool alreadyPrinted = FALSE; \ if (UNLIKELY(!alreadyPrinted)) { \ alreadyPrinted = TRUE; \ NOT_TESTED(); \ } \ } while (0) #define NOT_TESTED_1024() \ do { \ static uint16 count = 0; \ if (UNLIKELY(count == 0)) { NOT_TESTED(); } \ count = (count + 1) & 1023; \ } while (0) #define LOG_ONCE(_s) \ do { \ static Bool logged = FALSE; \ if (!logged) { \ Log _s; \ logged = TRUE; \ } \ } while (0) /* * Redefine macros that are only in debug versions */ #if !defined VMX86_DEBUG && !defined ASSERT_ALWAYS_AVAILABLE // { #undef ASSERT #define ASSERT(cond) ((void) 0) #undef ASSERT_BUG_DEBUGONLY #define ASSERT_BUG_DEBUGONLY(bug, cond) ((void) 0) #undef ASSERT_LENGTH #define ASSERT_LENGTH(real, expected) ((void) 0) /* * Expand NOT_REACHED() as appropriate for each situation. * * Mainly, we want the compiler to infer the same control-flow * information as it would from Panic(). Otherwise, different * compilation options will lead to different control-flow-derived * errors, causing some make targets to fail while others succeed. * * VC++ has the __assume() built-in function which we don't trust * (see bug 43485); gcc has no such construct; we just panic in * userlevel code. The monitor doesn't want to pay the size penalty * (measured at 212 bytes for the release vmm for a minimal infinite * loop; panic would cost even more) so it does without and lives * with the inconsistency. 
*/ #ifdef VMM #undef NOT_REACHED #define NOT_REACHED() ((void) 0) #else // keep debug definition #endif #undef ASSERT_LOG_UNEXPECTED #define ASSERT_LOG_UNEXPECTED(bug, cond) ((void) 0) #undef LOG_UNEXPECTED #define LOG_UNEXPECTED(bug) ((void) 0) #undef ASSERT_NOT_TESTED #define ASSERT_NOT_TESTED(cond) ((void) 0) #undef NOT_TESTED #define NOT_TESTED() ((void) 0) #undef NOT_TESTED_ONCE #define NOT_TESTED_ONCE() ((void) 0) #undef NOT_TESTED_1024 #define NOT_TESTED_1024() ((void) 0) #endif // !VMX86_DEBUG } /* * Compile-time assertions. * * ASSERT_ON_COMPILE does not use the common * switch (0) { case 0: case (e): ; } trick because some compilers (e.g. MSVC) * generate code for it. * * The implementation uses both enum and typedef because the typedef alone is * insufficient; gcc allows arrays to be declared with non-constant expressions * (even in typedefs, where it makes no sense). */ #define ASSERT_ON_COMPILE(e) \ do { \ enum { AssertOnCompileMisused = ((e) ? 1 : -1) }; \ typedef char AssertOnCompileFailed[AssertOnCompileMisused]; \ } while (0) /* * To put an ASSERT_ON_COMPILE() outside a function, wrap it * in MY_ASSERTS(). The first parameter must be unique in * each .c file where it appears. For example, * * MY_ASSERTS(FS3_INT, * ASSERT_ON_COMPILE(sizeof(FS3_DiskLock) == 128); * ASSERT_ON_COMPILE(sizeof(FS3_DiskLockReserved) == DISK_BLOCK_SIZE); * ASSERT_ON_COMPILE(sizeof(FS3_DiskBlock) == DISK_BLOCK_SIZE); * ASSERT_ON_COMPILE(sizeof(Hardware_DMIUUID) == 16); * ) * * Caution: ASSERT() within MY_ASSERTS() is silently ignored. * The same goes for anything else not evaluated at compile time. */ #define MY_ASSERTS(name, assertions) \ static INLINE void name(void) { \ assertions \ } /* * Internal macros, functions, and strings * * The monitor wants to save space at call sites, so it has specialized * functions for each situation. User level wants to save on implementation * so it uses generic functions. */ #if !defined VMM || defined MONITOR_APP // { #define _ASSERT_PANIC(name) \ Panic(_##name##Fmt "\n", __FILE__, __LINE__) #define _ASSERT_PANIC_BUG(bug, name) \ Panic(_##name##Fmt " bugNr=%d\n", __FILE__, __LINE__, bug) #define AssertLengthFmt _AssertLengthFmt #define AssertUnexpectedFmt _AssertUnexpectedFmt #define AssertNotTestedFmt _AssertNotTestedFmt #endif // } // these don't have newline so a bug can be tacked on #define _AssertPanicFmt "PANIC %s:%d" #define _AssertAssertFmt "ASSERT %s:%d" #define _AssertNotImplementedFmt "NOT_IMPLEMENTED %s:%d" #define _AssertNotReachedFmt "NOT_REACHED %s:%d" #define _AssertMemAllocFmt "MEM_ALLOC %s:%d" // these are complete formats with newline #define _AssertLengthFmt "LENGTH %s:%d r=%#x e=%#x\n" #define _AssertUnexpectedFmt "UNEXPECTED %s:%d bugNr=%d\n" #define _AssertNotTestedFmt "NOT_TESTED %s:%d\n" #endif /* ifndef _VM_ASSERT_H_ */ vmci-only/vmci_defs.h0000444000000000000000000002224712025726724013615 0ustar rootroot/********************************************************* * Copyright (C) 2005-2008 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef _VMCI_DEF_H_ #define _VMCI_DEF_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_VMMEXT #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_DISTRIBUTE #include "includeCheck.h" #include "vm_basic_types.h" /* Register offsets. */ #define VMCI_STATUS_ADDR 0x00 #define VMCI_CONTROL_ADDR 0x04 #define VMCI_ICR_ADDR 0x08 #define VMCI_IMR_ADDR 0x0c #define VMCI_DATA_OUT_ADDR 0x10 #define VMCI_DATA_IN_ADDR 0x14 #define VMCI_CAPS_ADDR 0x18 #define VMCI_RESULT_LOW_ADDR 0x1c #define VMCI_RESULT_HIGH_ADDR 0x20 /* Max number of devices. */ #define VMCI_MAX_DEVICES 1 /* Status register bits. */ #define VMCI_STATUS_INT_ON 0x1 /* Control register bits. */ #define VMCI_CONTROL_RESET 0x1 #define VMCI_CONTROL_INT_ENABLE 0x2 #define VMCI_CONTROL_INT_DISABLE 0x4 /* Capabilities register bits. */ #define VMCI_CAPS_HYPERCALL 0x1 #define VMCI_CAPS_GUESTCALL 0x2 #define VMCI_CAPS_DATAGRAM 0x4 /* Interrupt Cause register bits. */ #define VMCI_ICR_DATAGRAM 0x1 /* Interrupt Mask register bits. */ #define VMCI_IMR_DATAGRAM 0x1 /* * We have a fixed set of resource IDs available in the VMX. * This allows us to have a very simple implementation since we statically * know how many will create datagram handles. If a new caller arrives and * we have run out of slots we can manually increment the maximum size of * available resource IDs. */ typedef uint32 VMCI_Resource; /* VMCI reserved hypervisor datagram resource IDs. */ #define VMCI_RESOURCES_QUERY 0 #define VMCI_GET_CONTEXT_ID 1 #define VMCI_SHAREDMEM_CREATE 2 #define VMCI_SHAREDMEM_ATTACH 3 #define VMCI_SHAREDMEM_DETACH 4 #define VMCI_SHAREDMEM_QUERY 5 #define VMCI_DATAGRAM_REQUEST_MAP 6 #define VMCI_DATAGRAM_REMOVE_MAP 7 #define VMCI_EVENT_SUBSCRIBE 8 #define VMCI_EVENT_UNSUBSCRIBE 9 #define VMCI_QUEUEPAIR_ALLOC 10 #define VMCI_QUEUEPAIR_DETACH 11 #define VMCI_RESOURCE_MAX 12 /* VMCI Ids. */ typedef uint32 VMCIId; typedef struct VMCIHandle { VMCIId context; VMCIId resource; } VMCIHandle; static INLINE VMCIHandle VMCI_MAKE_HANDLE(VMCIId cid, VMCIId rid) { VMCIHandle h = {cid, rid}; return h; } #define VMCI_HANDLE_TO_CONTEXT_ID(_handle) ((_handle).context) #define VMCI_HANDLE_TO_RESOURCE_ID(_handle) ((_handle).resource) #define VMCI_HANDLE_EQUAL(_h1, _h2) ((_h1).context == (_h2).context && \ (_h1).resource == (_h2).resource) #define VMCI_INVALID_ID 0xFFFFFFFF static const VMCIHandle VMCI_INVALID_HANDLE = {VMCI_INVALID_ID, VMCI_INVALID_ID}; #define VMCI_HANDLE_INVALID(_handle) \ VMCI_HANDLE_EQUAL((_handle), VMCI_INVALID_HANDLE) /* * The below defines can be used to send anonymous requests. * This also indicates that no response is expected. */ #define VMCI_ANON_SRC_CONTEXT_ID VMCI_INVALID_ID #define VMCI_ANON_SRC_RESOURCE_ID VMCI_INVALID_ID #define VMCI_ANON_SRC_HANDLE VMCI_MAKE_HANDLE(VMCI_ANON_SRC_CONTEXT_ID, \ VMCI_ANON_SRC_RESOURCE_ID) /* The lowest 16 context ids are reserved for internal use. */ #define VMCI_RESERVED_CID_LIMIT 16 /* * Hypervisor context id, used for calling into hypervisor * supplied services from the VM. */ #define VMCI_HYPERVISOR_CONTEXT_ID 0 /* * Well-known context id, a logical context that contains * a set of well-known services. 
*/ #define VMCI_WELL_KNOWN_CONTEXT_ID 1 /* Todo: Change host context id to dynamic/random id. */ #define VMCI_HOST_CONTEXT_ID 2 /* * The VMCI_CONTEXT_RESOURCE_ID is used together with VMCI_MAKE_HANDLE to make * handles that refer to a specific context. */ #define VMCI_CONTEXT_RESOURCE_ID 0 /* VMCI error codes. */ #define VMCI_SUCCESS_QUEUEPAIR_ATTACH 5 #define VMCI_SUCCESS_QUEUEPAIR_CREATE 4 #define VMCI_SUCCESS_LAST_DETACH 3 #define VMCI_SUCCESS_ACCESS_GRANTED 2 #define VMCI_SUCCESS_ENTRY_DEAD 1 #define VMCI_SUCCESS 0 #define VMCI_ERROR_INVALID_RESOURCE -1 #define VMCI_ERROR_INVALID_ARGS -2 #define VMCI_ERROR_NO_MEM -3 #define VMCI_ERROR_DATAGRAM_FAILED -4 #define VMCI_ERROR_MORE_DATA -5 #define VMCI_ERROR_NO_MORE_DATAGRAMS -6 #define VMCI_ERROR_NO_ACCESS -7 #define VMCI_ERROR_NO_HANDLE -8 #define VMCI_ERROR_DUPLICATE_ENTRY -9 #define VMCI_ERROR_DST_UNREACHABLE -10 #define VMCI_ERROR_PAYLOAD_TOO_LARGE -11 #define VMCI_ERROR_INVALID_PRIV -12 #define VMCI_ERROR_GENERIC -13 #define VMCI_ERROR_PAGE_ALREADY_SHARED -14 #define VMCI_ERROR_CANNOT_SHARE_PAGE -15 #define VMCI_ERROR_CANNOT_UNSHARE_PAGE -16 #define VMCI_ERROR_NO_PROCESS -17 #define VMCI_ERROR_NO_DATAGRAM -18 #define VMCI_ERROR_NO_RESOURCES -19 #define VMCI_ERROR_UNAVAILABLE -20 #define VMCI_ERROR_NOT_FOUND -21 #define VMCI_ERROR_ALREADY_EXISTS -22 #define VMCI_ERROR_NOT_PAGE_ALIGNED -23 #define VMCI_ERROR_INVALID_SIZE -24 #define VMCI_ERROR_REGION_ALREADY_SHARED -25 #define VMCI_ERROR_TIMEOUT -26 #define VMCI_ERROR_DATAGRAM_INCOMPLETE -27 #define VMCI_ERROR_INCORRECT_IRQL -28 #define VMCI_ERROR_EVENT_UNKNOWN -29 #define VMCI_ERROR_OBSOLETE -30 #define VMCI_ERROR_QUEUEPAIR_MISMATCH -31 #define VMCI_ERROR_QUEUEPAIR_NOTSET -32 #define VMCI_ERROR_QUEUEPAIR_NOTOWNER -33 #define VMCI_ERROR_QUEUEPAIR_NOTATTACHED -34 #define VMCI_ERROR_QUEUEPAIR_NOSPACE -35 #define VMCI_ERROR_QUEUEPAIR_NODATA -36 #define VMCI_ERROR_BUSMEM_INVALIDATION -37 /* Internal error codes. */ #define VMCI_SHAREDMEM_ERROR_BAD_CONTEXT -1000 #define VMCI_PATH_MAX 256 /* VMCI reserved events. */ typedef uint32 VMCI_Event; #define VMCI_EVENT_CTX_ID_UPDATE 0 #define VMCI_EVENT_CTX_REMOVED 1 #define VMCI_EVENT_QP_RESUMED 2 #define VMCI_EVENT_QP_PEER_ATTACH 3 #define VMCI_EVENT_QP_PEER_DETACH 4 #define VMCI_EVENT_MAX 5 /* Reserved guest datagram resource ids. */ #define VMCI_EVENT_HANDLER 0 /* VMCI privileges. */ typedef enum VMCIResourcePrivilegeType { VMCI_PRIV_CH_PRIV, VMCI_PRIV_DESTROY_RESOURCE, VMCI_PRIV_ASSIGN_CLIENT, VMCI_PRIV_DG_CREATE, VMCI_PRIV_DG_SEND, VMCI_PRIV_SM_CREATE, VMCI_PRIV_SM_ATTACH, VMCI_NUM_PRIVILEGES, } VMCIResourcePrivilegeType; /* * VMCI coarse-grained privileges (per context or host * process/endpoint. An entity with the restricted flag is only * allowed to interact with the hypervisor and trusted entities. */ typedef uint32 VMCIPrivilegeFlags; #define VMCI_PRIVILEGE_FLAG_RESTRICTED 0x01 #define VMCI_PRIVILEGE_FLAG_TRUSTED 0x02 #define VMCI_PRIVILEGE_ALL_FLAGS (VMCI_PRIVILEGE_FLAG_RESTRICTED | \ VMCI_PRIVILEGE_FLAG_TRUSTED) #define VMCI_NO_PRIVILEGE_FLAGS 0x00 #define VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS VMCI_NO_PRIVILEGE_FLAGS #define VMCI_LEAST_PRIVILEGE_FLAGS VMCI_PRIVILEGE_FLAG_RESTRICTED #define VMCI_MAX_PRIVILEGE_FLAGS VMCI_PRIVILEGE_FLAG_TRUSTED /* VMCI Discovery Service. */ /* Well-known handle to the discovery service. */ #define VMCI_DS_RESOURCE_ID 1 /* Reserved resource ID for discovery service. 
*/ #define VMCI_DS_HANDLE VMCI_MAKE_HANDLE(VMCI_WELL_KNOWN_CONTEXT_ID, \ VMCI_DS_RESOURCE_ID) #define VMCI_DS_CONTEXT VMCI_MAKE_HANDLE(VMCI_WELL_KNOWN_CONTEXT_ID, \ VMCI_CONTEXT_RESOURCE_ID) /* Maximum length of a DS message. */ #define VMCI_DS_MAX_MSG_SIZE 300 /* Command actions. */ #define VMCI_DS_ACTION_LOOKUP 0 #define VMCI_DS_ACTION_REGISTER 1 #define VMCI_DS_ACTION_UNREGISTER 2 /* Defines the wire-protocol format for a request sent to the DS from a context. */ typedef struct VMCIDsRequestHeader { int32 action; int32 msgid; VMCIHandle handle; int32 nameLen; int8 name[1]; } VMCIDsRequestHeader; /* Defines the wire-protocol format for a reply sent from the DS to a context. */ typedef struct VMCIDsReplyHeader { int32 msgid; int32 code; VMCIHandle handle; int32 msgLen; int8 msg[1]; } VMCIDsReplyHeader; #define VMCI_PUBLIC_GROUP_NAME "vmci public group" /* 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved. */ #define VMCI_RESERVED_RESOURCE_ID_MAX 1023 #define VMCI_DOMAIN_NAME_MAXLEN 32 #define VMCI_LGPFX "VMCI: " #endif vmci-only/vmci_infrastructure.h0000444000000000000000000000530712025726724015752 0ustar rootroot/********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmci_infrastructure.h -- * * This file implements the VMCI infrastructure. */ #ifndef _VMCI_INFRASTRUCTURE_H_ #define _VMCI_INFRASTRUCTURE_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_VMMEXT #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_DISTRIBUTE #include "includeCheck.h" #include "vmware.h" #include "vmci_defs.h" typedef enum { VMCIOBJ_VMX_VM = 10, VMCIOBJ_CONTEXT, VMCIOBJ_PROCESS, VMCIOBJ_DATAGRAM_PROCESS, VMCIOBJ_NOT_SET, } VMCIObjType; /* Guestcalls currently support a maximum of 8 uint64 arguments. */ #define VMCI_GUESTCALL_MAX_ARGS_SIZE 64 /* Used to determine what checkpoint state to get and set. */ #define VMCI_NOTIFICATION_CPT_STATE 0x1 #define VMCI_WELLKNOWN_CPT_STATE 0x2 #define VMCI_QP_CPT_STATE 0x3 #define VMCI_QP_INFO_CPT_STATE 0x4 /* Used to control the VMCI device in the vmkernel */ #define VMCI_DEV_RESET 0x01 #define VMCI_DEV_QP_RESET 0x02 #define VMCI_DEV_QUIESCE 0x03 #define VMCI_DEV_UNQUIESCE 0x04 #define VMCI_DEV_QP_BREAK_SHARING 0x05 /* *------------------------------------------------------------------------- * * VMCI_Hash -- * * Hash function used by the Simple Datagram API. Based on the djb2 * hash function by Dan Bernstein. * * Result: * Returns the hash of the given handle, an index in [0, size - 1]. * * Side effects: * None. 
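 * Note:
 *      'size' is expected to be a power of two, since the result is
 *      masked with (size - 1); e.g. VMCI_Hash(handle, 128) yields a
 *      bucket index in [0, 127].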
* *------------------------------------------------------------------------- */ static INLINE int VMCI_Hash(VMCIHandle handle, // IN unsigned size) // IN { int i; int hash = 5381; uint64 handleValue = (uint64)handle.resource << 32 | handle.context; for (i = 0; i < sizeof handle; i++) { hash = ((hash << 5) + hash) + (uint8)(handleValue >> (i*8)); } return hash & (size -1); } #endif // _VMCI_INFRASTRUCTURE_H_ vmci-only/vmci_kernel_if.h0000444000000000000000000002241212025726724014624 0ustar rootroot/********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmci_kernel_if.h -- * * This file defines helper functions for VMCI host _and_ guest * kernel code. It must work for windows, macosx, vmkernel and * linux kernel, ie. using defines where necessary. */ #ifndef _VMCI_KERNEL_IF_H_ #define _VMCI_KERNEL_IF_H_ #if !defined(linux) && !defined(_WIN32) && !defined(__APPLE__) && \ !defined(VMKERNEL) && !defined(SOLARIS) #error "Platform not supported." #endif #if defined(_WIN32) #include #endif #if defined(linux) && !defined(VMKERNEL) # include "compat_version.h" # include "compat_wait.h" # include "compat_spinlock.h" # include "compat_semaphore.h" #endif // linux #ifdef __APPLE__ # include #include #include #endif #ifdef VMKERNEL #include "splock.h" #include "semaphore_ext.h" #endif #ifdef SOLARIS # include # include # include #endif #include "vm_basic_types.h" #include "vmci_defs.h" /* Flags for specifying memory type. */ #define VMCI_MEMORY_NORMAL 0x0 #define VMCI_MEMORY_ATOMIC 0x1 #define VMCI_MEMORY_NONPAGED 0x2 /* Platform specific type definitions. */ #if defined(VMKERNEL) typedef SP_SpinLock VMCILock; typedef SP_IRQL VMCILockFlags; typedef Semaphore VMCIEvent; typedef Semaphore VMCIMutex; #elif defined(linux) typedef spinlock_t VMCILock; typedef unsigned long VMCILockFlags; typedef wait_queue_head_t VMCIEvent; typedef struct semaphore VMCIMutex; typedef PPN *VMCIPpnList; /* List of PPNs in produce/consume queue. */ #elif defined(__APPLE__) typedef IOLock *VMCILock; typedef unsigned long VMCILockFlags; typedef semaphore_t VMCIEvent; typedef IOLock *VMCIMutex; #elif defined(_WIN32) typedef KSPIN_LOCK VMCILock; typedef KIRQL VMCILockFlags; typedef KEVENT VMCIEvent; typedef FAST_MUTEX VMCIMutex; typedef PMDL VMCIPpnList; /* MDL to map the produce/consume queue. */ #elif defined(SOLARIS) typedef kmutex_t VMCILock; typedef unsigned long VMCILockFlags; typedef ksema_t VMCIEvent; #endif // VMKERNEL /* Callback needed for correctly waiting on events. */ typedef int (*VMCIEventReleaseCB)(void *clientData); /* * The VMCI locks use a ranking scheme similar to the one used by * vmkernel. While holding a lock L1 with rank R1, only locks with * rank higher than R1 may be grabbed. 
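 * For example, while holding a lock of rank VMCI_LOCK_RANK_MIDDLE it is
 * legal to grab a lock of rank VMCI_LOCK_RANK_HIGH, but not another
 * MIDDLE-ranked lock. A minimal usage sketch of the lock API declared
 * below ('myLock' and its rank are hypothetical):
 *
 *    VMCILock myLock;
 *    VMCILockFlags flags;
 *
 *    VMCI_InitLock(&myLock, "myLock", VMCI_LOCK_RANK_MIDDLE);
 *    VMCI_GrabLock(&myLock, &flags);
 *    ... critical section ...
 *    VMCI_ReleaseLock(&myLock, flags);
 *    VMCI_CleanupLock(&myLock);
 *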
The available ranks for VMCI * locks are (in descending order): * - VMCI_LOCK_RANK_HIGH_BH : to be used for locks grabbed while executing * in a bottom half and not held while grabbing other locks. * - VMCI_LOCK_RANK_MIDDLE_BH : to be used for locks grabbed while executing in a * bottom half and held while grabbing locks of rank VMCI_LOCK_RANK_HIGH_BH. * - VMCI_LOCK_RANK_LOW_BH : to be used for locks grabbed while executing in a * bottom half and held while grabbing locks of rank * VMCI_LOCK_RANK_MIDDLE_BH. * - VMCI_LOCK_RANK_HIGHEST : to be used for locks that are not held while * grabbing other locks except system locks with higher ranks and bottom * half locks. * - VMCI_LOCK_RANK_HIGHER : to be used for locks that are held while * grabbing locks of rank VMCI_LOCK_RANK_HIGHEST or higher. * - VMCI_LOCK_RANK_HIGH : to be used for locks that are held while * grabbing locks of rank VMCI_LOCK_RANK_HIGHER or higher. This is * the highest lock rank used by core VMCI services. * - VMCI_LOCK_RANK_MIDDLE : to be used for locks that are held while * grabbing locks of rank VMCI_LOCK_RANK_HIGH or higher. * - VMCI_LOCK_RANK_LOW : to be used for locks that are held while * grabbing locks of rank VMCI_LOCK_RANK_MIDDLE or higher. * - VMCI_LOCK_RANK_LOWEST : to be used for locks that are held while * grabbing locks of rank VMCI_LOCK_RANK_LOW or higher. */ #ifdef VMKERNEL typedef SP_Rank VMCILockRank; #define VMCI_LOCK_RANK_HIGH_BH SP_RANK_IRQ_LEAF #define VMCI_LOCK_RANK_MIDDLE_BH (SP_RANK_IRQ_LEAF-1) #define VMCI_LOCK_RANK_LOW_BH SP_RANK_IRQ_LOWEST #define VMCI_LOCK_RANK_HIGHEST (SP_RANK_SHM_MGR-1) #else typedef unsigned long VMCILockRank; #define VMCI_LOCK_RANK_HIGH_BH 0x4000 #define VMCI_LOCK_RANK_MIDDLE_BH 0x2000 #define VMCI_LOCK_RANK_LOW_BH 0x1000 #define VMCI_LOCK_RANK_HIGHEST 0x0fff #endif // VMKERNEL #define VMCI_LOCK_RANK_HIGHER (VMCI_LOCK_RANK_HIGHEST-1) #define VMCI_LOCK_RANK_HIGH (VMCI_LOCK_RANK_HIGHER-1) #define VMCI_LOCK_RANK_MIDDLE_HIGH (VMCI_LOCK_RANK_HIGH-1) #define VMCI_LOCK_RANK_MIDDLE (VMCI_LOCK_RANK_MIDDLE_HIGH-1) #define VMCI_LOCK_RANK_MIDDLE_LOW (VMCI_LOCK_RANK_MIDDLE-1) #define VMCI_LOCK_RANK_LOW (VMCI_LOCK_RANK_MIDDLE_LOW-1) #define VMCI_LOCK_RANK_LOWEST (VMCI_LOCK_RANK_LOW-1) /* * In vmkernel, we try to reduce the amount of memory mapped into the * virtual address space by only mapping the memory of buffered * datagrams when copying from and to the guest. In other OSes, * regular kernel memory is used. VMCIBuffer is used to reference * possibly unmapped memory. */ #ifdef VMKERNEL typedef MPN VMCIBuffer; #define VMCI_BUFFER_INVALID INVALID_MPN #else typedef void * VMCIBuffer; #define VMCI_BUFFER_INVALID NULL #endif /* * Host specific struct used for signalling. */ typedef struct VMCIHost { #if defined(VMKERNEL) World_ID vmmWorldID; #elif defined(linux) wait_queue_head_t waitQueue; #elif defined(__APPLE__) struct Socket *socket; /* vmci Socket object on Mac OS. */ #elif defined(_WIN32) KEVENT *callEvent; /* Ptr to userlevel event used when signalling * new pending guestcalls in kernel. */ #elif defined(SOLARIS) struct pollhead pollhead; /* Per datagram handle pollhead structure to * be treated as a black-box. None of its * fields should be referenced. 
*/ #endif } VMCIHost; void VMCI_InitLock(VMCILock *lock, char *name, VMCILockRank rank); void VMCI_CleanupLock(VMCILock *lock); void VMCI_GrabLock(VMCILock *lock, VMCILockFlags *flags); void VMCI_ReleaseLock(VMCILock *lock, VMCILockFlags flags); void VMCI_GrabLock_BH(VMCILock *lock, VMCILockFlags *flags); void VMCI_ReleaseLock_BH(VMCILock *lock, VMCILockFlags flags); void VMCIHost_InitContext(VMCIHost *hostContext, uintptr_t eventHnd); void VMCIHost_ReleaseContext(VMCIHost *hostContext); void VMCIHost_SignalCall(VMCIHost *hostContext); void VMCIHost_ClearCall(VMCIHost *hostContext); Bool VMCIHost_WaitForCallLocked(VMCIHost *hostContext, VMCILock *lock, VMCILockFlags *flags, Bool useBH); void *VMCI_AllocKernelMem(size_t size, int flags); void VMCI_FreeKernelMem(void *ptr, size_t size); VMCIBuffer VMCI_AllocBuffer(size_t size, int flags); void *VMCI_MapBuffer(VMCIBuffer buf); void VMCI_ReleaseBuffer(void *ptr); void VMCI_FreeBuffer(VMCIBuffer buf, size_t size); #ifdef SOLARIS int VMCI_CopyToUser(void *dst, const void *src, unsigned int len, int mode); #else int VMCI_CopyToUser(void *dst, const void *src, unsigned int len); /* * Don't need the following for guests, hence no Solaris code for this * function. */ Bool VMCIWellKnownID_AllowMap(VMCIId wellKnownID, VMCIPrivilegeFlags privFlags); #endif void VMCI_CreateEvent(VMCIEvent *event); void VMCI_DestroyEvent(VMCIEvent *event); void VMCI_SignalEvent(VMCIEvent *event); void VMCI_WaitOnEvent(VMCIEvent *event, VMCIEventReleaseCB releaseCB, void *clientData); /* XXX TODO for VMKERNEL (host) and Solaris (guest). */ #if !defined(VMKERNEL) && (defined(__linux__) || defined(_WIN32) || \ defined(__APPLE__)) int VMCI_CopyFromUser(void *dst, const void *src, size_t len); #endif #if !defined(SOLARIS) int VMCIMutex_Init(VMCIMutex *mutex); void VMCIMutex_Destroy(VMCIMutex *mutex); void VMCIMutex_Acquire(VMCIMutex *mutex); void VMCIMutex_Release(VMCIMutex *mutex); #endif /* XXX TODO for Solaris (guest). */ #if !defined(VMKERNEL) && (defined(__linux__) || defined(_WIN32)) VA VMCI_AllocQueueKVA(uint64 size); void VMCI_FreeQueueKVA(VA va, uint64 size); typedef struct PPNSet { uint64 numProducePages; uint64 numConsumePages; VMCIPpnList producePPNs; VMCIPpnList consumePPNs; Bool initialized; } PPNSet; int VMCI_AllocPPNSet(VA produceVA, uint64 numProducePages, VA consumeVA, uint64 numConsumePages, PPNSet *ppnSet); void VMCI_FreePPNSet(PPNSet *ppnSet); int VMCI_PopulatePPNList(uint8 *callBuf, const PPNSet *ppnSet); #endif #endif // _VMCI_KERNEL_IF_H_ vmci-only/vmci_call_defs.h0000444000000000000000000001752112025726724014607 0ustar rootroot/********************************************************* * Copyright (C) 2006-2007 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
 * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef _VMCI_CALL_DEFS_H_ #define _VMCI_CALL_DEFS_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_VMMEXT #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_VMKMOD #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_DISTRIBUTE #include "includeCheck.h" #include "vm_basic_types.h" #include "vmci_defs.h" /* * All structs here have a size that is an integral multiple of the size of * their largest member, i.e. a struct with at least one 8-byte member will * have a size that is a multiple of 8, and a struct whose largest member is * 4 bytes will have a size that is a multiple of 4. This is because Windows * CL enforces this rule; 32 bit gcc doesn't, e.g. 32 bit gcc can misalign an * 8 byte member if it is preceded by a 4 byte member. */ /* * Base struct for vmci datagrams. */ typedef struct VMCIDatagram { VMCIHandle dst; VMCIHandle src; uint64 payloadSize; } VMCIDatagram; typedef int (*VMCIDatagramRecvCB)(void *clientData, // IN: client data for handler VMCIDatagram *msg); // IN: /* Flag for creating a wellknown handle instead of a per context handle. */ #define VMCI_FLAG_WELLKNOWN_DG_HND 0x1 /* * Maximum supported size of a VMCI datagram for routable datagrams. * Datagrams going to the hypervisor are allowed to be larger. */ #define VMCI_MAX_DG_SIZE (17 * 4096) #define VMCI_MAX_DG_PAYLOAD_SIZE (VMCI_MAX_DG_SIZE - sizeof(VMCIDatagram)) #define VMCI_DG_PAYLOAD(_dg) (void *)((char *)(_dg) + sizeof(VMCIDatagram)) #define VMCI_DG_HEADERSIZE sizeof(VMCIDatagram) #define VMCI_DG_SIZE(_dg) (VMCI_DG_HEADERSIZE + (size_t)(_dg)->payloadSize) #define VMCI_DG_SIZE_ALIGNED(_dg) ((VMCI_DG_SIZE(_dg) + 7) & (size_t)CONST64U(0xfffffffffffffff8)) #define VMCI_MAX_DATAGRAM_QUEUE_SIZE (VMCI_MAX_DG_SIZE * 2) /* * Struct for sending VMCI_DATAGRAM_REQUEST_MAP and VMCI_DATAGRAM_REMOVE_MAP * datagrams. Struct size is 32 bytes. All fields in struct are aligned to * their natural alignment. */ typedef struct VMCIDatagramWellKnownMapMsg { VMCIDatagram hdr; VMCIId wellKnownID; uint32 _pad; } VMCIDatagramWellKnownMapMsg; /* * Struct used for querying, via VMCI_RESOURCES_QUERY, the availability of * hypervisor resources. * Struct size is 32 bytes. All fields in struct are aligned to their natural * alignment. */ typedef struct VMCIResourcesQueryHdr { VMCIDatagram hdr; uint32 numResources; uint32 _padding; } VMCIResourcesQueryHdr; /* * Convenience struct for negotiating vectors. Must match layout of * VMCIResourcesQueryHdr minus the VMCIDatagram header. */ typedef struct VMCIResourcesQueryMsg { uint32 numResources; uint32 _padding; VMCI_Resource resources[1]; } VMCIResourcesQueryMsg; /* * The maximum number of resources that can be queried using * VMCI_RESOURCE_QUERY is 31, as the result is encoded in the lower 31 * bits of a positive return value. Negative values are reserved for * errors. */ #define VMCI_RESOURCE_QUERY_MAX_NUM 31 /* Maximum size for the VMCI_RESOURCE_QUERY request. */ #define VMCI_RESOURCE_QUERY_MAX_SIZE (sizeof(VMCIResourcesQueryHdr) \ + VMCI_RESOURCE_QUERY_MAX_NUM * sizeof(VMCI_Resource)) /* * Struct used for making VMCI_SHAREDMEM_CREATE message. Struct size is 40 bytes. * All fields in struct are aligned to their natural alignment. 
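 * Like every message below, it is a VMCIDatagram header followed by its
 * payload. A minimal sketch of filling in such a header, using only the
 * macros defined above and in vmci_defs.h ('payload' and the 64-byte
 * size are hypothetical):
 *
 *    char buf[VMCI_DG_HEADERSIZE + 64];   // assume suitable alignment
 *    VMCIDatagram *dg = (VMCIDatagram *)buf;
 *
 *    dg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
 *                               VMCI_RESOURCES_QUERY);
 *    dg->src = VMCI_ANON_SRC_HANDLE;
 *    dg->payloadSize = 64;
 *    memcpy(VMCI_DG_PAYLOAD(dg), payload, 64);
 *    // VMCI_DG_SIZE(dg) is now 24 + 64 == 88 bytes on the wire.
 *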
*/ typedef struct VMCISharedMemCreateMsg { VMCIDatagram hdr; VMCIHandle handle; uint32 memSize; uint32 _padding; /* PPNs placed after struct. */ } VMCISharedMemCreateMsg; /* * Struct used for sending VMCI_SHAREDMEM_ATTACH messages. Same as struct used * for create messages. */ typedef VMCISharedMemCreateMsg VMCISharedMemAttachMsg; /* * Struct used for sending VMCI_SHAREDMEM_DETACH messages. Struct size is 32 * bytes. All fields in struct are aligned to their natural alignment. */ typedef struct VMCISharedMemDetachMsg { VMCIDatagram hdr; VMCIHandle handle; } VMCISharedMemDetachMsg; /* * Struct used for sending VMCI_SHAREDMEM_QUERY messages. Same as struct used * for detach messages. */ typedef VMCISharedMemDetachMsg VMCISharedMemQueryMsg; /* * This struct is used to contain data for events. Size of this struct is a * multiple of 8 bytes, and all fields are aligned to their natural alignment. */ typedef struct VMCI_EventData { VMCI_Event event; /* 4 bytes. */ uint32 _pad; /* * Event payload is put here. */ } VMCI_EventData; /* * We use the following inline function to access the payload data associated * with an event data. */ static INLINE void * VMCIEventDataPayload(VMCI_EventData *evData) // IN: { return (void *)((char *)evData + sizeof *evData); } /* * Define the different VMCI_EVENT payload data types here. All structs must * be a multiple of 8 bytes, and fields must be aligned to their natural * alignment. */ typedef struct VMCIEventPayload_Context { VMCIId contextID; /* 4 bytes. */ uint32 _pad; } VMCIEventPayload_Context; typedef struct VMCIEventPayload_QP { VMCIHandle handle; /* QueuePair handle. */ VMCIId peerId; /* Context id of attaching/detaching VM. */ uint32 _pad; } VMCIEventPayload_QP; /* * We define the following struct to get the size of the maximum event data * the hypervisor may send to the guest. If adding a new event payload type * above, add it to the following struct too (inside the union). */ typedef struct VMCIEventData_Max { VMCI_EventData eventData; union { VMCIEventPayload_Context contextPayload; VMCIEventPayload_QP qpPayload; } evDataPayload; } VMCIEventData_Max; /* * Struct used for VMCI_EVENT_SUBSCRIBE/UNSUBSCRIBE and VMCI_EVENT_HANDLER * messages. Struct size is 32 bytes. All fields in struct are aligned to * their natural alignment. */ typedef struct VMCIEventMsg { VMCIDatagram hdr; VMCI_EventData eventData; /* Has event type and payload. */ /* * Payload gets put here. */ } VMCIEventMsg; /* * We use the following inline function to access the payload data associated * with an event message. */ static INLINE void * VMCIEventMsgPayload(VMCIEventMsg *eMsg) // IN: { return VMCIEventDataPayload(&eMsg->eventData); } /* Flags for VMCI QueuePair API. */ #define VMCI_QPFLAG_ATTACH_ONLY 0x1 /* Fail alloc if QP not created by peer. */ #define VMCI_QPFLAG_LOCAL 0x2 /* Only allow attaches from local context. */ /* Update the following (bitwise OR flags) while adding new flags. */ #define VMCI_QP_ALL_FLAGS (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QPFLAG_LOCAL) /* * Structs used for QueuePair alloc and detach messages. We align fields of * these structs to 64bit boundaries. */ typedef struct VMCIQueuePairAllocMsg { VMCIDatagram hdr; VMCIHandle handle; VMCIId peer; /* 32bit field. */ uint32 flags; uint64 produceSize; uint64 consumeSize; uint64 numPPNs; /* List of PPNs placed here. 
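The fixed part of the struct is 64 bytes: 24 for hdr, 8 for handle, 4 + 4 for peer and flags, and 8 each for produceSize, consumeSize and numPPNs; numPPNs entries follow.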
*/ } VMCIQueuePairAllocMsg; typedef struct VMCIQueuePairDetachMsg { VMCIDatagram hdr; VMCIHandle handle; } VMCIQueuePairDetachMsg; #endif vmci-only/vmci_handle_array.h0000444000000000000000000002030012025726724015311 0ustar rootroot/********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmci_handle_array.h -- * * Simple dynamic array. */ #ifndef _VMCI_HANDLE_ARRAY_H_ #define _VMCI_HANDLE_ARRAY_H_ #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_VMKERNEL #include "includeCheck.h" #include "vmci_kernel_if.h" #include "vmware.h" #include "vmci_defs.h" #include "vm_assert.h" #ifdef VMKERNEL #include "vm_libc.h" #endif // VMKERNEL #ifdef SOLARIS #include #include #include #include #endif #define VMCI_HANDLE_ARRAY_DEFAULT_SIZE 4 typedef struct VMCIHandleArray { uint32 capacity; uint32 size; VMCIHandle entries[1]; } VMCIHandleArray; /* *----------------------------------------------------------------------------------- * * VMCIHandleArray_Create -- * * Results: * Array if successful, NULL if not. * * Side effects: * None. * *----------------------------------------------------------------------------------- */ static INLINE VMCIHandleArray * VMCIHandleArray_Create(uint32 capacity) { VMCIHandleArray *array; if (capacity == 0) { capacity = VMCI_HANDLE_ARRAY_DEFAULT_SIZE; } array = (VMCIHandleArray *)VMCI_AllocKernelMem(sizeof array->capacity + sizeof array->size + capacity * sizeof(VMCIHandle), VMCI_MEMORY_NONPAGED); if (array == NULL) { return NULL; } array->capacity = capacity; array->size = 0; return array; } /* *----------------------------------------------------------------------------------- * * VMCIHandleArray_Destroy -- * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------------- */ static INLINE void VMCIHandleArray_Destroy(VMCIHandleArray *array) { VMCI_FreeKernelMem(array, sizeof array->capacity + sizeof array->size + array->capacity * sizeof(VMCIHandle)); } /* *----------------------------------------------------------------------------------- * * VMCIHandleArray_AppendEntry -- * * Results: * None. * * Side effects: * Array may be reallocated. * *----------------------------------------------------------------------------------- */ static INLINE void VMCIHandleArray_AppendEntry(VMCIHandleArray **arrayPtr, VMCIHandle handle) { VMCIHandleArray *array; ASSERT(arrayPtr && *arrayPtr); array = *arrayPtr; if (UNLIKELY(array->size >= array->capacity)) { /* reallocate. 
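Grow by doubling: allocate room for twice the current capacity, copy the old contents across, then free the old array; on allocation failure the append is silently dropped.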
*/ uint32 arraySize = sizeof array->capacity + sizeof array->size + array->capacity * sizeof(VMCIHandle); VMCIHandleArray *newArray = VMCI_AllocKernelMem(arraySize + array->capacity * sizeof(VMCIHandle), VMCI_MEMORY_NONPAGED); if (newArray == NULL) { return; } memcpy(newArray, array, arraySize); newArray->capacity *= 2; VMCI_FreeKernelMem(array, arraySize); *arrayPtr = newArray; array = newArray; } array->entries[array->size] = handle; array->size++; } /* *----------------------------------------------------------------------------------- * * VMCIHandleArray_RemoveEntry -- * * Results: * Handle that was removed, VMCI_INVALID_HANDLE if entry not found. * * Side effects: * None. * *----------------------------------------------------------------------------------- */ static INLINE VMCIHandle VMCIHandleArray_RemoveEntry(VMCIHandleArray *array, VMCIHandle entryHandle) { int i; VMCIHandle handle = VMCI_INVALID_HANDLE; ASSERT(array); for (i = 0; i < array->size; i++) { if (VMCI_HANDLE_EQUAL(array->entries[i], entryHandle)) { handle = array->entries[i]; array->entries[i] = array->entries[array->size-1]; array->entries[array->size-1] = VMCI_INVALID_HANDLE; array->size--; break; } } return handle; } /* *----------------------------------------------------------------------------------- * * VMCIHandleArray_RemoveTail -- * * Results: * Handle that was removed, VMCI_INVALID_HANDLE if array was empty. * * Side effects: * None. * *----------------------------------------------------------------------------------- */ static INLINE VMCIHandle VMCIHandleArray_RemoveTail(VMCIHandleArray *array) { VMCIHandle handle; if (array->size == 0) { return VMCI_INVALID_HANDLE; } handle = array->entries[array->size-1]; array->entries[array->size-1] = VMCI_INVALID_HANDLE; array->size--; return handle; } /* *----------------------------------------------------------------------------------- * * VMCIHandleArray_GetEntry -- * * Results: * Handle at given index, VMCI_INVALID_HANDLE if invalid index. * * Side effects: * None. * *----------------------------------------------------------------------------------- */ static INLINE VMCIHandle VMCIHandleArray_GetEntry(const VMCIHandleArray *array, uint32 index) { ASSERT(array); if (UNLIKELY(index >= array->size)) { return VMCI_INVALID_HANDLE; } return array->entries[index]; } /* *----------------------------------------------------------------------------------- * * VMCIHandleArray_GetSize -- * * Results: * Number of entries in array. * * Side effects: * None. * *----------------------------------------------------------------------------------- */ static INLINE uint32 VMCIHandleArray_GetSize(const VMCIHandleArray *array) { ASSERT(array); return array->size; } /* *----------------------------------------------------------------------------------- * * VMCIHandleArray_HasEntry -- * * Results: * TRUE is entry exists in array, FALSE if not. * * Side effects: * None. * *----------------------------------------------------------------------------------- */ static INLINE Bool VMCIHandleArray_HasEntry(const VMCIHandleArray *array, VMCIHandle entryHandle) { int i; ASSERT(array); for (i = 0; i < array->size; i++) { if (VMCI_HANDLE_EQUAL(array->entries[i], entryHandle)) { return TRUE; } } return FALSE; } /* *----------------------------------------------------------------------------------- * * VMCIHandleArray_GetCopy -- * * Results: * Returns pointer to copy of array on success or NULL, if memory allocation * fails. * * Side effects: * Allocates nonpaged memory. 
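 * A short usage sketch for this array API as a whole ('h' is a
 * hypothetical handle):
 *
 *    VMCIHandleArray *a = VMCIHandleArray_Create(0); // default capacity
 *    if (a != NULL) {
 *       VMCIHandleArray_AppendEntry(&a, h);
 *       ASSERT(VMCIHandleArray_HasEntry(a, h));
 *       VMCIHandleArray_Destroy(a);
 *    }
 *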
* *----------------------------------------------------------------------------------- */ static INLINE VMCIHandleArray * VMCIHandleArray_GetCopy(const VMCIHandleArray *array) { VMCIHandleArray *arrayCopy; ASSERT(array); arrayCopy = (VMCIHandleArray *)VMCI_AllocKernelMem(sizeof array->capacity + sizeof array->size + array->size * sizeof(VMCIHandle), VMCI_MEMORY_NONPAGED); if (arrayCopy != NULL) { memcpy(&arrayCopy->size, &array->size, sizeof array->size + array->size * sizeof(VMCIHandle)); arrayCopy->capacity = array->size; } return arrayCopy; } #endif // _VMCI_HANDLE_ARRAY_H_ vmci-only/vmci_iocontrols.h0000444000000000000000000003326012025726724015064 0ustar rootroot/********************************************************* * Copyright (C) 2007 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmci_iocontrols.h * * The VMCI driver io controls. */ #ifndef _VMCI_IOCONTROLS_H_ #define _VMCI_IOCONTROLS_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMKERNEL #include "includeCheck.h" #include "vmci_defs.h" /* * Driver version. * * Increment major version when you make an incompatible change. * Compatibility goes both ways (old driver with new executable * as well as new driver with old executable). */ #define VMCI_VERSION_SHIFT_WIDTH 16 /* Never change this. */ #define VMCI_MAJOR_VERSION_VALUE 8 /* Bump major version number here. */ #define VMCI_MINOR_VERSION_VALUE 0 /* Bump minor version number here. */ /* Don't modify the next three macros. */ #define VMCI_VERSION (VMCI_MAJOR_VERSION_VALUE << \ VMCI_VERSION_SHIFT_WIDTH | \ VMCI_MINOR_VERSION_VALUE) #define VMCI_VERSION_MAJOR(v) ((uint32) (v) >> VMCI_VERSION_SHIFT_WIDTH) #define VMCI_VERSION_MINOR(v) ((uint16) (v)) #if defined(__linux__) || defined(__APPLE__) || defined(SOLARIS) || defined(VMKERNEL) /* * Linux defines _IO* macros, but the core kernel code ignore the encoded * ioctl value. It is up to individual drivers to decode the value (for * example to look at the size of a structure to determine which version * of a specific command should be used) or not (which is what we * currently do, so right now the ioctl value for a given command is the * command itself). * * Hence, we just define the IOCTL_VMCI_foo values directly, with no * intermediate IOCTLCMD_ representation. */ # define IOCTLCMD(_cmd) IOCTL_VMCI_ ## _cmd #else // if defined(__linux__) /* * On platforms other than Linux, IOCTLCMD_foo values are just numbers, and * we build the IOCTL_VMCI_foo values around these using platform-specific * format for encoding arguments and sizes. 
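 * Concretely, IOCTLCMD(VERSION) expands to the token IOCTL_VMCI_VERSION
 * on Linux, Mac OS, Solaris and vmkernel, and to IOCTLCMD_VMCI_VERSION
 * elsewhere; Windows then builds its IOCTL_VMCI_* values from the
 * latter below.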
*/ # define IOCTLCMD(_cmd) IOCTLCMD_VMCI_ ## _cmd #endif enum IOCTLCmd_VMCI { /* * We need to bracket the range of values used for ioctls, because x86_64 * Linux forces us to explicitly register ioctl handlers by value for * handling 32 bit ioctl syscalls. Hence FIRST and LAST. Pick something * for FIRST that doesn't collide with vmmon (2001+). */ #if defined(__linux__) IOCTLCMD(FIRST) = 1951, #else /* Start at 0. */ IOCTLCMD(FIRST), #endif IOCTLCMD(VERSION) = IOCTLCMD(FIRST), /* BEGIN VMCI */ IOCTLCMD(INIT_CONTEXT), IOCTLCMD(CREATE_PROCESS), IOCTLCMD(CREATE_DATAGRAM_PROCESS), IOCTLCMD(SHAREDMEM_CREATE), IOCTLCMD(SHAREDMEM_ATTACH), IOCTLCMD(SHAREDMEM_QUERY), IOCTLCMD(SHAREDMEM_DETACH), IOCTLCMD(VERSION2), IOCTLCMD(QUEUEPAIR_ALLOC), IOCTLCMD(QUEUEPAIR_SETPAGEFILE), IOCTLCMD(QUEUEPAIR_DETACH), IOCTLCMD(DATAGRAM_SEND), IOCTLCMD(DATAGRAM_RECEIVE), IOCTLCMD(DATAGRAM_REQUEST_MAP), IOCTLCMD(DATAGRAM_REMOVE_MAP), IOCTLCMD(CTX_ADD_NOTIFICATION), IOCTLCMD(CTX_REMOVE_NOTIFICATION), IOCTLCMD(CTX_GET_CPT_STATE), IOCTLCMD(CTX_SET_CPT_STATE), IOCTLCMD(GET_CONTEXT_ID), /* END VMCI */ /* * BEGIN VMCI SOCKETS * * We mark the end of the vmci commands and the start of the vmci sockets * commands since they are used in separate modules on Linux. * */ IOCTLCMD(LAST), IOCTLCMD(SOCKETS_FIRST) = IOCTLCMD(LAST), IOCTLCMD(SOCKETS_ACCEPT) = IOCTLCMD(SOCKETS_FIRST), IOCTLCMD(SOCKETS_BIND), IOCTLCMD(SOCKETS_CLOSE), IOCTLCMD(SOCKETS_CONNECT), /* * The next two values are public (vmci_sockets.h) and cannot be changed. * That means the number of values above these cannot be changed either * unless the base index (specified below) is updated accordingly. */ IOCTLCMD(SOCKETS_GET_AF_VALUE), IOCTLCMD(SOCKETS_GET_LOCAL_CID), IOCTLCMD(SOCKETS_GET_SOCK_NAME), IOCTLCMD(SOCKETS_GET_SOCK_OPT), IOCTLCMD(SOCKETS_GET_VM_BY_NAME), IOCTLCMD(SOCKETS_LISTEN), IOCTLCMD(SOCKETS_RECV), IOCTLCMD(SOCKETS_RECV_FROM), IOCTLCMD(SOCKETS_SELECT), IOCTLCMD(SOCKETS_SEND), IOCTLCMD(SOCKETS_SEND_TO), IOCTLCMD(SOCKETS_SET_SOCK_OPT), IOCTLCMD(SOCKETS_SHUTDOWN), IOCTLCMD(SOCKETS_SOCKET), /* END VMCI SOCKETS */ // Must be last. IOCTLCMD(SOCKETS_LAST) }; #if defined _WIN32 /* * Windows VMCI ioctl definitions. */ /* These values cannot be changed since some of the ioctl values are public. 
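 * For example, VMCIIOCTL_BUFFERED(VERSION) below is
 * CTL_CODE(FILE_DEVICE_VMCI, VMCI_IOCTL_BASE_INDEX + IOCTLCMD_VMCI_VERSION,
 * METHOD_BUFFERED, FILE_ANY_ACCESS).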
*/ #define FILE_DEVICE_VMCI 0x8103 #define VMCI_IOCTL_BASE_INDEX 0x801 #define VMCIIOCTL_BUFFERED(name) \ CTL_CODE(FILE_DEVICE_VMCI, \ VMCI_IOCTL_BASE_INDEX + IOCTLCMD_VMCI_ ## name, \ METHOD_BUFFERED, \ FILE_ANY_ACCESS) #define VMCIIOCTL_NEITHER(name) \ CTL_CODE(FILE_DEVICE_VMCI, \ VMCI_IOCTL_BASE_INDEX + IOCTLCMD_VMCI_ ## name, \ METHOD_NEITHER, \ FILE_ANY_ACCESS) #define IOCTL_VMCI_VERSION VMCIIOCTL_BUFFERED(VERSION) /* BEGIN VMCI */ #define IOCTL_VMCI_INIT_CONTEXT VMCIIOCTL_BUFFERED(INIT_CONTEXT) #define IOCTL_VMCI_CREATE_PROCESS VMCIIOCTL_BUFFERED(CREATE_PROCESS) #define IOCTL_VMCI_CREATE_DATAGRAM_PROCESS \ VMCIIOCTL_BUFFERED(CREATE_DATAGRAM_PROCESS) #define IOCTL_VMCI_HYPERCALL VMCIIOCTL_BUFFERED(HYPERCALL) #define IOCTL_VMCI_SHAREDMEM_CREATE \ VMCIIOCTL_BUFFERED(SHAREDMEM_CREATE) #define IOCTL_VMCI_SHAREDMEM_ATTACH \ VMCIIOCTL_BUFFERED(SHAREDMEM_ATTACH) #define IOCTL_VMCI_SHAREDMEM_QUERY \ VMCIIOCTL_BUFFERED(SHAREDMEM_QUERY) #define IOCTL_VMCI_SHAREDMEM_DETACH \ VMCIIOCTL_BUFFERED(SHAREDMEM_DETACH) #define IOCTL_VMCI_VERSION2 VMCIIOCTL_BUFFERED(VERSION2) #define IOCTL_VMCI_QUEUEPAIR_ALLOC \ VMCIIOCTL_BUFFERED(QUEUEPAIR_ALLOC) #define IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE \ VMCIIOCTL_BUFFERED(QUEUEPAIR_SETPAGEFILE) #define IOCTL_VMCI_QUEUEPAIR_DETACH \ VMCIIOCTL_BUFFERED(QUEUEPAIR_DETACH) #define IOCTL_VMCI_DATAGRAM_SEND VMCIIOCTL_BUFFERED(DATAGRAM_SEND) #define IOCTL_VMCI_DATAGRAM_RECEIVE VMCIIOCTL_NEITHER(DATAGRAM_RECEIVE) #define IOCTL_VMCI_DATAGRAM_REQUEST_MAP VMCIIOCTL_BUFFERED(DATAGRAM_REQUEST_MAP) #define IOCTL_VMCI_DATAGRAM_REMOVE_MAP VMCIIOCTL_BUFFERED(DATAGRAM_REMOVE_MAP) #define IOCTL_VMCI_CTX_ADD_NOTIFICATION VMCIIOCTL_BUFFERED(CTX_ADD_NOTIFICATION) #define IOCTL_VMCI_CTX_REMOVE_NOTIFICATION \ VMCIIOCTL_BUFFERED(CTX_REMOVE_NOTIFICATION) #define IOCTL_VMCI_CTX_GET_CPT_STATE \ VMCIIOCTL_BUFFERED(CTX_GET_CPT_STATE) #define IOCTL_VMCI_CTX_SET_CPT_STATE \ VMCIIOCTL_BUFFERED(CTX_SET_CPT_STATE) #define IOCTL_VMCI_GET_CONTEXT_ID \ VMCIIOCTL_BUFFERED(GET_CONTEXT_ID) /* END VMCI */ /* BEGIN VMCI SOCKETS */ #define IOCTL_VMCI_SOCKETS_ACCEPT \ VMCIIOCTL_BUFFERED(SOCKETS_ACCEPT) #define IOCTL_VMCI_SOCKETS_BIND \ VMCIIOCTL_BUFFERED(SOCKETS_BIND) #define IOCTL_VMCI_SOCKETS_CLOSE \ VMCIIOCTL_BUFFERED(SOCKETS_CLOSE) #define IOCTL_VMCI_SOCKETS_CONNECT \ VMCIIOCTL_BUFFERED(SOCKETS_CONNECT) #define IOCTL_VMCI_SOCKETS_GET_AF_VALUE \ VMCIIOCTL_BUFFERED(SOCKETS_GET_AF_VALUE) #define IOCTL_VMCI_SOCKETS_GET_LOCAL_CID \ VMCIIOCTL_BUFFERED(SOCKETS_GET_LOCAL_CID) #define IOCTL_VMCI_SOCKETS_GET_SOCK_NAME \ VMCIIOCTL_BUFFERED(SOCKETS_GET_SOCK_NAME) #define IOCTL_VMCI_SOCKETS_GET_SOCK_OPT \ VMCIIOCTL_BUFFERED(SOCKETS_GET_SOCK_OPT) #define IOCTL_VMCI_SOCKETS_GET_VM_BY_NAME \ VMCIIOCTL_BUFFERED(SOCKETS_GET_VM_BY_NAME) #define IOCTL_VMCI_SOCKETS_LISTEN \ VMCIIOCTL_BUFFERED(SOCKETS_LISTEN) #define IOCTL_VMCI_SOCKETS_RECV \ VMCIIOCTL_BUFFERED(SOCKETS_RECV) #define IOCTL_VMCI_SOCKETS_RECV_FROM \ VMCIIOCTL_BUFFERED(SOCKETS_RECV_FROM) #define IOCTL_VMCI_SOCKETS_SELECT \ VMCIIOCTL_BUFFERED(SOCKETS_SELECT) #define IOCTL_VMCI_SOCKETS_SEND \ VMCIIOCTL_BUFFERED(SOCKETS_SEND) #define IOCTL_VMCI_SOCKETS_SEND_TO \ VMCIIOCTL_BUFFERED(SOCKETS_SEND_TO) #define IOCTL_VMCI_SOCKETS_SET_SOCK_OPT \ VMCIIOCTL_BUFFERED(SOCKETS_SET_SOCK_OPT) #define IOCTL_VMCI_SOCKETS_SHUTDOWN \ VMCIIOCTL_BUFFERED(SOCKETS_SHUTDOWN) #define IOCTL_VMCI_SOCKETS_SOCKET \ VMCIIOCTL_BUFFERED(SOCKETS_SOCKET) /* END VMCI SOCKETS */ #endif // _WIN32 /* * VMCI driver initialization. This block can also be used to * pass initial group membership etc. 
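 * (Presumably it is the payload of IOCTL_VMCI_INIT_CONTEXT above; that is
 * an inference from the ioctl name, not something this header states.)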
*/ typedef struct VMCIInitBlock { VMCIId cid; VMCIPrivilegeFlags flags; #ifdef _WIN32 uint64 event; /* Handle for signalling vmci calls on windows. */ #endif // _WIN32 } VMCIInitBlock; typedef struct VMCISharedMemInfo { VMCIHandle handle; uint32 size; uint32 result; VA64 va; /* Currently only used in the guest. */ char pageFileName[VMCI_PATH_MAX]; } VMCISharedMemInfo; typedef struct VMCIQueuePairAllocInfo { VMCIHandle handle; VMCIId peer; uint32 flags; uint64 produceSize; uint64 consumeSize; VA64 producePageFile; /* User VA. */ VA64 consumePageFile; /* User VA. */ uint64 producePageFileSize; /* Size of the file name array. */ uint64 consumePageFileSize; /* Size of the file name array. */ int32 result; uint32 _pad; } VMCIQueuePairAllocInfo; typedef struct VMCIQueuePairPageFileInfo { VMCIHandle handle; VA64 producePageFile; /* User VA. */ VA64 consumePageFile; /* User VA. */ uint64 producePageFileSize; /* Size of the file name array. */ uint64 consumePageFileSize; /* Size of the file name array. */ int32 result; uint32 _pad; } VMCIQueuePairPageFileInfo; typedef struct VMCIQueuePairDetachInfo { VMCIHandle handle; int32 result; uint32 _pad; } VMCIQueuePairDetachInfo; typedef struct VMCIDatagramSendRecvInfo { VA64 addr; uint32 len; int32 result; } VMCIDatagramSendRecvInfo; /* Used to create datagram endpoints in guest or host userlevel. */ typedef struct VMCIDatagramCreateInfo { VMCIId resourceID; uint32 flags; int eventHnd; int result; // result of handle create operation VMCIHandle handle; // handle if successfull } VMCIDatagramCreateInfo; /* Used to add/remove well-known datagram mappings. */ typedef struct VMCIDatagramMapInfo { VMCIId wellKnownID; int result; } VMCIDatagramMapInfo; /* Used to add/remove remote context notifications. */ typedef struct VMCINotifyAddRemoveInfo { VMCIId remoteCID; int result; } VMCINotifyAddRemoveInfo; /* Used to set/get current context's checkpoint state. */ typedef struct VMCICptBufInfo { VA64 cptBuf; uint32 cptType; uint32 bufSize; int32 result; uint32 _pad; } VMCICptBufInfo; #ifdef __APPLE__ /* * Mac OS ioctl definitions. * * Mac OS defines _IO* macros, and the core kernel code uses the size encoded * in the ioctl value to copy the memory back and forth (depending on the * direction encoded in the ioctl value) between the user and kernel address * spaces. * See iocontrolsMacOS.h for details on how this is done. We use sockets only * for vmci. 
*/ #include enum VMCrossTalkSockOpt { VMCI_SO_VERSION = 0, VMCI_SO_CONTEXT = IOCTL_VMCI_INIT_CONTEXT, VMCI_SO_PROCESS = IOCTL_VMCI_CREATE_PROCESS, VMCI_SO_DATAGRAM_PROCESS = IOCTL_VMCI_CREATE_DATAGRAM_PROCESS, VMCI_SO_SHAREDMEM_CREATE = IOCTL_VMCI_SHAREDMEM_CREATE, VMCI_SO_SHAREDMEM_ATTACH = IOCTL_VMCI_SHAREDMEM_ATTACH, VMCI_SO_SHAREDMEM_QUERY = IOCTL_VMCI_SHAREDMEM_QUERY, VMCI_SO_SHAREDMEM_DETACH = IOCTL_VMCI_SHAREDMEM_DETACH, VMCI_SO_VERSION2 = IOCTL_VMCI_VERSION2, VMCI_SO_QUEUEPAIR_ALLOC = IOCTL_VMCI_QUEUEPAIR_ALLOC, VMCI_SO_QUEUEPAIR_SETPAGEFILE = IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE, VMCI_SO_QUEUEPAIR_DETACH = IOCTL_VMCI_QUEUEPAIR_DETACH, VMCI_SO_DATAGRAM_SEND = IOCTL_VMCI_DATAGRAM_SEND, VMCI_SO_DATAGRAM_RECEIVE = IOCTL_VMCI_DATAGRAM_RECEIVE, VMCI_SO_DATAGRAM_REQUEST_MAP = IOCTL_VMCI_DATAGRAM_REQUEST_MAP, VMCI_SO_DATAGRAM_REMOVE_MAP = IOCTL_VMCI_DATAGRAM_REMOVE_MAP, VMCI_SO_CTX_ADD_NOTIFICATION = IOCTL_VMCI_CTX_ADD_NOTIFICATION, VMCI_SO_CTX_REMOVE_NOTIFICATION = IOCTL_VMCI_CTX_REMOVE_NOTIFICATION, VMCI_SO_CTX_GET_CPT_STATE = IOCTL_VMCI_CTX_GET_CPT_STATE, VMCI_SO_CTX_SET_CPT_STATE = IOCTL_VMCI_CTX_SET_CPT_STATE, VMCI_SO_GET_CONTEXT_ID = IOCTL_VMCI_GET_CONTEXT_ID, VMCI_SO_USERFD, }; # define VMCI_MACOS_HOST_DEVICE_BASE "com.vmware.kext.vmci" # ifdef VMX86_DEVEL # define VMCI_MACOS_HOST_DEVICE VMCI_MACOS_HOST_DEVICE_BASE ".devel" # else # define VMCI_MACOS_HOST_DEVICE VMCI_MACOS_HOST_DEVICE_BASE # endif #endif /* Clean up helper macros */ #undef IOCTLCMD #endif // ifndef _VMCI_IOCONTROLS_H_ vmci-only/vmci_queue_pair.h0000444000000000000000000005113312025726724015027 0ustar rootroot/********************************************************* * Copyright (C) 2007 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef _VMCI_QUEUE_PAIR_H_ #define _VMCI_QUEUE_PAIR_H_ /* * * vmci_queue_pair.h -- * * Defines queue layout in memory, and helper functions to enqueue and * dequeue items. XXX needs checksumming? */ #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMX #include "includeCheck.h" #include "vm_basic_defs.h" #include "vm_basic_types.h" #include "vm_atomic.h" #include "vmci_defs.h" #include "vm_assert.h" #if defined(__linux__) && defined(__KERNEL__) # include "vmci_kernel_if.h" #endif #if defined(__linux__) && defined(__KERNEL__) struct page; #endif /* * For a queue of buffer 'size' bytes, the tail and head pointers will be in * the range [0, size-1]. */ typedef struct VMCIQueueHeader { /* All fields are 64bit and aligned. */ VMCIHandle handle; /* Identifier. */ Atomic_uint64 producerTail; /* Offset in this queue. */ Atomic_uint64 consumerHead; /* Offset in peer queue. 
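Both offsets stay in [0, size-1]; one byte of the queue is always left unused, so producerTail == consumerHead unambiguously means the queue is empty (see VMCIQueueFreeSpaceInt below).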
*/ } VMCIQueueHeader; typedef struct VMCIQueue { VMCIQueueHeader queueHeader; uint8 _padding[PAGE_SIZE - sizeof(VMCIQueueHeader)]; #if defined(__linux__) && defined(__KERNEL__) struct page *page[0]; /* List of pages containing queue data. */ #else uint8 buffer[0]; /* Buffer containing data. */ #endif } VMCIQueue; typedef int VMCIMemcpyToQueueFunc(VMCIQueue *queue, uint64 queueOffset, const void *src, size_t srcOffset, size_t size); typedef int VMCIMemcpyFromQueueFunc(void *dest, size_t destOffset, const VMCIQueue *queue, uint64 queueOffset, size_t size); #if defined(__linux__) && defined(__KERNEL__) int VMCIMemcpyToQueue(VMCIQueue *queue, uint64 queueOffset, const void *src, size_t srcOffset, size_t size); int VMCIMemcpyFromQueue(void *dest, size_t destOffset, const VMCIQueue *queue, uint64 queueOffset, size_t size); int VMCIMemcpyToQueueV(VMCIQueue *queue, uint64 queueOffset, const void *src, size_t srcOffset, size_t size); int VMCIMemcpyFromQueueV(void *dest, size_t destOffset, const VMCIQueue *queue, uint64 queueOffset, size_t size); #elif defined(_WIN32) && defined(WINNT_DDK) int VMCIMemcpyToQueue(VMCIQueue *queue, uint64 queueOffset, const void *src, size_t srcOffset, size_t size); int VMCIMemcpyFromQueue(void *dest, size_t destOffset, const VMCIQueue *queue, uint64 queueOffset, size_t size); #else /* *----------------------------------------------------------------------------- * * VMCIMemcpyToQueue -- * * Wrapper for memcpy --- copies from a given buffer to a VMCI Queue. * Assumes that offset + size does not wrap around in the queue. * * Results: * Zero on success, negative error code on failure. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE int VMCIMemcpyToQueue(VMCIQueue *queue, // OUT: uint64 queueOffset, // IN: const void *src, // IN: size_t srcOffset, // IN: size_t size) // IN: { memcpy(queue->buffer + queueOffset, (uint8 *)src + srcOffset, size); return 0; } /* *----------------------------------------------------------------------------- * * VMCIMemcpyFromQueue -- * * Wrapper for memcpy --- copies to a given buffer from a VMCI Queue. * Assumes that offset + size does not wrap around in the queue. * * Results: * Zero on success, negative error code on failure. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE int VMCIMemcpyFromQueue(void *dest, // OUT: size_t destOffset, // IN: const VMCIQueue *queue, // IN: uint64 queueOffset, // IN: size_t size) // IN: { memcpy((uint8 *)dest + destOffset, queue->buffer + queueOffset, size); return 0; } #endif /* __linux__ && __KERNEL__ */ /* * If one client of a QueuePair is a 32bit entity, we restrict the QueuePair * size to be less than 4GB, and use 32bit atomic operations on the head and * tail pointers. 64bit atomic read on a 32bit entity involves cmpxchg8b which * is an atomic read-modify-write. This will cause traces to fire when a 32bit * consumer tries to read the producer's tail pointer, for example, because the * consumer has read-only access to the producer's tail pointer. * * We provide the following macros to invoke 32bit or 64bit atomic operations * based on the architecture the code is being compiled on. */ /* Architecture independent maximum queue size. 
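This is 4GB - 1, the largest offset a 32bit peer can represent; see the 32bit/64bit discussion above.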
*/ #define QP_MAX_QUEUE_SIZE_ARCH_ANY CONST64U(0xffffffff) #ifdef __x86_64__ # define QP_MAX_QUEUE_SIZE_ARCH CONST64U(0xffffffffffffffff) # define QPAtomic_ReadOffset(x) Atomic_Read64(x) # define QPAtomic_WriteOffset(x, y) Atomic_Write64(x, y) #else # define QP_MAX_QUEUE_SIZE_ARCH CONST64U(0xffffffff) # define QPAtomic_ReadOffset(x) Atomic_Read32((Atomic_uint32 *)(x)) # define QPAtomic_WriteOffset(x, y) \ Atomic_Write32((Atomic_uint32 *)(x), (uint32)(y)) #endif /* *----------------------------------------------------------------------------- * * VMCIQueue_CheckAlignment -- * * Checks if the given queue is aligned to page boundary. * * Results: * TRUE or FALSE. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE Bool VMCIQueue_CheckAlignment(const VMCIQueue *queue) // IN: { return ((uintptr_t)queue & (PAGE_SIZE - 1)) == 0; } static INLINE void VMCIQueue_GetPointers(const VMCIQueue *produceQ, const VMCIQueue *consumeQ, uint64 *producerTail, uint64 *consumerHead) { *producerTail = QPAtomic_ReadOffset(&produceQ->queueHeader.producerTail); *consumerHead = QPAtomic_ReadOffset(&consumeQ->queueHeader.consumerHead); } /* *----------------------------------------------------------------------------- * * VMCIQueue_ResetPointers -- * * Reset the tail pointer (of "this" queue) and the head pointer (of * "peer" queue). * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE void VMCIQueue_ResetPointers(VMCIQueue *queue) // IN: { QPAtomic_WriteOffset(&queue->queueHeader.producerTail, CONST64U(0)); QPAtomic_WriteOffset(&queue->queueHeader.consumerHead, CONST64U(0)); } /* *----------------------------------------------------------------------------- * * VMCIQueue_Init -- * * Initializes a queue's state (head & tail pointers). * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE void VMCIQueue_Init(const VMCIHandle handle, // IN: VMCIQueue *queue) // IN: { ASSERT_NOT_IMPLEMENTED(VMCIQueue_CheckAlignment(queue)); queue->queueHeader.handle = handle; VMCIQueue_ResetPointers(queue); } /* *----------------------------------------------------------------------------- * * VMCIQueueFreeSpaceInt -- * * Finds available free space in a produce queue to enqueue more * data or reports an error if queue pair corruption is detected. * * Results: * Free space size in bytes. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE int VMCIQueueFreeSpaceInt(const VMCIQueue *produceQueue, // IN: const VMCIQueue *consumeQueue, // IN: const uint64 produceQSize, // IN: uint64 *freeSpace) // OUT: { const uint64 tail = QPAtomic_ReadOffset(&produceQueue->queueHeader.producerTail); const uint64 head = QPAtomic_ReadOffset(&consumeQueue->queueHeader.consumerHead); ASSERT(freeSpace); if (tail >= produceQSize || head >= produceQSize) { return VMCI_ERROR_INVALID_SIZE; } /* * Deduct 1 to avoid tail becoming equal to head which causes ambiguity. If * head and tail are equal it means that the queue is empty. */ if (tail >= head) { *freeSpace = produceQSize - (tail - head) - 1; } else { *freeSpace = head - tail - 1; } return VMCI_SUCCESS; } /* *----------------------------------------------------------------------------- * * VMCIQueue_FreeSpace -- * * Finds available free space in a produce queue to enqueue more data. 
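 * For example, with produceQSize 4096 and tail == head the queue is
 * empty and this returns 4095: one byte is sacrificed so that
 * tail == head always means "empty" rather than "full".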
* * Results: * On success, free space size in bytes (up to MAX_INT64). * On failure, appropriate error code. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE int64 VMCIQueue_FreeSpace(const VMCIQueue *produceQueue, // IN: const VMCIQueue *consumeQueue, // IN: const uint64 produceQSize) // IN: { uint64 freeSpace; int retval; retval = VMCIQueueFreeSpaceInt(produceQueue, consumeQueue, produceQSize, &freeSpace); if (retval != VMCI_SUCCESS) { return retval; } return MIN(freeSpace, MAX_INT64); } /* *----------------------------------------------------------------------------- * * VMCIQueue_BufReady -- * * Finds available data to dequeue from a consume queue. * * Results: * On success, available data size in bytes (up to MAX_INT64). * On failure, appropriate error code. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE int64 VMCIQueue_BufReady(const VMCIQueue *consumeQueue, // IN: const VMCIQueue *produceQueue, // IN: const uint64 consumeQSize) // IN: { int retval; uint64 freeSpace; retval = VMCIQueueFreeSpaceInt(consumeQueue, produceQueue, consumeQSize, &freeSpace); if (retval != VMCI_SUCCESS) { return retval; } else { uint64 available = consumeQSize - freeSpace - 1; return MIN(available, MAX_INT64); } } /* *----------------------------------------------------------------------------- * * AddPointer -- * * Helper to add a given offset to a head or tail pointer. Wraps the value * of the pointer around the max size of the queue. * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE void AddPointer(Atomic_uint64 *var, // IN: size_t add, // IN: uint64 max) // IN: { uint64 newVal = QPAtomic_ReadOffset(var); if (newVal >= max - add) { newVal -= max; } newVal += add; QPAtomic_WriteOffset(var, newVal); } /* *----------------------------------------------------------------------------- * * __VMCIQueue_Enqueue -- * * Enqueues a given buffer to the produce queue using the provided * function. As many bytes as possible (space available in the queue) * are enqueued. * * Results: * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue data. * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue * (as defined by the queue size). * Otherwise, the number of bytes written to the queue is returned. * * Side effects: * Updates the tail pointer of the produce queue. * *----------------------------------------------------------------------------- */ static INLINE ssize_t __VMCIQueue_Enqueue(VMCIQueue *produceQueue, // IN: const VMCIQueue *consumeQueue, // IN: const uint64 produceQSize, // IN: const void *buf, // IN: size_t bufSize, // IN: VMCIMemcpyToQueueFunc memcpyToQueue) // IN: { const int64 freeSpace = VMCIQueue_FreeSpace(produceQueue, consumeQueue, produceQSize); const uint64 tail = QPAtomic_ReadOffset(&produceQueue->queueHeader.producerTail); size_t written; if (!freeSpace) { return VMCI_ERROR_QUEUEPAIR_NOSPACE; } if (freeSpace < 0) { return (ssize_t)freeSpace; } written = (size_t)(freeSpace > bufSize ? bufSize : freeSpace); if (LIKELY(tail + written < produceQSize)) { memcpyToQueue(produceQueue, tail, buf, 0, written); } else { /* Tail pointer wraps around. 
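The data is copied in two pieces: the run up to the end of the buffer first, then the remainder starting at offset 0.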
*/ const size_t tmp = (size_t)(produceQSize - tail); memcpyToQueue(produceQueue, tail, buf, 0, tmp); memcpyToQueue(produceQueue, 0, buf, tmp, written - tmp); } AddPointer(&produceQueue->queueHeader.producerTail, written, produceQSize); return written; } /* *----------------------------------------------------------------------------- * * VMCIQueue_Enqueue -- * * Enqueues a given buffer to the produce queue. As many bytes as possible * (space available in the queue) are enqueued. If bufSize is larger than * the maximum value of ssize_t the result is unspecified. * * Results: * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue data. * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue * (as defined by the queue size). * Otherwise, the number of bytes written to the queue is returned. * * Side effects: * Updates the tail pointer of the produce queue. * *----------------------------------------------------------------------------- */ static INLINE ssize_t VMCIQueue_Enqueue(VMCIQueue *produceQueue, // IN: const VMCIQueue *consumeQueue, // IN: const uint64 produceQSize, // IN: const void *buf, // IN: size_t bufSize) // IN: { return __VMCIQueue_Enqueue(produceQueue, consumeQueue, produceQSize, buf, bufSize, VMCIMemcpyToQueue); } #if defined(__linux__) && defined(__KERNEL__) /* *----------------------------------------------------------------------------- * * VMCIQueue_EnqueueV -- * * Enqueues a given iovec to the produce queue. As many bytes as possible * (space available in the queue) are enqueued. If bufSize is larger than * the maximum value of ssize_t the result is unspecified. * * Results: * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue data. * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue * (as defined by the queue size). * Otherwise, the number of bytes written to the queue is returned. * * Side effects: * Updates the tail pointer of the produce queue. * *----------------------------------------------------------------------------- */ static INLINE ssize_t VMCIQueue_EnqueueV(VMCIQueue *produceQueue, // IN: const VMCIQueue *consumeQueue, // IN: const uint64 produceQSize, // IN: struct iovec *iov, // IN: size_t iovSize) // IN: { return __VMCIQueue_Enqueue(produceQueue, consumeQueue, produceQSize, (void *)iov, iovSize, VMCIMemcpyToQueueV); } #endif /* *----------------------------------------------------------------------------- * * __VMCIQueue_Dequeue -- * * Dequeues data (if available) from the given consume queue. Writes data * to the user provided buffer using the provided function. * * Results: * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue. * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue * (as defined by the queue size). * Otherwise the number of bytes dequeued is returned. * * Side effects: * Updates the head pointer of the consume queue. * *----------------------------------------------------------------------------- */ static INLINE ssize_t __VMCIQueue_Dequeue(VMCIQueue *produceQueue, // IN: const VMCIQueue *consumeQueue, // IN: const uint64 consumeQSize, // IN: void *buf, // IN: size_t bufSize, // IN: VMCIMemcpyFromQueueFunc memcpyFromQueue) // IN: { const int64 bufReady = VMCIQueue_BufReady(consumeQueue, produceQueue, consumeQSize); const uint64 head = QPAtomic_ReadOffset(&produceQueue->queueHeader.consumerHead); size_t written; if (!bufReady) { return VMCI_ERROR_QUEUEPAIR_NODATA; } if (bufReady < 0) { return (ssize_t)bufReady; } written = (size_t)(bufReady > bufSize ? 
bufSize : bufReady); if (LIKELY(head + written < consumeQSize)) { memcpyFromQueue(buf, 0, consumeQueue, head, written); } else { /* Head pointer wraps around. */ const size_t tmp = (size_t)(consumeQSize - head); memcpyFromQueue(buf, 0, consumeQueue, head, tmp); memcpyFromQueue(buf, tmp, consumeQueue, 0, written - tmp); } AddPointer(&produceQueue->queueHeader.consumerHead, written, consumeQSize); return written; } /* *----------------------------------------------------------------------------- * * VMCIQueue_Dequeue -- * * Dequeues data (if available) from the given consume queue. Writes data * to the user provided buffer. If bufSize is larger than the maximum * value of ssize_t the result is unspecified. * * Results: * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue. * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue * (as defined by the queue size). * Otherwise the number of bytes dequeued is returned. * * Side effects: * Updates the head pointer of the consume queue. * *----------------------------------------------------------------------------- */ static INLINE ssize_t VMCIQueue_Dequeue(VMCIQueue *produceQueue, // IN: const VMCIQueue *consumeQueue, // IN: const uint64 consumeQSize, // IN: void *buf, // IN: size_t bufSize) // IN: { return __VMCIQueue_Dequeue(produceQueue, consumeQueue, consumeQSize, buf, bufSize, VMCIMemcpyFromQueue); } #if defined(__linux__) && defined(__KERNEL__) /* *----------------------------------------------------------------------------- * * VMCIQueue_DequeueV -- * * Dequeues data (if available) from the given consume queue. Writes data * to the user provided iovec. If bufSize is larger than the maximum * value of ssize_t the result is unspecified. * * Results: * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue. * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue * (as defined by the queue size). * Otherwise the number of bytes dequeued is returned. * * Side effects: * Updates the head pointer of the consume queue. * *----------------------------------------------------------------------------- */ static INLINE ssize_t VMCIQueue_DequeueV(VMCIQueue *produceQueue, // IN: const VMCIQueue *consumeQueue, // IN: const uint64 consumeQSize, // IN: struct iovec *iov, // IN: size_t iovSize) // IN: { return __VMCIQueue_Dequeue(produceQueue, consumeQueue, consumeQSize, (void *)iov, iovSize, VMCIMemcpyFromQueueV); } #endif #endif /* !_VMCI_QUEUE_PAIR_H_ */ vmci-only/vm_atomic.h0000444000000000000000000015202112025726724013626 0ustar rootroot/********************************************************* * Copyright (C) 1998 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vm_atomic.h -- * * Atomic power */ #ifndef _ATOMIC_H_ #define _ATOMIC_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_VMMEXT #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMNIXMOD #define INCLUDE_ALLOW_VMKDRIVERS #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_VMIROM #include "includeCheck.h" #include "vm_basic_types.h" /* Basic atomic type: 32 bits */ typedef struct Atomic_uint32 { volatile uint32 value; } Atomic_uint32; /* Basic atomic type: 64 bits */ typedef struct Atomic_uint64 { volatile uint64 value; } Atomic_uint64 ALIGNED(8); /* * Prototypes for msft atomics. These are defined & inlined by the * compiler so no function definition is needed. The prototypes are * needed for c++. Since amd64 compiler doesn't support inline asm we * have to use these. Unfortunately, we still have to use some inline asm * for the 32 bit code since the and/or/xor implementations didn't show up * untill xp or 2k3. * * The declarations for the intrinsic functions were taken from ntddk.h * in the DDK. The declarations must match otherwise the 64-bit c++ * compiler will complain about second linkage of the intrinsic functions. * We define the intrinsic using the basic types corresponding to the * Windows typedefs. This avoids having to include windows header files * to get to the windows types. */ #if defined(_MSC_VER) && _MSC_VER >= 1310 #ifdef __cplusplus extern "C" { #endif long _InterlockedExchange(long volatile*, long); long _InterlockedCompareExchange(long volatile*, long, long); long _InterlockedExchangeAdd(long volatile*, long); long _InterlockedDecrement(long volatile*); long _InterlockedIncrement(long volatile*); #pragma intrinsic(_InterlockedExchange, _InterlockedCompareExchange) #pragma intrinsic(_InterlockedExchangeAdd, _InterlockedDecrement) #pragma intrinsic(_InterlockedIncrement) #if defined(VM_X86_64) long _InterlockedAnd(long volatile*, long); __int64 _InterlockedAnd64(__int64 volatile*, __int64); long _InterlockedOr(long volatile*, long); __int64 _InterlockedOr64(__int64 volatile*, __int64); long _InterlockedXor(long volatile*, long); __int64 _InterlockedXor64(__int64 volatile*, __int64); __int64 _InterlockedExchangeAdd64(__int64 volatile*, __int64); __int64 _InterlockedIncrement64(__int64 volatile*); __int64 _InterlockedDecrement64(__int64 volatile*); __int64 _InterlockedExchange64(__int64 volatile*, __int64); __int64 _InterlockedCompareExchange64(__int64 volatile*, __int64, __int64); #if !defined(_WIN64) #pragma intrinsic(_InterlockedAnd, _InterlockedAnd64) #pragma intrinsic(_InterlockedOr, _InterlockedOr64) #pragma intrinsic(_InterlockedXor, _InterlockedXor64) #pragma intrinsic(_InterlockedExchangeAdd64, _InterlockedIncrement64) #pragma intrinsic(_InterlockedDecrement64, _InterlockedExchange64) #pragma intrinsic(_InterlockedCompareExchange64) #endif /* !_WIN64 */ #endif /* __x86_64__ */ #ifdef __cplusplus } #endif #endif /* _MSC_VER */ /* Convert a volatile int to Atomic_uint32. 
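 *
 * Illustrative use (the counter variable is hypothetical):
 *
 *    volatile uint32 counter;
 *    ...
 *    Atomic_Inc(Atomic_VolatileToAtomic(&counter));
 *
 * This cast is safe only because Atomic_uint32 wraps exactly one
 * volatile uint32, so size and layout are unchanged.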
*/ static INLINE Atomic_uint32 * Atomic_VolatileToAtomic(volatile uint32 *var) { return (Atomic_uint32 *)var; } /* *----------------------------------------------------------------------------- * * Atomic_Init, Atomic_SetFence, AtomicUseFence -- * * Determine whether an lfence intruction is executed after * every locked instruction. * * Certain AMD processes have a bug (see bug 107024) that * requires an lfence after every locked instruction. * * The global variable AtomicUseFence controls whether lfence * is used (see AtomicEpilogue). * * Atomic_SetFence sets AtomicUseFence to the given value. * * Atomic_Init computes and sets AtomicUseFence. * It does not take into account the number of processors. * * The rationale for all this complexity is that Atomic_Init * is the easy-to-use interface. It can be called a number * of times cheaply, and does not depend on other libraries. * However, because the number of CPUs is difficult to compute, * it does without it and always assumes there are more than one. * * For programs that care or have special requirements, * Atomic_SetFence can be called directly, in addition to Atomic_Init. * It overrides the effect of Atomic_Init, and can be called * before, after, or between calls to Atomic_Init. * *----------------------------------------------------------------------------- */ // The freebsd assembler doesn't know the lfence instruction #if defined(__GNUC__) && \ __GNUC__ >= 3 && \ !defined(BSD_VERSION) && \ (!defined(MODULE) || defined(__VMKERNEL_MODULE__)) && \ !defined(__APPLE__) /* PR136775 */ #define ATOMIC_USE_FENCE #endif #if defined(VMATOMIC_IMPORT_DLLDATA) VMX86_EXTERN_DATA Bool AtomicUseFence; #else EXTERN Bool AtomicUseFence; #endif EXTERN Bool atomicFenceInitialized; void AtomicInitFence(void); static INLINE void Atomic_Init(void) { #ifdef ATOMIC_USE_FENCE if (!atomicFenceInitialized) { AtomicInitFence(); } #endif } static INLINE void Atomic_SetFence(Bool fenceAfterLock) /* IN: TRUE to enable lfence */ /* FALSE to disable. */ { AtomicUseFence = fenceAfterLock; #if defined(__VMKERNEL__) extern void Atomic_SetFenceVMKAPI(Bool fenceAfterLock); Atomic_SetFenceVMKAPI(fenceAfterLock); #endif atomicFenceInitialized = TRUE; } /* Conditionally execute fence after interlocked instruction. */ static INLINE void AtomicEpilogue(void) { #ifdef ATOMIC_USE_FENCE if (UNLIKELY(AtomicUseFence)) { asm volatile ("lfence" ::: "memory"); } #endif } /* * All the assembly code is tricky and written conservatively. * For example, to make sure gcc won't introduce copies, * we force the addressing mode like this: * * "xchgl %0, (%1)" * : "=r" (val) * : "r" (&var->value), * "0" (val) * : "memory" * * - edward * * Actually - turns out that gcc never generates memory aliases (it * still does generate register aliases though), so we can be a bit * more agressive with the memory constraints. The code above can be * modified like this: * * "xchgl %0, %1" * : "=r" (val), * "=m" (var->value), * : "0" (val), * "1" (var->value) * * The advantages are that gcc can use whatever addressing mode it * likes to access the memory value, and that we dont have to use a * way-too-generic "memory" clobber as there is now an explicit * declaration that var->value is modified. * * see also /usr/include/asm/atomic.h to convince yourself this is a * valid optimization. * * - walken */ /* *----------------------------------------------------------------------------- * * Atomic_Read -- * * Read * * Results: * The value of the atomic variable. * * Side effects: * None. 
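 *
 *      Note that the implementation is a plain load: on x86 an aligned
 *      32-bit access is naturally atomic, so no lock prefix is needed
 *      for a simple read.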
* *----------------------------------------------------------------------------- */ static INLINE uint32 Atomic_Read(Atomic_uint32 const *var) // IN { return var->value; } #define Atomic_Read32 Atomic_Read /* *----------------------------------------------------------------------------- * * Atomic_Write -- * * Write * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Write(Atomic_uint32 *var, // IN uint32 val) // IN { var->value = val; } #define Atomic_Write32 Atomic_Write /* *----------------------------------------------------------------------------- * * Atomic_ReadWrite -- * * Read followed by write * * Results: * The value of the atomic variable before the write. * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint32 Atomic_ReadWrite(Atomic_uint32 *var, // IN uint32 val) // IN #ifdef __GNUC__ { /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "xchgl %0, %1" # if VM_ASM_PLUS : "=r" (val), "+m" (var->value) : "0" (val) # else : "=r" (val), "=m" (var->value) : "0" (val), "1" (var->value) # endif ); AtomicEpilogue(); return val; } #elif _MSC_VER >= 1310 { return _InterlockedExchange((long *)&var->value, (long)val); } #elif _MSC_VER #pragma warning(push) #pragma warning(disable : 4035) // disable no-return warning { __asm mov eax, val __asm mov ebx, var __asm xchg [ebx]Atomic_uint32.value, eax // eax is the return value, this is documented to work - edward } #pragma warning(pop) #else #error No compiler defined for Atomic_ReadWrite #endif #define Atomic_ReadWrite32 Atomic_ReadWrite /* *----------------------------------------------------------------------------- * * Atomic_ReadIfEqualWrite -- * * Compare exchange: Read variable, if equal to oldVal, write newVal * * Results: * The value of the atomic variable before the write. * * Side effects: * The variable may be modified. * *----------------------------------------------------------------------------- */ static INLINE uint32 Atomic_ReadIfEqualWrite(Atomic_uint32 *var, // IN uint32 oldVal, // IN uint32 newVal) // IN #ifdef __GNUC__ { uint32 val; /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; cmpxchgl %2, %1" # if VM_ASM_PLUS : "=a" (val), "+m" (var->value) : "r" (newVal), "0" (oldVal) # else : "=a" (val), "=m" (var->value) : "r" (newVal), "0" (oldVal) /* * "1" (var->value): results in inconsistent constraints on gcc 2.7.2.3 * when compiling enterprise-2.2.17-14-RH7.0-update. * The constraint has been commented out for now. We may consider doing * this systematically, but we need to be sure it is the right thing to * do. However, it is also possible that the offending use of this asm * function will be removed in the near future in which case we may * decide to reintroduce the constraint instead. hpreg & agesen. 
*/ # endif : "cc" ); AtomicEpilogue(); return val; } #elif _MSC_VER >= 1310 { return _InterlockedCompareExchange((long *)&var->value, (long)newVal, (long)oldVal); } #elif _MSC_VER #pragma warning(push) #pragma warning(disable : 4035) // disable no-return warning { __asm mov eax, oldVal __asm mov ebx, var __asm mov ecx, newVal __asm lock cmpxchg [ebx]Atomic_uint32.value, ecx // eax is the return value, this is documented to work - edward } #pragma warning(pop) #else #error No compiler defined for Atomic_ReadIfEqualWrite #endif #define Atomic_ReadIfEqualWrite32 Atomic_ReadIfEqualWrite #if defined(__x86_64__) /* *----------------------------------------------------------------------------- * * Atomic_ReadIfEqualWrite64 -- * * Compare exchange: Read variable, if equal to oldVal, write newVal * * Results: * The value of the atomic variable before the write. * * Side effects: * The variable may be modified. * *----------------------------------------------------------------------------- */ static INLINE uint64 Atomic_ReadIfEqualWrite64(Atomic_uint64 *var, // IN uint64 oldVal, // IN uint64 newVal) // IN { #if defined(__GNUC__) uint64 val; /* Checked against the AMD manual and GCC --hpreg */ __asm__ __volatile__( "lock; cmpxchgq %2, %1" : "=a" (val), "+m" (var->value) : "r" (newVal), "0" (oldVal) : "cc" ); AtomicEpilogue(); return val; #elif _MSC_VER return _InterlockedCompareExchange64((__int64 *)&var->value, (__int64)newVal, (__int64)oldVal); #else #error No compiler defined for Atomic_ReadIfEqualWrite64 #endif } #endif /* *----------------------------------------------------------------------------- * * Atomic_And -- * * Atomic read, bitwise AND with a value, write. * * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_And(Atomic_uint32 *var, // IN uint32 val) // IN { #ifdef __GNUC__ /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; andl %1, %0" # if VM_ASM_PLUS : "+m" (var->value) : "ri" (val) # else : "=m" (var->value) : "ri" (val), "0" (var->value) # endif : "cc" ); AtomicEpilogue(); #elif _MSC_VER #if defined(__x86_64__) _InterlockedAnd((long *)&var->value, (long)val); #else __asm mov eax, val __asm mov ebx, var __asm lock and [ebx]Atomic_uint32.value, eax #endif #else #error No compiler defined for Atomic_And #endif } #define Atomic_And32 Atomic_And #if defined(__x86_64__) /* *----------------------------------------------------------------------------- * * Atomic_And64 -- * * Atomic read, bitwise AND with a value, write. * * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_And64(Atomic_uint64 *var, // IN uint64 val) // IN { #if defined(__GNUC__) /* Checked against the AMD manual and GCC --hpreg */ __asm__ __volatile__( "lock; andq %1, %0" : "+m" (var->value) : "ri" (val) : "cc" ); AtomicEpilogue(); #elif _MSC_VER _InterlockedAnd64((__int64 *)&var->value, (__int64)val); #else #error No compiler defined for Atomic_And64 #endif } #endif /* *----------------------------------------------------------------------------- * * Atomic_Or -- * * Atomic read, bitwise OR with a value, write. 
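 *
 *      A minimal sketch (state is a hypothetical Atomic_uint32, and the
 *      flag name is illustrative):
 *
 *         #define FLAG_READY 0x1
 *         Atomic_Or(&state, FLAG_READY);   atomically sets bit 0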
* * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Or(Atomic_uint32 *var, // IN uint32 val) // IN { #ifdef __GNUC__ /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; orl %1, %0" # if VM_ASM_PLUS : "+m" (var->value) : "ri" (val) # else : "=m" (var->value) : "ri" (val), "0" (var->value) # endif : "cc" ); AtomicEpilogue(); #elif _MSC_VER #if defined(__x86_64__) _InterlockedOr((long *)&var->value, (long)val); #else __asm mov eax, val __asm mov ebx, var __asm lock or [ebx]Atomic_uint32.value, eax #endif #else #error No compiler defined for Atomic_Or #endif } #define Atomic_Or32 Atomic_Or #if defined(__x86_64__) /* *----------------------------------------------------------------------------- * * Atomic_Or64 -- * * Atomic read, bitwise OR with a value, write. * * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Or64(Atomic_uint64 *var, // IN uint64 val) // IN { #if defined(__GNUC__) /* Checked against the AMD manual and GCC --hpreg */ __asm__ __volatile__( "lock; orq %1, %0" : "+m" (var->value) : "ri" (val) : "cc" ); AtomicEpilogue(); #elif _MSC_VER _InterlockedOr64((__int64 *)&var->value, (__int64)val); #else #error No compiler defined for Atomic_Or64 #endif } #endif /* *----------------------------------------------------------------------------- * * Atomic_Xor -- * * Atomic read, bitwise XOR with a value, write. * * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Xor(Atomic_uint32 *var, // IN uint32 val) // IN { #ifdef __GNUC__ /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; xorl %1, %0" # if VM_ASM_PLUS : "+m" (var->value) : "ri" (val) # else : "=m" (var->value) : "ri" (val), "0" (var->value) # endif : "cc" ); AtomicEpilogue(); #elif _MSC_VER #if defined(__x86_64__) _InterlockedXor((long *)&var->value, (long)val); #else __asm mov eax, val __asm mov ebx, var __asm lock xor [ebx]Atomic_uint32.value, eax #endif #else #error No compiler defined for Atomic_Xor #endif } #define Atomic_Xor32 Atomic_Xor #if defined(__x86_64__) /* *----------------------------------------------------------------------------- * * Atomic_Xor64 -- * * Atomic read, bitwise XOR with a value, write. * * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Xor64(Atomic_uint64 *var, // IN uint64 val) // IN { #if defined(__GNUC__) /* Checked against the AMD manual and GCC --hpreg */ __asm__ __volatile__( "lock; xorq %1, %0" : "+m" (var->value) : "ri" (val) : "cc" ); AtomicEpilogue(); #elif _MSC_VER _InterlockedXor64((__int64 *)&var->value, (__int64)val); #else #error No compiler defined for Atomic_Xor64 #endif } #endif /* *----------------------------------------------------------------------------- * * Atomic_Add -- * * Atomic read, add a value, write. 
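 *
 *      A minimal sketch (bytesQueued is a hypothetical Atomic_uint32);
 *      use Atomic_ReadAdd32 instead when the pre-add value is needed:
 *
 *         Atomic_Add(&bytesQueued, len);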
* * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Add(Atomic_uint32 *var, // IN uint32 val) // IN { #ifdef __GNUC__ /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; addl %1, %0" # if VM_ASM_PLUS : "+m" (var->value) : "ri" (val) # else : "=m" (var->value) : "ri" (val), "0" (var->value) # endif : "cc" ); AtomicEpilogue(); #elif _MSC_VER >= 1310 _InterlockedExchangeAdd((long *)&var->value, (long)val); #elif _MSC_VER __asm mov eax, val __asm mov ebx, var __asm lock add [ebx]Atomic_uint32.value, eax #else #error No compiler defined for Atomic_Add #endif } #define Atomic_Add32 Atomic_Add #if defined(__x86_64__) /* *----------------------------------------------------------------------------- * * Atomic_Add64 -- * * Atomic read, add a value, write. * * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Add64(Atomic_uint64 *var, // IN uint64 val) // IN { #if defined(__GNUC__) /* Checked against the AMD manual and GCC --hpreg */ __asm__ __volatile__( "lock; addq %1, %0" : "+m" (var->value) : "ri" (val) : "cc" ); AtomicEpilogue(); #elif _MSC_VER _InterlockedExchangeAdd64((__int64 *)&var->value, (__int64)val); #else #error No compiler defined for Atomic_Add64 #endif } #endif /* *----------------------------------------------------------------------------- * * Atomic_Sub -- * * Atomic read, subtract a value, write. * * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Sub(Atomic_uint32 *var, // IN uint32 val) // IN { #ifdef __GNUC__ /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; subl %1, %0" # if VM_ASM_PLUS : "+m" (var->value) : "ri" (val) # else : "=m" (var->value) : "ri" (val), "0" (var->value) # endif : "cc" ); AtomicEpilogue(); #elif _MSC_VER >= 1310 _InterlockedExchangeAdd((long *)&var->value, (long)-val); #elif _MSC_VER __asm mov eax, val __asm mov ebx, var __asm lock sub [ebx]Atomic_uint32.value, eax #else #error No compiler defined for Atomic_Sub #endif } #define Atomic_Sub32 Atomic_Sub #if defined(__x86_64__) /* *----------------------------------------------------------------------------- * * Atomic_Sub64 -- * * Atomic read, subtract a value, write. * * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Sub64(Atomic_uint64 *var, // IN uint64 val) // IN { #ifdef __GNUC__ /* Checked against the AMD manual and GCC --hpreg */ __asm__ __volatile__( "lock; subq %1, %0" : "+m" (var->value) : "ri" (val) : "cc" ); AtomicEpilogue(); #elif _MSC_VER _InterlockedExchangeAdd64((__int64 *)&var->value, (__int64)-val); #else #error No compiler defined for Atomic_Sub64 #endif } #endif /* *----------------------------------------------------------------------------- * * Atomic_Inc -- * * Atomic read, increment, write. 
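 *
 *      Typical use is reference counting, e.g. Atomic_Inc(&refCount)
 *      with a hypothetical Atomic_uint32 refCount. The old value is
 *      not returned; use Atomic_FetchAndInc when it is needed.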
* * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Inc(Atomic_uint32 *var) // IN { #ifdef __GNUC__ /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; incl %0" # if VM_ASM_PLUS : "+m" (var->value) : # else : "=m" (var->value) : "0" (var->value) # endif : "cc" ); AtomicEpilogue(); #elif _MSC_VER >= 1310 _InterlockedIncrement((long *)&var->value); #elif _MSC_VER __asm mov ebx, var __asm lock inc [ebx]Atomic_uint32.value #else #error No compiler defined for Atomic_Inc #endif } #define Atomic_Inc32 Atomic_Inc #if defined(__x86_64__) /* *----------------------------------------------------------------------------- * * Atomic_Inc64 -- * * Atomic read, increment, write. * * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Inc64(Atomic_uint64 *var) // IN { #if defined(__GNUC__) /* Checked against the AMD manual and GCC --hpreg */ __asm__ __volatile__( "lock; incq %0" : "+m" (var->value) : : "cc" ); AtomicEpilogue(); #elif _MSC_VER _InterlockedIncrement64((__int64 *)&var->value); #else #error No compiler defined for Atomic_Inc64 #endif } #endif /* *----------------------------------------------------------------------------- * * Atomic_Dec -- * * Atomic read, decrement, write. * * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Dec(Atomic_uint32 *var) // IN { #ifdef __GNUC__ /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; decl %0" # if VM_ASM_PLUS : "+m" (var->value) : # else : "=m" (var->value) : "0" (var->value) # endif : "cc" ); AtomicEpilogue(); #elif _MSC_VER >= 1310 _InterlockedDecrement((long *)&var->value); #elif _MSC_VER __asm mov ebx, var __asm lock dec [ebx]Atomic_uint32.value #else #error No compiler defined for Atomic_Dec #endif } #define Atomic_Dec32 Atomic_Dec #if defined(__x86_64__) /* *----------------------------------------------------------------------------- * * Atomic_Dec64 -- * * Atomic read, decrement, write. * * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Dec64(Atomic_uint64 *var) // IN { #if defined(__GNUC__) /* Checked against the AMD manual and GCC --hpreg */ __asm__ __volatile__( "lock; decq %0" : "+m" (var->value) : : "cc" ); AtomicEpilogue(); #elif _MSC_VER _InterlockedDecrement64((__int64 *)&var->value); #else #error No compiler defined for Atomic_Dec64 #endif } #endif /* * Note that the technique below can be used to implement ReadX(), where X is * an arbitrary mathematical function. */ /* *----------------------------------------------------------------------------- * * Atomic_FetchAndOr -- * * Atomic read (returned), bitwise OR with a value, write. * * Results: * The value of the variable before the operation. * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint32 Atomic_FetchAndOr(Atomic_uint32 *var, // IN uint32 val) // IN { uint32 res; do { res = var->value; } while (res != Atomic_ReadIfEqualWrite(var, res, res | val)); return res; } /* *----------------------------------------------------------------------------- * * Atomic_FetchAndAnd -- * * Atomic read (returned), bitwise And with a value, write. 
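 *
 *      Illustrative pattern (pending and WORK_MASK are hypothetical):
 *      atomically clear a set of bits and act on the ones that were
 *      previously set:
 *
 *         uint32 old = Atomic_FetchAndAnd(&pending, ~WORK_MASK);
 *         if (old & WORK_MASK) {
 *            ... process the work recorded in 'old' ...
 *         }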
* * Results: * The value of the variable before the operation. * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint32 Atomic_FetchAndAnd(Atomic_uint32 *var, // IN uint32 val) // IN { uint32 res; do { res = var->value; } while (res != Atomic_ReadIfEqualWrite(var, res, res & val)); return res; } #define Atomic_ReadOr32 Atomic_FetchAndOr #if defined(__x86_64__) /* *----------------------------------------------------------------------------- * * Atomic_ReadOr64 -- * * Atomic read (returned), bitwise OR with a value, write. * * Results: * The value of the variable before the operation. * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint64 Atomic_ReadOr64(Atomic_uint64 *var, // IN uint64 val) // IN { uint64 res; do { res = var->value; } while (res != Atomic_ReadIfEqualWrite64(var, res, res | val)); return res; } #endif /* *----------------------------------------------------------------------------- * * Atomic_FetchAndAddUnfenced -- * * Atomic read (returned), add a value, write. * * If you have to implement FetchAndAdd() on an architecture other than * x86 or x86-64, you might want to consider doing something similar to * Atomic_FetchAndOr(). * * The "Unfenced" version of Atomic_FetchAndInc never executes * "lfence" after the interlocked operation. * * Results: * The value of the variable before the operation. * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint32 Atomic_FetchAndAddUnfenced(Atomic_uint32 *var, // IN uint32 val) // IN #ifdef __GNUC__ { /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( # if VM_ASM_PLUS "lock; xaddl %0, %1" : "=r" (val), "+m" (var->value) : "0" (val) : "cc" # else "lock; xaddl %0, (%1)" : "=r" (val) : "r" (&var->value), "0" (val) : "cc", "memory" # endif ); return val; } #elif _MSC_VER >= 1310 { return _InterlockedExchangeAdd((long *)&var->value, (long)val); } #elif _MSC_VER #pragma warning(push) #pragma warning(disable : 4035) // disable no-return warning { __asm mov eax, val __asm mov ebx, var __asm lock xadd [ebx]Atomic_uint32.value, eax } #pragma warning(pop) #else #error No compiler defined for Atomic_FetchAndAdd #endif #define Atomic_ReadAdd32 Atomic_FetchAndAdd /* *----------------------------------------------------------------------------- * * Atomic_FetchAndAdd -- * * Atomic read (returned), add a value, write. * * If you have to implement FetchAndAdd() on an architecture other than * x86 or x86-64, you might want to consider doing something similar to * Atomic_FetchAndOr(). * * Unlike "Unfenced" version, this one may execute the "lfence" after * interlocked operation. * * Results: * The value of the variable before the operation. * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint32 Atomic_FetchAndAdd(Atomic_uint32 *var, // IN uint32 val) // IN #ifdef __GNUC__ { val = Atomic_FetchAndAddUnfenced(var, val); AtomicEpilogue(); return val; } #else { return Atomic_FetchAndAddUnfenced(var, val); } #endif #if defined(__x86_64__) /* *----------------------------------------------------------------------------- * * Atomic_ReadAdd64 -- * * Atomic read (returned), add a value, write. * * Results: * The value of the variable before the operation. 
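 *
 *      Illustrative pattern (nextId is a hypothetical Atomic_uint64):
 *      the returned pre-add value hands each caller a private range,
 *
 *         uint64 first = Atomic_ReadAdd64(&nextId, count);
 *         ... caller now owns IDs first .. first + count - 1 ...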
* * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint64 Atomic_ReadAdd64(Atomic_uint64 *var, // IN uint64 val) // IN { #if defined(__GNUC__) /* Checked against the AMD manual and GCC --hpreg */ __asm__ __volatile__( "lock; xaddq %0, %1" : "=r" (val), "+m" (var->value) : "0" (val) : "cc" ); AtomicEpilogue(); return val; #elif _MSC_VER return _InterlockedExchangeAdd64((__int64 *)&var->value, (__int64)val); #else #error No compiler defined for Atomic_ReadAdd64 #endif } #endif /* *----------------------------------------------------------------------------- * * Atomic_FetchAndInc -- * * Atomic read (returned), increment, write. * * Results: * The value of the variable before the operation. * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint32 Atomic_FetchAndInc(Atomic_uint32 *var) // IN { return Atomic_FetchAndAdd(var, 1); } #define Atomic_ReadInc32 Atomic_FetchAndInc #if defined(__x86_64__) /* *----------------------------------------------------------------------------- * * Atomic_ReadInc64 -- * * Atomic read (returned), increment, write. * * Results: * The value of the variable before the operation. * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint64 Atomic_ReadInc64(Atomic_uint64 *var) // IN { return Atomic_ReadAdd64(var, 1); } #endif /* *----------------------------------------------------------------------------- * * Atomic_FetchAndDec -- * * Atomic read (returned), decrement, write. * * Results: * The value of the variable before the operation. * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint32 Atomic_FetchAndDec(Atomic_uint32 *var) // IN { return Atomic_FetchAndAdd(var, (uint32)-1); } #define Atomic_ReadDec32 Atomic_FetchAndDec #if defined(__x86_64__) /* *----------------------------------------------------------------------------- * * Atomic_ReadDec64 -- * * Atomic read (returned), decrement, write. * * Results: * The value of the variable before the operation. * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint64 Atomic_ReadDec64(Atomic_uint64 *var) // IN { return Atomic_ReadAdd64(var, CONST64U(-1)); } #endif /* * Usage of this helper struct is strictly reserved to the following * function. --hpreg */ typedef struct { uint32 lowValue; uint32 highValue; } S_uint64; /* *----------------------------------------------------------------------------- * * Atomic_CMPXCHG64 -- * * Compare exchange: Read variable, if equal to oldVal, write newVal * * XXX: Ensure that if this function is to be inlined by gcc, it is * compiled with -fno-strict-aliasing. Otherwise it will break. * Unfortunately we know that gcc 2.95.3 (used to build the FreeBSD 3.2 * Tools) does not honor -fno-strict-aliasing. As a workaround, we avoid * inlining the function entirely for versions of gcc under 3.0. 
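 *
 *      Callers typically use this in a retry loop, as Atomic_FetchAndAdd64
 *      later in this file does; sketched here with a hypothetical
 *      transform():
 *
 *         uint64 oldVal, newVal;
 *         do {
 *            oldVal = var->value;
 *            newVal = transform(oldVal);
 *         } while (!Atomic_CMPXCHG64(var, &oldVal, &newVal));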
* * Results: * TRUE if equal, FALSE if not equal * * Side effects: * None * *----------------------------------------------------------------------------- */ #if defined(__GNUC__) && __GNUC__ < 3 static Bool #else static INLINE Bool #endif Atomic_CMPXCHG64(Atomic_uint64 *var, // IN/OUT uint64 const *oldVal, // IN uint64 const *newVal) // IN #ifdef __GNUC__ { Bool equal; /* Checked against the Intel manual and GCC --walken */ #ifdef VMM64 uint64 dummy; __asm__ __volatile__( "lock; cmpxchgq %3, %0" "\n\t" "sete %1" : "+m" (*var), "=qm" (equal), "=a" (dummy) : "r" (*newVal), "2" (*oldVal) : "cc" ); #else /* 32-bit version */ int dummy1, dummy2; # if defined __PIC__ && !vm_x86_64 // %ebx is reserved by the compiler. # if defined __GNUC__ && __GNUC__ < 3 // Part of #188541 - for RHL 6.2 etc. __asm__ __volatile__( "xchg %%ebx, %6\n\t" "mov (%%ebx), %%ecx\n\t" "mov (%%ebx), %%ebx\n\t" "lock; cmpxchg8b (%3)\n\t" "xchg %%ebx, %6\n\t" "sete %0" : "=a" (equal), "=d" (dummy2), "=D" (dummy1) : "S" (var), "0" (((S_uint64 const *)oldVal)->lowValue), "1" (((S_uint64 const *)oldVal)->highValue), "D" (newVal) : "ecx", "cc", "memory" ); # else __asm__ __volatile__( "xchgl %%ebx, %6" "\n\t" // %3 is a register to make sure it cannot be %ebx-relative. "lock; cmpxchg8b (%3)" "\n\t" "xchgl %%ebx, %6" "\n\t" // Must come after restoring %ebx: %0 could be %ebx-relative. "sete %0" : "=qm" (equal), "=a" (dummy1), "=d" (dummy2) : "r" (var), "1" (((S_uint64 const *)oldVal)->lowValue), "2" (((S_uint64 const *)oldVal)->highValue), // Cannot use "m" here: 'newVal' is read-only. "r" (((S_uint64 const *)newVal)->lowValue), "c" (((S_uint64 const *)newVal)->highValue) : "cc", "memory" ); # endif # else __asm__ __volatile__( "lock; cmpxchg8b %0" "\n\t" "sete %1" # if VM_ASM_PLUS : "+m" (*var), # else : "=m" (*var), # endif "=qm" (equal), "=a" (dummy1), "=d" (dummy2) : "2" (((S_uint64 const *)oldVal)->lowValue), "3" (((S_uint64 const *)oldVal)->highValue), "b" (((S_uint64 const *)newVal)->lowValue), "c" (((S_uint64 const *)newVal)->highValue) : "cc" ); # endif #endif AtomicEpilogue(); return equal; } #elif _MSC_VER #if defined(__x86_64__) { return *oldVal == _InterlockedCompareExchange64((__int64 *)&var->value, (__int64)*newVal, (__int64)*oldVal); } #else #pragma warning(push) #pragma warning(disable : 4035) // disable no-return warning { __asm mov esi, var __asm mov edx, oldVal __asm mov ecx, newVal __asm mov eax, [edx]S_uint64.lowValue __asm mov edx, [edx]S_uint64.highValue __asm mov ebx, [ecx]S_uint64.lowValue __asm mov ecx, [ecx]S_uint64.highValue __asm lock cmpxchg8b [esi] __asm sete al __asm movzx eax, al // eax is the return value, this is documented to work - edward } #pragma warning(pop) #endif #else #error No compiler defined for Atomic_CMPXCHG64 #endif /* *----------------------------------------------------------------------------- * * Atomic_CMPXCHG32 -- * * Compare exchange: Read variable, if equal to oldVal, write newVal * * Results: * TRUE if equal, FALSE if not equal * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE Bool Atomic_CMPXCHG32(Atomic_uint32 *var, // IN/OUT uint32 oldVal, // IN uint32 newVal) // IN { #ifdef __GNUC__ Bool equal; uint32 dummy; __asm__ __volatile__( "lock; cmpxchgl %3, %0" "\n\t" "sete %1" # if VM_ASM_PLUS : "+m" (*var), "=qm" (equal), "=a" (dummy) : "r" (newVal), "2" (oldVal) # else : "=m" (*var), "=qm" (equal), "=a" (dummy) : /*"0" (*var), */ "r" (newVal), "2" (oldVal) # endif : "cc" ); AtomicEpilogue(); return 
equal; #else return (Atomic_ReadIfEqualWrite(var, oldVal, newVal) == oldVal); #endif } /* *----------------------------------------------------------------------------- * * Atomic_Read64 -- * * Read and return. * * Results: * The value of the atomic variable. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE uint64 Atomic_Read64(Atomic_uint64 const *var) // IN #if defined(__x86_64__) { return var->value; } #elif defined(__GNUC__) && defined(__i386__) /* GCC on x86 */ { uint64 value; /* * Since cmpxchg8b will replace the contents of EDX:EAX with the * value in memory if there is no match, we need only execute the * instruction once in order to atomically read 64 bits from * memory. The only constraint is that ECX:EBX must have the same * value as EDX:EAX so that if the comparison succeeds. We * intentionally don't tell gcc that we are using ebx and ecx as we * don't modify them and do not care what value they store. */ __asm__ __volatile__( "mov %%ebx, %%eax" "\n\t" "mov %%ecx, %%edx" "\n\t" "lock; cmpxchg8b %1" : "=&A" (value) : "m" (*var) : "cc" ); AtomicEpilogue(); return value; } #elif _MSC_VER /* MSC (assume on x86 for now) */ # pragma warning(push) # pragma warning(disable : 4035) // disable no-return warning { __asm mov ecx, var __asm mov edx, ecx __asm mov eax, ebx __asm lock cmpxchg8b [ecx] // edx:eax is the return value; this is documented to work. --mann } # pragma warning(pop) #else # error No compiler defined for Atomic_Read64 #endif /* *---------------------------------------------------------------------- * * Atomic_FetchAndAdd64 -- * * Atomically adds a 64-bit integer to another * * Results: * Returns the old value just prior to the addition * * Side effects: * None * *---------------------------------------------------------------------- */ static INLINE uint64 Atomic_FetchAndAdd64(Atomic_uint64 *var, // IN/OUT uint64 addend) // IN { uint64 oldVal; uint64 newVal; do { oldVal = var->value; newVal = oldVal + addend; } while (!Atomic_CMPXCHG64(var, &oldVal, &newVal)); return oldVal; } /* *---------------------------------------------------------------------- * * Atomic_FetchAndInc64 -- * * Atomically increments a 64-bit integer * * Results: * Returns the old value just prior to incrementing * * Side effects: * None * *---------------------------------------------------------------------- */ static INLINE uint64 Atomic_FetchAndInc64(Atomic_uint64 *var) // IN/OUT { return Atomic_FetchAndAdd64(var, 1); } /* *---------------------------------------------------------------------- * * Atomic_FetchAndDec64 -- * * Atomically decrements a 64-bit integer * * Results: * Returns the old value just prior to decrementing * * Side effects: * None * *---------------------------------------------------------------------- */ static INLINE uint64 Atomic_FetchAndDec64(Atomic_uint64 *var) // IN/OUT { uint64 oldVal; uint64 newVal; do { oldVal = var->value; newVal = oldVal - 1; } while (!Atomic_CMPXCHG64(var, &oldVal, &newVal)); return oldVal; } /* *----------------------------------------------------------------------------- * * Atomic_ReadWrite64 -- * * Read followed by write * * Results: * The value of the atomic variable before the write. 
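 *
 *      Illustrative handoff (slot and token are hypothetical): the
 *      exchange both publishes the new value and tells the caller what
 *      it displaced:
 *
 *         uint64 prev = Atomic_ReadWrite64(&slot, token);
 *         if (prev != 0) {
 *            ... dispose of the displaced value ...
 *         }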
* * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint64 Atomic_ReadWrite64(Atomic_uint64 *var, // IN uint64 val) // IN { #if defined(__x86_64__) #if defined(__GNUC__) /* Checked against the AMD manual and GCC --hpreg */ __asm__ __volatile__( "xchgq %0, %1" : "=r" (val), "+m" (var->value) : "0" (val) ); AtomicEpilogue(); return val; #elif _MSC_VER return _InterlockedExchange64((__int64 *)&var->value, (__int64)val); #else #error No compiler defined for Atomic_ReadWrite64 #endif #else uint64 oldVal; do { oldVal = var->value; } while (!Atomic_CMPXCHG64(var, &oldVal, &val)); return oldVal; #endif } /* *----------------------------------------------------------------------------- * * Atomic_Write64 -- * * Write * * Results: * None. * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Write64(Atomic_uint64 *var, // IN uint64 val) // IN { #if defined(__x86_64__) var->value = val; #else (void)Atomic_ReadWrite64(var, val); #endif } /* * Template code for the Atomic_ type and its operators. * * The cast argument is an intermedia type cast to make some * compilers stop complaining about casting uint32 <-> void *, * even though we only do it in the 32-bit case so they are always * the same size. So for val of type uint32, instead of * (void *)val, we have (void *)(uintptr_t)val. * The specific problem case is the Windows ddk compiler * (as used by the SVGA driver). -- edward */ #define MAKE_ATOMIC_TYPE(name, size, in, out, cast) \ typedef Atomic_uint ## size Atomic_ ## name; \ \ \ static INLINE out \ Atomic_Read ## name(Atomic_ ## name const *var) \ { \ return (out)(cast)Atomic_Read ## size(var); \ } \ \ \ static INLINE void \ Atomic_Write ## name(Atomic_ ## name *var, \ in val) \ { \ Atomic_Write ## size(var, (uint ## size)(cast)val); \ } \ \ \ static INLINE out \ Atomic_ReadWrite ## name(Atomic_ ## name *var, \ in val) \ { \ return (out)(cast)Atomic_ReadWrite ## size(var, \ (uint ## size)(cast)val); \ } \ \ \ static INLINE out \ Atomic_ReadIfEqualWrite ## name(Atomic_ ## name *var, \ in oldVal, \ in newVal) \ { \ return (out)(cast)Atomic_ReadIfEqualWrite ## size(var, \ (uint ## size)(cast)oldVal, (uint ## size)(cast)newVal); \ } \ \ \ static INLINE void \ Atomic_And ## name(Atomic_ ## name *var, \ in val) \ { \ Atomic_And ## size(var, (uint ## size)(cast)val); \ } \ \ \ static INLINE void \ Atomic_Or ## name(Atomic_ ## name *var, \ in val) \ { \ Atomic_Or ## size(var, (uint ## size)(cast)val); \ } \ \ \ static INLINE void \ Atomic_Xor ## name(Atomic_ ## name *var, \ in val) \ { \ Atomic_Xor ## size(var, (uint ## size)(cast)val); \ } \ \ \ static INLINE void \ Atomic_Add ## name(Atomic_ ## name *var, \ in val) \ { \ Atomic_Add ## size(var, (uint ## size)(cast)val); \ } \ \ \ static INLINE void \ Atomic_Sub ## name(Atomic_ ## name *var, \ in val) \ { \ Atomic_Sub ## size(var, (uint ## size)(cast)val); \ } \ \ \ static INLINE void \ Atomic_Inc ## name(Atomic_ ## name *var) \ { \ Atomic_Inc ## size(var); \ } \ \ \ static INLINE void \ Atomic_Dec ## name(Atomic_ ## name *var) \ { \ Atomic_Dec ## size(var); \ } \ \ \ static INLINE out \ Atomic_ReadOr ## name(Atomic_ ## name *var, \ in val) \ { \ return (out)(cast)Atomic_ReadOr ## size(var, (uint ## size)(cast)val); \ } \ \ \ static INLINE out \ Atomic_ReadAdd ## name(Atomic_ ## name *var, \ in val) \ { \ return (out)(cast)Atomic_ReadAdd ## size(var, (uint ## size)(cast)val); \ } \ \ \ static INLINE out 
\ Atomic_ReadInc ## name(Atomic_ ## name *var) \ { \ return (out)(cast)Atomic_ReadInc ## size(var); \ } \ \ \ static INLINE out \ Atomic_ReadDec ## name(Atomic_ ## name *var) \ { \ return (out)(cast)Atomic_ReadDec ## size(var); \ } /* * Since we use a macro to generate these definitions, it is hard to look for * them. So DO NOT REMOVE THIS COMMENT and keep it up-to-date. --hpreg * * Atomic_Ptr * Atomic_ReadPtr -- * Atomic_WritePtr -- * Atomic_ReadWritePtr -- * Atomic_ReadIfEqualWritePtr -- * Atomic_AndPtr -- * Atomic_OrPtr -- * Atomic_XorPtr -- * Atomic_AddPtr -- * Atomic_SubPtr -- * Atomic_IncPtr -- * Atomic_DecPtr -- * Atomic_ReadOrPtr -- * Atomic_ReadAddPtr -- * Atomic_ReadIncPtr -- * Atomic_ReadDecPtr -- * * Atomic_Int * Atomic_ReadInt -- * Atomic_WriteInt -- * Atomic_ReadWriteInt -- * Atomic_ReadIfEqualWriteInt -- * Atomic_AndInt -- * Atomic_OrInt -- * Atomic_XorInt -- * Atomic_AddInt -- * Atomic_SubInt -- * Atomic_IncInt -- * Atomic_DecInt -- * Atomic_ReadOrInt -- * Atomic_ReadAddInt -- * Atomic_ReadIncInt -- * Atomic_ReadDecInt -- */ #if defined(__x86_64__) MAKE_ATOMIC_TYPE(Ptr, 64, void const *, void *, uintptr_t) MAKE_ATOMIC_TYPE(Int, 64, int, int, int) #else MAKE_ATOMIC_TYPE(Ptr, 32, void const *, void *, uintptr_t) MAKE_ATOMIC_TYPE(Int, 32, int, int, int) #endif /* *----------------------------------------------------------------------------- * * Atomic_MFence -- * * Implements mfence in terms of a lock xor. The reason for implementing * our own mfence is that not all of our supported cpus have an assembly * mfence (P3, Athlon). We put it here to avoid duplicating code which is * also why it is prefixed with "Atomic_". * * Results: * None. * * Side effects: * Cause loads and stores prior to this to be globally * visible. * *----------------------------------------------------------------------------- */ static INLINE void Atomic_MFence(void) { Atomic_uint32 fence; Atomic_Xor(&fence, 0x1); } #endif // ifndef _ATOMIC_H_ vmci-only/includeCheck.h0000444000000000000000000001024612025726724014233 0ustar rootroot/********************************************************* * Copyright (C) 1998 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * includeCheck.h -- * * Restrict include file use. * * In every .h file, define one or more of these * * INCLUDE_ALLOW_VMX * INCLUDE_ALLOW_USERLEVEL * INCLUDE_ALLOW_VMMEXT * INCLUDE_ALLOW_VMCORE * INCLUDE_ALLOW_MODULE * INCLUDE_ALLOW_VMNIXMOD * INCLUDE_ALLOW_VMKERNEL * INCLUDE_ALLOW_DISTRIBUTE * INCLUDE_ALLOW_VMK_MODULE * INCLUDE_ALLOW_VMKDRIVERS * INCLUDE_ALLOW_VMIROM * * Then include this file. * * Any file that has INCLUDE_ALLOW_DISTRIBUTE defined will potentially * be distributed in source form along with GPLed code. Ensure * that this is acceptable. */ /* * Declare a VMCORE-only variable to help classify object * files. 
The variable goes in the common block and does * not create multiple definition link-time conflicts. */ #if defined VMCORE && defined VMX86_DEVEL && defined VMX86_DEBUG && \ defined linux && !defined MODULE && \ !defined COMPILED_WITH_VMCORE #define COMPILED_WITH_VMCORE compiled_with_vmcore #ifdef ASM .comm compiled_with_vmcore, 0 #else asm(".comm compiled_with_vmcore, 0"); #endif /* ASM */ #endif #if defined VMCORE && \ !(defined VMX86_VMX || defined VMM || \ defined MONITOR_APP || defined VMMON) #error "Makefile problem: VMCORE without VMX86_VMX or \ VMM or MONITOR_APP or MODULE." #endif #if defined VMCORE && !defined INCLUDE_ALLOW_VMCORE #error "The surrounding include file is not allowed in vmcore." #endif #undef INCLUDE_ALLOW_VMCORE #if defined VMX86_VMX && !defined VMCORE && \ !(defined INCLUDE_ALLOW_VMX || defined INCLUDE_ALLOW_USERLEVEL) #error "The surrounding include file is not allowed in the VMX." #endif #undef INCLUDE_ALLOW_VMX #if defined USERLEVEL && !defined VMX86_VMX && !defined VMCORE && \ !defined INCLUDE_ALLOW_USERLEVEL #error "The surrounding include file is not allowed at userlevel." #endif #undef INCLUDE_ALLOW_USERLEVEL #if defined VMM && !defined VMCORE && \ !defined INCLUDE_ALLOW_VMMEXT #error "The surrounding include file is not allowed in the monitor." #endif #undef INCLUDE_ALLOW_VMMEXT #if defined MODULE && !defined VMKERNEL_MODULE && !defined VMNIXMOD && \ !defined VMMON && !defined INCLUDE_ALLOW_MODULE #error "The surrounding include file is not allowed in driver modules." #endif #undef INCLUDE_ALLOW_MODULE #if defined VMMON && !defined INCLUDE_ALLOW_VMMON #error "The surrounding include file is not allowed in vmmon." #endif #undef INCLUDE_ALLOW_VMMON #if defined VMKERNEL && !defined INCLUDE_ALLOW_VMKERNEL #error "The surrounding include file is not allowed in the vmkernel." #endif #undef INCLUDE_ALLOW_VMKERNEL #if defined GPLED_CODE && !defined INCLUDE_ALLOW_DISTRIBUTE #error "The surrounding include file is not allowed in GPL code." #endif #undef INCLUDE_ALLOW_DISTRIBUTE #if defined VMKERNEL_MODULE && !defined VMKERNEL && \ !defined INCLUDE_ALLOW_VMK_MODULE && !defined INCLUDE_ALLOW_VMKDRIVERS #error "The surrounding include file is not allowed in vmkernel modules." #endif #undef INCLUDE_ALLOW_VMK_MODULE #undef INCLUDE_ALLOW_VMKDRIVERS #if defined VMNIXMOD && !defined INCLUDE_ALLOW_VMNIXMOD #ifndef VMNIXMOD_VM #error "The surrounding include file is not allowed in vmnixmod." #endif #endif #undef INCLUDE_ALLOW_VMNIXMOD #if defined VMIROM && ! defined INCLUDE_ALLOW_VMIROM #error "The surrounding include file is not allowed in vmirom." #endif #undef INCLUDE_ALLOW_VMIROM vmci-only/vmware_pack_init.h0000444000000000000000000000364412025726724015200 0ustar rootroot/********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __VMWARE_PACK_INIT_H__ # define __VMWARE_PACK_INIT_H__ /* * vmware_pack_init.h -- * * Platform-independent code to make the compiler pack (i.e. have them * occupy the smallest possible space) structure definitions. The following * constructs are known to work --hpreg * * #include "vmware_pack_begin.h" * struct foo { * ... * } * #include "vmware_pack_end.h" * ; * * typedef * #include "vmware_pack_begin.h" * struct foo { * ... * } * #include "vmware_pack_end.h" * foo; */ #ifdef _MSC_VER /* * MSVC 6.0 emits warning 4103 when the pack push and pop pragma pairing is * not balanced within 1 included file. That is annoying because our scheme * is based on the pairing being balanced between 2 included files. * * So we disable this warning, but this is safe because the compiler will also * emit warning 4161 when there is more pops than pushes within 1 main * file --hpreg */ # pragma warning(disable:4103) #elif __GNUC__ #else # error Compiler packing... #endif #endif /* __VMWARE_PACK_INIT_H__ */ vmci-only/vmware_pack_begin.h0000444000000000000000000000244412025726724015316 0ustar rootroot/********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmware_pack_begin.h -- * * Begin of structure packing. See vmware_pack_init.h for details. * * Note that we do not use the following construct in this include file, * because we want to emit the code every time the file is included --hpreg * * #ifndef foo * # define foo * ... * #endif * */ #include "vmware_pack_init.h" #ifdef _MSC_VER # pragma pack(push, 1) #elif __GNUC__ #else # error Compiler packing... #endif vmci-only/vmware_pack_end.h0000444000000000000000000000247012025726724014777 0ustar rootroot/********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmware_pack_end.h -- * * End of structure packing. See vmware_pack_init.h for details. * * Note that we do not use the following construct in this include file, * because we want to emit the code every time the file is included --hpreg * * #ifndef foo * # define foo * ... * #endif * */ #include "vmware_pack_init.h" #ifdef _MSC_VER # pragma pack(pop) #elif __GNUC__ __attribute__((__packed__)) #else # error Compiler packing... #endif vmci-only/circList.h0000444000000000000000000002400112025726724013420 0ustar rootroot/********************************************************* * Copyright (C) 1998 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * circList.h -- * * macros, prototypes and struct definitions for double-linked * circular lists. */ #ifndef _CIRCLIST_H_ #define _CIRCLIST_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMKERNEL #include "includeCheck.h" #include "vmware.h" typedef struct ListItem { struct ListItem *prev; struct ListItem *next; } ListItem; /* A list with no elements is a null pointer. */ #define LIST_ITEM_DEF(name) \ ListItem * name = NULL #define LIST_EMPTY(l) ((l) == NULL) /* initialize list item */ #define INIT_LIST_ITEM(p) \ do { \ (p)->prev = (p)->next = (p); \ } while (0) /* check if initialized */ #define IS_LIST_ITEM_INITIALIZED(li) \ (((li) == (li)->prev) && ((li) == (li)->next)) /* return first element in the list */ #define LIST_FIRST(l) (l) #define LIST_FIRST_CHK(l) (l) /* return last element in the list */ #define LIST_LAST(l) ((l)->prev) #define LIST_LAST_CHK(l) (LIST_EMPTY(l) ? NULL : LIST_LAST(l)) /* * LIST_CONTAINER - get the struct for this entry (like list_entry) * @ptr: the &struct ListItem pointer. * @type: the type of the struct this is embedded in. * @member: the name of the list struct within the struct. */ #define LIST_CONTAINER(ptr, type, member) \ ((type *)((char *)(ptr) - offsetof(type, member))) /* * delete item from the list */ #define LIST_DEL DelListItem /* * link two lists together */ #define LIST_SPLICE SpliceLists /* * Split a list into two lists */ #define LIST_SPLIT SplitLists /* * Add item to front of stack. List pointer points to new head. */ #define LIST_PUSH PushListItem /* * Add item at back of queue. List pointer only changes if list was empty. */ #define LIST_QUEUE QueueListItem /* * Get the list size. */ #define LIST_SIZE GetListSize /* * LIST_SCAN_FROM scans the list from "from" up until "until". * The loop variable p should not be destroyed in the process. 
* "from" is an element in the list where to start scanning. * "until" is the element where search should stop. * member is the field to use for the search - either "next" or "prev". */ #define LIST_SCAN_FROM(p, from, until, member) \ for (p = (from); (p) != NULL; \ (p) = (((p)->member == (until)) ? NULL : (p)->member)) /* scan the entire list (non-destructively) */ #define LIST_SCAN(p, l) \ LIST_SCAN_FROM(p, LIST_FIRST(l), LIST_FIRST(l), next) /* scan a list backward from last element to first (non-destructively) */ #define LIST_SCAN_BACK(p, l) \ LIST_SCAN_FROM(p, LIST_LAST_CHK(l), LIST_LAST(l), prev) /* scan the entire list where loop element may be destroyed */ #define LIST_SCAN_SAFE(p, pn, l) \ if (!LIST_EMPTY(l)) \ for (p = (l), (pn) = NextListItem(p, l); (p) != NULL; \ (p) = (pn), (pn) = NextListItem(p, l)) /* scan the entire list backwards where loop element may be destroyed */ #define LIST_SCAN_BACK_SAFE(p, pn, l) \ if (!LIST_EMPTY(l)) \ for (p = LIST_LAST(l), (pn) = PrevListItem(p, l); (p) != NULL; \ (p) = (pn), (pn) = PrevListItem(p, l)) /* function definitions */ /* *---------------------------------------------------------------------- * * NextListItem -- * * Returns the next member of a doubly linked list, or NULL if last. * Assumes: p is member of the list headed by head. * * Result * If head or p is NULL, return NULL. Otherwise, * next list member (or null if last). * * Side effects: * None. * *---------------------------------------------------------------------- */ static INLINE ListItem * NextListItem(ListItem *p, // IN ListItem *head) // IN { if (head == NULL || p == NULL) { return NULL; } /* both p and head are non-null */ p = p->next; return p == head ? NULL : p; } /* *---------------------------------------------------------------------- * * PrevListItem -- * * Returns the prev member of a doubly linked list, or NULL if first. * Assumes: p is member of the list headed by head. * * Result * If head or prev is NULL, return NULL. Otherwise, * prev list member (or null if first). * * Side effects: * None. * *---------------------------------------------------------------------- */ static INLINE ListItem * PrevListItem(ListItem *p, // IN ListItem *head) // IN { if (head == NULL || p == NULL) { return NULL; } /* both p and head are non-null */ return p == head ? NULL : p->prev; } /* *---------------------------------------------------------------------- * * DelListItem -- * * Deletes a member of a doubly linked list, possibly modifies the * list header itself. * Assumes neither p nor headp is null and p is a member of *headp. * * Result * None * * Side effects: * Modifies *headp. * *---------------------------------------------------------------------- */ static INLINE void DelListItem(ListItem *p, // IN ListItem **headp) // IN/OUT { ListItem *next; ASSERT(p); ASSERT(headp); next = p->next; if (p == next) { *headp = NULL; } else { next->prev = p->prev; p->prev->next = next; if (*headp == p) { *headp = next; } } } /* *---------------------------------------------------------------------- * * QueueListItem -- * * Adds a new member to the back of a doubly linked list (queue) * Assumes neither p nor headp is null and p is not a member of *headp. * * Result * None * * Side effects: * Modifies *headp. 
* *---------------------------------------------------------------------- */ static INLINE void QueueListItem(ListItem *p, // IN ListItem **headp) // IN/OUT { ListItem *head; head = *headp; if (LIST_EMPTY(head)) { INIT_LIST_ITEM(p); *headp = p; } else { p->prev = head->prev; p->next = head; p->prev->next = p; head->prev = p; } } /* *---------------------------------------------------------------------- * * PushListItem -- * * Adds a new member to the front of a doubly linked list (stack) * Assumes neither p nor headp is null and p is not a member of *headp. * * Result * None * * Side effects: * Modifies *headp. * *---------------------------------------------------------------------- */ static INLINE void PushListItem(ListItem *p, // IN ListItem **headp) // IN/OUT { QueueListItem(p, headp); *headp = p; } /* *---------------------------------------------------------------------- * * SpliceLists -- * * Make a single list {l1 l2} from {l1} and {l2} and return it. * It is okay for one or both lists to be NULL. * No checking is done. It is assumed that l1 and l2 are two * distinct lists. * * Result * A list { l1 l2 }. * * Side effects: * Modifies l1 and l2 list pointers. * *---------------------------------------------------------------------- */ static INLINE ListItem * SpliceLists(ListItem *l1, // IN ListItem *l2) // IN { ListItem *l1Last, *l2Last; if (LIST_EMPTY(l1)) { return l2; } if (LIST_EMPTY(l2)) { return l1; } l1Last = l1->prev; /* last elem of l1 */ l2Last = l2->prev; /* last elem of l2 */ /* * l1 -> ... -> l1Last l2 -> ... l2Last */ l1Last->next = l2; l2->prev = l1Last; l1->prev = l2Last; l2Last->next = l1; return l1; } /* *---------------------------------------------------------------------- * * SplitLists -- * * Make a list l = {l1 l2} into two separate lists {l1} and {l2}, where: * l = { ... x -> p -> ... } split into: * l1 = { ... -> x } * l2 = { p -> ... } * Assumes neither p nor l is null and p is a member of l. * If p is the first element of l, then l1 will be NULL. * * Result * None. * * Side effects: * Sets *l1p and *l2p to the resulting two lists. * Modifies l's pointers. * *---------------------------------------------------------------------- */ static INLINE void SplitLists(ListItem *p, // IN ListItem *l, // IN ListItem **l1p, // OUT ListItem **l2p) // OUT { ListItem *last; if (p == LIST_FIRST(l)) { /* first element */ *l1p = NULL; *l2p = l; return; } last = l->prev; *l1p = l; p->prev->next = l; l->prev = p->prev; *l2p = p; p->prev = last; last->next = p; } /* *---------------------------------------------------------------------- * * GetListSize -- * * Return the number of items in the list. * * Result: * The number of items in the list. * * Side effects: * None. * *---------------------------------------------------------------------- */ static INLINE int GetListSize(ListItem *head) // IN { ListItem *li; int ret = 0; LIST_SCAN(li, head) { ret++; } return ret; } #endif /* _CIRCLIST_H_ */ vmci-only/pgtbl.h0000444000000000000000000002232612025726724012764 0ustar rootroot/********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __PGTBL_H__ # define __PGTBL_H__ #include "compat_highmem.h" #include "compat_pgtable.h" #include "compat_spinlock.h" #include "compat_page.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 11) # define compat_active_mm mm #else # define compat_active_mm active_mm #endif /* *----------------------------------------------------------------------------- * * PgtblPte2MPN -- * * Returns the page structure associated to a Page Table Entry. * * This function is not allowed to schedule() because it can be called while * holding a spinlock --hpreg * * Results: * INVALID_MPN on failure * mpn on success * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE MPN PgtblPte2MPN(pte_t *pte) // IN { if (pte_present(*pte) == 0) { return INVALID_MPN; } return pte_pfn(*pte); } /* *----------------------------------------------------------------------------- * * PgtblPte2Page -- * * Returns the page structure associated to a Page Table Entry. * * This function is not allowed to schedule() because it can be called while * holding a spinlock --hpreg * * Results: * The page structure if the page table entry points to a physical page * NULL if the page table entry does not point to a physical page * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE struct page * PgtblPte2Page(pte_t *pte) // IN { if (pte_present(*pte) == 0) { return NULL; } return compat_pte_page(*pte); } /* *----------------------------------------------------------------------------- * * PgtblPGD2PTELocked -- * * Walks through the hardware page tables to try to find the pte * associated to a virtual address. * * Results: * pte. Caller must call pte_unmap if valid pte returned. * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE pte_t * PgtblPGD2PTELocked(compat_pgd_t *pgd, // IN: PGD to start with VA addr) // IN: Address in the virtual address // space of that process { compat_pud_t *pud; pmd_t *pmd; pte_t *pte; if (compat_pgd_present(*pgd) == 0) { return NULL; } pud = compat_pud_offset(pgd, addr); if (compat_pud_present(*pud) == 0) { return NULL; } pmd = pmd_offset_map(pud, addr); if (pmd_present(*pmd) == 0) { pmd_unmap(pmd); return NULL; } pte = pte_offset_map(pmd, addr); pmd_unmap(pmd); return pte; } /* *----------------------------------------------------------------------------- * * PgtblVa2PTELocked -- * * Walks through the hardware page tables to try to find the pte * associated to a virtual address. * * Results: * pte. Caller must call pte_unmap if valid pte returned. * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE pte_t * PgtblVa2PTELocked(struct mm_struct *mm, // IN: Mm structure of a process VA addr) // IN: Address in the virtual address // space of that process { return PgtblPGD2PTELocked(compat_pgd_offset(mm, addr), addr); } /* *----------------------------------------------------------------------------- * * PgtblVa2MPNLocked -- * * Retrieve MPN for a given va. * * Caller must call pte_unmap if valid pte returned. 
The mm->page_table_lock * must be held, so this function is not allowed to schedule() --hpreg * * Results: * INVALID_MPN on failure * mpn on success * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE MPN PgtblVa2MPNLocked(struct mm_struct *mm, // IN: Mm structure of a process VA addr) // IN: Address in the virtual address { pte_t *pte; pte = PgtblVa2PTELocked(mm, addr); if (pte != NULL) { MPN mpn = PgtblPte2MPN(pte); pte_unmap(pte); return mpn; } return INVALID_MPN; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) /* *----------------------------------------------------------------------------- * * PgtblKVa2MPNLocked -- * * Retrieve MPN for a given kernel va. * * Caller must call pte_unmap if valid pte returned. The mm->page_table_lock * must be held, so this function is not allowed to schedule() --hpreg * * Results: * INVALID_MPN on failure * mpn on success * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE MPN PgtblKVa2MPNLocked(struct mm_struct *mm, // IN: Mm structure of a caller VA addr) // IN: Address in the virtual address { pte_t *pte; pte = PgtblPGD2PTELocked(compat_pgd_offset_k(mm, addr), addr); if (pte != NULL) { MPN mpn = PgtblPte2MPN(pte); pte_unmap(pte); return mpn; } return INVALID_MPN; } #endif /* *----------------------------------------------------------------------------- * * PgtblVa2PageLocked -- * * Return the "page" struct for a given va. * * Results: * struct page or NULL. The mm->page_table_lock must be held, so this * function is not allowed to schedule() --hpreg * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE struct page * PgtblVa2PageLocked(struct mm_struct *mm, // IN: Mm structure of a process VA addr) // IN: Address in the virtual address { pte_t *pte; pte = PgtblVa2PTELocked(mm, addr); if (pte != NULL) { struct page *page = PgtblPte2Page(pte); pte_unmap(pte); return page; } else { return NULL; } } /* *----------------------------------------------------------------------------- * * PgtblVa2MPN -- * * Walks through the hardware page tables of the current process to try to * find the page structure associated to a virtual address. * * Results: * Same as PgtblVa2MPNLocked() * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE int PgtblVa2MPN(VA addr) // IN { struct mm_struct *mm; MPN mpn; /* current->mm is NULL for kernel threads, so use active_mm. */ mm = current->compat_active_mm; if (compat_get_page_table_lock(mm)) { spin_lock(compat_get_page_table_lock(mm)); } mpn = PgtblVa2MPNLocked(mm, addr); if (compat_get_page_table_lock(mm)) { spin_unlock(compat_get_page_table_lock(mm)); } return mpn; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) /* *----------------------------------------------------------------------------- * * PgtblKVa2MPN -- * * Walks through the hardware page tables of the current process to try to * find the page structure associated to a virtual address. 
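 *
 * E.g., resolving the machine page backing a kernel buffer
 * (illustrative sketch; 'buf' is a hypothetical kernel VA):
 *
 *    MPN mpn = PgtblKVa2MPN((VA)buf);
 *    if (mpn == INVALID_MPN) {
 *       ...   -- no machine page behind this address
 *    }
 *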
* * Results: * Same as PgtblVa2MPNLocked() * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE int PgtblKVa2MPN(VA addr) // IN { struct mm_struct *mm; MPN mpn; mm = current->compat_active_mm; if (compat_get_page_table_lock(mm)) { spin_lock(compat_get_page_table_lock(mm)); } mpn = PgtblKVa2MPNLocked(mm, addr); if (compat_get_page_table_lock(mm)) { spin_unlock(compat_get_page_table_lock(mm)); } return mpn; } #endif /* *----------------------------------------------------------------------------- * * PgtblVa2Page -- * * Walks through the hardware page tables of the current process to try to * find the page structure associated to a virtual address. * * Results: * Same as PgtblVa2PageLocked() * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE struct page * PgtblVa2Page(VA addr) // IN { struct mm_struct *mm; struct page *page; mm = current->compat_active_mm; if (compat_get_page_table_lock(mm)) { spin_lock(compat_get_page_table_lock(mm)); } page = PgtblVa2PageLocked(mm, addr); if (compat_get_page_table_lock(mm)) { spin_unlock(compat_get_page_table_lock(mm)); } return page; } #endif /* __PGTBL_H__ */ vmci-only/compat_completion.h0000444000000000000000000001371012025726724015365 0ustar rootroot/********************************************************* * Copyright (C) 2004 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_COMPLETION_H__ # define __COMPAT_COMPLETION_H__ /* * The kernel's completion objects were made available for module use in 2.4.9. * * Between 2.4.0 and 2.4.9, we implement completions on our own using * waitqueues and counters. This was done so that we could safely support * functions like complete_all(), which cannot be implemented using semaphores. * * Prior to that, the waitqueue API is substantially different, and since none * of our modules that are built against older kernels need complete_all(), * we fallback on a simple semaphore-based implementation. */ /* * Native completions. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 9) #include #define compat_completion struct completion #define compat_init_completion(comp) init_completion(comp) #define COMPAT_DECLARE_COMPLETION DECLARE_COMPLETION #define compat_wait_for_completion(comp) wait_for_completion(comp) #define compat_complete(comp) complete(comp) /* complete_all() was exported in 2.6.6. 
*/ # if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 6) # include "compat_wait.h" # include "compat_list.h" # include "compat_spinlock.h" # include "compat_sched.h" # define compat_complete_all(x) \ ({ \ struct list_head *currLinks; \ spin_lock(&(x)->wait.lock); \ (x)->done += UINT_MAX/2; \ \ list_for_each(currLinks, &(x)->wait.task_list) { \ wait_queue_t *currQueue = list_entry(currLinks, wait_queue_t, task_list); \ wake_up_process(currQueue->task); \ } \ spin_unlock(&(x)->wait.lock); \ }) # else # define compat_complete_all complete_all # endif /* * Completions via waitqueues. */ #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0) /* * Kernel completions in 2.4.9 and beyond use a counter and a waitqueue, and * our implementation is quite similar. Because __wake_up_common() is not * exported, our implementations of compat_complete() and compat_complete_all() * are somewhat racy: the counter is incremented outside of the waitqueue's * lock. * * As a result, our completion cannot guarantee in-order wake ups. For example, * suppose thread A is entering compat_complete(), thread B is sleeping inside * compat_wait_for_completion(), and thread C is just now entering * compat_wait_for_completion(). If Thread A is scheduled first and increments * the counter, then gets swapped out, thread C may get scheduled and will * quickly go through compat_wait_for_completion() (since done != 0) while * thread B continues to sleep, even though thread B should have been the one * to wake up. */ #include #include "compat_sched.h" #include "compat_list.h" #include // for lock_kernel()/unlock_kernel() #include "compat_wait.h" typedef struct compat_completion { unsigned int done; wait_queue_head_t wq; } compat_completion; #define compat_init_completion(comp) do { \ (comp)->done = 0; \ init_waitqueue_head(&(comp)->wq); \ } while (0) #define COMPAT_DECLARE_COMPLETION(comp) \ compat_completion comp = { \ .done = 0, \ .wq = __WAIT_QUEUE_HEAD_INITIALIZER((comp).wq), \ } /* * Locking and unlocking the kernel lock here ensures that the thread * is no longer running in module code: compat_complete_and_exit * performs the sequence { lock_kernel(); up(comp); compat_exit(); }, with * the final unlock_kernel performed implicitly by the resident kernel * in do_exit. */ #define compat_wait_for_completion(comp) do { \ spin_lock_irq(&(comp)->wq.lock); \ if (!(comp)->done) { \ DECLARE_WAITQUEUE(wait, current); \ wait.flags |= WQ_FLAG_EXCLUSIVE; \ __add_wait_queue_tail(&(comp)->wq, &wait); \ do { \ __set_current_state(TASK_UNINTERRUPTIBLE); \ spin_unlock_irq(&(comp)->wq.lock); \ schedule(); \ spin_lock_irq(&(comp)->wq.lock); \ } while (!(comp)->done); \ __remove_wait_queue(&(comp)->wq, &wait); \ } \ (comp)->done--; \ spin_unlock_irq(&(comp)->wq.lock); \ lock_kernel(); \ unlock_kernel(); \ } while (0) /* XXX: I don't think I need to touch the BKL. */ #define compat_complete(comp) do { \ unsigned long flags; \ spin_lock_irqsave(&(comp)->wq.lock, flags); \ (comp)->done++; \ spin_unlock_irqrestore(&(comp)->wq.lock, flags); \ wake_up(&(comp)->wq); \ } while (0) #define compat_complete_all(comp) do { \ unsigned long flags; \ spin_lock_irqsave(&(comp)->wq.lock, flags); \ (comp)->done += UINT_MAX / 2; \ spin_unlock_irqrestore(&(comp)->wq.lock, flags); \ wake_up_all(&(comp)->wq); \ } while (0) /* * Completions via semaphores. 
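 *
 * Whichever of the three implementations is selected, the caller-side
 * pattern stays the same (illustrative sketch, not from the original
 * source):
 *
 *    COMPAT_DECLARE_COMPLETION(done);
 *    ...
 *    compat_wait_for_completion(&done);   -- consumer blocks here
 *    ...
 *    compat_complete(&done);              -- producer wakes it up
 *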
*/ #else #include "compat_semaphore.h" #define compat_completion struct semaphore #define compat_init_completion(comp) init_MUTEX_LOCKED(comp) #define COMPAT_DECLARE_COMPLETION(comp) DECLARE_MUTEX_LOCKED(comp) #define compat_wait_for_completion(comp) do { \ down(comp); \ lock_kernel(); \ unlock_kernel(); \ } while (0) #define compat_complete(comp) up(comp) #endif #endif /* __COMPAT_COMPLETION_H__ */ vmci-only/compat_file.h0000444000000000000000000000352312025726724014134 0ustar rootroot/********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_FILE_H__ # define __COMPAT_FILE_H__ /* The fput() API is modified in 2.2.0 --hpreg */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 2, 0) # define compat_fput(_file) fput(_file) #else # define compat_fput(_file) fput(_file, (_file)->f_inode) #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0) # define compat_get_file(_file) get_file(_file) # define compat_file_count(_file) file_count(_file) #else # define compat_get_file(_file) (_file)->f_count++ # define compat_file_count(_file) (_file)->f_count #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 2, 4) # define compat_filp_close(_file, _files) filp_close(_file, _files) #else static inline void compat_filp_close(struct file* filp, fl_owner_t files) { if (filp->f_op && filp->f_op->flush) { filp->f_op->flush(filp); } /* * Hopefully there are no locks to release on this filp. * locks_remove_posix is not exported so we cannot use it... */ fput(filp); } #endif #endif /* __COMPAT_FILE_H__ */ vmci-only/compat_highmem.h0000444000000000000000000000255712025726724014641 0ustar rootroot/********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_HIGHMEM_H__ # define __COMPAT_HIGHMEM_H__ /* * BIGMEM (4 GB) support appeared in 2.3.16: kmap() API added * HIGHMEM (4 GB + 64 GB) support appeared in 2.3.23: kmap() API modified * In 2.3.27, kmap() API modified again * * --hpreg */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 27) # include #else /* For page_address --hpreg */ # include # define kmap(_page) (void*)page_address(_page) # define kunmap(_page) #endif #endif /* __COMPAT_HIGHMEM_H__ */ vmci-only/compat_init.h0000444000000000000000000000235512025726724014162 0ustar rootroot/********************************************************* * Copyright (C) 1999 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * compat_init.h: Initialization compatibility wrappers. */ #ifndef __COMPAT_INIT_H__ #define __COMPAT_INIT_H__ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 2, 0) #include #endif #ifndef module_init #define module_init(x) int init_module(void) { return x(); } #endif #ifndef module_exit #define module_exit(x) void cleanup_module(void) { x(); } #endif #endif /* __COMPAT_INIT_H__ */ vmci-only/compat_interrupt.h0000444000000000000000000000357312025726724015256 0ustar rootroot/********************************************************* * Copyright (C) 2003 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_INTERRUPT_H__ # define __COMPAT_INTERRUPT_H__ #include #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 69) /* * We cannot just define irqreturn_t, as some 2.4.x kernels have * typedef void irqreturn_t; for "increasing" backward compatibility. 
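/*
 * A handler written against the wrappers in this header looks like this
 * (illustrative sketch; 'MyDrvIsr' is a hypothetical name, and the
 * COMPAT_IRQ_* helpers are defined further below):
 *
 *    static compat_irqreturn_t
 *    MyDrvIsr COMPAT_IRQ_HANDLER_ARGS(irq, devp)
 *    {
 *       ...
 *       return COMPAT_IRQ_RETVAL(1);   -- plain 'return' before 2.5.69
 *    }
 */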
*/ typedef void compat_irqreturn_t; #define COMPAT_IRQ_NONE #define COMPAT_IRQ_HANDLED #define COMPAT_IRQ_RETVAL(x) #else typedef irqreturn_t compat_irqreturn_t; #define COMPAT_IRQ_NONE IRQ_NONE #define COMPAT_IRQ_HANDLED IRQ_HANDLED #define COMPAT_IRQ_RETVAL(x) IRQ_RETVAL(x) #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) #define COMPAT_IRQF_DISABLED SA_INTERRUPT #define COMPAT_IRQF_SHARED SA_SHIRQ #else #define COMPAT_IRQF_DISABLED IRQF_DISABLED #define COMPAT_IRQF_SHARED IRQF_SHARED #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) #define COMPAT_IRQ_HANDLER_ARGS(irq, devp) (int irq, void *devp, struct pt_regs *regs) #else #define COMPAT_IRQ_HANDLER_ARGS(irq, devp) (int irq, void *devp) #endif #endif /* __COMPAT_INTERRUPT_H__ */ vmci-only/compat_ioport.h0000444000000000000000000000404112025726724014525 0ustar rootroot/********************************************************* * Copyright (C) 2003 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_IOPORT_H__ # define __COMPAT_IOPORT_H__ #include #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) static inline void * compat_request_region(unsigned long start, unsigned long len, const char *name) { if (check_region(start, len)) { return NULL; } request_region(start, len, name); return (void*)1; } #else #define compat_request_region(start, len, name) request_region(start, len, name) #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 7) /* mmap io support starts from 2.3.7, fail the call for kernel prior to that */ static inline void * compat_request_mem_region(unsigned long start, unsigned long len, const char *name) { return NULL; } static inline void compat_release_mem_region(unsigned long start, unsigned long len) { return; } #else #define compat_request_mem_region(start, len, name) request_mem_region(start, len, name) #define compat_release_mem_region(start, len) release_mem_region(start, len) #endif /* these two macro defs are needed by compat_pci_request_region */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 15) # define IORESOURCE_IO 0x00000100 # define IORESOURCE_MEM 0x00000200 #endif #endif /* __COMPAT_IOPORT_H__ */ vmci-only/compat_kernel.h0000444000000000000000000000514312025726724014475 0ustar rootroot/********************************************************* * Copyright (C) 2004 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_KERNEL_H__ # define __COMPAT_KERNEL_H__ #include #include /* * container_of was introduced in 2.5.28 but it's easier to check like this. */ #ifndef container_of #define container_of(ptr, type, member) ({ \ const typeof( ((type *)0)->member ) *__mptr = (ptr); \ (type *)( (char *)__mptr - offsetof(type,member) );}) #endif /* * wait_for_completion and friends did not exist before 2.4.9. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 9) #define compat_complete_and_exit(comp, status) complete_and_exit(comp, status) #else #include "compat_completion.h" /* * Used by _syscallX macros. Note that this is global variable, so * do not rely on its contents too much. As exit() is only function * we use, and we never check return value from exit(), we have * no problem... */ extern int errno; /* * compat_exit() provides an access to the exit() function. It must * be named compat_exit(), as exit() (with different signature) is * provided by x86-64, arm and other (but not by i386). */ #define __NR_compat_exit __NR_exit static inline _syscall1(int, compat_exit, int, exit_code); /* * See compat_wait_for_completion in compat_completion.h. * compat_exit implicitly performs an unlock_kernel, in resident code, * ensuring that the thread is no longer running in module code when the * module is unloaded. */ #define compat_complete_and_exit(comp, status) do { \ lock_kernel(); \ compat_complete(comp); \ compat_exit(status); \ } while (0) #endif /* * vsnprintf became available in 2.4.10. For older kernels, just fall back on * vsprintf. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 10) #define vsnprintf(str, size, fmt, args) vsprintf(str, fmt, args) #endif #endif /* __COMPAT_KERNEL_H__ */ vmci-only/compat_list.h0000444000000000000000000000357512025726724014177 0ustar rootroot/********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_LIST_H__ # define __COMPAT_LIST_H__ #include /* * list_add_tail is with us since 2.4.0, or something like that. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) #define list_add_tail(newe, head) do { \ struct list_head *__h = (head); \ __list_add((newe), __h->prev, __h); \ } while (0) #endif /* * list_for_each_safe() showed up in 2.4.10, but it may be backported so we * just check for its existence. 
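 *
 * The _safe variant keeps a lookahead pointer, so the current entry may
 * be unlinked mid-walk (illustrative sketch; 'myList' is hypothetical):
 *
 *    struct list_head *pos, *n;
 *    list_for_each_safe(pos, n, &myList) {
 *       list_del(pos);   -- safe: 'n' already points past 'pos'
 *    }
 *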
*/ #ifndef list_for_each_safe # define list_for_each_safe(pos, n, head) \ for (pos = (head)->next, n = pos->next; pos != (head); \ pos = n, n = pos->next) #endif /* * list_for_each_entry() showed up in 2.4.20, but it may be backported so we * just check for its existence. */ #ifndef list_for_each_entry # define list_for_each_entry(pos, head, member) \ for (pos = list_entry((head)->next, typeof(*pos), member); \ &pos->member != (head); \ pos = list_entry(pos->member.next, typeof(*pos), member)) #endif #endif /* __COMPAT_LIST_H__ */ vmci-only/compat_mm.h0000444000000000000000000001020712025726724013623 0ustar rootroot/********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_MM_H__ # define __COMPAT_MM_H__ #include /* The get_page() API appeared in 2.3.7 --hpreg */ /* Sometime during development it became function instead of macro --petr */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) && !defined(get_page) # define get_page(_page) atomic_inc(&(_page)->count) /* The __free_page() API is exported in 2.1.67 --hpreg */ # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 67) # define put_page __free_page # else # include "compat_page.h" # define page_to_phys(_page) (page_to_pfn(_page) << PAGE_SHIFT) # define put_page(_page) free_page(page_to_phys(_page)) # endif #endif /* page_count() is 2.4.0 invention. Unfortunately unavailable in some RedHat * kernels (for example 2.4.21-4-RHEL3). */ /* It is function since 2.6.0, and hopefully RedHat will not play silly games * with mm_inline.h again... 
*/ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && !defined(page_count) # define page_count(page) atomic_read(&(page)->count) #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) # define compat_vm_pgoff(vma) ((vma)->vm_offset >> PAGE_SHIFT) static inline unsigned long compat_do_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flag, unsigned long pgoff) { unsigned long ret = -EINVAL; if (pgoff < 1 << (32 - PAGE_SHIFT)) { ret = do_mmap(file, addr, len, prot, flag, pgoff << PAGE_SHIFT); } return ret; } #else # define compat_vm_pgoff(vma) (vma)->vm_pgoff # ifdef VMW_SKAS_MMAP # define compat_do_mmap_pgoff(f, a, l, p, g, o) \ do_mmap_pgoff(current->mm, f, a, l, p, g, o) # else # define compat_do_mmap_pgoff(f, a, l, p, g, o) \ do_mmap_pgoff(f, a, l, p, g, o) # endif #endif /* 2.2.x uses 0 instead of some define */ #ifndef NOPAGE_SIGBUS #define NOPAGE_SIGBUS (0) #endif /* 2.2.x does not have HIGHMEM support */ #ifndef GFP_HIGHUSER #define GFP_HIGHUSER (GFP_USER) #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) #include "compat_page.h" static inline struct page * alloc_pages(unsigned int gfp_mask, unsigned int order) { unsigned long addr; addr = __get_free_pages(gfp_mask, order); if (!addr) { return NULL; } return virt_to_page(addr); } #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) #endif /* * In 2.4.14, the logic behind the UnlockPage macro was moved to the * unlock_page() function. Later (in 2.5.12), the UnlockPage macro was removed * altogether, and nowadays everyone uses unlock_page(). */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 14) #define compat_unlock_page(page) UnlockPage(page) #else #define compat_unlock_page(page) unlock_page(page) #endif /* * In 2.4.10, vmtruncate was changed from returning void to returning int. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 10) #define compat_vmtruncate(inode, size) \ ({ \ int result = 0; \ vmtruncate(inode, size); \ result; \ }) #else #define compat_vmtruncate(inode, size) vmtruncate(inode, size) #endif #endif /* __COMPAT_MM_H__ */ vmci-only/compat_module.h0000444000000000000000000000437212025726724014505 0ustar rootroot/********************************************************* * Copyright (C) 2007 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * compat_module.h -- */ #ifndef __COMPAT_MODULE_H__ # define __COMPAT_MODULE_H__ #include /* * Modules wishing to use the GPL license are required to include a * MODULE_LICENSE definition in their module source as of 2.4.10. */ #ifndef MODULE_LICENSE #define MODULE_LICENSE(license) #endif /* * To make use of our own home-brewed MODULE_INFO, we need macros to * concatenate two expressions to "__mod_", and and to convert an * expression into a string. 
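 *
 * For instance, MODULE_INFO(version, "1.0") built from these helpers
 * expands roughly to the following (illustrative; 99 stands in for
 * __LINE__):
 *
 *    static const char __mod_version99[]
 *       __attribute__((section(".modinfo"), unused)) = "version" "=" "1.0";
 *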
I'm sure we've got these in our codebase, * but I'd rather not introduce such a dependency in a compat header. */ #ifndef __module_cat #define __module_cat_1(a, b) __mod_ ## a ## b #define __module_cat(a, b) __module_cat_1(a, b) #endif #ifndef __stringify #define __stringify_1(x) #x #define __stringify(x) __stringify_1(x) #endif /* * MODULE_INFO was born in 2.5.69. */ #ifndef MODULE_INFO #define MODULE_INFO(tag, info) \ static const char __module_cat(tag, __LINE__)[] \ __attribute__((section(".modinfo"), unused)) = __stringify(tag) "=" info #endif /* * MODULE_VERSION was born in 2.6.4. The earlier form appends a long "\0xxx" * string to the module's version, but that was removed in 2.6.10, so we'll * ignore it in our wrapper. */ #ifndef MODULE_VERSION #define MODULE_VERSION(_version) MODULE_INFO(version, _version) #endif #endif /* __COMPAT_MODULE_H__ */ vmci-only/compat_page.h0000444000000000000000000000466312025726724014137 0ustar rootroot/********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_PAGE_H__ # define __COMPAT_PAGE_H__ #include #include /* The pfn_to_page() API appeared in 2.5.14 and changed to function during 2.6.x */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && !defined(pfn_to_page) # define pfn_to_page(_pfn) (mem_map + (_pfn)) # define page_to_pfn(_page) ((_page) - mem_map) #endif /* The virt_to_page() API appeared in 2.4.0 --hpreg */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) && !defined(virt_to_page) # define virt_to_page(_kvAddr) pfn_to_page(MAP_NR(_kvAddr)) #endif /* * The get_order() API appeared at some point in 2.3.x, and was then backported * in 2.2.17-21mdk and in the stock 2.2.18. Because we can only detect its * definition through makefile tricks, we provide our own for now --hpreg */ static inline int compat_get_order(unsigned long size) // IN { int order; size = (size - 1) >> (PAGE_SHIFT - 1); order = -1; do { size >>= 1; order++; } while (size); return order; } /* * BUG() was added to in 2.2.18, and was moved to * in 2.5.58. * * XXX: Technically, this belongs in some sort of "compat_asm_page.h" file, but * since our compatibility wrappers don't distinguish between and * , putting it here is reasonable. */ #ifndef BUG #define BUG() do { \ printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ __asm__ __volatile__(".byte 0x0f,0x0b"); \ } while (0) #endif #endif /* __COMPAT_PAGE_H__ */ vmci-only/compat_pci.h0000444000000000000000000004157512025726724014001 0ustar rootroot/********************************************************* * Copyright (C) 1999 VMware, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * compat_pci.h: PCI compatibility wrappers. */ #ifndef __COMPAT_PCI_H__ #define __COMPAT_PCI_H__ #include "compat_ioport.h" #include #ifndef KERNEL_2_1 # include #endif /* 2.0.x has useless struct pci_dev; remap it to our own */ #ifndef KERNEL_2_1 #define pci_dev vmw_pci_driver_instance #endif /* 2.0/2.2 does not have pci driver API */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) struct vmw_pci_driver_instance { struct vmw_pci_driver_instance *next; void *driver_data; struct pci_driver *pcidrv; #ifdef KERNEL_2_1 struct pci_dev *pcidev; #else unsigned char bus; unsigned char devfn; unsigned int irq; #endif }; #endif /* 2.0 has pcibios_* calls only... We have to provide pci_* compatible wrappers. */ #ifndef KERNEL_2_1 static inline int pci_read_config_byte(struct pci_dev *pdev, // IN: PCI slot unsigned char where, // IN: Byte to read u8 *value) // OUT: Value read { return pcibios_read_config_byte(pdev->bus, pdev->devfn, where, value); } static inline int pci_read_config_dword(struct pci_dev *pdev, // IN: PCI slot unsigned char where, // IN: Dword to read u32 *value) // OUT: Value read { return pcibios_read_config_dword(pdev->bus, pdev->devfn, where, value); } static inline int pci_write_config_dword(struct pci_dev *pdev, // IN: PCI slot unsigned char where, // IN: Dword to write u32 value) // IN: Value to write { return pcibios_write_config_dword(pdev->bus, pdev->devfn, where, value); } #endif /* *----------------------------------------------------------------------------- * * compat_pci_name -- * * Return human readable PCI slot name. Note that some implementations * return a pointer to the static storage, so returned value may be * overwritten by subsequent calls to this function. * * Results: * Returns pointer to the string with slot name. * * Side effects: * None. 
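 *
 * Use the result immediately, e.g. (illustrative sketch):
 *
 *    printk(KERN_INFO "probing PCI device %s\n", compat_pci_name(pdev));
 *
 * and do not cache the returned pointer across calls, because of the
 * static buffer in the fallback implementations.
 *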
* *----------------------------------------------------------------------------- */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22) #define compat_pci_name(pdev) pci_name(pdev) #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0) #define compat_pci_name(pdev) (pdev)->slot_name #elif defined(KERNEL_2_1) static inline const char* compat_pci_name(struct pci_dev* pdev) { static char slot_name[12]; sprintf(slot_name, "%02X:%02X.%X", pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); return slot_name; } #else static inline const char* compat_pci_name(struct pci_dev* pdev) { static char slot_name[12]; sprintf(slot_name, "%02X:%02X.%X", pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); return slot_name; } #endif /* pci_resource_start comes in 4 flavors - 2.0, 2.2, early 2.3, 2.4+ */ #ifndef KERNEL_2_1 static inline unsigned long compat_pci_resource_start(struct pci_dev *pdev, unsigned int index) { u32 addr; if (pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0 + index * 4, &addr)) { printk(KERN_ERR "Unable to read base address %u from PCI slot %s!\n", index, compat_pci_name(pdev)); return ~0UL; } if (addr & PCI_BASE_ADDRESS_SPACE) { return addr & PCI_BASE_ADDRESS_IO_MASK; } else { return addr & PCI_BASE_ADDRESS_MEM_MASK; } } #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 1) # define compat_pci_resource_start(dev, index) \ (((dev)->base_address[index] & PCI_BASE_ADDRESS_SPACE) \ ? ((dev)->base_address[index] & PCI_BASE_ADDRESS_IO_MASK) \ : ((dev)->base_address[index] & PCI_BASE_ADDRESS_MEM_MASK)) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43) # define compat_pci_resource_start(dev, index) \ ((dev)->resource[index].start) #else # define compat_pci_resource_start(dev, index) \ pci_resource_start(dev, index) #endif /* since 2.3.15, a new set of s/w res flags IORESOURCE_ is introduced, * we fake them by returning either IORESOURCE_{IO, MEM} prior to 2.3.15 since * this is what compat_pci_request_region uses */ #ifndef KERNEL_2_1 static inline unsigned long compat_pci_resource_flags(struct pci_dev *pdev, unsigned int index) { u32 addr; if (pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0 + index * 4, &addr)) { printk(KERN_ERR "Unable to read base address %u from PCI slot %s!\n", index, compat_pci_name(pdev)); return ~0UL; } if (addr & PCI_BASE_ADDRESS_SPACE) { return IORESOURCE_IO; } else { return IORESOURCE_MEM; } } #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 1) # define compat_pci_resource_flags(dev, index) \ (((dev)->base_address[index] & PCI_BASE_ADDRESS_SPACE) \ ? 
IORESOURCE_IO: IORESOURCE_MEM) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 15) /* IORESOURCE_xxx appeared in 2.3.15 and is set in resource[].flags */ # define compat_pci_resource_flags(dev, index) ((dev)->resource[index].flags) #else # define compat_pci_resource_flags(dev, index) pci_resource_flags(dev, index) #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) static inline unsigned long compat_pci_resource_len(struct pci_dev *pdev, // IN unsigned int index) // IN { u32 addr, mask; unsigned char reg = PCI_BASE_ADDRESS_0 + index * 4; if (pci_read_config_dword(pdev, reg, &addr) || addr == 0xFFFFFFFF) { return 0; } pci_write_config_dword(pdev, reg, 0xFFFFFFFF); pci_read_config_dword(pdev, reg, &mask); pci_write_config_dword(pdev, reg, addr); if (mask == 0 || mask == 0xFFFFFFFF) { return 0; } if (addr & PCI_BASE_ADDRESS_SPACE) { return 65536 - (mask & PCI_BASE_ADDRESS_IO_MASK & 0xFFFF); } else { return -(mask & PCI_BASE_ADDRESS_MEM_MASK); } } #else #define compat_pci_resource_len(dev, index) pci_resource_len(dev, index) #endif /* pci_request_region appears in 2.4.20 */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 20) static inline int compat_pci_request_region(struct pci_dev *pdev, int bar, char *name) { if (compat_pci_resource_len(pdev, bar) == 0) { return 0; } if (compat_pci_resource_flags(pdev, bar) & IORESOURCE_IO) { if (!compat_request_region(compat_pci_resource_start(pdev, bar), compat_pci_resource_len(pdev, bar), name)) { return -EBUSY; } } else if (compat_pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { if (!compat_request_mem_region(compat_pci_resource_start(pdev, bar), compat_pci_resource_len(pdev, bar), name)) { return -EBUSY; } } return 0; } static inline void compat_pci_release_region(struct pci_dev *pdev, int bar) { if (compat_pci_resource_len(pdev, bar) != 0) { if (compat_pci_resource_flags(pdev, bar) & IORESOURCE_IO) { release_region(compat_pci_resource_start(pdev, bar), compat_pci_resource_len(pdev, bar)); } else if (compat_pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { compat_release_mem_region(compat_pci_resource_start(pdev, bar), compat_pci_resource_len(pdev, bar)); } } } #else #define compat_pci_request_region(pdev, bar, name) pci_request_region(pdev, bar, name) #define compat_pci_release_region(pdev, bar) pci_release_region(pdev, bar) #endif /* pci_request_regions appeears in 2.4.3 */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) static inline int compat_pci_request_regions(struct pci_dev *pdev, char *name) { int i; for (i = 0; i < 6; i++) { if (compat_pci_request_region(pdev, i, name)) { goto release; } } return 0; release: while (--i >= 0) { compat_pci_release_region(pdev, i); } return -EBUSY; } static inline void compat_pci_release_regions(struct pci_dev *pdev) { int i; for (i = 0; i < 6; i++) { compat_pci_release_region(pdev, i); } } #else #define compat_pci_request_regions(pdev, name) pci_request_regions(pdev, name) #define compat_pci_release_regions(pdev) pci_release_regions(pdev) #endif /* pci_enable_device is available since 2.4.0 */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) #define compat_pci_enable_device(pdev) (0) #else #define compat_pci_enable_device(pdev) pci_enable_device(pdev) #endif /* pci_set_master is available since 2.2.0 */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 0) #define compat_pci_set_master(pdev) (0) #else #define compat_pci_set_master(pdev) pci_set_master(pdev) #endif /* pci_disable_device is available since 2.4.4 */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 4) #define compat_pci_disable_device(pdev) do {} 
while (0) #else #define compat_pci_disable_device(pdev) pci_disable_device(pdev) #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) /* * Devices supported by particular pci driver. While 2.4+ kernels * can do match on subsystem and class too, we support match on * vendor/device IDs only. */ struct pci_device_id { unsigned int vendor, device; unsigned long driver_data; }; #define PCI_DEVICE(vend, dev) .vendor = (vend), .device = (dev) /* PCI driver */ struct pci_driver { const char *name; const struct pci_device_id *id_table; int (*probe)(struct pci_dev* dev, const struct pci_device_id* id); void (*remove)(struct pci_dev* dev); }; /* * Note that this is static variable. Maybe everything below should be in * separate compat_pci.c file, but currently only user of this file is vmxnet, * and vmxnet has only one file, so it is fine. Also with vmxnet all * functions below are called just once, so difference between 'inline' and * separate compat_pci.c should be very small. */ static struct vmw_pci_driver_instance *pci_driver_instances = NULL; #ifdef KERNEL_2_1 #define vmw_pci_device(instance) (instance)->pcidev #else #define vmw_pci_device(instance) (instance) #endif /* *----------------------------------------------------------------------------- * * pci_register_driver -- * * Create driver instances for all matching PCI devices in the box. * * Results: * Returns 0 for success, negative error value for failure. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static inline int pci_register_driver(struct pci_driver *drv) { const struct pci_device_id *chipID; for (chipID = drv->id_table; chipID->vendor; chipID++) { #ifdef KERNEL_2_1 struct pci_dev *pdev; for (pdev = NULL; (pdev = pci_find_device(chipID->vendor, chipID->device, pdev)) != NULL; ) { #else int adapter; unsigned char bus, devfn, irq; for (adapter = 0; pcibios_find_device(chipID->vendor, chipID->device, adapter, &bus, &devfn) == 0; adapter++) { #endif struct vmw_pci_driver_instance *pdi; int err; pdi = kmalloc(sizeof *pdi, GFP_KERNEL); if (!pdi) { printk(KERN_ERR "Not enough memory.\n"); break; } pdi->pcidrv = drv; #ifdef KERNEL_2_1 pdi->pcidev = pdev; #else pdi->bus = bus; pdi->devfn = devfn; if (pci_read_config_byte(pdi, PCI_INTERRUPT_LINE, &irq)) { pdi->irq = -1; } else { pdi->irq = irq; } #endif pdi->driver_data = NULL; pdi->next = pci_driver_instances; pci_driver_instances = pdi; err = drv->probe(vmw_pci_device(pdi), chipID); if (err) { pci_driver_instances = pdi->next; kfree(pdi); } } } return 0; } /* *----------------------------------------------------------------------------- * * compat_pci_unregister_driver -- * * Shut down PCI driver - unbind all device instances from driver. * * Results: * None. * * Side effects: * None. 
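 *
 * Paired with pci_register_driver() above, a minimal driver skeleton
 * reads as follows (illustrative sketch; all names are hypothetical):
 *
 *    static const struct pci_device_id myIds[] = {
 *       { PCI_DEVICE(0x15AD, 0x0740) },   -- vendor/device to match
 *       { 0, },
 *    };
 *    static struct pci_driver myDriver = {
 *       .name = "mydrv", .id_table = myIds,
 *       .probe = MyProbe, .remove = MyRemove,
 *    };
 *    ...
 *    pci_register_driver(&myDriver);     -- bind all matching devices
 *    ...
 *    pci_unregister_driver(&myDriver);   -- unbind them again
 *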
* *----------------------------------------------------------------------------- */ static inline void pci_unregister_driver(struct pci_driver *drv) { struct vmw_pci_driver_instance **ppdi; ppdi = &pci_driver_instances; while (1) { struct vmw_pci_driver_instance *pdi = *ppdi; if (!pdi) { break; } if (pdi->pcidrv == drv) { drv->remove(vmw_pci_device(pdi)); *ppdi = pdi->next; kfree(pdi); } else { ppdi = &pdi->next; } } } #else /* provide PCI_DEVICE for early 2.4.x kernels */ #ifndef PCI_DEVICE #define PCI_DEVICE(vend, dev) .vendor = (vend), .device = (dev), \ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID #endif #endif /* provide dummy MODULE_DEVICE_TABLE for 2.0/2.2 */ #ifndef MODULE_DEVICE_TABLE #define MODULE_DEVICE_TABLE(bus, devices) #endif /* *----------------------------------------------------------------------------- * * pci_set_drvdata -- * * Set per-device driver's private data. * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ /* *----------------------------------------------------------------------------- * * pci_get_drvdata -- * * Retrieve per-device driver's private data. * * Results: * per-device driver's data previously set by pci_set_drvdata, * or NULL on failure. * * Side effects: * None. * *----------------------------------------------------------------------------- */ #ifndef KERNEL_2_1 /* 2.0.x is simple, we have driver_data directly in pci_dev */ #define pci_set_drvdata(pdev, data) do { (pdev)->driver_data = (data); } while (0) #define pci_get_drvdata(pdev) (pdev)->driver_data #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) /* 2.2.x is trickier, we have to find driver instance first */ static inline void pci_set_drvdata(struct pci_dev *pdev, void* data) { struct vmw_pci_driver_instance *pdi; for (pdi = pci_driver_instances; pdi; pdi = pdi->next) { if (pdi->pcidev == pdev) { pdi->driver_data = data; return; } } printk(KERN_ERR "pci_set_drvdata issued for unknown device %p\n", pdev); } static inline void * pci_get_drvdata(struct pci_dev *pdev) { struct vmw_pci_driver_instance *pdi; for (pdi = pci_driver_instances; pdi; pdi = pdi->next) { if (pdi->pcidev == pdev) { return pdi->driver_data; } } printk(KERN_ERR "pci_get_drvdata issued for unknown device %p\n", pdev); return NULL; } #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,48) # define PCI_DMA_BIDIRECTIONAL 0 # define PCI_DMA_TODEVICE 1 # define PCI_DMA_FROMDEVICE 2 # define PCI_DMA_NONE 3 #endif /* * Power Management related compat wrappers. 
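 *
 * A suspend callback built on these wrappers might read (illustrative
 * sketch; 'MySuspend' is a hypothetical name):
 *
 *    static int MySuspend(struct pci_dev *pdev, pm_message_t state)
 *    {
 *       compat_pci_save_state(pdev);
 *       pci_set_power_state(pdev, compat_pci_choose_state(pdev, state));
 *       return 0;
 *    }
 *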
*/ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10) # define compat_pci_save_state(pdev) pci_save_state((pdev), NULL) # define compat_pci_restore_state(pdev) pci_restore_state((pdev), NULL) #else # define compat_pci_save_state(pdev) pci_save_state((pdev)) # define compat_pci_restore_state(pdev) pci_restore_state((pdev)) #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11) # define pm_message_t u32 # define compat_pci_choose_state(pdev, state) (state) # define PCI_D0 0 # define PCI_D3hot 3 #else # define compat_pci_choose_state(pdev, state) pci_choose_state((pdev), (state)) #endif /* 2.6.14 changed the PCI shutdown callback */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) # define COMPAT_PCI_SHUTDOWN(func) .driver = { .shutdown = (func), } # define COMPAT_PCI_DECLARE_SHUTDOWN(func, var) (func)(struct device *(var)) # define COMPAT_PCI_TO_DEV(dev) (to_pci_dev(dev)) #else # define COMPAT_PCI_SHUTDOWN(func) .shutdown = (func) # define COMPAT_PCI_DECLARE_SHUTDOWN(func, var) (func)(struct pci_dev *(var)) # define COMPAT_PCI_TO_DEV(dev) (dev) #endif #endif /* __COMPAT_PCI_H__ */ vmci-only/compat_pgtable.h0000444000000000000000000001104412025726724014630 0ustar rootroot/********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_PGTABLE_H__ # define __COMPAT_PGTABLE_H__ #if defined(CONFIG_PARAVIRT) && defined(CONFIG_HIGHPTE) # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 21) # include # undef paravirt_map_pt_hook # define paravirt_map_pt_hook(type, va, pfn) do {} while (0) # endif #endif #include /* pte_page() API modified in 2.3.23 to return a struct page * --hpreg */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 23) # define compat_pte_page pte_page #else # include "compat_page.h" # define compat_pte_page(_pte) virt_to_page(pte_page(_pte)) #endif /* Appeared in 2.5.5 --hpreg */ #ifndef pte_offset_map /* Appeared in SuSE 8.0's 2.4.18 --hpreg */ # ifdef pte_offset_atomic # define pte_offset_map pte_offset_atomic # define pte_unmap pte_kunmap # else # define pte_offset_map pte_offset # define pte_unmap(_pte) # endif #endif /* Appeared in 2.5.74-mmX --petr */ #ifndef pmd_offset_map # define pmd_offset_map(pgd, address) pmd_offset(pgd, address) # define pmd_unmap(pmd) #endif /* * Appeared in 2.6.10-rc2-mm1. Older kernels did L4 page tables as * part of pgd_offset, or they did not have L4 page tables at all. * In 2.6.11 pml4 -> pgd -> pmd -> pte hierarchy was replaced by * pgd -> pud -> pmd -> pte hierarchy. 
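 *
 * With these wrappers a four-level walk reads the same on every kernel
 * (illustrative sketch; presence checks omitted -- see
 * PgtblPGD2PTELocked() in pgtbl.h for the checked version):
 *
 *    compat_pgd_t *pgd = compat_pgd_offset(mm, addr);
 *    compat_pud_t *pud = compat_pud_offset(pgd, addr);
 *    pmd_t *pmd = pmd_offset_map(pud, addr);
 *    pte_t *pte = pte_offset_map(pmd, addr);
 *    pmd_unmap(pmd);
 *    ...   -- use *pte, then:
 *    pte_unmap(pte);
 *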
*/ #ifdef PUD_MASK # define compat_pgd_offset(mm, address) pgd_offset(mm, address) # define compat_pgd_present(pgd) pgd_present(pgd) # define compat_pud_offset(pgd, address) pud_offset(pgd, address) # define compat_pud_present(pud) pud_present(pud) typedef pgd_t compat_pgd_t; typedef pud_t compat_pud_t; #elif defined(pml4_offset) # define compat_pgd_offset(mm, address) pml4_offset(mm, address) # define compat_pgd_present(pml4) pml4_present(pml4) # define compat_pud_offset(pml4, address) pml4_pgd_offset(pml4, address) # define compat_pud_present(pgd) pgd_present(pgd) typedef pml4_t compat_pgd_t; typedef pgd_t compat_pud_t; #else # define compat_pgd_offset(mm, address) pgd_offset(mm, address) # define compat_pgd_present(pgd) pgd_present(pgd) # define compat_pud_offset(pgd, address) (pgd) # define compat_pud_present(pud) (1) typedef pgd_t compat_pgd_t; typedef pgd_t compat_pud_t; #endif #define compat_pgd_offset_k(mm, address) pgd_offset_k(address) /* Introduced somewhere in 2.6.0, + backported to some 2.4 RedHat kernels */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && !defined(pte_pfn) # define pte_pfn(pte) page_to_pfn(compat_pte_page(pte)) #endif /* A page_table_lock field is added to struct mm_struct in 2.3.10 --hpreg */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 10) # define compat_get_page_table_lock(_mm) (&(_mm)->page_table_lock) #else # define compat_get_page_table_lock(_mm) NULL #endif /* * Define VM_PAGE_KERNEL_EXEC for vmapping executable pages. * * On ia32 PAGE_KERNEL_EXEC was introduced in 2.6.8.1. Unfortunately it accesses * __PAGE_KERNEL_EXEC which is not exported for modules. So we use * __PAGE_KERNEL and just cut _PAGE_NX bit from it. * * For ia32 kernels before 2.6.8.1 we use PAGE_KERNEL directly, these kernels * do not have noexec support. * * On x86-64 situation is a bit better: they always supported noexec, but * before 2.6.8.1 flag was named PAGE_KERNEL_EXECUTABLE, and it was renamed * to PAGE_KERNEL_EXEC when ia32 got noexec too (see above). */ #ifdef CONFIG_X86 #ifdef _PAGE_NX #define VM_PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL & ~_PAGE_NX) #else #define VM_PAGE_KERNEL_EXEC PAGE_KERNEL #endif #else #ifdef PAGE_KERNEL_EXECUTABLE #define VM_PAGE_KERNEL_EXEC PAGE_KERNEL_EXECUTABLE #else #define VM_PAGE_KERNEL_EXEC PAGE_KERNEL_EXEC #endif #endif #endif /* __COMPAT_PGTABLE_H__ */ vmci-only/compat_sched.h0000444000000000000000000002425212025726724014305 0ustar rootroot/********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_SCHED_H__ # define __COMPAT_SCHED_H__ #include /* CLONE_KERNEL available in 2.5.35 and higher. */ #ifndef CLONE_KERNEL #define CLONE_KERNEL CLONE_FILES | CLONE_FS | CLONE_SIGHAND #endif /* TASK_COMM_LEN become available in 2.6.11. 
*/ #ifndef TASK_COMM_LEN #define TASK_COMM_LEN 16 #endif /* The capable() API appeared in 2.1.92 --hpreg */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 1, 92) # define capable(_capability) suser() #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 0) # define need_resched() need_resched #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 3) # define need_resched() (current->need_resched) #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 3) # define cond_resched() (need_resched() ? schedule() : (void) 0) #endif /* Oh well. We need yield... Happy us! */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 20) # ifdef __x86_64__ # define compat_yield() there_is_nothing_like_yield() # else # include # include /* * Used by _syscallX macros. Note that this is global variable, so * do not rely on its contents too much. As exit() is only function * we use, and we never check return value from exit(), we have * no problem... */ extern int errno; /* * compat_exit() provides an access to the exit() function. It must * be named compat_exit(), as exit() (with different signature) is * provided by x86-64, arm and other (but not by i386). */ # define __NR_compat_yield __NR_sched_yield static inline _syscall0(int, compat_yield); # endif #else # define compat_yield() yield() #endif /* * Since 2.5.34 there are two methods to enumerate tasks: * for_each_process(p) { ... } which enumerates only tasks and * do_each_thread(g,t) { ... } while_each_thread(g,t) which enumerates * also threads even if they share same pid. */ #ifndef for_each_process # define for_each_process(p) for_each_task(p) #endif #ifndef do_each_thread # define do_each_thread(g, t) for_each_task(g) { t = g; do # define while_each_thread(g, t) while (0) } #endif /* * Lock for signal mask is moving target... */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 40) && defined(CLONE_PID) /* 2.4.x without NPTL patches or early 2.5.x */ #define compat_sigmask_lock sigmask_lock #define compat_dequeue_signal_current(siginfo_ptr) \ dequeue_signal(¤t->blocked, (siginfo_ptr)) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 60) && !defined(INIT_SIGHAND) /* RedHat's 2.4.x with first version of NPTL support, or 2.5.40 to 2.5.59 */ #define compat_sigmask_lock sig->siglock #define compat_dequeue_signal_current(siginfo_ptr) \ dequeue_signal(¤t->blocked, (siginfo_ptr)) #else /* RedHat's 2.4.x with second version of NPTL support, or 2.5.60+. */ #define compat_sigmask_lock sighand->siglock #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) #define compat_dequeue_signal_current(siginfo_ptr) \ dequeue_signal(¤t->blocked, (siginfo_ptr)) #else #define compat_dequeue_signal_current(siginfo_ptr) \ dequeue_signal(current, ¤t->blocked, (siginfo_ptr)) #endif #endif /* * recalc_sigpending() had task argument in the past */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 29) && defined(CLONE_PID) /* 2.4.x without NPTL patches or early 2.5.x */ #define compat_recalc_sigpending() recalc_sigpending(current) #else /* RedHat's 2.4.x with NPTL support, or 2.5.29+ */ #define compat_recalc_sigpending() recalc_sigpending() #endif /* * reparent_to_init() was introduced in 2.4.8. In 2.5.38 (or possibly * earlier, but later than 2.5.31) a call to it was added into * daemonize(), so compat_daemonize no longer needs to call it. * * In 2.4.x kernels reparent_to_init() forgets to do correct refcounting * on current->user. It is better to count one too many than one too few... 
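 *
 * For illustration (not part of the original header), a module's kernel
 * thread would normally not call the wrapper below directly; it relies
 * on compat_daemonize(), defined further down, which invokes it at the
 * right point:
 *
 *    static int MyThread(void *unused)   // hypothetical thread body
 *    {
 *       compat_daemonize("my-thread");
 *       while (!exiting) {
 *          ... do work, sleep ...
 *       }
 *       return 0;
 *    }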
*/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 8) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 38) #define compat_reparent_to_init() do { \ reparent_to_init(); \ atomic_inc(¤t->user->__count); \ } while (0) #else #define compat_reparent_to_init() do {} while (0) #endif /* * daemonize appeared in 2.2.18. Except 2.2.17-4-RH7.0, which has it too. * Fortunately 2.2.17-4-RH7.0 uses versioned symbols, so we can check * its existence with defined(). */ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18)) && !defined(daemonize) static inline void daemonize(void) { struct fs_struct *fs; exit_mm(current); current->session = 1; current->pgrp = 1; exit_fs(current); fs = init_task.fs; current->fs = fs; atomic_inc(&fs->count); } #endif /* * flush_signals acquires sighand->siglock since 2.5.61... Verify RH's kernels! */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 61) #define compat_flush_signals(task) do { \ spin_lock_irq(&task->compat_sigmask_lock); \ flush_signals(task); \ spin_unlock_irq(&task->compat_sigmask_lock); \ } while (0) #else #define compat_flush_signals(task) flush_signals(task) #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 61) #define compat_allow_signal(signr) do { \ spin_lock_irq(¤t->compat_sigmask_lock); \ sigdelset(¤t->blocked, signr); \ compat_recalc_sigpending(); \ spin_unlock_irq(¤t->compat_sigmask_lock); \ } while (0) #else #define compat_allow_signal(signr) allow_signal(signr) #endif /* * daemonize can set process name since 2.5.61. Prior to 2.5.61, daemonize * didn't block signals on our behalf. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 61) #define compat_daemonize(x...) \ ({ \ /* Beware! No snprintf here, so verify arguments! */ \ sprintf(current->comm, x); \ \ /* Block all signals. */ \ spin_lock_irq(¤t->compat_sigmask_lock); \ sigfillset(¤t->blocked); \ compat_recalc_sigpending(); \ spin_unlock_irq(¤t->compat_sigmask_lock); \ compat_flush_signals(current); \ \ daemonize(); \ compat_reparent_to_init(); \ }) #else #define compat_daemonize(x...) daemonize(x) #endif /* * set priority for specified thread. Exists on 2.6.x kernels and some * 2.4.x vendor's kernels. */ #if defined(VMW_HAVE_SET_USER_NICE) #define compat_set_user_nice(task, n) set_user_nice((task), (n)) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) #define compat_set_user_nice(task, n) do { (task)->priority = 20 - (n); } while (0) #elif !defined(VMW_HAVE_SET_USER_NICE) #define compat_set_user_nice(task, n) do { (task)->nice = (n); } while (0) #endif /* * try to freeze a process. For kernels 2.6.11 or newer, we know how to choose * the interface. The problem is that the oldest interface, introduced in * 2.5.18, was backported to 2.4.x kernels. So if we're older than 2.6.11, * we'll decide what to do based on whether or not swsusp was configured * for the kernel. For kernels 2.6.20 and newer, we'll also need to include * freezer.h since the try_to_freeze definition was pulled out of sched.h. 
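 *
 * Illustrative usage of the resulting wrappers (not from the original
 * header): a driver thread opts in with compat_set_freezable(), defined
 * below, and then polls for freeze requests in its main loop:
 *
 *    compat_set_freezable();
 *    while (!exiting) {
 *       if (compat_try_to_freeze()) {
 *          continue;   // we were frozen; re-check state before working
 *       }
 *       ... wait for and process work ...
 *    }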
*/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) #include #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) || defined(VMW_TL10S64_WORKAROUND) #define compat_try_to_freeze() try_to_freeze() #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11) #define compat_try_to_freeze() try_to_freeze(PF_FREEZE) #elif defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_SOFTWARE_SUSPEND2) #include "compat_mm.h" #include #include static inline int compat_try_to_freeze(void) { if (current->flags & PF_FREEZE) { refrigerator(PF_FREEZE); return 1; } else { return 0; } } #else static inline int compat_try_to_freeze(void) { return 0; } #endif /* * As of 2.6.23-rc1, kernel threads are no longer freezable by * default. Instead, kernel threads that need to be frozen must opt-in * by calling set_freezable() as soon as the thread is created. */ #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22) #define compat_set_freezable() do { set_freezable(); } while (0) #else #define compat_set_freezable() do {} while (0) #endif /* * Since 2.6.27-rc2 kill_proc() is gone... Replacement (GPL-only!) * API is available since 2.6.19. Use them from 2.6.27-rc1 up. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) typedef int compat_pid; #define compat_find_get_pid(pid) (pid) #define compat_put_pid(pid) do { } while (0) #define compat_kill_pid(pid, sig, flag) kill_proc(pid, sig, flag) #else typedef struct pid * compat_pid; #define compat_find_get_pid(pid) find_get_pid(pid) #define compat_put_pid(pid) put_pid(pid) #define compat_kill_pid(pid, sig, flag) kill_pid(pid, sig, flag) #endif #endif /* __COMPAT_SCHED_H__ */ vmci-only/compat_semaphore.h0000444000000000000000000000314212025726724015175 0ustar rootroot/********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_SEMAPHORE_H__ # define __COMPAT_SEMAPHORE_H__ /* <= 2.6.25 have asm only, 2.6.26 has both, and 2.6.27-rc2+ has linux only. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) # include #else # include #endif /* * The init_MUTEX_LOCKED() API appeared in 2.2.18, and is also in * 2.2.17-21mdk --hpreg */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) #ifndef init_MUTEX_LOCKED #define init_MUTEX_LOCKED(_sem) *(_sem) = MUTEX_LOCKED #endif #ifndef DECLARE_MUTEX #define DECLARE_MUTEX(name) struct semaphore name = MUTEX #endif #ifndef DECLARE_MUTEX_LOCKED #define DECLARE_MUTEX_LOCKED(name) struct semaphore name = MUTEX_LOCKED #endif #endif #endif /* __COMPAT_SEMAPHORE_H__ */ vmci-only/compat_slab.h0000444000000000000000000000665312025726724014145 0ustar rootroot/********************************************************* * Copyright (C) 2005 VMware, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_SLAB_H__ # define __COMPAT_SLAB_H__ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 2, 0) # include #else # include #endif /* * Before 2.6.20, kmem_cache_t was the accepted way to refer to a kmem_cache * structure. Prior to 2.6.15, this structure was called kmem_cache_s, and * afterwards it was renamed to kmem_cache. Here we keep things simple and use * the accepted typedef until it became deprecated, at which point we switch * over to the kmem_cache name. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) # define compat_kmem_cache struct kmem_cache #else # define compat_kmem_cache kmem_cache_t #endif /* * Up to 2.6.22 kmem_cache_create has 6 arguments - name, size, alignment, flags, * constructor, and destructor. Then for some time kernel was asserting that * destructor is NULL, and since 2.6.23-pre1 kmem_cache_create takes only 5 * arguments - destructor is gone. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) || defined(VMW_KMEMCR_HAS_DTOR) #define compat_kmem_cache_create(name, size, align, flags, ctor) \ kmem_cache_create(name, size, align, flags, ctor, NULL) #else #define compat_kmem_cache_create(name, size, align, flags, ctor) \ kmem_cache_create(name, size, align, flags, ctor) #endif /* * Up to 2.6.23 kmem_cache constructor has three arguments - pointer to block to * prepare (aka "this"), from which cache it came, and some unused flags. After * 2.6.23 flags were removed, and order of "this" and cache parameters was swapped... * Since 2.6.27-rc2 everything is different again, and ctor has only one argument. * * HAS_3_ARGS has precedence over HAS_2_ARGS if both are defined. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23) && !defined(VMW_KMEMCR_CTOR_HAS_3_ARGS) # define VMW_KMEMCR_CTOR_HAS_3_ARGS #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26) && !defined(VMW_KMEMCR_CTOR_HAS_2_ARGS) # define VMW_KMEMCR_CTOR_HAS_2_ARGS #endif #if defined(VMW_KMEMCR_CTOR_HAS_3_ARGS) typedef void compat_kmem_cache_ctor(void *, compat_kmem_cache *, unsigned long); #define COMPAT_KMEM_CACHE_CTOR_ARGS(arg) void *arg, \ compat_kmem_cache *cache, \ unsigned long flags #elif defined(VMW_KMEMCR_CTOR_HAS_2_ARGS) typedef void compat_kmem_cache_ctor(compat_kmem_cache *, void *); #define COMPAT_KMEM_CACHE_CTOR_ARGS(arg) compat_kmem_cache *cache, \ void *arg #else typedef void compat_kmem_cache_ctor(void *); #define COMPAT_KMEM_CACHE_CTOR_ARGS(arg) void *arg #endif #endif /* __COMPAT_SLAB_H__ */ vmci-only/compat_spinlock.h0000444000000000000000000000460612025726724015042 0ustar rootroot/********************************************************* * Copyright (C) 2005 VMware, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_SPINLOCK_H__ # define __COMPAT_SPINLOCK_H__ /* * The spin_lock() API appeared in 2.1.25 in asm/smp_lock.h * It moved in 2.1.30 to asm/spinlock.h * It moved again in 2.3.18 to linux/spinlock.h * * --hpreg */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 18) # include #else # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 30) # include # else typedef struct {} spinlock_t; # define spin_lock_init(lock) # define spin_lock(lock) # define spin_unlock(lock) # define spin_lock_irqsave(lock, flags) do { \ save_flags(flags); \ cli(); \ spin_lock(lock); \ } while (0) # define spin_unlock_irqrestore(lock, flags) do { \ spin_unlock(lock); \ restore_flags(flags); \ } while (0) # endif #endif /* * Preempt support was added during 2.5.x development cycle, and later * it was backported to 2.4.x. In 2.4.x backport these definitions * live in linux/spinlock.h, that's why we put them here (in 2.6.x they * are defined in linux/preempt.h which is included by linux/spinlock.h). */ #ifdef CONFIG_PREEMPT #define compat_preempt_disable() preempt_disable() #define compat_preempt_enable() preempt_enable() #else #define compat_preempt_disable() do { } while (0) #define compat_preempt_enable() do { } while (0) #endif #endif /* __COMPAT_SPINLOCK_H__ */ vmci-only/compat_version.h0000444000000000000000000000616512025726724014707 0ustar rootroot/********************************************************* * Copyright (C) 1998 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_VERSION_H__ # define __COMPAT_VERSION_H__ #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_VMNIXMOD #define INCLUDE_ALLOW_DISTRIBUTE #include "includeCheck.h" #ifndef __linux__ # error "linux-version.h" #endif #include /* Appeared in 2.1.90 --hpreg */ #ifndef KERNEL_VERSION # define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) #endif /* * Distinguish relevant classes of Linux kernels. * * The convention is that version X defines all * the KERNEL_Y symbols where Y <= X. * * XXX Do not add more definitions here. 
This way of doing things does not * scale, and we are going to phase it out soon --hpreg */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 0) # define KERNEL_2_1 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 2, 0) # define KERNEL_2_2 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 1) # define KERNEL_2_3_1 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 15) /* new networking */ # define KERNEL_2_3_15 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 25) /* new procfs */ # define KERNEL_2_3_25 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 29) /* even newer procfs */ # define KERNEL_2_3_29 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 43) /* softnet changes */ # define KERNEL_2_3_43 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 47) /* more softnet changes */ # define KERNEL_2_3_47 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 99) /* name in netdevice struct is array and not pointer */ # define KERNEL_2_3_99 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0) /* New 'owner' member at the beginning of struct file_operations */ # define KERNEL_2_4_0 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 8) /* New netif_rx_ni() --hpreg */ # define KERNEL_2_4_8 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22) /* New vmap() */ # define KERNEL_2_4_22 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 2) /* New kdev_t, major()/minor() API --hpreg */ # define KERNEL_2_5_2 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 5) /* New sk_alloc(), pte_offset_map()/pte_unmap() --hpreg */ # define KERNEL_2_5_5 #endif #endif /* __COMPAT_VERSION_H__ */ vmci-only/compat_wait.h0000444000000000000000000001564212025726724014166 0ustar rootroot/********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_WAIT_H__ # define __COMPAT_WAIT_H__ #include #include #include #include "compat_file.h" /* * The DECLARE_WAITQUEUE() API appeared in 2.3.1 * It was back ported in 2.2.18 * * --hpreg */ #ifndef DECLARE_WAITQUEUE typedef struct wait_queue *wait_queue_head_t; # define init_waitqueue_head(_headPtr) *(_headPtr) = NULL # define DECLARE_WAITQUEUE(_var, _task) \ struct wait_queue _var = {_task, NULL, } typedef struct wait_queue wait_queue_t; # define init_waitqueue_entry(_wait, _task) ((_wait)->task = (_task)) #endif /* * The 'struct poll_wqueues' appeared in 2.5.48, when global * /dev/epoll interface was added. It was backported to the * 2.4.20-wolk4.0s. 
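 *
 * A sketch of how the wrappers below are meant to be used in a driver's
 * poll path (illustration only, error handling omitted):
 *
 *    compat_poll_wqueues table;
 *    poll_table *wait;
 *
 *    compat_poll_initwait(wait, &table);
 *    ... pass 'wait' to each file's f_op->poll() ...
 *    compat_poll_freewait(wait, &table);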
*/ #ifdef VMW_HAVE_EPOLL // { #define compat_poll_wqueues struct poll_wqueues #else // } { #define compat_poll_wqueues poll_table #endif // } #ifdef VMW_HAVE_EPOLL // { /* If prototype does not match, build will abort here */ extern void poll_initwait(compat_poll_wqueues *); #define compat_poll_initwait(wait, table) ( \ poll_initwait((table)), \ (wait) = &(table)->pt \ ) #define compat_poll_freewait(wait, table) ( \ poll_freewait((table)) \ ) #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0) // { /* If prototype does not match, build will abort here */ extern void poll_initwait(compat_poll_wqueues *); #define compat_poll_initwait(wait, table) ( \ (wait) = (table), \ poll_initwait(wait) \ ) #define compat_poll_freewait(wait, table) ( \ poll_freewait((table)) \ ) #else // } { #define compat_poll_initwait(wait, table) ( \ (wait) = (table), /* confuse compiler */ \ (wait) = (poll_table *) __get_free_page(GFP_KERNEL), \ (wait)->nr = 0, \ (wait)->entry = (struct poll_table_entry *)((wait) + 1), \ (wait)->next = NULL \ ) static inline void poll_freewait(poll_table *wait) { while (wait) { struct poll_table_entry * entry; poll_table *old; entry = wait->entry + wait->nr; while (wait->nr > 0) { wait->nr--; entry--; remove_wait_queue(entry->wait_address, &entry->wait); compat_fput(entry->filp); } old = wait; wait = wait->next; free_page((unsigned long) old); } } #define compat_poll_freewait(wait, table) ( \ poll_freewait((wait)) \ ) #endif // } /* * The wait_event_interruptible_timeout() interface is not * defined in pre-2.6 kernels. */ #ifndef wait_event_interruptible_timeout #define __wait_event_interruptible_timeout(wq, condition, ret) \ do { \ wait_queue_t __wait; \ init_waitqueue_entry(&__wait, current); \ \ add_wait_queue(&wq, &__wait); \ for (;;) { \ set_current_state(TASK_INTERRUPTIBLE); \ if (condition) \ break; \ if (!signal_pending(current)) { \ ret = schedule_timeout(ret); \ if (!ret) \ break; \ continue; \ } \ ret = -ERESTARTSYS; \ break; \ } \ set_current_state(TASK_RUNNING); \ remove_wait_queue(&wq, &__wait); \ } while (0) #define wait_event_interruptible_timeout(wq, condition, timeout) \ ({ \ long __ret = timeout; \ if (!(condition)) \ __wait_event_interruptible_timeout(wq, condition, __ret); \ __ret; \ }) #endif /* * The wait_event_timeout() interface is not * defined in pre-2.6 kernels. */ #ifndef wait_event_timeout #define __wait_event_timeout(wq, condition, ret) \ do { \ wait_queue_t __wait; \ init_waitqueue_entry(&__wait, current); \ \ add_wait_queue(&wq, &__wait); \ for (;;) { \ set_current_state(TASK_UNINTERRUPTIBLE); \ if (condition) \ break; \ ret = schedule_timeout(ret); \ if (!ret) \ break; \ } \ set_current_state(TASK_RUNNING); \ remove_wait_queue(&wq, &__wait); \ } while (0) #define wait_event_timeout(wq, condition, timeout) \ ({ \ long __ret = timeout; \ if (!(condition)) \ __wait_event_timeout(wq, condition, __ret); \ __ret; \ }) #endif /* * DEFINE_WAIT() and friends were added in 2.5.39 and backported to 2.4.28. * * Unfortunately it is not true. While some distros may have done it the * change has never made it into vanilla 2.4 kernel. Instead of testing * particular kernel versions let's just test for presence of DEFINE_WAIT * when figuring out whether we need to provide replacement implementation * or simply alias existing one. 
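 *
 * An illustrative wait loop built on the wrappers below (not part of
 * the original header), where 'queue' is a wait_queue_head_t and
 * 'condition' is whatever the caller is waiting for:
 *
 *    COMPAT_DEFINE_WAIT(wait);
 *
 *    compat_init_prepare_to_wait(&queue, &wait, TASK_INTERRUPTIBLE);
 *    while (!condition && !signal_pending(current)) {
 *       schedule();
 *       compat_cont_prepare_to_wait(&queue, &wait, TASK_INTERRUPTIBLE);
 *    }
 *    compat_finish_wait(&queue, &wait, TASK_RUNNING);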
*/ #ifndef DEFINE_WAIT # define COMPAT_DEFINE_WAIT(_wait) \ DECLARE_WAITQUEUE(_wait, current) # define compat_init_prepare_to_wait(_sleep, _wait, _state) \ do { \ __set_current_state(_state); \ add_wait_queue(_sleep, _wait); \ } while (0) # define compat_cont_prepare_to_wait(_sleep, _wait, _state) \ set_current_state(_state) # define compat_finish_wait(_sleep, _wait, _state) \ do { \ __set_current_state(_state); \ remove_wait_queue(_sleep, _wait); \ } while (0) #else # define COMPAT_DEFINE_WAIT(_wait) \ DEFINE_WAIT(_wait) # define compat_init_prepare_to_wait(_sleep, _wait, _state) \ prepare_to_wait(_sleep, _wait, _state) # define compat_cont_prepare_to_wait(_sleep, _wait, _state) \ prepare_to_wait(_sleep, _wait, _state) # define compat_finish_wait(_sleep, _wait, _state) \ finish_wait(_sleep, _wait) #endif /* #ifndef DEFINE_WAIT */ #endif /* __COMPAT_WAIT_H__ */ vmci-only/compat_uaccess.h0000444000000000000000000000606212025726724014644 0ustar rootroot/********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_UACCESS_H__ # define __COMPAT_UACCESS_H__ /* User space access functions moved in 2.1.7 to asm/uaccess.h --hpreg */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 7) # include #else # include #endif /* get_user() API modified in 2.1.4 to take 2 arguments --hpreg */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 4) # define compat_get_user get_user #else /* * We assign 0 to the variable in case of failure to prevent "`_var' might be * used uninitialized in this function" compiler warnings. I think it is OK, * because the hardware-based version in newer kernels probably has the same * semantics and does not guarantee that the value of _var will not be * modified, should the access fail --hpreg */ # define compat_get_user(_var, _uvAddr) ({ \ int _status; \ \ _status = verify_area(VERIFY_READ, _uvAddr, sizeof(*(_uvAddr))); \ if (_status == 0) { \ (_var) = get_user(_uvAddr); \ } else { \ (_var) = 0; \ } \ _status; \ }) #endif /* * The copy_from_user() API appeared in 2.1.4 * * The emulation is not perfect here, but it is conservative: on failure, we * always return the total size, instead of the potentially smaller faulty * size --hpreg * * Since 2.5.55 copy_from_user() is no longer macro. */ #if !defined(copy_from_user) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 0) # define copy_from_user(_to, _from, _size) ( \ verify_area(VERIFY_READ, _from, _size) \ ? (_size) \ : (memcpy_fromfs(_to, _from, _size), 0) \ ) # define copy_to_user(_to, _from, _size) ( \ verify_area(VERIFY_WRITE, _to, _size) \ ? 
(_size) \ : (memcpy_tofs(_to, _from, _size), 0) \ ) #endif #endif /* __COMPAT_UACCESS_H__ */ vmci-only/driver-config.h0000444000000000000000000000425012025726724014406 0ustar rootroot/********************************************************* * Copyright (C) 1998 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * Sets the proper defines from the Linux header files * * This file must be included before the inclusion of any kernel header file, * with the exception of linux/autoconf.h and linux/version.h --hpreg */ #ifndef __VMX_CONFIG_H__ #define __VMX_CONFIG_H__ #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMNIXMOD #include "includeCheck.h" #include #include "compat_version.h" /* * We rely on Kernel Module support. Check here. */ #ifndef CONFIG_MODULES # error "No Module support in this kernel. Please configure with CONFIG_MODULES" #endif /* * 2.2 kernels still use __SMP__ (derived from CONFIG_SMP * in the main Makefile), so we do it here. */ #ifdef CONFIG_SMP # define __SMP__ 1 #endif #if defined(CONFIG_MODVERSIONS) && defined(KERNEL_2_1) # if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,60) /* * MODVERSIONS might be already defined when using kernel's Makefiles. */ # ifndef MODVERSIONS # define MODVERSIONS # endif # include # endif #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) /* * Force the uintptr_t definition to come from linux/types.h instead of vm_basic_types.h. */ # include # define _STDINT_H 1 #endif #ifndef __KERNEL__ # define __KERNEL__ #endif #endif vmci-only/kernelStubs.h0000444000000000000000000001124612025726724014154 0ustar rootroot/********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * kernelStubs.h * * KernelStubs implements some userspace library functions in terms * of kernel functions to allow library userspace code to be used in a * kernel. 
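 *
 * For illustration (not part of the original header), shared code can
 * then be written against the usual C library names even when compiled
 * into the driver:
 *
 *    char *copy = strdup(name);             // backed by kmalloc() on Linux
 *    uint32 *buf = calloc(16, sizeof *buf);
 *    ...
 *    free(buf);
 *    free(copy);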
*/ #ifndef __KERNELSTUBS_H__ #define __KERNELSTUBS_H__ #ifdef linux # ifndef __KERNEL__ # error "__KERNEL__ is not defined" # endif # include "driver-config.h" // Must be included before any other header files # include "vm_basic_types.h" # include # include #elif defined(_WIN32) # include "vm_basic_types.h" # include /* kernel memory APIs */ # include /* for _vsnprintf, vsprintf */ # include /* for va_start stuff */ # include /* for min macro. */ # include "vm_assert.h" /* Our assert macros */ #elif defined(__FreeBSD__) # include "vm_basic_types.h" # ifndef _KERNEL # error "_KERNEL is not defined" # endif # include # include # include # include # include # include # include "vm_assert.h" #elif defined(__APPLE__) # include "vm_basic_types.h" # ifndef KERNEL # error "KERNEL is not defined" # endif # include # include #endif /* * Function Prototypes */ #if defined(linux) || defined(__APPLE__) /* if (linux) || (__APPLE__) { */ # ifdef linux /* if (linux) { */ char *strdup(const char *source); # endif /* Shared between Linux and Apple kernel stubs. */ void *malloc(size_t size); void free(void *mem); void *calloc(size_t num, size_t len); void *realloc(void *ptr, size_t newSize); #elif defined(_WIN32) /* } else if (_WIN32) { */ #if (_WIN32_WINNT == 0x0400) /* The following declarations are missing on NT4. */ typedef unsigned int UINT_PTR; typedef unsigned int SIZE_T; /* No free with tag availaible on NT4 kernel! */ #define KRNL_STUBS_FREE(P,T) ExFreePool((P)) #else /* _WIN32_WINNT */ #define KRNL_STUBS_FREE(P,T) ExFreePoolWithTag((P),(T)) /* Win 2K and later useful kernel function, documented but not declared! */ NTKERNELAPI VOID ExFreePoolWithTag(IN PVOID P, IN ULONG Tag); #endif /* _WIN32_WINNT */ #elif defined(__FreeBSD__) /* } else if (FreeBSD) { */ /* Kernel memory on FreeBSD is tagged for statistics and sanity checking. */ MALLOC_DECLARE(M_VMWARE_TEMP); /* * On FreeBSD, the general memory allocator for both userland and the kernel is named * malloc, but the kernel malloc() takes more arguments. The following alias & macros * work around this, to provide the standard malloc() API for userspace code that is * being used in the kernel. */ # undef malloc static INLINE void * __compat_malloc(unsigned long size, struct malloc_type *type, int flags) { return malloc(size, type, flags); } # define malloc(size) __compat_malloc(size, M_VMWARE_TEMP, M_NOWAIT) # define calloc(count, size) __compat_malloc((count) * (size), \ M_VMWARE_TEMP, M_NOWAIT|M_ZERO) # define realloc(buf, size) realloc(buf, size, M_VMWARE_TEMP, M_NOWAIT) # define free(buf) free(buf, M_VMWARE_TEMP) # define strchr(s,c) index(s,c) # define strrchr(s,c) rindex(s,c) #endif /* } */ /* * Stub functions we provide. */ void Panic(const char *fmt, ...); char *Str_Strcpy(char *buf, const char *src, size_t maxSize); int Str_Vsnprintf(char *str, size_t size, const char *format, va_list arguments); char *Str_Vasprintf(size_t *length, const char *format, va_list arguments); char *Str_Asprintf(size_t *length, const char *Format, ...); /* * Functions the driver must implement for the stubs. */ EXTERN void Debug(const char *fmt, ...); #endif /* __KERNELSTUBS_H__ */ vmci-only/kernelStubsLinux.c0000444000000000000000000002263512025726724015173 0ustar rootroot/********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * kernelStubsLinux.c * * This file contains implementations of common userspace functions in terms * that the Linux kernel can understand. */ /* Must come before any kernel header file */ #include "driver-config.h" #include "kernelStubs.h" #include "compat_kernel.h" #include "compat_page.h" #include "compat_sched.h" #include #include "vm_assert.h" /* *----------------------------------------------------------------------------- * * Panic -- * * Prints the debug message and stops the system. * * Results: * None. * * Side effects: * None * *----------------------------------------------------------------------------- */ void Panic(const char *fmt, ...) // IN { va_list args; char *result; va_start(args, fmt); result = Str_Vasprintf(NULL, fmt, args); va_end(args); if (result) { printk(KERN_EMERG "%s", result); } BUG(); while (1); // Avoid compiler warning. } /* *---------------------------------------------------------------------- * * Str_Strcpy-- * * Wrapper for strcpy that checks for buffer overruns. * * Results: * Same as strcpy. * * Side effects: * None. * *---------------------------------------------------------------------- */ char * Str_Strcpy(char *buf, // OUT const char *src, // IN size_t maxSize) // IN { unsigned int *stack = (unsigned int *)&buf; size_t len; len = strlen(src); if (len >= maxSize) { Panic("%s:%d Buffer too small 0x%x\n", __FILE__,__LINE__, stack[-1]); } return memcpy(buf, src, len + 1); } /* *---------------------------------------------------------------------- * * Str_Vsnprintf -- * * Compatability wrapper b/w different libc versions * * Results: * int - number of bytes written (not including NULL terminate character), * -1 on overflow (insufficient space for NULL terminate is considered * overflow) * * NB: on overflow the buffer WILL be null terminated * * Side effects: * None * *---------------------------------------------------------------------- */ int Str_Vsnprintf(char *str, // OUT size_t size, // IN const char *format, // IN va_list arguments) // IN { int retval; retval = vsnprintf(str, size, format, arguments); /* * Linux glibc 2.0.x returns -1 and null terminates (which we shouldn't * be linking against), but glibc 2.1.x follows c99 and returns * characters that would have been written. */ if (retval >= size) { return -1; } return retval; } /* *----------------------------------------------------------------------------- * * Str_Vasprintf -- * * Allocate and format a string, using the GNU libc way to specify the * format (i.e. 
optionally allow the use of positional parameters) * * Results: * The allocated string on success (if 'length' is not NULL, *length * is set to the length of the allocated string) * NULL on failure * * Side effects: * None * *----------------------------------------------------------------------------- */ char * Str_Vasprintf(size_t *length, // OUT const char *format, // IN va_list arguments) // IN { /* * Simple implementation of Str_Vasprintf when userlevel libraries are not * available (e.g. for use in drivers). We just fallback to vsnprintf, * doubling if we didn't have enough space. */ unsigned int bufSize; char *buf; int retval; bufSize = strlen(format); buf = NULL; do { /* * Initial allocation of strlen(format) * 2. Should this be tunable? * XXX Yes, this could overflow and spin forever when you get near 2GB * allocations. I don't care. --rrdharan */ va_list args2; bufSize *= 2; buf = realloc(buf, bufSize); if (!buf) { return NULL; } va_copy(args2, arguments); retval = Str_Vsnprintf(buf, bufSize, format, args2); va_end(args2); } while (retval == -1); if (length) { *length = retval; } /* * Try to trim the buffer here to save memory? */ return buf; } /* *----------------------------------------------------------------------------- * * Str_Asprintf -- * * Same as Str_Vasprintf(), but parameters are passed inline --hpreg * * Results: * Same as Str_Vasprintf() * * Side effects: * Same as Str_Vasprintf() * *----------------------------------------------------------------------------- */ char * Str_Asprintf(size_t *length, // OUT const char *format, // IN ...) // IN { va_list arguments; char *result; va_start(arguments, format); result = Str_Vasprintf(length, format, arguments); va_end(arguments); return result; } /* *----------------------------------------------------------------------------- * * strdup -- * * Duplicates a string. * * Results: * A pointer to memory containing the duplicated string or NULL if no * memory was available. * * Side effects: * None * *----------------------------------------------------------------------------- */ char * strdup(const char *source) // IN { char *target = NULL; if (source) { /* * We call our special implementation of malloc() because the users of * strdup() will call free(), and that'll decrement the pointer before * freeing it. Thus, we need to make sure that the allocated block * also stores the block length before the block itself (see malloc() * below). */ unsigned int len = strlen(source); target = malloc(len + 1); if (target) { memcpy(target, source, len + 1); } } return target; } /* *---------------------------------------------------------------------------- * * malloc -- * * Allocate memory using kmalloc. There is no realloc * equivalent, so we roll our own by padding each allocation with * 4 (or 8 for 64 bit guests) extra bytes to store the block length. * * Results: * Pointer to driver heap memory, offset by 4 (or 8) * bytes from the real block pointer. * * Side effects: * None. * *---------------------------------------------------------------------------- */ void * malloc(size_t size) // IN { size_t *ptr; ptr = kmalloc(size + sizeof size, GFP_KERNEL); if (ptr) { *ptr++ = size; } return ptr; } /* *--------------------------------------------------------------------------- * * free -- * * Free memory allocated by a previous call to malloc, calloc or realloc. * * Results: * None. * * Side effects: * Calls kfree to free the real (base) pointer. 
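 *
 *      The allocation layout shared by malloc(), free() and realloc()
 *      here, for reference:
 *
 *         [ size_t size | user data ... ]
 *                        ^
 *                        pointer handed out by malloc()
 *
 *      so free() steps the pointer back one size_t before calling
 *      kfree().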
 *
 *---------------------------------------------------------------------------
 */

void
free(void *mem) // IN
{
   if (mem) {
      size_t *dataPtr = (size_t *)mem;
      kfree(--dataPtr);
   }
}


/*
 *----------------------------------------------------------------------------
 *
 * calloc --
 *
 *      Malloc and zero.
 *
 * Results:
 *      Pointer to driver heap memory (see malloc, above).
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------------
 */

void *
calloc(size_t num, // IN
       size_t len) // IN
{
   size_t size;
   void *ptr;

   size = num * len;
   ptr = malloc(size);
   if (ptr) {
      memset(ptr, 0, size);
   }
   return ptr;
}


/*
 *----------------------------------------------------------------------------
 *
 * realloc --
 *
 *      Since the driver heap has no realloc equivalent, we have to roll our
 *      own. Fortunately, we can retrieve the block size of every block we
 *      hand out since we stashed it at allocation time (see malloc above).
 *
 * Results:
 *      Pointer to memory block valid for 'newSize' bytes, or NULL if
 *      allocation failed.
 *
 * Side effects:
 *      Could copy memory around.
 *
 *----------------------------------------------------------------------------
 */

void *
realloc(void *ptr,      // IN
        size_t newSize) // IN
{
   void *newPtr;
   size_t *dataPtr;
   size_t length, lenUsed;

   dataPtr = (size_t *)ptr;
   length = ptr ? dataPtr[-1] : 0;
   if (newSize == 0) {
      if (ptr) {
         free(ptr);
         newPtr = NULL;
      } else {
         newPtr = malloc(newSize);
      }
   } else if (newSize == length) {
      newPtr = ptr;
   } else if ((newPtr = malloc(newSize))) {
      if (length < newSize) {
         lenUsed = length;
      } else {
         lenUsed = newSize;
      }
      memcpy(newPtr, ptr, lenUsed);
      free(ptr);
   }
   return newPtr;
}
vmci-only/vmciGuestKernelAPI.h0000444000000000000000000000517212025726724015315 0ustar rootroot/*********************************************************
 * Copyright (C) 2007 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 *********************************************************/

/*
 * vmciGuestKernelAPI.h --
 *
 *    Kernel API exported from the VMCI guest driver.
 */

#ifndef __VMCI_GUESTKERNELAPI_H__
#define __VMCI_GUESTKERNELAPI_H__

/* VMCI guest kernel API version number. */
#define VMCI_GUEST_KERNEL_API_VERSION  1

/* Macros to operate on the driver version number. */
#define VMCI_MAJOR_VERSION(v) (((v) >> 16) & 0xffff)
#define VMCI_MINOR_VERSION(v) ((v) & 0xffff)

#define INCLUDE_ALLOW_MODULE
#include "includeCheck.h"

#include "vmci_defs.h"
#include "vmci_call_defs.h"
#if defined(__linux__) || defined(_WIN32) /* XXX TODO for other guests. */
#  include "vmci_queue_pair.h"
#endif

/* VMCI Device Usage API. */

Bool VMCI_DeviceGet(void);
void VMCI_DeviceRelease(void);

/* VMCI Datagram API. */

int VMCIDatagram_CreateHnd(VMCIId resourceID, uint32 flags,
                           VMCIDatagramRecvCB recvCB, void *clientData,
                           VMCIHandle *outHandle);
int VMCIDatagram_DestroyHnd(VMCIHandle handle);
int VMCIDatagram_Send(VMCIDatagram *msg);

/* VMCI Utility API.
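 *
 * For example (illustration only, assuming some Log() facility on the
 * caller's side), the value returned by VMCI_Version() decomposes with
 * the macros defined above:
 *
 *    uint32 v = VMCI_Version();
 *    Log("VMCI guest driver v%u.%u\n",
 *        VMCI_MAJOR_VERSION(v), VMCI_MINOR_VERSION(v));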
*/ VMCIId VMCI_GetContextID(void); uint32 VMCI_Version(void); /* VMCI Event API. */ typedef void (*VMCI_EventCB)(VMCIId subID, VMCI_EventData *ed, void *clientData); int VMCIEvent_Subscribe(VMCI_Event event, VMCI_EventCB callback, void *callbackData, VMCIId *subID); int VMCIEvent_Unsubscribe(VMCIId subID); /* VMCI Discovery Service API. */ int VMCIDs_Lookup(const char *name, VMCIHandle *out); #if defined(__linux__) || defined(_WIN32) /* VMCI QueuePair API. XXX TODO for other guests. */ int VMCIQueuePair_Alloc(VMCIHandle *handle, VMCIQueue **produceQ, uint64 produceSize, VMCIQueue **consumeQ, uint64 consumeSize, VMCIId peer, uint32 flags); int VMCIQueuePair_Detach(VMCIHandle handle); #endif #endif /* !__VMCI_GUESTKERNELAPI_H__ */ vmci-only/vmciUtil.h0000444000000000000000000000326112025726724013445 0ustar rootroot/********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmciUtil.h -- * * Helper functions. */ #ifndef __VMCI_UTIL_H__ #define __VMCI_UTIL_H__ #define INCLUDE_ALLOW_MODULE #include "includeCheck.h" #include "vmciGuestKernelIf.h" #include "vmci_infrastructure.h" #include "vmciGuestKernelAPI.h" #define VMCI_MAJOR_VERSION_NUMBER 1 #define VMCI_MINOR_VERSION_NUMBER 0 #define VMCI_VERSION_NUMBER \ ((VMCI_MAJOR_VERSION_NUMBER << 16) | (VMCI_MINOR_VERSION_NUMBER)) typedef struct VMCIGuestDeviceHandle { void *obj; VMCIObjType objType; } VMCIGuestDeviceHandle; void VMCIUtil_Init(void); void VMCIUtil_Exit(void); Bool VMCIUtil_CheckHostCapabilities(void); Bool VMCI_CheckHostCapabilities(void); Bool VMCI_InInterrupt(void); void VMCI_ReadDatagramsFromPort(VMCIIoHandle ioHandle, VMCIIoPort dgInPort, uint8 *dgInBuffer, size_t dgInBufferSize); #endif //__VMCI_UTIL_H__ vmci-only/vmciUtil.c0000444000000000000000000003435412025726724013447 0ustar rootroot/********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmciUtil.c * * Small utility function for allocating kernel memory and copying data. 
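 *
 * A typical calling pattern for the device usage API implemented in
 * this file (illustration only; the error code is just an example of
 * what a caller might return):
 *
 *    if (!VMCI_DeviceGet()) {
 *       return VMCI_ERROR_DEVICE_NOT_FOUND;
 *    }
 *    ... use VMCI_GetContextID(), VMCIDatagram_Send(), ...
 *    VMCI_DeviceRelease();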
* */ #ifdef __linux__ # include "driver-config.h" # define EXPORT_SYMTAB # include # include # include "compat_kernel.h" # include "compat_slab.h" # include "compat_wait.h" # include "compat_interrupt.h" #elif defined(_WIN32) # ifndef WINNT_DDK # error This file only works with the NT ddk # endif // WINNT_DDK # include #elif defined(SOLARIS) # include # include # include #else #error "platform not supported." #endif //linux #define LGPFX "VMCIUtil: " #include "vmware.h" #include "vm_atomic.h" #include "vmci_defs.h" #include "vmci_kernel_if.h" #include "vmciGuestKernelIf.h" #include "vmciInt.h" #include "vmciProcess.h" #include "vmciDatagram.h" #include "vmciUtil.h" #include "vmciEvent.h" static void VMCIUtilCidUpdate(VMCIId subID, VMCI_EventData *eventData, void *clientData); static VMCIId ctxUpdateSubID = VMCI_INVALID_ID; static Atomic_uint32 vmContextID = { VMCI_INVALID_ID }; /* *----------------------------------------------------------------------------- * * VMCIUtil_Init -- * * Subscribe to context id update event. * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ void VMCIUtil_Init(void) { /* * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can update the * internal context id when needed. */ if (VMCIEvent_Subscribe(VMCI_EVENT_CTX_ID_UPDATE, VMCIUtilCidUpdate, NULL, &ctxUpdateSubID) < VMCI_SUCCESS) { VMCI_LOG(("VMCIUtil: Failed to subscribe to event %d.\n", VMCI_EVENT_CTX_ID_UPDATE)); } } /* *----------------------------------------------------------------------------- * * VMCIUtil_Exit -- * * Cleanup * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ void VMCIUtil_Exit(void) { if (VMCIEvent_Unsubscribe(ctxUpdateSubID) < VMCI_SUCCESS) { VMCI_LOG(("VMCIUtil: Failed to unsubscribe to event %d with subscriber " "id %d.\n", VMCI_EVENT_CTX_ID_UPDATE, ctxUpdateSubID)); } } /* *----------------------------------------------------------------------------- * * VMCIUtilCidUpdate -- * * Gets called with the new context id if updated or resumed. * * Results: * Context id. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static void VMCIUtilCidUpdate(VMCIId subID, // IN: VMCI_EventData *eventData, // IN: void *clientData) // IN: { VMCIEventPayload_Context *evPayload = VMCIEventDataPayload(eventData); if (subID != ctxUpdateSubID) { VMCI_LOG(("VMCIUtil: Invalid subscriber id. %d.\n", subID)); return; } if (eventData == NULL || evPayload->contextID == VMCI_INVALID_ID) { VMCI_LOG(("VMCIUtil: Invalid event data.\n")); return; } VMCI_LOG(("VMCIUtil: Updating context id from 0x%x to 0x%x on event %d.\n", Atomic_Read(&vmContextID), evPayload->contextID, eventData->event)); Atomic_Write(&vmContextID, evPayload->contextID); } /* *----------------------------------------------------------------------------- * * VMCIUtil_CheckHostCapabilities -- * * Verify that the host supports the hypercalls we need. If it does not, * try to find fallback hypercalls and use those instead. * * Results: * TRUE if required hypercalls (or fallback hypercalls) are * supported by the host, FALSE otherwise. * * Side effects: * None. 
* *----------------------------------------------------------------------------- */ #define VMCI_UTIL_NUM_RESOURCES 1 Bool VMCIUtil_CheckHostCapabilities(void) { int result; VMCIResourcesQueryMsg *msg; uint32 msgSize = sizeof(VMCIResourcesQueryHdr) + VMCI_UTIL_NUM_RESOURCES * sizeof(VMCI_Resource); VMCIDatagram *checkMsg = VMCI_AllocKernelMem(msgSize, VMCI_MEMORY_NONPAGED); if (checkMsg == NULL) { VMCI_LOG((LGPFX"Check host: Insufficient memory.\n")); return FALSE; } checkMsg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_RESOURCES_QUERY); checkMsg->src = VMCI_ANON_SRC_HANDLE; checkMsg->payloadSize = msgSize - VMCI_DG_HEADERSIZE; msg = (VMCIResourcesQueryMsg *)VMCI_DG_PAYLOAD(checkMsg); msg->numResources = VMCI_UTIL_NUM_RESOURCES; msg->resources[0] = VMCI_GET_CONTEXT_ID; result = VMCI_SendDatagram(checkMsg); VMCI_FreeKernelMem(checkMsg, msgSize); /* We need the vector. There are no fallbacks. */ return (result == 0x1); } /* *----------------------------------------------------------------------------- * * VMCI_GetContextID -- * * Returns the context id. * * Results: * Context id. * * Side effects: * None. * *----------------------------------------------------------------------------- */ #ifdef __linux__ EXPORT_SYMBOL(VMCI_GetContextID); #endif VMCIId VMCI_GetContextID(void) { if (Atomic_Read(&vmContextID) == VMCI_INVALID_ID) { uint32 result; VMCIDatagram getCidMsg; getCidMsg.dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_GET_CONTEXT_ID); getCidMsg.src = VMCI_ANON_SRC_HANDLE; getCidMsg.payloadSize = 0; result = VMCI_SendDatagram(&getCidMsg); Atomic_Write(&vmContextID, result); } return Atomic_Read(&vmContextID); } /* *----------------------------------------------------------------------------- * * VMCI_CheckHostCapabilities -- * * Tell host which guestcalls we support and let each API check * that the host supports the hypercalls it needs. If a hypercall * is not supported, the API can check for a fallback hypercall, * or fail the check. * * Results: * TRUE if successful, FALSE otherwise. * * Side effects: * Fallback mechanisms may be enabled in the API and vmmon. * *----------------------------------------------------------------------------- */ Bool VMCI_CheckHostCapabilities(void) { Bool result = VMCIEvent_CheckHostCapabilities(); result &= VMCIProcess_CheckHostCapabilities(); result &= VMCIDatagram_CheckHostCapabilities(); result &= VMCIUtil_CheckHostCapabilities(); VMCI_LOG((LGPFX"Host capability check: %s\n", result ? "PASSED" : "FAILED")); return result; } /* *---------------------------------------------------------------------- * * VMCI_Version -- * * Returns the version of the VMCI guest driver. * * Results: * Returns a version number. * * Side effects: * None. * *---------------------------------------------------------------------- */ #ifdef __linux__ EXPORT_SYMBOL(VMCI_Version); #endif uint32 VMCI_Version() { return VMCI_VERSION_NUMBER; } /* *---------------------------------------------------------------------- * * VMCI_InInterrupt -- * * Determines if we are running in tasklet/dispatch level or above. * * Results: * TRUE if tasklet/dispatch or above, FALSE otherwise. * * Side effects: * None. * *---------------------------------------------------------------------- */ Bool VMCI_InInterrupt() { #if defined(_WIN32) return KeGetCurrentIrql() >= DISPATCH_LEVEL; #elif defined(__linux__) return in_interrupt(); #elif defined(SOLARIS) return servicing_interrupt(); /* servicing_interrupt is not part of DDI. 
*/ #endif // } /* *---------------------------------------------------------------------- * * VMCI_DeviceGet -- * * Verifies that a valid VMCI device is present, and indicates * the callers intention to use the device until it calls * VMCI_DeviceRelease(). * * Results: * TRUE if a valid VMCI device is present, FALSE otherwise. * * Side effects: * None. * *---------------------------------------------------------------------- */ #ifdef __linux__ EXPORT_SYMBOL(VMCI_DeviceGet); #endif Bool VMCI_DeviceGet(void) { return VMCI_DeviceEnabled(); } /* *---------------------------------------------------------------------- * * VMCI_DeviceRelease -- * * Indicates that the caller is done using the VMCI device. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ #ifdef __linux__ EXPORT_SYMBOL(VMCI_DeviceRelease); #endif void VMCI_DeviceRelease(void) { } /* *---------------------------------------------------------------------- * * VMCI_ReadDatagramsFromPort -- * * Reads datagrams from the data in port and dispatches them. We * always start reading datagrams into only the first page of the * datagram buffer. If the datagrams don't fit into one page, we * use the maximum datagram buffer size for the remainder of the * invocation. This is a simple heuristic for not penalizing * small datagrams. * * This function assumes that it has exclusive access to the data * in port for the duration of the call. * * Results: * No result. * * Side effects: * Datagram handlers may be invoked. * *---------------------------------------------------------------------- */ void VMCI_ReadDatagramsFromPort(VMCIIoHandle ioHandle, // IN VMCIIoPort dgInPort, // IN uint8 *dgInBuffer, // IN size_t dgInBufferSize) // IN { VMCIDatagram *dg; size_t currentDgInBufferSize = PAGE_SIZE; size_t remainingBytes; ASSERT(dgInBufferSize >= PAGE_SIZE); VMCI_ReadPortBytes(ioHandle, dgInPort, dgInBuffer, currentDgInBufferSize); dg = (VMCIDatagram *)dgInBuffer; remainingBytes = currentDgInBufferSize; while (dg->dst.resource != VMCI_ERROR_INVALID_RESOURCE || remainingBytes > PAGE_SIZE) { unsigned dgInSize; /* * When the input buffer spans multiple pages, a datagram can * start on any page boundary in the buffer. */ if (dg->dst.resource == VMCI_ERROR_INVALID_RESOURCE) { ASSERT(remainingBytes > PAGE_SIZE); dg = (VMCIDatagram *)ROUNDUP((uintptr_t)dg + 1, PAGE_SIZE); ASSERT((uint8 *)dg < dgInBuffer + currentDgInBufferSize); remainingBytes = (size_t)(dgInBuffer + currentDgInBufferSize - (uint8 *)dg); continue; } dgInSize = VMCI_DG_SIZE_ALIGNED(dg); if (dgInSize <= dgInBufferSize) { int result; /* * If the remaining bytes in the datagram buffer doesn't * contain the complete datagram, we first make sure we have * enough room for it and then we read the reminder of the * datagram and possibly any following datagrams. */ if (dgInSize > remainingBytes) { if (remainingBytes != currentDgInBufferSize) { /* * We move the partial datagram to the front and read * the reminder of the datagram and possibly following * calls into the following bytes. */ memmove(dgInBuffer, dgInBuffer + currentDgInBufferSize - remainingBytes, remainingBytes); dg = (VMCIDatagram *)dgInBuffer; } if (currentDgInBufferSize != dgInBufferSize) { currentDgInBufferSize = dgInBufferSize; } VMCI_ReadPortBytes(ioHandle, dgInPort, dgInBuffer + remainingBytes, currentDgInBufferSize - remainingBytes); } /* We special case event datagrams from the hypervisor. 
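 * Such datagrams target the VMCI_EVENT_HANDLER resource and are handed
 * to VMCIEvent_Dispatch() so that event subscribers get their
 * callbacks; all other datagrams go through the generic
 * VMCIDatagram_Dispatch() path.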
*/ if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID && dg->dst.resource == VMCI_EVENT_HANDLER) { result = VMCIEvent_Dispatch(dg); } else { result = VMCIDatagram_Dispatch(dg->src.context, dg); } if (result < VMCI_SUCCESS) { VMCI_LOG(("Datagram with resource %d failed with err %x.\n", dg->dst.resource, result)); } /* On to the next datagram. */ dg = (VMCIDatagram *)((uint8 *)dg + dgInSize); } else { size_t bytesToSkip; /* * Datagram doesn't fit in datagram buffer of maximal size. We drop it. */ VMCI_LOG(("Failed to receive datagram of size %u.\n", dgInSize)); bytesToSkip = dgInSize - remainingBytes; if (currentDgInBufferSize != dgInBufferSize) { currentDgInBufferSize = dgInBufferSize; } for (;;) { VMCI_ReadPortBytes(ioHandle, dgInPort, dgInBuffer, currentDgInBufferSize); if (bytesToSkip <= currentDgInBufferSize) { break; } bytesToSkip -= currentDgInBufferSize; } dg = (VMCIDatagram *)(dgInBuffer + bytesToSkip); } remainingBytes = (size_t) (dgInBuffer + currentDgInBufferSize - (uint8 *)dg); if (remainingBytes < VMCI_DG_HEADERSIZE) { /* Get the next batch of datagrams. */ VMCI_ReadPortBytes(ioHandle, dgInPort, dgInBuffer, currentDgInBufferSize); dg = (VMCIDatagram *)dgInBuffer; remainingBytes = currentDgInBufferSize; } } } vmci-only/vmciEvent.c0000444000000000000000000002713712025726724013614 0ustar rootroot/********************************************************* * Copyright (C) 2007 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmciEvent.c -- * * VMCI Event code for host and guests. */ #if defined(__linux__) && !defined(VMKERNEL) # include "driver-config.h" # define EXPORT_SYMTAB # include # include "compat_kernel.h" #endif // __linux__ #include "vmci_defs.h" #include "vmci_kernel_if.h" #include "vmci_infrastructure.h" #include "vmciEvent.h" #ifdef VMX86_TOOLS # include "vmciInt.h" # include "vmciGuestKernelAPI.h" # include "vmciUtil.h" #else # include "vmciDriver.h" #endif #include "circList.h" #ifdef VMKERNEL # include "vm_libc.h" #endif #define EVENT_MAGIC 0xEABE0000 typedef struct VMCISubscription { VMCIId id; VMCI_Event event; VMCI_EventCB callback; void *callbackData; ListItem subscriberListItem; } VMCISubscription; typedef struct VMCISubscriptionItem { ListItem listItem; VMCISubscription sub; } VMCISubscriptionItem; static VMCISubscription *VMCIEventFind(VMCIId subID); static int VMCIEventRegisterSubscription(VMCISubscription *sub, VMCI_Event event, VMCI_EventCB callback, void *callbackData); static VMCISubscription *VMCIEventUnregisterSubscription(VMCIId subID); /* * In the guest, VMCI events are dispatched from interrupt context, so * the locks need to be bottom half safe. In the host kernel, this * isn't so, and regular locks are used instead. 
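 * The macros below select the matching lock primitives at compile
 * time so that the rest of this file can stay context neutral.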
*/ #ifdef VMX86_TOOLS #define VMCIEventInitLock(_lock, _name) VMCI_InitLock(_lock, _name, VMCI_LOCK_RANK_MIDDLE_BH) #define VMCIEventGrabLock(_lock, _flags) VMCI_GrabLock_BH(_lock, _flags) #define VMCIEventReleaseLock(_lock, _flags) VMCI_ReleaseLock_BH(_lock, _flags) #else #define VMCIEventInitLock(_lock, _name) VMCI_InitLock(_lock, _name, VMCI_LOCK_RANK_HIGH) #define VMCIEventGrabLock(_lock, _flags) VMCI_GrabLock(_lock, _flags) #define VMCIEventReleaseLock(_lock, _flags) VMCI_ReleaseLock(_lock, _flags) #endif static ListItem *subscriberArray[VMCI_EVENT_MAX] = {NULL}; static VMCILock subscriberLock; /* *---------------------------------------------------------------------- * * VMCIEvent_Init -- * * General init code. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ void VMCIEvent_Init(void) { VMCIEventInitLock(&subscriberLock, "VMCIEventSubscriberLock"); } /* *---------------------------------------------------------------------- * * VMCIEvent_Exit -- * * General exit code. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ void VMCIEvent_Exit(void) { VMCILockFlags flags; ListItem *iter, *iter2; VMCI_Event e; /* We free all memory at exit. */ VMCIEventGrabLock(&subscriberLock, &flags); for (e = 0; e < VMCI_EVENT_MAX; e++) { LIST_SCAN_SAFE(iter, iter2, subscriberArray[e]) { VMCISubscription *cur = LIST_CONTAINER(iter, VMCISubscription, subscriberListItem); VMCI_FreeKernelMem(cur, sizeof *cur); } subscriberArray[e] = NULL; } VMCIEventReleaseLock(&subscriberLock, flags); VMCI_CleanupLock(&subscriberLock); } #ifdef VMX86_TOOLS /* *----------------------------------------------------------------------------- * * VMCIEvent_CheckHostCapabilities -- * * Verify that the host supports the hypercalls we need. If it does not, * try to find fallback hypercalls and use those instead. * * Results: * TRUE if required hypercalls (or fallback hypercalls) are * supported by the host, FALSE otherwise. * * Side effects: * None. * *----------------------------------------------------------------------------- */ Bool VMCIEvent_CheckHostCapabilities(void) { /* VMCIEvent does not require any hypercalls. */ return TRUE; } #endif /* *----------------------------------------------------------------------------- * * VMCIEventFind -- * * Find entry. Assumes lock is held. * * Results: * Entry if found, NULL if not. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static VMCISubscription * VMCIEventFind(VMCIId subID) // IN { ListItem *iter; VMCI_Event e; for (e = 0; e < VMCI_EVENT_MAX; e++) { LIST_SCAN(iter, subscriberArray[e]) { VMCISubscription *cur = LIST_CONTAINER(iter, VMCISubscription, subscriberListItem); if (cur->id == subID) { return cur; } } } return NULL; } /* *---------------------------------------------------------------------- * * VMCIEvent_Dispatch -- * * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all * subscribers for given event. * * Results: * VMCI_SUCCESS on success, error code otherwise. * * Side effects: * None. 
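 *
 * Note: subscriber callbacks are invoked with the subscriber lock
 * held, and in the guest they may run from interrupt context, so
 * they must not block.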
* *---------------------------------------------------------------------- */ int VMCIEvent_Dispatch(VMCIDatagram *msg) // IN { ListItem *iter; VMCILockFlags flags; VMCIEventMsg *eventMsg = (VMCIEventMsg *)msg; ASSERT(msg && msg->src.context == VMCI_HYPERVISOR_CONTEXT_ID && msg->dst.resource == VMCI_EVENT_HANDLER); if (msg->payloadSize < sizeof(VMCI_Event) || msg->payloadSize > sizeof(VMCIEventData_Max)) { return VMCI_ERROR_INVALID_ARGS; } if (eventMsg->eventData.event >= VMCI_EVENT_MAX) { return VMCI_ERROR_EVENT_UNKNOWN; } VMCIEventGrabLock(&subscriberLock, &flags); LIST_SCAN(iter, subscriberArray[eventMsg->eventData.event]) { uint8 eventPayload[sizeof(VMCIEventData_Max)]; VMCI_EventData *ed; VMCISubscription *cur = LIST_CONTAINER(iter, VMCISubscription, subscriberListItem); ASSERT(cur && cur->event == eventMsg->eventData.event); /* We set event data before each callback to ensure isolation. */ memset(eventPayload, 0, sizeof eventPayload); memcpy(eventPayload, VMCI_DG_PAYLOAD(eventMsg), (size_t)eventMsg->hdr.payloadSize); ed = (VMCI_EventData *)eventPayload; cur->callback(cur->id, ed, cur->callbackData); } VMCIEventReleaseLock(&subscriberLock, flags); return VMCI_SUCCESS; } /* *---------------------------------------------------------------------- * * VMCIEventRegisterSubscription -- * * Initialize and add subscription to subscriber list. * * Results: * VMCI_SUCCESS on success, error code otherwise. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int VMCIEventRegisterSubscription(VMCISubscription *sub, // IN VMCI_Event event, // IN VMCI_EventCB callback, // IN void *callbackData) // IN { # define VMCI_EVENT_MAX_ATTEMPTS 10 static VMCIId subscriptionID = 0; VMCILockFlags flags; uint32 attempts = 0; int result; Bool success; ASSERT(sub); if (event >= VMCI_EVENT_MAX || callback == NULL) { VMCI_LOG(("VMCIEvent: Failed to subscribe to event %d cb %p data %p.\n", event, callback, callbackData)); return VMCI_ERROR_INVALID_ARGS; } sub->event = event; sub->callback = callback; sub->callbackData = callbackData; VMCIEventGrabLock(&subscriberLock, &flags); ASSERT(subscriberArray); for (success = FALSE, attempts = 0; success == FALSE && attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) { /* * We try to get an id a couple of time before claiming we are out of * resources. */ sub->id = ++subscriptionID; /* Test for duplicate id. */ if (VMCIEventFind(sub->id) == NULL) { /* We succeeded if we didn't find a duplicate. */ success = TRUE; } } if (success) { LIST_QUEUE(&sub->subscriberListItem, &subscriberArray[event]); result = VMCI_SUCCESS; } else { result = VMCI_ERROR_NO_RESOURCES; } VMCIEventReleaseLock(&subscriberLock, flags); return result; # undef VMCI_EVENT_MAX_ATTEMPTS } /* *---------------------------------------------------------------------- * * VMCIEventUnregisterSubscription -- * * Remove subscription from subscriber list. * * Results: * VMCISubscription when found, NULL otherwise. * * Side effects: * None. * *---------------------------------------------------------------------- */ static VMCISubscription * VMCIEventUnregisterSubscription(VMCIId subID) // IN { VMCILockFlags flags; VMCISubscription *s; VMCIEventGrabLock(&subscriberLock, &flags); s = VMCIEventFind(subID); if (s != NULL) { LIST_DEL(&s->subscriberListItem, &subscriberArray[s->event]); } VMCIEventReleaseLock(&subscriberLock, flags); return s; } /* *---------------------------------------------------------------------- * * VMCIEvent_Subscribe -- * * Subscribe to given event. 
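 *
 * Example usage -- a hedged sketch with a hypothetical callback;
 * VMCI_EVENT_CTX_ID_UPDATE is assumed to be one of the VMCI_Event
 * codes from vmci_defs.h:
 *
 *    static void
 *    ExampleCB(VMCIId subID, VMCI_EventData *ed, void *clientData)
 *    {
 *       // React to the event; must not block (see Dispatch above).
 *    }
 *
 *    VMCIId subID;
 *    if (VMCIEvent_Subscribe(VMCI_EVENT_CTX_ID_UPDATE, ExampleCB,
 *                            NULL, &subID) >= VMCI_SUCCESS) {
 *       // ... later, when no longer interested ...
 *       VMCIEvent_Unsubscribe(subID);
 *    }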
* * Results: * VMCI_SUCCESS on success, error code otherwise. * * Side effects: * None. * *---------------------------------------------------------------------- */ #if defined(__linux__) && !defined(VMKERNEL) EXPORT_SYMBOL(VMCIEvent_Subscribe); #endif int VMCIEvent_Subscribe(VMCI_Event event, // IN VMCI_EventCB callback, // IN void *callbackData, // IN VMCIId *subscriptionID) // OUT { int retval; VMCISubscription *s = NULL; if (subscriptionID == NULL) { VMCI_LOG(("VMCIEvent: Invalid arguments.\n")); return VMCI_ERROR_INVALID_ARGS; } s = VMCI_AllocKernelMem(sizeof *s, VMCI_MEMORY_NONPAGED); if (s == NULL) { return VMCI_ERROR_NO_MEM; } retval = VMCIEventRegisterSubscription(s, event, callback, callbackData); if (retval < VMCI_SUCCESS) { VMCI_FreeKernelMem(s, sizeof *s); return retval; } *subscriptionID = s->id; return retval; } /* *---------------------------------------------------------------------- * * VMCIEvent_Unsubscribe -- * * Unsubscribe to given event. Removes it from list and frees it. * Will return callbackData if requested by caller. * * Results: * VMCI_SUCCESS on success, error code otherwise. * * Side effects: * None. * *---------------------------------------------------------------------- */ #if defined(__linux__) && !defined(VMKERNEL) EXPORT_SYMBOL(VMCIEvent_Unsubscribe); #endif int VMCIEvent_Unsubscribe(VMCIId subID) // IN { VMCISubscription *s; /* * Return subscription. At this point we know noone else is accessing * the subscription so we can free it. */ s = VMCIEventUnregisterSubscription(subID); if (s == NULL) { return VMCI_ERROR_NOT_FOUND; } VMCI_FreeKernelMem(s, sizeof *s); return VMCI_SUCCESS; } vmci-only/vmciEvent.h0000444000000000000000000000322412025726724013610 0ustar rootroot/********************************************************* * Copyright (C) 2007 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmciEvent.h -- * * Event code for the vmci guest driver */ #ifndef __VMCI_EVENT_H__ #define __VMCI_EVENT_H__ #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_VMKERNEL #include "includeCheck.h" #include "vmci_defs.h" #include "vmci_call_defs.h" void VMCIEvent_Init(void); void VMCIEvent_Exit(void); int VMCIEvent_Dispatch(VMCIDatagram *msg); #ifdef VMX86_TOOLS Bool VMCIEvent_CheckHostCapabilities(void); #else /* * Public VMCI Event API for host kernel. */ typedef void (*VMCI_EventCB)(VMCIId subID, VMCI_EventData *ed, void *clientData); int VMCIEvent_Subscribe(VMCI_Event event, VMCI_EventCB callback, void *callbackData, VMCIId *subID); int VMCIEvent_Unsubscribe(VMCIId subID); #endif #endif //__VMCI_EVENT_H__ vmci-only/vmciProcess.h0000444000000000000000000000311512025726724014144 0ustar rootroot/********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmciProcess.h -- * * Process code for the Linux guest driver */ #ifndef __VMCI_PROCESS_H__ #define __VMCI_PROCESS_H__ #define INCLUDE_ALLOW_MODULE #include "includeCheck.h" #include "vm_basic_types.h" #include "vmci_defs.h" #include "vmci_handle_array.h" #include "circList.h" typedef struct VMCIProcess { ListItem listItem; /* For global process list. */ VMCIId pid; /* Process id. */ } VMCIProcess; void VMCIProcess_Init(void); void VMCIProcess_Exit(void); Bool VMCIProcess_CheckHostCapabilities(void); int VMCIProcess_Create(VMCIProcess **outProcess, int eventHnd); void VMCIProcess_Destroy(VMCIProcess *process); VMCIProcess *VMCIProcess_Get(VMCIId processID); #endif //__VMCI_PROCESS_H__ vmci-only/vmciProcess.c0000444000000000000000000001160612025726724014143 0ustar rootroot/********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmciProcess.c -- * * VMCI Process code for guest driver. */ #ifdef __linux__ # include "driver-config.h" # define EXPORT_SYMTAB # include # include "compat_kernel.h" # include "compat_pci.h" #endif // __linux__ #include "vmciInt.h" #include "vmci_defs.h" #include "vmci_kernel_if.h" #include "vmciProcess.h" #include "vmciDatagram.h" #include "vmci_infrastructure.h" #include "circList.h" #include "vmciUtil.h" #include "vmciGuestKernelAPI.h" static ListItem *processList = NULL; static VMCILock processLock; /* *---------------------------------------------------------------------- * * VMCIProcess_Init -- * * General init code. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ void VMCIProcess_Init(void) { VMCI_InitLock(&processLock, "VMCIProcessListLock", VMCI_LOCK_RANK_HIGH); } /* *---------------------------------------------------------------------- * * VMCIProcess_Exit -- * * General init code. * * Results: * None. * * Side effects: * None. 
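 *
 * (This is the module exit path: it tears down the process list lock
 * set up in VMCIProcess_Init.)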
* *---------------------------------------------------------------------- */ void VMCIProcess_Exit(void) { VMCI_CleanupLock(&processLock); } /* *----------------------------------------------------------------------------- * * VMCIProcess_CheckHostCapabilities -- * * Verify that the host supports the hypercalls we need. If it does not, * try to find fallback hypercalls and use those instead. * * Results: * TRUE if required hypercalls (or fallback hypercalls) are * supported by the host, FALSE otherwise. * * Side effects: * None. * *----------------------------------------------------------------------------- */ Bool VMCIProcess_CheckHostCapabilities(void) { /* VMCIProcess does not require any hypercalls. */ return TRUE; } /* *---------------------------------------------------------------------- * * VMCIProcess_Create -- * * Creates a new VMCI process. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ int VMCIProcess_Create(VMCIProcess **outProcess, // IN int eventHnd) // IN { VMCIProcess *process; VMCILockFlags flags; process = VMCI_AllocKernelMem(sizeof *process, VMCI_MEMORY_NONPAGED); if (process == NULL) { return VMCI_ERROR_NO_MEM; } process->pid = (VMCIId)(uintptr_t)process >> 1; VMCI_GrabLock(&processLock, &flags); LIST_QUEUE(&process->listItem, &processList); VMCI_ReleaseLock(&processLock, flags); *outProcess = process; return 0; } /* *---------------------------------------------------------------------- * * VMCIProcess_Destroy -- * * Destroys a VMCI process. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ void VMCIProcess_Destroy(VMCIProcess *process) { VMCILockFlags flags; /* Dequeue process. */ VMCI_GrabLock(&processLock, &flags); LIST_DEL(&process->listItem, &processList); VMCI_ReleaseLock(&processLock, flags); VMCI_FreeKernelMem(process, sizeof *process); } /* *---------------------------------------------------------------------- * * VMCIProcess_Get -- * * Get the process corresponding to the pid. * * Results: * VMCI process on success, NULL otherwise. * * Side effects: * None. * *---------------------------------------------------------------------- */ VMCIProcess * VMCIProcess_Get(VMCIId processID) // IN { VMCIProcess *process = NULL; ListItem *next; VMCILockFlags flags; VMCI_GrabLock(&processLock, &flags); if (processList == NULL) { goto out; } LIST_SCAN(next, processList) { process = LIST_CONTAINER(next, VMCIProcess, listItem); if (process->pid == processID) { break; } } out: VMCI_ReleaseLock(&processLock, flags); return (process && process->pid == processID) ? process : NULL; } vmci-only/vmciDatagram.h0000444000000000000000000000403112025726724014244 0ustar rootroot/********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmciDatagram.h -- * * Simple Datagram API for the Linux guest driver. */ #ifndef __VMCI_DATAGRAM_H__ #define __VMCI_DATAGRAM_H__ #define INCLUDE_ALLOW_MODULE #include "includeCheck.h" #include "vmci_defs.h" #include "vmci_kernel_if.h" #include "vmci_infrastructure.h" #include "circList.h" #include "vmciGuestKernelAPI.h" #include "vmci_iocontrols.h" typedef struct DatagramQueueEntry { ListItem listItem; /* For queuing. */ VMCIDatagram *dg; /* Pending datagram. */ } DatagramQueueEntry; typedef struct VMCIDatagramProcess { VMCILock datagramQueueLock; VMCIHandle handle; VMCIHost host; uint32 pendingDatagrams; size_t datagramQueueSize; ListItem *datagramQueue; } VMCIDatagramProcess; void VMCIDatagram_Init(void); Bool VMCIDatagram_CheckHostCapabilities(void); int VMCIDatagram_Dispatch(VMCIId contextID, VMCIDatagram *msg); int VMCIDatagramProcess_Create(VMCIDatagramProcess **outDgmProc, VMCIDatagramCreateInfo *createInfo); void VMCIDatagramProcess_Destroy(VMCIDatagramProcess *dgmProc); int VMCIDatagramProcess_ReadCall(VMCIDatagramProcess *dgmProc, size_t maxSize, VMCIDatagram **dg); #endif //__VMCI_DATAGRAM_H__ vmci-only/vmciDatagram.c0000444000000000000000000006005412025726724014246 0ustar rootroot/********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmciDatagram.c -- * * Simple Datagram API for the Linux guest driver. */ #ifdef __linux__ # include "driver-config.h" # define EXPORT_SYMTAB # include # include "compat_kernel.h" # include "compat_pci.h" #elif defined(_WIN32) # include #elif defined(SOLARIS) # include # include #else # error "Platform not support by VMCI datagram API." #endif // linux #include "vm_basic_types.h" #include "vm_assert.h" #include "vmci_defs.h" #include "vmci_kernel_if.h" #include "vmci_infrastructure.h" #include "vmciInt.h" #include "vmciUtil.h" #include "vmciDatagram.h" typedef struct DatagramHashEntry { struct DatagramHashEntry *next; int refCount; VMCIHandle handle; uint32 flags; VMCIDatagramRecvCB recvCB; void *clientData; VMCIEvent destroyEvent; } DatagramHashEntry; #define HASH_TABLE_SIZE 64 /* * Hash table containing all the datagram handles for this VM. It is * synchronized using a single lock but we should consider making it more * fine grained, e.g. a per bucket lock or per set of buckets' lock. 
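 * Handles are mapped to buckets with VMCI_Hash(handle,
 * HASH_TABLE_SIZE); each lookup below therefore walks a single
 * bucket chain.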
*/ typedef struct DatagramHashTable { VMCILock lock; DatagramHashEntry *entries[HASH_TABLE_SIZE]; } DatagramHashTable; static int DatagramReleaseCB(void *clientData); static int DatagramHashAddEntry(DatagramHashEntry *entry, VMCIId contextID); static int DatagramHashRemoveEntry(VMCIHandle handle); static DatagramHashEntry *DatagramHashGetEntry(VMCIHandle handle); static void DatagramHashReleaseEntry(DatagramHashEntry *entry); static Bool DatagramHandleUniqueLocked(VMCIHandle handle); static int DatagramProcessNotify(void *clientData, VMCIDatagram *msg); DatagramHashTable hashTable; /* *------------------------------------------------------------------------------ * * DatagramReleaseCB -- * * Callback to release the datagram entry reference. It is called by the * VMCI_WaitOnEvent function before it blocks. * * Result: * None. * *------------------------------------------------------------------------------ */ static int DatagramReleaseCB(void *clientData) { DatagramHashEntry *entry = (DatagramHashEntry *)clientData; ASSERT(entry); DatagramHashReleaseEntry(entry); return 0; } /* *------------------------------------------------------------------------- * * DatagramHashAddEntry -- * Given a datagram handle entry, adds it to the hashtable of datagram * entries. Allocates a resource id iff the handle of the given entry * is an invalid one. 0 through VMCI_RESERVED_RESOURCE_ID_MAX are * reserved resource ids. * * Result: * VMCI_SUCCESS if added, error if not. * *------------------------------------------------------------------------- */ static int DatagramHashAddEntry(DatagramHashEntry *entry, // IN: VMCIId contextID) // IN: { int idx; VMCILockFlags flags; static VMCIId datagramRID = VMCI_RESERVED_RESOURCE_ID_MAX + 1; ASSERT(entry && contextID != VMCI_INVALID_ID); VMCI_GrabLock_BH(&hashTable.lock, &flags); if (!VMCI_HANDLE_INVALID(entry->handle) && !DatagramHandleUniqueLocked(entry->handle)) { VMCI_ReleaseLock_BH(&hashTable.lock, flags); return VMCI_ERROR_DUPLICATE_ENTRY; } else if (VMCI_HANDLE_INVALID(entry->handle)) { VMCIId oldRID = datagramRID; VMCIHandle handle; Bool foundRID = FALSE; /* * Generate a unique datagram rid. Keep on trying until we wrap around * in the RID space. */ ASSERT(oldRID > VMCI_RESERVED_RESOURCE_ID_MAX); do { handle = VMCI_MAKE_HANDLE(contextID, datagramRID); foundRID = DatagramHandleUniqueLocked(handle); datagramRID++; if (UNLIKELY(!datagramRID)) { /* * Skip the reserved rids. */ datagramRID = VMCI_RESERVED_RESOURCE_ID_MAX + 1; } } while (!foundRID && datagramRID != oldRID); if (LIKELY(foundRID)) { entry->handle = handle; } else { /* * We wrapped around --- no rids were free. */ ASSERT(datagramRID == oldRID); VMCI_ReleaseLock_BH(&hashTable.lock, flags); return VMCI_ERROR_NO_HANDLE; } } ASSERT(!VMCI_HANDLE_INVALID(entry->handle)); idx = VMCI_Hash(entry->handle, HASH_TABLE_SIZE); /* New entry is added to top/front of hash bucket. */ entry->refCount++; entry->next = hashTable.entries[idx]; hashTable.entries[idx] = entry; VMCI_ReleaseLock_BH(&hashTable.lock, flags); return VMCI_SUCCESS; } /* *------------------------------------------------------------------------- * * DatagramHashRemoveEntry -- * * Result: * VMCI_SUCCESS if removed, VMCI_ERROR_NO_HANDLE if not found. 
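 * (Note: the code below actually reports an absent handle as
 * VMCI_ERROR_NOT_FOUND.)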
* *------------------------------------------------------------------------- */ static int DatagramHashRemoveEntry(VMCIHandle handle) { int result = VMCI_ERROR_NOT_FOUND; VMCILockFlags flags; DatagramHashEntry *prev, *cur; int idx = VMCI_Hash(handle, HASH_TABLE_SIZE); prev = NULL; VMCI_GrabLock_BH(&hashTable.lock, &flags); cur = hashTable.entries[idx]; while (TRUE) { if (cur == NULL) { break; } if (VMCI_HANDLE_EQUAL(cur->handle, handle)) { /* Remove entry and break. */ if (prev) { prev->next = cur->next; } else { hashTable.entries[idx] = cur->next; } cur->refCount--; /* * We know that DestroyHnd still has a reference so refCount must be * at least 1. */ ASSERT(cur->refCount > 0); result = VMCI_SUCCESS; break; } prev = cur; cur = cur->next; } VMCI_ReleaseLock_BH(&hashTable.lock, flags); return result; } /* *------------------------------------------------------------------------- * * DatagramHashGetEntry -- * * Result: * None. * *------------------------------------------------------------------------- */ static DatagramHashEntry * DatagramHashGetEntry(VMCIHandle handle) { VMCILockFlags flags; DatagramHashEntry *cur; int idx = VMCI_Hash(handle, HASH_TABLE_SIZE); VMCI_GrabLock_BH(&hashTable.lock, &flags); for (cur = hashTable.entries[idx]; cur != NULL; cur = cur->next) { if (VMCI_HANDLE_EQUAL(cur->handle, handle)) { cur->refCount++; break; } } VMCI_ReleaseLock_BH(&hashTable.lock, flags); return cur; } /* *------------------------------------------------------------------------- * * DatagramHashReleaseEntry -- * * Result: * None. * *------------------------------------------------------------------------- */ static void DatagramHashReleaseEntry(DatagramHashEntry *entry) { VMCILockFlags flags; VMCI_GrabLock_BH(&hashTable.lock, &flags); entry->refCount--; /* Check if this is last reference and signal the destroy event if so. */ if (entry->refCount == 0) { VMCI_SignalEvent(&entry->destroyEvent); } VMCI_ReleaseLock_BH(&hashTable.lock, flags); } /* *------------------------------------------------------------------------------ * * DatagramHandleUniqueLocked -- * * Checks whether the given handle is already in the hash * table. Assumes that the caller to have the hash table lock. * * Result: * None. * *------------------------------------------------------------------------------ */ static Bool DatagramHandleUniqueLocked(VMCIHandle handle) { Bool unique = TRUE; DatagramHashEntry *entry; int idx = VMCI_Hash(handle, HASH_TABLE_SIZE); entry = hashTable.entries[idx]; while (entry) { if (VMCI_HANDLE_EQUAL(entry->handle, handle)) { unique = FALSE; break; } entry = entry->next; } return unique; } /* *----------------------------------------------------------------------------- * * VMCIDatagram_CreateHnd -- * * Creates a datagram endpoint and returns a handle to it. * * Results: * Returns handle if success, negative errno value otherwise. * * Side effects: * Datagram endpoint is created both in guest and on host. * *----------------------------------------------------------------------------- */ #ifdef __linux__ EXPORT_SYMBOL(VMCIDatagram_CreateHnd); #endif int VMCIDatagram_CreateHnd(VMCIId resourceID, // IN: uint32 flags, // IN: VMCIDatagramRecvCB recvCB, // IN: void *clientData, // IN: VMCIHandle *outHandle) // OUT: { int result; DatagramHashEntry *entry; VMCIHandle handle; VMCIId contextID = VMCI_GetContextID(); if (!recvCB || !outHandle) { return VMCI_ERROR_INVALID_ARGS; } /* Validate contextID. 
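 * Without a valid context id we cannot construct a handle for the
 * new endpoint, so the create is refused below.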
*/ if (contextID == VMCI_INVALID_ID) { return VMCI_ERROR_NO_RESOURCES; } if ((flags & VMCI_FLAG_WELLKNOWN_DG_HND) != 0) { VMCIDatagramWellKnownMapMsg wkMsg; if (resourceID == VMCI_INVALID_ID) { return VMCI_ERROR_INVALID_ARGS; } wkMsg.hdr.dst.context = VMCI_HYPERVISOR_CONTEXT_ID; wkMsg.hdr.dst.resource = VMCI_DATAGRAM_REQUEST_MAP; wkMsg.hdr.src = VMCI_ANON_SRC_HANDLE; wkMsg.hdr.payloadSize = sizeof wkMsg - VMCI_DG_HEADERSIZE; wkMsg.wellKnownID = resourceID; result = VMCI_SendDatagram((VMCIDatagram *)&wkMsg); if (result < VMCI_SUCCESS) { VMCI_LOG(("Failed to reserve wellknown id %d, error %d.\n", resourceID, result)); return result; } handle = VMCI_MAKE_HANDLE(VMCI_WELL_KNOWN_CONTEXT_ID, resourceID); } else { if (resourceID == VMCI_INVALID_ID) { handle = VMCI_INVALID_HANDLE; } else { handle = VMCI_MAKE_HANDLE(contextID, resourceID); } } /* Update local datastructure. */ entry = VMCI_AllocKernelMem(sizeof *entry, VMCI_MEMORY_NONPAGED); if (entry == NULL) { return VMCI_ERROR_NO_MEM; } entry->handle = handle; entry->flags = flags; entry->recvCB = recvCB; entry->clientData = clientData; entry->refCount = 0; VMCI_CreateEvent(&entry->destroyEvent); result = DatagramHashAddEntry(entry, contextID); if (result != VMCI_SUCCESS) { VMCI_LOG(("Failed to add new entry, err 0x%x.\n", result)); VMCI_DestroyEvent(&entry->destroyEvent); VMCI_FreeKernelMem(entry, sizeof *entry); return result; } ASSERT(!VMCI_HANDLE_INVALID(entry->handle)); *outHandle = entry->handle; return VMCI_SUCCESS; } /* *----------------------------------------------------------------------------- * * VMCIDatagram_DestroyHnd -- * * Destroys a handle. * * Results: * VMCI_SUCCESS or error code. * * Side effects: * Host and guest state is cleaned up. * *----------------------------------------------------------------------------- */ #ifdef __linux__ EXPORT_SYMBOL(VMCIDatagram_DestroyHnd); #endif int VMCIDatagram_DestroyHnd(VMCIHandle handle) // IN { DatagramHashEntry *entry = DatagramHashGetEntry(handle); if (entry == NULL) { return VMCI_ERROR_NOT_FOUND; } DatagramHashRemoveEntry(entry->handle); /* * We wait for destroyEvent to be signalled. The resource is released * as part of the wait. */ VMCI_WaitOnEvent(&entry->destroyEvent, DatagramReleaseCB, entry); if ((entry->flags & VMCI_FLAG_WELLKNOWN_DG_HND) != 0) { int result; VMCIDatagramWellKnownMapMsg wkMsg; wkMsg.hdr.dst.context = VMCI_HYPERVISOR_CONTEXT_ID; wkMsg.hdr.dst.resource = VMCI_DATAGRAM_REMOVE_MAP; wkMsg.hdr.src = VMCI_ANON_SRC_HANDLE; wkMsg.hdr.payloadSize = sizeof wkMsg - VMCI_DG_HEADERSIZE; wkMsg.wellKnownID = entry->handle.resource; result = VMCI_SendDatagram((VMCIDatagram *)&wkMsg); if (result < VMCI_SUCCESS) { VMCI_LOG(("Failed to remove well-known mapping for resource %d.\n", entry->handle.resource)); } } /* We know we are now holding the last reference so we can free the entry. */ VMCI_DestroyEvent(&entry->destroyEvent); VMCI_FreeKernelMem(entry, sizeof *entry); return VMCI_SUCCESS; } /* *----------------------------------------------------------------------------- * * VMCIDatagram_Send -- * * Sends the payload to the destination datagram handle. * * Results: * Returns number of bytes sent if success, or error code if failure. * * Side effects: * None. 
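 *
 * Example usage -- a hedged sketch; peerContextID, peerResourceID
 * and myHandle are illustrative, with myHandle obtained from
 * VMCIDatagram_CreateHnd:
 *
 *    int retval;
 *    char buf[VMCI_DG_HEADERSIZE + 4];   // real callers should align this
 *    VMCIDatagram *dg = (VMCIDatagram *)buf;
 *
 *    dg->dst = VMCI_MAKE_HANDLE(peerContextID, peerResourceID);
 *    dg->src = myHandle;
 *    dg->payloadSize = 4;                // bytes following the header
 *    memcpy(VMCI_DG_PAYLOAD(dg), "ping", 4);
 *    retval = VMCIDatagram_Send(dg);     // bytes sent, or error code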
* *----------------------------------------------------------------------------- */ #ifdef __linux__ EXPORT_SYMBOL(VMCIDatagram_Send); #endif int VMCIDatagram_Send(VMCIDatagram *msg) // IN { uint32 retval; DatagramHashEntry *entry; if (msg == NULL) { VMCI_LOG(("Invalid datagram.\n")); return VMCI_ERROR_INVALID_ARGS; } if (VMCI_DG_SIZE(msg) > VMCI_MAX_DG_SIZE) { VMCI_LOG(("Payload size %"FMT64"u too big to send.\n", msg->payloadSize)); return VMCI_ERROR_INVALID_ARGS; } /* Check srcHandle exists otherwise fail. */ entry = DatagramHashGetEntry(msg->src); if (entry == NULL) { VMCI_LOG(("Couldn't find handle 0x%x:0x%x.\n", msg->src.context, msg->src.resource)); return VMCI_ERROR_INVALID_ARGS; } retval = VMCI_SendDatagram(msg); DatagramHashReleaseEntry(entry); return retval; } /* *----------------------------------------------------------------------------- * * VMCIDatagram_Dispatch -- * * Forwards the datagram corresponding entry's callback. * * Results: * VMCI_SUCCESS on success, error code if not. * * Side effects: * None. * *----------------------------------------------------------------------------- */ int VMCIDatagram_Dispatch(VMCIId contextID, // IN: unused VMCIDatagram *msg) // IN { DatagramHashEntry *entry; ASSERT(msg); entry = DatagramHashGetEntry(msg->dst); if (entry == NULL) { VMCI_LOG(("destination handle 0x%x:0x%x doesn't exists.\n", msg->dst.context, msg->dst.resource)); return VMCI_ERROR_NO_HANDLE; } if (entry->recvCB) { entry->recvCB(entry->clientData, msg); } else { VMCI_LOG(("no handle callback for handle 0x%x:0x%x payload of " "size %"FMT64"d.\n", msg->dst.context, msg->dst.resource, msg->payloadSize)); } DatagramHashReleaseEntry(entry); return VMCI_SUCCESS; } /* *----------------------------------------------------------------------------- * * VMCIDatagram_Init -- * * Register guest call handlers. * * Results: * None * * Side effects: * None. * *----------------------------------------------------------------------------- */ void VMCIDatagram_Init(void) { int i; VMCI_InitLock(&hashTable.lock, "VMCIDatagramHashtable", VMCI_LOCK_RANK_MIDDLE_BH); for (i = 0; i < HASH_TABLE_SIZE; i++) { hashTable.entries[i] = NULL; } } /* *----------------------------------------------------------------------------- * * VMCIDatagram_CheckHostCapabilities -- * * Verify that the host supports the resources we need. * None are required for datagrams since they are implicitly supported. * * Results: * TRUE. * * Side effects: * None. * *----------------------------------------------------------------------------- */ Bool VMCIDatagram_CheckHostCapabilities(void) { return TRUE; } /* *----------------------------------------------------------------------------- * * DatagramProcessNotify -- * * Callback to send a notificaton to a vmci process. Creates datagram * copy and signals the process. * * Results: * VMCI_SUCCESS on success, appropriate error code otherwise. * * Side effects: * None. 
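 *
 * Note: this runs in the datagram dispatch path, which may be at
 * interrupt level, so the allocations below are atomic.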
* *----------------------------------------------------------------------------- */ static int DatagramProcessNotify(void *clientData, // IN: VMCIDatagram *msg) // IN: { VMCIDatagramProcess *dgmProc = (VMCIDatagramProcess *) clientData; size_t dgmSize; VMCIDatagram *dgm; DatagramQueueEntry *dqEntry; VMCILockFlags flags; ASSERT(dgmProc != NULL && msg != NULL); dgmSize = VMCI_DG_SIZE(msg); ASSERT(dgmSize <= VMCI_MAX_DG_SIZE); dgm = VMCI_AllocKernelMem(dgmSize, VMCI_MEMORY_NONPAGED | VMCI_MEMORY_ATOMIC); if (!dgm) { VMCI_LOG(("VMCI: Failed to allocate datagram of size %d bytes.\n", (uint32)dgmSize)); return VMCI_ERROR_NO_MEM; } memcpy(dgm, msg, dgmSize); /* Allocate datagram queue entry and add it to the target fd's queue. */ dqEntry = VMCI_AllocKernelMem(sizeof *dqEntry, VMCI_MEMORY_NONPAGED | VMCI_MEMORY_ATOMIC); if (dqEntry == NULL) { VMCI_FreeKernelMem(dgm, dgmSize); VMCI_LOG(("VMCI: Failed to allocate memory for process datagram.\n")); return VMCI_ERROR_NO_MEM; } dqEntry->dg = dgm; VMCI_GrabLock_BH(&dgmProc->datagramQueueLock, &flags); if (dgmProc->datagramQueueSize + dgmSize >= VMCI_MAX_DATAGRAM_QUEUE_SIZE) { VMCI_ReleaseLock_BH(&dgmProc->datagramQueueLock, flags); VMCI_FreeKernelMem(dgm, dgmSize); VMCI_FreeKernelMem(dqEntry, sizeof *dqEntry); VMCI_LOG(("VMCI: Datagram process receive queue is full.\n")); return VMCI_ERROR_NO_RESOURCES; } LIST_QUEUE(&dqEntry->listItem, &dgmProc->datagramQueue); dgmProc->pendingDatagrams++; dgmProc->datagramQueueSize += dgmSize; #ifdef SOLARIS /* * Release the lock here for Solaris. Otherwise, a deadlock * may occur since pollwakeup(9F) (invoked from VMCIHost_SignalCall) * and poll_common (invoked from poll(2)) try to grab a common lock. * The man pages of pollwakeup(9F) and chpoll(9E) talk about this. */ VMCI_ReleaseLock_BH(&dgmProc->datagramQueueLock, flags); #endif VMCIHost_SignalCall(&dgmProc->host); #ifndef SOLARIS /* For platforms other than Solaris, release the lock here. */ VMCI_ReleaseLock_BH(&dgmProc->datagramQueueLock, flags); #endif DEBUG_ONLY(VMCI_LOG(("VMCI: Sent datagram with resource id %d and size %u.\n", msg->dst.resource, (uint32)dgmSize));) /* dqEntry and dgm are freed when user reads call.. */ return VMCI_SUCCESS; } /* *---------------------------------------------------------------------- * * VMCIDatagramProcess_Create -- * * Creates a new VMCIDatagramProcess object. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ int VMCIDatagramProcess_Create(VMCIDatagramProcess **outDgmProc, // IN: VMCIDatagramCreateInfo *createInfo) // IN: { VMCIDatagramProcess *dgmProc; ASSERT(createInfo); ASSERT(outDgmProc); dgmProc = VMCI_AllocKernelMem(sizeof *dgmProc, VMCI_MEMORY_NONPAGED); if (dgmProc == NULL) { return VMCI_ERROR_NO_MEM; } VMCI_InitLock(&dgmProc->datagramQueueLock, "VMCIDgmProc", VMCI_LOCK_RANK_MIDDLE_BH); VMCIHost_InitContext(&dgmProc->host, createInfo->eventHnd); dgmProc->pendingDatagrams = 0; dgmProc->datagramQueueSize = 0; dgmProc->datagramQueue = NULL; /* * We pass the result and corresponding handle to user level via the * createInfo. 
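 * This lets the ioctl layer report the precise VMCI status to user
 * level even when the create fails.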
*/ createInfo->result = VMCIDatagram_CreateHnd(createInfo->resourceID, createInfo->flags, DatagramProcessNotify, (void *)dgmProc, &dgmProc->handle); if (createInfo->result < VMCI_SUCCESS) { VMCI_FreeKernelMem(dgmProc, sizeof *dgmProc); return createInfo->result; } createInfo->handle = dgmProc->handle; *outDgmProc = dgmProc; return VMCI_SUCCESS; } /* *---------------------------------------------------------------------- * * VMCIDatagramProcess_Destroy -- * * Destroys a VMCIDatagramProcess object. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ void VMCIDatagramProcess_Destroy(VMCIDatagramProcess *dgmProc) // IN: { ListItem *curr, *next; DatagramQueueEntry *dqEntry; VMCILockFlags flags; if (!dgmProc) { return; } if (!VMCI_HANDLE_EQUAL(dgmProc->handle, VMCI_INVALID_HANDLE)) { /* * We block in destroy so we know that there can be no more * callbacks to DatagramProcessNotifyCB when we return from * this call. */ VMCIDatagram_DestroyHnd(dgmProc->handle); dgmProc->handle = VMCI_INVALID_HANDLE; } /* Flush dgmProc's call queue. */ VMCI_GrabLock_BH(&dgmProc->datagramQueueLock, &flags); LIST_SCAN_SAFE(curr, next, dgmProc->datagramQueue) { dqEntry = LIST_CONTAINER(curr, DatagramQueueEntry, listItem); LIST_DEL(curr, &dgmProc->datagramQueue); ASSERT(dqEntry && dqEntry->dg); VMCI_FreeKernelMem(dqEntry->dg, VMCI_DG_SIZE(dqEntry->dg)); VMCI_FreeKernelMem(dqEntry, sizeof *dqEntry); } VMCI_ReleaseLock_BH(&dgmProc->datagramQueueLock, flags); VMCIHost_ReleaseContext(&dgmProc->host); VMCI_CleanupLock(&dgmProc->datagramQueueLock); VMCI_FreeKernelMem(dgmProc, sizeof *dgmProc); } /* *---------------------------------------------------------------------- * * VMCIDatagramProcess_ReadCall -- * * Dequeues the next guest call and returns it to user level. * * Results: * 0 on success, appropriate error code otherwise. * * Side effects: * None. * *---------------------------------------------------------------------- */ int VMCIDatagramProcess_ReadCall(VMCIDatagramProcess *dgmProc, // IN: size_t maxSize, // IN: max size of dg VMCIDatagram **dg) // OUT: { DatagramQueueEntry *dqEntry; ListItem *listItem; VMCILockFlags flags; ASSERT(dgmProc); ASSERT(dg); /* Dequeue the next dgmProc datagram queue entry. */ VMCI_GrabLock_BH(&dgmProc->datagramQueueLock, &flags); /* * Currently, we do not support blocking read of datagrams on Mac and * Solaris. XXX: This will go away soon. */ #if defined(SOLARIS) || defined(__APPLE__) if (dgmProc->pendingDatagrams == 0) { VMCIHost_ClearCall(&dgmProc->host); VMCI_ReleaseLock_BH(&dgmProc->datagramQueueLock, flags); VMCI_LOG(("VMCI: No datagrams pending.\n")); return VMCI_ERROR_NO_MORE_DATAGRAMS; } #else while (dgmProc->pendingDatagrams == 0) { VMCIHost_ClearCall(&dgmProc->host); if (!VMCIHost_WaitForCallLocked(&dgmProc->host, &dgmProc->datagramQueueLock, &flags, TRUE)) { VMCI_ReleaseLock_BH(&dgmProc->datagramQueueLock, flags); VMCI_LOG(("VMCI: Blocking read of datagram interrupted.\n")); return VMCI_ERROR_NO_MORE_DATAGRAMS; } } #endif listItem = LIST_FIRST(dgmProc->datagramQueue); ASSERT (listItem != NULL); dqEntry = LIST_CONTAINER(listItem, DatagramQueueEntry, listItem); ASSERT(dqEntry->dg); /* Check the size of the userland buffer. 
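 * The datagram stays queued if the caller's buffer cannot hold it,
 * so a retry with a larger buffer will still find it at the head.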
*/ if (maxSize < VMCI_DG_SIZE(dqEntry->dg)) { VMCI_ReleaseLock_BH(&dgmProc->datagramQueueLock, flags); VMCI_LOG(("VMCI: Caller's buffer is too small.\n")); return VMCI_ERROR_NO_MEM; } LIST_DEL(listItem, &dgmProc->datagramQueue); dgmProc->pendingDatagrams--; dgmProc->datagramQueueSize -= VMCI_DG_SIZE(dqEntry->dg); if (dgmProc->pendingDatagrams == 0) { VMCIHost_ClearCall(&dgmProc->host); } VMCI_ReleaseLock_BH(&dgmProc->datagramQueueLock, flags); *dg = dqEntry->dg; VMCI_FreeKernelMem(dqEntry, sizeof *dqEntry); return VMCI_SUCCESS; } vmci-only/vmciQueuePairInt.h0000444000000000000000000000222712025726724015104 0ustar rootroot/********************************************************* * Copyright (C) 2007 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmciQueuePairInt.h -- * * Helper function declarations for VMCI QueuePair API. */ #ifndef _VMCI_QUEUE_PAIR_INT_H_ #define _VMCI_QUEUE_PAIR_INT_H_ #include "vmci_queue_pair.h" #include "vmciGuestKernelAPI.h" void VMCIQueuePair_Init(void); void VMCIQueuePair_Exit(void); #endif /* !_VMCI_QUEUE_PAIR_INT_H_ */ vmci-only/vmciQueuePair.c0000444000000000000000000006113712025726724014431 0ustar rootroot/********************************************************* * Copyright (C) 2007 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmciQueuePair.c -- * * Implements the VMCI QueuePair API. 
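 *
 * Typical guest usage -- a hedged sketch; the sizes, peer and flags
 * are illustrative:
 *
 *    VMCIHandle handle = VMCI_INVALID_HANDLE;
 *    VMCIQueue *produceQ, *consumeQ;
 *    int rv;
 *
 *    rv = VMCIQueuePair_Alloc(&handle, &produceQ, PAGE_SIZE,
 *                             &consumeQ, PAGE_SIZE,
 *                             VMCI_INVALID_ID, 0);
 *    if (rv >= VMCI_SUCCESS) {
 *       // ... exchange data through the queues, then ...
 *       VMCIQueuePair_Detach(handle);
 *    }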
*/ #ifdef __linux__ # include "driver-config.h" # define EXPORT_SYMTAB # include # include #elif defined(_WIN32) # include #endif /* __linux__ */ #include "vm_assert.h" #include "vmci_kernel_if.h" #include "vmci_queue_pair.h" #include "vmciQueuePairInt.h" #include "vmciUtil.h" #include "vmciInt.h" #include "vmciEvent.h" #include "circList.h" #define LGPFX "VMCIQueuePair: " typedef struct QueuePairEntry { VMCIHandle handle; VMCIId peer; uint32 flags; uint64 produceSize; uint64 consumeSize; uint64 numPPNs; PPNSet ppnSet; VA produceQVA; VA consumeQVA; uint32 refCount; ListItem listItem; } QueuePairEntry; typedef struct QueuePairList { ListItem *head; VMCIMutex mutex; } QueuePairList; static QueuePairList queuePairList; static QueuePairEntry *QueuePairList_FindEntry(VMCIHandle handle); static void QueuePairList_AddEntry(QueuePairEntry *entry); static void QueuePairList_RemoveEntry(QueuePairEntry *entry); static QueuePairEntry *QueuePairList_GetHead(void); static QueuePairEntry *QueuePairEntryCreate(VMCIHandle handle, VMCIId peer, uint32 flags, uint64 produceSize, uint64 consumeSize, VA produceQVA, VA consumeQVA); static void QueuePairEntryDestroy(QueuePairEntry *entry); static int VMCIQueuePairAlloc_HyperCall(const QueuePairEntry *entry); static int VMCIQueuePairAllocHelper(VMCIHandle *handle, VMCIQueue **produceQ, uint64 produceSize, VMCIQueue **consumeQ, uint64 consumeSize, VMCIId peer, uint32 flags); static int VMCIQueuePairDetachHelper(VMCIHandle handle); static int QueuePairNotifyPeerLocal(Bool attach, VMCIHandle handle); /* *----------------------------------------------------------------------------- * * QueuePairLock_Init -- * * Creates the lock protecting the QueuePair list. * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE void QueuePairLock_Init(void) { VMCIMutex_Init(&queuePairList.mutex); } /* *----------------------------------------------------------------------------- * * QueuePairLock_Destroy -- * * Destroys the lock protecting the QueuePair list. * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE void QueuePairLock_Destroy(void) { VMCIMutex_Destroy(&queuePairList.mutex); /* No-op on Linux and Windows. */ } /* *----------------------------------------------------------------------------- * * QueuePairList_Lock -- * * Acquires the lock protecting the QueuePair list. * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE void QueuePairList_Lock(void) { VMCIMutex_Acquire(&queuePairList.mutex); } /* *----------------------------------------------------------------------------- * * QueuePairList_Unlock -- * * Releases the lock protecting the QueuePair list. * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE void QueuePairList_Unlock(void) { VMCIMutex_Release(&queuePairList.mutex); } /* *----------------------------------------------------------------------------- * * VMCIQueuePair_Init -- * * Initalizes QueuePair data structure state. * * Results: * None. * * Side effects: * None. 
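 *
 * Note: must run before any VMCIQueuePair_Alloc call, since it sets
 * up the mutex protecting the QueuePair list.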
* *----------------------------------------------------------------------------- */ void VMCIQueuePair_Init(void) { queuePairList.head = NULL; QueuePairLock_Init(); } /* *----------------------------------------------------------------------------- * * VMCIQueuePair_Exit -- * * Destroys all QueuePairs. Makes hypercalls to detach from QueuePairs. * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ void VMCIQueuePair_Exit(void) { QueuePairEntry *entry; QueuePairList_Lock(); while ((entry = QueuePairList_GetHead())) { /* * Don't make a hypercall for local QueuePairs. */ if (!(entry->flags & VMCI_QPFLAG_LOCAL)) { VMCIQueuePairDetachMsg detachMsg; detachMsg.hdr.dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_QUEUEPAIR_DETACH); detachMsg.hdr.src = VMCI_ANON_SRC_HANDLE; detachMsg.hdr.payloadSize = sizeof entry->handle; detachMsg.handle = entry->handle; (void)VMCI_SendDatagram((VMCIDatagram *)&detachMsg); } /* * We cannot fail the exit, so let's reset refCount. */ entry->refCount = 0; QueuePairList_RemoveEntry(entry); QueuePairEntryDestroy(entry); } QueuePairList_Unlock(); QueuePairLock_Destroy(); } /* *----------------------------------------------------------------------------- * * QueuePairList_FindEntry -- * * Searches the list of QueuePairs to find if an entry already exists. * Assumes that the lock on the list is held. * * Results: * Pointer to the entry if it exists, NULL otherwise. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static QueuePairEntry * QueuePairList_FindEntry(VMCIHandle handle) // IN: { ListItem *next; if (VMCI_HANDLE_INVALID(handle)) { return NULL; } LIST_SCAN(next, queuePairList.head) { QueuePairEntry *entry = LIST_CONTAINER(next, QueuePairEntry, listItem); if (VMCI_HANDLE_EQUAL(entry->handle, handle)) { return entry; } } return NULL; } /* *----------------------------------------------------------------------------- * * QueuePairList_AddEntry -- * * Appends a QueuePair entry to the list. Assumes that the lock on the * list is held. * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static void QueuePairList_AddEntry(QueuePairEntry *entry) // IN: { if (entry) { LIST_QUEUE(&entry->listItem, &queuePairList.head); } } /* *----------------------------------------------------------------------------- * * QueuePairList_RemoveEntry -- * * Removes a QueuePair entry from the list. Assumes that the lock on the * list is held. * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static void QueuePairList_RemoveEntry(QueuePairEntry *entry) // IN: { if (entry) { LIST_DEL(&entry->listItem, &queuePairList.head); } } /* *----------------------------------------------------------------------------- * * QueuePairList_GetHead -- * * Returns the entry from the head of the list. Assumes that the list is * locked. * * Results: * Pointer to entry. * * Side effects: * None. 
* *----------------------------------------------------------------------------- */ static QueuePairEntry * QueuePairList_GetHead(void) { ListItem *first = LIST_FIRST(queuePairList.head); if (first) { QueuePairEntry *entry = LIST_CONTAINER(first, QueuePairEntry, listItem); return entry; } return NULL; } /* *----------------------------------------------------------------------------- * * VMCIQueuePair_Alloc -- * * Allocates a VMCI QueuePair. Only checks validity of input arguments. * Real work is done in the OS-specific helper routine. * * Results: * Success or failure. * * Side effects: * Memory is allocated. * *----------------------------------------------------------------------------- */ #ifdef __linux__ EXPORT_SYMBOL(VMCIQueuePair_Alloc); #endif int VMCIQueuePair_Alloc(VMCIHandle *handle, // IN/OUT: VMCIQueue **produceQ, // OUT: uint64 produceSize, // IN: VMCIQueue **consumeQ, // OUT: uint64 consumeSize, // IN: VMCIId peer, // IN: uint32 flags) // IN: { ASSERT_ON_COMPILE(sizeof(VMCIQueueHeader) <= PAGE_SIZE); # define VMCIQP_OFFSET_OF(Struct, field) ((uintptr_t)&(((Struct *)0)->field)) #ifdef __linux__ ASSERT_ON_COMPILE(VMCIQP_OFFSET_OF(VMCIQueue, page) == PAGE_SIZE); #else ASSERT_ON_COMPILE(VMCIQP_OFFSET_OF(VMCIQueue, buffer) == PAGE_SIZE); #endif # undef VMCIQP_OFFSET_OF if (!handle || !produceQ || !consumeQ || (!produceSize && !consumeSize) || (flags & ~VMCI_QP_ALL_FLAGS)) { return VMCI_ERROR_INVALID_ARGS; } return VMCIQueuePairAllocHelper(handle, produceQ, produceSize, consumeQ, consumeSize, peer, flags); } /* *----------------------------------------------------------------------------- * * VMCIQueuePair_Detach -- * * Detaches from a VMCI QueuePair. Only checks validity of input argument. * Real work is done in the OS-specific helper routine. * * Results: * Success or failure. * * Side effects: * Memory is freed. * *----------------------------------------------------------------------------- */ #ifdef __linux__ EXPORT_SYMBOL(VMCIQueuePair_Detach); #endif int VMCIQueuePair_Detach(VMCIHandle handle) // IN: { if (VMCI_HANDLE_INVALID(handle)) { return VMCI_ERROR_INVALID_ARGS; } return VMCIQueuePairDetachHelper(handle); } /* *----------------------------------------------------------------------------- * * QueuePairEntryCreate -- * * Allocates and initializes a QueuePairEntry structure. Allocates a * QueuePair rid (and handle) iff the given entry has an invalid handle. * 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved handles. Assumes * that the QP list lock is held by the caller. * * Results: * Pointer to structure intialized. * * Side effects: * None. * *----------------------------------------------------------------------------- */ QueuePairEntry * QueuePairEntryCreate(VMCIHandle handle, // IN: VMCIId peer, // IN: uint32 flags, // IN: uint64 produceSize, // IN: uint64 consumeSize, // IN: VA produceQVA, // IN: VA consumeQVA) // IN: { static VMCIId queuePairRID = VMCI_RESERVED_RESOURCE_ID_MAX + 1; QueuePairEntry *entry; const uint64 numPPNs = CEILING(produceSize, PAGE_SIZE) + CEILING(consumeSize, PAGE_SIZE) + 2; /* One page each for the queue headers. */ ASSERT((produceSize || consumeSize) && produceQVA && consumeQVA); if (VMCI_HANDLE_INVALID(handle)) { VMCIId contextID = VMCI_GetContextID(); VMCIId oldRID = queuePairRID; /* * Generate a unique QueuePair rid. Keep on trying until we wrap around * in the RID space. 
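 * The loop ends either when a free rid is found or when queuePairRID
 * wraps back around to oldRID, in which case the rid space is
 * exhausted and the create fails below.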
*/ ASSERT(oldRID > VMCI_RESERVED_RESOURCE_ID_MAX); do { handle = VMCI_MAKE_HANDLE(contextID, queuePairRID); entry = QueuePairList_FindEntry(handle); queuePairRID++; if (UNLIKELY(!queuePairRID)) { /* * Skip the reserved rids. */ queuePairRID = VMCI_RESERVED_RESOURCE_ID_MAX + 1; } } while (entry && queuePairRID != oldRID); if (UNLIKELY(entry != NULL)) { ASSERT(queuePairRID == oldRID); /* * We wrapped around --- no rids were free. */ return NULL; } } ASSERT(!VMCI_HANDLE_INVALID(handle) && QueuePairList_FindEntry(handle) == NULL); entry = VMCI_AllocKernelMem(sizeof *entry, VMCI_MEMORY_NORMAL); if (entry) { entry->handle = handle; entry->peer = peer; entry->flags = flags; entry->produceSize = produceSize; entry->consumeSize = consumeSize; entry->numPPNs = numPPNs; memset(&entry->ppnSet, 0, sizeof entry->ppnSet); entry->produceQVA = produceQVA; entry->consumeQVA = consumeQVA; entry->refCount = 0; INIT_LIST_ITEM(&entry->listItem); } return entry; } /* *----------------------------------------------------------------------------- * * QueuePairEntryDestroy -- * * Frees a QueuePairEntry structure. * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ void QueuePairEntryDestroy(QueuePairEntry *entry) // IN: { ASSERT(entry); ASSERT(entry->refCount == 0); VMCI_FreePPNSet(&entry->ppnSet); VMCI_FreeQueueKVA(entry->produceQVA, entry->produceSize); VMCI_FreeQueueKVA(entry->consumeQVA, entry->consumeSize); VMCI_FreeKernelMem(entry, sizeof *entry); } /* *----------------------------------------------------------------------------- * * VMCIQueuePairAlloc_HyperCall -- * * Helper to make a QueuePairAlloc hypercall. * * Results: * Result of the hypercall. * * Side effects: * Memory is allocated & freed. * *----------------------------------------------------------------------------- */ int VMCIQueuePairAlloc_HyperCall(const QueuePairEntry *entry) // IN: { VMCIQueuePairAllocMsg *allocMsg; size_t msgSize; int result; if (!entry || entry->numPPNs <= 2) { return VMCI_ERROR_INVALID_ARGS; } ASSERT(!(entry->flags & VMCI_QPFLAG_LOCAL)); msgSize = sizeof *allocMsg + (size_t)entry->numPPNs * sizeof(PPN); allocMsg = VMCI_AllocKernelMem(msgSize, VMCI_MEMORY_NONPAGED); if (!allocMsg) { return VMCI_ERROR_NO_MEM; } allocMsg->hdr.dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_QUEUEPAIR_ALLOC); allocMsg->hdr.src = VMCI_ANON_SRC_HANDLE; allocMsg->hdr.payloadSize = msgSize - VMCI_DG_HEADERSIZE; allocMsg->handle = entry->handle; allocMsg->peer = entry->peer; allocMsg->flags = entry->flags; allocMsg->produceSize = entry->produceSize; allocMsg->consumeSize = entry->consumeSize; allocMsg->numPPNs = entry->numPPNs; result = VMCI_PopulatePPNList((uint8 *)allocMsg + sizeof *allocMsg, &entry->ppnSet); if (result == VMCI_SUCCESS) { result = VMCI_SendDatagram((VMCIDatagram *)allocMsg); } VMCI_FreeKernelMem(allocMsg, msgSize); return result; } /* *----------------------------------------------------------------------------- * * VMCIQueuePairAllocHelper -- * * Helper for VMCI QueuePairAlloc. Allocates physical pages for the * QueuePair. Makes OS dependent calls through generic wrappers. * * Results: * Success or failure. * * Side effects: * Memory is allocated. 
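 *
 * Note: for VMCI_QPFLAG_LOCAL pairs both endpoints live in this
 * guest, so no hypercall is made; an attacher simply receives the
 * creator's queues with produce and consume swapped.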
* *----------------------------------------------------------------------------- */ static int VMCIQueuePairAllocHelper(VMCIHandle *handle, // IN/OUT: VMCIQueue **produceQ, // OUT: uint64 produceSize, // IN: VMCIQueue **consumeQ, // OUT: uint64 consumeSize, // IN: VMCIId peer, // IN: uint32 flags) // IN: { const uint64 numProducePages = CEILING(produceSize, PAGE_SIZE) + 1; const uint64 numConsumePages = CEILING(consumeSize, PAGE_SIZE) + 1; VA produceVA = 0; VA consumeVA = 0; int result; QueuePairEntry *queuePairEntry = NULL; /* * XXX Check for possible overflow of 'size' arguments when passed to * compat_get_order (after some arithmetic ops). */ ASSERT(handle && produceQ && consumeQ && (produceSize || consumeSize)); QueuePairList_Lock(); if ((queuePairEntry = QueuePairList_FindEntry(*handle))) { if (queuePairEntry->flags & VMCI_QPFLAG_LOCAL) { /* Local attach case. */ if (queuePairEntry->refCount > 1) { VMCI_LOG((LGPFX "Error attempting to attach more than once.\n")); result = VMCI_ERROR_UNAVAILABLE; goto errorKeepEntry; } if (queuePairEntry->produceSize != consumeSize || queuePairEntry->consumeSize != produceSize || queuePairEntry->flags != (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) { VMCI_LOG((LGPFX "Error mismatched queue pair in local attach.\n")); result = VMCI_ERROR_QUEUEPAIR_MISMATCH; goto errorKeepEntry; } /* * Do a local attach. We swap the consume and produce queues for the * attacher and deliver an attach event. */ result = QueuePairNotifyPeerLocal(TRUE, *handle); if (result < VMCI_SUCCESS) { goto errorKeepEntry; } produceVA = queuePairEntry->consumeQVA; consumeVA = queuePairEntry->produceQVA; goto out; } result = VMCI_ERROR_ALREADY_EXISTS; goto errorKeepEntry; } produceVA = VMCI_AllocQueueKVA(produceSize); if (!produceVA) { VMCI_LOG((LGPFX "Error allocating pages for produce queue.\n")); result = VMCI_ERROR_NO_MEM; goto error; } consumeVA = VMCI_AllocQueueKVA(consumeSize); if (!consumeVA) { VMCI_LOG((LGPFX "Error allocating pages for consume queue.\n")); result = VMCI_ERROR_NO_MEM; goto error; } queuePairEntry = QueuePairEntryCreate(*handle, peer, flags, produceSize, consumeSize, produceVA, consumeVA); if (!queuePairEntry) { VMCI_LOG((LGPFX "Error allocating memory in %s.\n", __FUNCTION__)); result = VMCI_ERROR_NO_MEM; goto error; } result = VMCI_AllocPPNSet(produceVA, numProducePages, consumeVA, numConsumePages, &queuePairEntry->ppnSet); if (result < VMCI_SUCCESS) { VMCI_LOG((LGPFX "VMCI_AllocPPNSet failed.\n")); goto error; } /* * It's only necessary to notify the host if this queue pair will be * attached to from another context. */ if (queuePairEntry->flags & VMCI_QPFLAG_LOCAL) { /* Local create case. */ VMCIId contextId = VMCI_GetContextID(); /* * Enforce similar checks on local queue pairs as we do for regular ones. * The handle's context must match the creator or attacher context id * (here they are both the current context id) and the attach-only flag * cannot exist during create. We also ensure specified peer is this * context or an invalid one. 
*/ if (queuePairEntry->handle.context != contextId || (queuePairEntry->peer != VMCI_INVALID_ID && queuePairEntry->peer != contextId)) { result = VMCI_ERROR_NO_ACCESS; goto error; } if (queuePairEntry->flags & VMCI_QPFLAG_ATTACH_ONLY) { result = VMCI_ERROR_NOT_FOUND; goto error; } } else { result = VMCIQueuePairAlloc_HyperCall(queuePairEntry); if (result < VMCI_SUCCESS) { VMCI_LOG((LGPFX "VMCIQueuePairAlloc_HyperCall result = %d.\n", result)); goto error; } } QueuePairList_AddEntry(queuePairEntry); out: queuePairEntry->refCount++; *handle = queuePairEntry->handle; *produceQ = (VMCIQueue *)produceVA; *consumeQ = (VMCIQueue *)consumeVA; /* * We should initialize the queue pair header pages on a local queue pair * create. For non-local queue pairs, the hypervisor initializes the header * pages in the create step. */ if ((queuePairEntry->flags & VMCI_QPFLAG_LOCAL) && queuePairEntry->refCount == 1) { VMCIQueue_Init(*handle, *produceQ); VMCIQueue_Init(*handle, *consumeQ); } QueuePairList_Unlock(); return VMCI_SUCCESS; error: QueuePairList_Unlock(); if (queuePairEntry) { /* The KVAs will be freed inside the destroy routine. */ QueuePairEntryDestroy(queuePairEntry); } else { if (produceVA) { VMCI_FreeQueueKVA(produceVA, produceSize); } if (consumeVA) { VMCI_FreeQueueKVA(consumeVA, consumeSize); } } return result; errorKeepEntry: /* This path should only be used when an existing entry was found. */ ASSERT(queuePairEntry->refCount > 0); QueuePairList_Unlock(); return result; } /* *----------------------------------------------------------------------------- * * VMCIQueuePairDetachHelper -- * * Helper for VMCI QueuePair detach interface on Linux. Frees the physical * pages for the QueuePair. * * Results: * Success or failure. * * Side effects: * Memory may be freed. * *----------------------------------------------------------------------------- */ static int VMCIQueuePairDetachHelper(VMCIHandle handle) // IN: { int result; QueuePairEntry *entry; uint32 refCount; ASSERT(!VMCI_HANDLE_INVALID(handle)); QueuePairList_Lock(); entry = QueuePairList_FindEntry(handle); if (!entry) { result = VMCI_ERROR_NOT_FOUND; goto out; } ASSERT(entry->refCount >= 1); if (entry->flags & VMCI_QPFLAG_LOCAL) { result = VMCI_SUCCESS; if (entry->refCount > 1) { result = QueuePairNotifyPeerLocal(FALSE, handle); if (result < VMCI_SUCCESS) { goto out; } } } else { VMCIQueuePairDetachMsg detachMsg; detachMsg.hdr.dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_QUEUEPAIR_DETACH); detachMsg.hdr.src = VMCI_ANON_SRC_HANDLE; detachMsg.hdr.payloadSize = sizeof handle; detachMsg.handle = handle; result = VMCI_SendDatagram((VMCIDatagram *)&detachMsg); } out: if (result >= VMCI_SUCCESS) { entry->refCount--; if (entry->refCount == 0) { QueuePairList_RemoveEntry(entry); } } /* If we didn't remove the entry, this could change once we unlock. */ refCount = entry ? entry->refCount : 0xffffffff; /* * Value does not matter, silence the * compiler. */ QueuePairList_Unlock(); if (result >= VMCI_SUCCESS && refCount == 0) { QueuePairEntryDestroy(entry); } return result; } /* *---------------------------------------------------------------------------- * * QueuePairNotifyPeerLocal -- * * Dispatches a queue pair event message directly into the local event * queue. * * Results: * VMCI_SUCCESS on success, error code otherwise * * Side effects: * None. * *---------------------------------------------------------------------------- */ static int QueuePairNotifyPeerLocal(Bool attach, // IN: attach or detach? 
VMCIHandle handle) // IN: queue pair handle { VMCIEventMsg *eMsg; VMCIEventPayload_QP *ePayload; /* buf is only 48 bytes. */ char buf[sizeof *eMsg + sizeof *ePayload]; VMCIId contextId; contextId = VMCI_GetContextID(); eMsg = (VMCIEventMsg *)buf; ePayload = VMCIEventMsgPayload(eMsg); eMsg->hdr.dst = VMCI_MAKE_HANDLE(contextId, VMCI_EVENT_HANDLER); eMsg->hdr.src = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_CONTEXT_RESOURCE_ID); eMsg->hdr.payloadSize = sizeof *eMsg + sizeof *ePayload - sizeof eMsg->hdr; eMsg->eventData.event = attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH; ePayload->peerId = contextId; ePayload->handle = handle; return VMCIEvent_Dispatch((VMCIDatagram *)eMsg); } vmci-only/vmciGuestDs.c0000444000000000000000000001565012025726724014106 0ustar rootroot/********************************************************* * Copyright (C) 2007 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmciGuestDs.c * * Implements the client-access API to the VMCI discovery service in * the guest kernel. * */ #ifdef __linux__ # include "driver-config.h" # define EXPORT_SYMTAB # include # include "compat_kernel.h" # include "compat_pci.h" #elif defined(_WIN32) # include #elif defined(SOLARIS) # include # include #else # error "Platform not supported by VMCI datagram API." #endif // linux #include "vm_basic_types.h" #include "vm_atomic.h" #include "vm_assert.h" #include "vmci_defs.h" #include "vmci_kernel_if.h" #include "vmci_infrastructure.h" #include "vmciInt.h" #include "vmciUtil.h" #include "vmciDatagram.h" static Atomic_uint32 MsgIdCounter = { 0 }; typedef struct VMCIDsRecvData { VMCIHost context; VMCILock lock; int status; uint8 buffer[VMCI_DS_MAX_MSG_SIZE]; } VMCIDsRecvData; static int VMCIDsDoCall(int action, const char *name, VMCIHandle handle, VMCIHandle *handleOut); static int VMCIDsRecvCB(void *clientData, struct VMCIDatagram *msg); /* *------------------------------------------------------------------------- * * VMCIDs_Lookup -- * * Look up a handle in the VMCI discovery service based on * the given name. * * Results: * Error code. 0 if success. * * Side effects: * None. * *------------------------------------------------------------------------- */ #ifdef __linux__ EXPORT_SYMBOL(VMCIDs_Lookup); #endif int VMCIDs_Lookup(const char *name, // IN VMCIHandle *out) // { return VMCIDsDoCall(VMCI_DS_ACTION_LOOKUP, name, VMCI_INVALID_HANDLE, out); } /* *------------------------------------------------------------------------- * * VMCIDsDoCall -- * * Serialize a call into the CDS wire-format, send it across * the VMCI device, wait for a response, and return * the results. * * Results: * Error code. 0 if success. * * Side effects: * None.
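 *
 * Example (editorial annotation, not part of the original source): a
 * minimal sketch of a discovery-service lookup through the exported
 * VMCIDs_Lookup() above. The service name and caller name are
 * illustrative assumptions; 0 means success, per the convention above.
 *
 *    static int
 *    ExampleLookup(void)
 *    {
 *       VMCIHandle svc;
 *       int rc = VMCIDs_Lookup("example.service", &svc);
 *       if (rc != 0) {
 *          return rc;
 *       }
 *       // ... address datagrams to 'svc' ...
 *       return 0;
 *    }
 *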
* *------------------------------------------------------------------------- */ static int VMCIDsDoCall(int action, // IN const char *name, // IN VMCIHandle handle, // IN: For the "register" action VMCIHandle *handleOut) // OUT: For the "lookup" action { int8 *sendBuffer = NULL; const size_t sendBufferSize = VMCI_DS_MAX_MSG_SIZE + sizeof(VMCIDatagram); int nameLen, requestSize, res; uint32 savedMsgIdCounter; VMCIDsReplyHeader *reply; VMCIHandle dsHandle = VMCI_INVALID_HANDLE; VMCIDsRecvData *recvData = NULL; VMCIDatagram *dgram; VMCIDsRequestHeader *request; VMCILockFlags flags; nameLen = strlen(name); if (nameLen + sizeof *request > sendBufferSize) { res = VMCI_ERROR_INVALID_ARGS; goto out; } sendBuffer = VMCI_AllocKernelMem(sendBufferSize, VMCI_MEMORY_NONPAGED); if (sendBuffer == NULL) { res = VMCI_ERROR_NO_MEM; goto out; } recvData = VMCI_AllocKernelMem(sizeof *recvData, VMCI_MEMORY_NONPAGED); if (recvData == NULL) { res = VMCI_ERROR_NO_MEM; goto out; } VMCIHost_InitContext(&recvData->context, (uintptr_t) recvData); VMCI_InitLock(&recvData->lock, "VMCIDsRecvHandler", VMCI_LOCK_RANK_MIDDLE_BH); savedMsgIdCounter = Atomic_FetchAndInc(&MsgIdCounter); dgram = (VMCIDatagram *) sendBuffer; request = (VMCIDsRequestHeader *) (sendBuffer + sizeof *dgram); /* Serialize request. */ request->action = action; request->msgid = savedMsgIdCounter; request->handle = handle; request->nameLen = nameLen; memcpy(request->name, name, nameLen + 1); requestSize = sizeof *request + nameLen; if (VMCIDatagram_CreateHnd(VMCI_INVALID_ID, 0, VMCIDsRecvCB, recvData, &dsHandle) != VMCI_SUCCESS) { res = VMCI_ERROR_NO_HANDLE; goto out; } dgram->dst = VMCI_DS_HANDLE; dgram->src = dsHandle; dgram->payloadSize = requestSize; /* Send the datagram to CDS. */ res = VMCIDatagram_Send(dgram); if (res <= 0) { goto out; } /* Block here waiting for the reply */ VMCI_GrabLock_BH(&recvData->lock, &flags); VMCIHost_WaitForCallLocked(&recvData->context, &recvData->lock, &flags, TRUE); VMCI_ReleaseLock_BH(&recvData->lock, flags); if (recvData->status != VMCI_SUCCESS) { res = recvData->status; goto out; } reply = (VMCIDsReplyHeader *) recvData->buffer; /* Check that the msgid matches what we expect. */ if (reply->msgid != savedMsgIdCounter) { res = VMCI_ERROR_GENERIC; goto out; } if (handleOut != NULL) { *handleOut = reply->handle; } res = reply->code; out: if (!VMCI_HANDLE_EQUAL(dsHandle, VMCI_INVALID_HANDLE)) { VMCIDatagram_DestroyHnd(dsHandle); } if (recvData) { VMCI_CleanupLock(&recvData->lock); VMCIHost_ReleaseContext(&recvData->context); VMCI_FreeKernelMem(recvData, sizeof *recvData); } if (sendBuffer) { VMCI_FreeKernelMem(sendBuffer, sendBufferSize); } return res; } /* *----------------------------------------------------------------------------- * * VMCIDsRecvCB -- * * Receive callback for the Discovery Service query datagram * handle. * * Results: * If the received payload is not larger than the MAX, it is * copied into clientData. * * Side effects: * Signals the thread waiting for the reply. 
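 *
 * Example (editorial annotation, not part of the original source): the
 * request/reply handshake used by VMCIDsDoCall() above, reduced to its
 * skeleton. The requester sleeps in VMCIHost_WaitForCallLocked() under
 * the bottom-half lock, and this callback wakes it with
 * VMCIHost_SignalCall() under the same lock; 'recvData' is assumed to
 * be initialized as in VMCIDsDoCall().
 *
 *    VMCILockFlags flags;
 *    VMCI_GrabLock_BH(&recvData->lock, &flags);
 *    // Drops the lock while sleeping, reacquires it before returning.
 *    VMCIHost_WaitForCallLocked(&recvData->context, &recvData->lock,
 *                               &flags, TRUE);
 *    VMCI_ReleaseLock_BH(&recvData->lock, flags);
 *    // recvData->status and recvData->buffer are now valid.
 *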
* *----------------------------------------------------------------------------- */ static int VMCIDsRecvCB(void *clientData, // IN: client data for handler struct VMCIDatagram *msg) // IN { VMCIDsRecvData *recvData = clientData; VMCILockFlags flags; ASSERT(msg->payloadSize <= VMCI_DS_MAX_MSG_SIZE); if (msg->payloadSize <= VMCI_DS_MAX_MSG_SIZE) { memcpy(recvData->buffer, VMCI_DG_PAYLOAD(msg), (size_t)msg->payloadSize); recvData->status = VMCI_SUCCESS; } else { recvData->status = VMCI_ERROR_PAYLOAD_TOO_LARGE; } VMCI_GrabLock_BH(&recvData->lock, &flags); VMCIHost_SignalCall(&recvData->context); VMCI_ReleaseLock_BH(&recvData->lock, flags); return 0; } vmci-only/vmciGuestKernelIf.h0000444000000000000000000000355412025726724015244 0ustar rootroot/********************************************************* * Copyright (C) 2007 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmciGuestKernelIf.h -- * * This file defines OS encapsulation helper functions that are * needed only in VMCI guest kernel code. It must work for * windows, solaris and linux kernel, ie. using defines * where necessary. */ #ifndef _VMCI_GUEST_KERNEL_IF_H_ #define _VMCI_GUEST_KERNEL_IF_H_ #if !defined(linux) && !defined(_WIN32) && !defined(SOLARIS) #error "Platform not supported." #endif #if defined(_WIN32) #include #endif #ifdef SOLARIS # include # include # include #endif #include "vm_basic_types.h" #include "vmci_defs.h" #if defined(linux) typedef unsigned short int VMCIIoPort; typedef int VMCIIoHandle; #elif defined(_WIN32) typedef PUCHAR VMCIIoPort; typedef int VMCIIoHandle; #elif defined(SOLARIS) typedef uint8_t * VMCIIoPort; typedef ddi_acc_handle_t VMCIIoHandle; #endif // VMKERNEL void VMCI_ReadPortBytes(VMCIIoHandle handle, VMCIIoPort port, uint8 *buffer, size_t bufferLength); #endif // _VMCI_GUEST_KERNEL_IF_H_ vmci-only/vmciGuestKernelIf.c0000444000000000000000000000354112025726724015233 0ustar rootroot/********************************************************* * Copyright (C) 2007 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmciGuestKernelIf.c -- * * This file implements guest only OS helper functions for VMCI. 
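 *
 * Example (editorial annotation, not part of the original source): a
 * sketch of how a caller such as VMCI_ReadDatagramsFromPort() (declared
 * elsewhere) might use VMCI_ReadPortBytes() from this file to drain one
 * datagram from the device's data-in port -- the fixed-size header is
 * read first, since it carries the payload length. Variable names and
 * buffer sizing are illustrative assumptions.
 *
 *    VMCIDatagram *dg = (VMCIDatagram *)buffer;
 *    VMCI_ReadPortBytes(ioHandle, dgInPort, buffer, VMCI_DG_HEADERSIZE);
 *    VMCI_ReadPortBytes(ioHandle, dgInPort, buffer + VMCI_DG_HEADERSIZE,
 *                       (size_t)dg->payloadSize);
 *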
* This is the linux specific implementation. */ /* Must come before any kernel header file */ #include "driver-config.h" #if !defined(linux) || defined(VMKERNEL) #error "Wrong platform." #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 9) #include #endif #include "compat_version.h" #include "compat_pci.h" #include "vm_basic_types.h" #include "vmciGuestKernelIf.h" /* *----------------------------------------------------------------------------- * * VMCI_ReadPortBytes -- * * Copy memory from an I/O port to kernel memory. * * Results: * No results. * * Side effects: * None. * *----------------------------------------------------------------------------- */ void VMCI_ReadPortBytes(VMCIIoHandle handle, // IN: Unused VMCIIoPort port, // IN uint8 *buffer, // OUT size_t bufferLength) // IN { insb(port, buffer, bufferLength); } vmci-only/vmciInt.h0000444000000000000000000000245412025726724013265 0ustar rootroot/********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __VMCI_INT_H__ #define __VMCI_INT_H__ #define INCLUDE_ALLOW_MODULE #include "includeCheck.h" #include "vm_basic_types.h" #include "vmci_call_defs.h" #include "vmciProcess.h" #define DOLOG(...) printk(KERN_INFO __VA_ARGS__) #define VMCI_LOG(_args) DOLOG _args /* * Called by common code, hence the different naming convention. * XXX Should be in vmci.h. */ int VMCI_SendDatagram(VMCIDatagram *dg); Bool VMCI_DeviceEnabled(void); #endif /* __VMCIINT_H__ */ vmci-only/vmci_drv.c0000444000000000000000000005547312025726725013472 0ustar rootroot/********************************************************* * Copyright (C) 2005 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmci.c -- * * Linux guest driver for the VMCI device. 
*/ #include "driver-config.h" #define EXPORT_SYMTAB #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 9) #include #endif #include "compat_kernel.h" #include "compat_module.h" #include "compat_pci.h" #include "compat_wait.h" #include "compat_init.h" #include "compat_ioport.h" #include "compat_interrupt.h" #include "compat_page.h" #include "vm_basic_types.h" #include "vm_device_version.h" #include "kernelStubs.h" #include "vmci_iocontrols.h" #include "vmci_defs.h" #include "vmciInt.h" #include "vmci_infrastructure.h" #include "vmciDatagram.h" #include "vmciProcess.h" #include "vmciUtil.h" #include "vmciEvent.h" #include "vmciQueuePairInt.h" #include "vmci_version.h" #define LGPFX "VMCI: " #define VMCI_DEVICE_MINOR_NUM 0 typedef struct vmci_device { struct semaphore lock; unsigned int ioaddr; unsigned int ioaddr_size; unsigned int irq; Bool enabled; spinlock_t dev_spinlock; } vmci_device; static int vmci_probe_device(struct pci_dev *pdev, const struct pci_device_id *id); static void vmci_remove_device(struct pci_dev* pdev); static int vmci_open(struct inode *inode, struct file *file); static int vmci_close(struct inode *inode, struct file *file); static int vmci_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); static unsigned int vmci_poll(struct file *file, poll_table *wait); #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) static compat_irqreturn_t vmci_interrupt(int irq, void *dev_id, struct pt_regs * regs); #else static compat_irqreturn_t vmci_interrupt(int irq, void *dev_id); #endif static void dispatch_datagrams(unsigned long data); static const struct pci_device_id vmci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), }, { 0 }, }; static struct file_operations vmci_ops = { .owner = THIS_MODULE, .open = vmci_open, .release = vmci_close, .ioctl = vmci_ioctl, .poll = vmci_poll, }; static struct pci_driver vmci_driver = { .name = "vmci", .id_table = vmci_ids, .probe = vmci_probe_device, .remove = vmci_remove_device, }; static vmci_device vmci_dev; /* We dynamically request the device major number at init time. */ static int device_major_nr = 0; DECLARE_TASKLET(vmci_tasklet, dispatch_datagrams, (unsigned long)&vmci_dev); /* * Allocate a buffer for incoming datagrams globally to avoid repeated * allocation in the interrupt handler's atomic context. */ static uint8 *data_buffer = NULL; static uint32 data_buffer_size = VMCI_MAX_DG_SIZE; /* *----------------------------------------------------------------------------- * * vmci_init -- * * Initialization, called by Linux when the module is loaded. * * Results: * Returns 0 for success, negative errno value otherwise. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static int vmci_init(void) { int err = -ENOMEM; /* Register device node ops. */ err = register_chrdev(0, "vmci", &vmci_ops); if (err < 0) { printk(KERN_ERR "Unable to register vmci device\n"); return err; } device_major_nr = err; printk("VMCI: Major device number is: %d\n", device_major_nr); /* Initialize device data. */ init_MUTEX(&vmci_dev.lock); spin_lock_init(&vmci_dev.dev_spinlock); vmci_dev.enabled = FALSE; data_buffer = vmalloc(data_buffer_size); if (data_buffer == NULL) { goto error; } /* This should be last to make sure we are done initializing. 
*/ err = pci_register_driver(&vmci_driver); if (err < 0) { goto error; } return 0; error: unregister_chrdev(device_major_nr, "vmci"); vfree(data_buffer); return err; } /* *----------------------------------------------------------------------------- * * vmci_exit -- * * Cleanup, called by Linux when the module is unloaded. * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static void vmci_exit(void) { pci_unregister_driver(&vmci_driver); unregister_chrdev(device_major_nr, "vmci"); vfree(data_buffer); } /* *----------------------------------------------------------------------------- * * vmci_probe_device -- * * Most of the initialization at module load time is done here. * * Results: * Returns 0 for success, an error otherwise. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static int vmci_probe_device(struct pci_dev *pdev, // IN: vmci PCI device const struct pci_device_id *id) // IN: matching device ID { unsigned int ioaddr; unsigned int ioaddr_size; unsigned int capabilities; int result; printk(KERN_INFO "Probing for vmci/PCI.\n"); result = compat_pci_enable_device(pdev); if (result) { printk(KERN_ERR "Cannot enable VMCI device %s: error %d\n", compat_pci_name(pdev), result); return result; } compat_pci_set_master(pdev); /* To enable QueuePair functionality. */ ioaddr = compat_pci_resource_start(pdev, 0); ioaddr_size = compat_pci_resource_len(pdev, 0); /* * Request I/O region with adjusted base address and size. The adjusted * values are needed and used if we release the region in case of failure. */ if (!compat_request_region(ioaddr, ioaddr_size, "vmci")) { printk(KERN_INFO "vmci: Another driver already loaded " "for device in slot %s.\n", compat_pci_name(pdev)); goto pci_disable; } printk(KERN_INFO "Found vmci/PCI at %#x, irq %u.\n", ioaddr, pdev->irq); /* * Verify that the VMCI Device supports the capabilities that * we need. If the device is missing capabilities that we would * like to use, check for fallback capabilities and use those * instead (so we can run a new VM on old hosts). Fail the load if * a required capability is missing and there is no fallback. * * Right now, we need datagrams. There are no fallbacks. */ capabilities = inl(ioaddr + VMCI_CAPS_ADDR); if ((capabilities & VMCI_CAPS_DATAGRAM) == 0) { printk(KERN_ERR "VMCI device does not support datagrams.\n"); goto release; } /* Let the host know which capabilities we intend to use. */ outl(VMCI_CAPS_DATAGRAM, ioaddr + VMCI_CAPS_ADDR); /* Device struct initialization. */ down(&vmci_dev.lock); if (vmci_dev.enabled) { printk(KERN_ERR "VMCI device already enabled.\n"); goto unlock; } vmci_dev.ioaddr = ioaddr; vmci_dev.ioaddr_size = ioaddr_size; vmci_dev.irq = pdev->irq; /* Check host capabilities. */ if (!VMCI_CheckHostCapabilities()) { goto unlock; } /* Enable device. */ vmci_dev.enabled = TRUE; pci_set_drvdata(pdev, &vmci_dev); /* * We do global initialization here because we need datagrams for * event init. If we ever support more than one VMCI device we will * have to create separate LateInit/EarlyExit functions that can be * used to do initialization/cleanup that depends on the device * being accessible. We need to initialize VMCI components before * requesting an irq - the VMCI interrupt handler uses these * components, and it may be invoked once request_irq() has * registered the handler (as the irq line may be shared).
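 *
 * Example (editorial annotation, not part of the original source): the
 * fallback policy described for the capability check earlier in this
 * routine, sketched for a hypothetical capability that has a fallback
 * bit (datagrams, the one capability required today, has none). The
 * EXAMPLE_CAPS_* bits are purely illustrative.
 *
 *    unsigned int want = VMCI_CAPS_DATAGRAM;       // Required, no fallback.
 *    if (capabilities & EXAMPLE_CAPS_NEW) {        // Hypothetical bit.
 *       want |= EXAMPLE_CAPS_NEW;                  // Prefer the new feature.
 *    } else if (capabilities & EXAMPLE_CAPS_OLD) { // Hypothetical fallback.
 *       want |= EXAMPLE_CAPS_OLD;                  // New VM on an old host.
 *    }
 *    outl(want, ioaddr + VMCI_CAPS_ADDR);          // Announce what we use.
 *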
*/ VMCIProcess_Init(); VMCIDatagram_Init(); VMCIEvent_Init(); VMCIUtil_Init(); VMCIQueuePair_Init(); if (request_irq(vmci_dev.irq, vmci_interrupt, COMPAT_IRQF_SHARED, "vmci", &vmci_dev)) { printk(KERN_ERR "vmci: irq %u in use\n", vmci_dev.irq); goto components_exit; } printk(KERN_INFO "Registered vmci device.\n"); up(&vmci_dev.lock); /* Enable specific interrupt bits. */ outl(VMCI_IMR_DATAGRAM, vmci_dev.ioaddr + VMCI_IMR_ADDR); /* Enable interrupts. */ outl(VMCI_CONTROL_INT_ENABLE, vmci_dev.ioaddr + VMCI_CONTROL_ADDR); return 0; components_exit: VMCIQueuePair_Exit(); VMCIUtil_Exit(); VMCIEvent_Exit(); VMCIProcess_Exit(); unlock: up(&vmci_dev.lock); release: release_region(ioaddr, ioaddr_size); pci_disable: compat_pci_disable_device(pdev); return -EBUSY; } /* *----------------------------------------------------------------------------- * * vmci_remove_device -- * * Cleanup, called for each device on unload. * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static void vmci_remove_device(struct pci_dev* pdev) { struct vmci_device *dev = pci_get_drvdata(pdev); printk(KERN_INFO "Removing vmci device\n"); VMCIQueuePair_Exit(); // XXX Todo add exit/cleanup functions for util, sm, dg, and resource apis. VMCIUtil_Exit(); VMCIEvent_Exit(); //VMCIDatagram_Exit(); VMCIProcess_Exit(); down(&dev->lock); printk(KERN_INFO "Resetting vmci device\n"); outl(VMCI_CONTROL_RESET, vmci_dev.ioaddr + VMCI_CONTROL_ADDR); free_irq(dev->irq, dev); release_region(dev->ioaddr, dev->ioaddr_size); dev->enabled = FALSE; printk(KERN_INFO "Unregistered vmci device.\n"); up(&dev->lock); compat_pci_disable_device(pdev); } /* *----------------------------------------------------------------------------- * * vmci_open -- * * Open device. * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static int vmci_open(struct inode *inode, // IN struct file *file) // IN { VMCIGuestDeviceHandle *devHndl; int errcode; printk(KERN_INFO "Opening vmci device\n"); if (MINOR(inode->i_rdev) != VMCI_DEVICE_MINOR_NUM) { return -ENODEV; } down(&vmci_dev.lock); if (!vmci_dev.enabled) { printk(KERN_INFO "Received open on uninitialized vmci device.\n"); errcode = -ENODEV; goto unlock; } /* Do open ... */ devHndl = VMCI_AllocKernelMem(sizeof *devHndl, VMCI_MEMORY_NORMAL); if (!devHndl) { printk(KERN_INFO "Failed to create device obj when opening device.\n"); errcode = -ENOMEM; goto unlock; } devHndl->obj = NULL; devHndl->objType = VMCIOBJ_NOT_SET; file->private_data = devHndl; up(&vmci_dev.lock); return 0; unlock: up(&vmci_dev.lock); return errcode; } /* *----------------------------------------------------------------------------- * * vmci_close -- * * Close device. * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static int vmci_close(struct inode *inode, // IN struct file *file) // IN { VMCIGuestDeviceHandle *devHndl = (VMCIGuestDeviceHandle *) file->private_data; if (devHndl) { if (devHndl->objType == VMCIOBJ_PROCESS) { VMCIProcess_Destroy((VMCIProcess *) devHndl->obj); } else if (devHndl->objType == VMCIOBJ_DATAGRAM_PROCESS) { VMCIDatagramProcess_Destroy((VMCIDatagramProcess *) devHndl->obj); } VMCI_FreeKernelMem(devHndl, sizeof *devHndl); file->private_data = NULL; } return 0; } /* *----------------------------------------------------------------------------- * * vmci_ioctl -- * * IOCTL interface to device. 
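 *
 * Example (editorial annotation, not part of the original source): a
 * user-space sketch of this interface. Note that the ioctl body below
 * compiles to -ENOTTY unless the driver is built with VMX86_DEVEL, and
 * that the /dev/vmci node path is an assumption -- the driver only
 * registers the "vmci" character device; node creation is left to the
 * system.
 *
 *    int fd = open("/dev/vmci", O_RDWR);           // Minor 0 only.
 *    VMCIId cid;
 *    if (fd >= 0 && ioctl(fd, IOCTL_VMCI_GET_CONTEXT_ID, &cid) == 0) {
 *       // 'cid' now holds this VM's VMCI context id.
 *    }
 *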
* * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static int vmci_ioctl(struct inode *inode, // IN struct file *file, // IN unsigned int cmd, // IN unsigned long arg) // IN { #ifndef VMX86_DEVEL return -ENOTTY; #else int retval; VMCIGuestDeviceHandle *devHndl = (VMCIGuestDeviceHandle *) file->private_data; if (devHndl == NULL) { return -EINVAL; } switch (cmd) { case IOCTL_VMCI_CREATE_PROCESS: { if (devHndl->objType != VMCIOBJ_NOT_SET) { printk("VMCI: Received IOCTLCMD_VMCI_CREATE_PROCESS on " "initialized handle.\n"); retval = -EINVAL; break; } ASSERT(!devHndl->obj); retval = VMCIProcess_Create((VMCIProcess **) &devHndl->obj, -1); if (retval != 0) { printk("VMCI: Failed to create process.\n"); break; } devHndl->objType = VMCIOBJ_PROCESS; break; } case IOCTL_VMCI_CREATE_DATAGRAM_PROCESS: { VMCIDatagramCreateInfo createInfo; VMCIDatagramProcess *dgmProc; if (devHndl->objType != VMCIOBJ_NOT_SET) { printk("VMCI: Received IOCTLCMD_VMCI_CREATE_DATAGRAM_PROCESS on " "initialized handle.\n"); retval = -EINVAL; break; } ASSERT(!devHndl->obj); retval = copy_from_user(&createInfo, (void *)arg, sizeof createInfo); if (retval != 0) { printk("VMCI: Error getting datagram create info, %d.\n", retval); retval = -EFAULT; break; } if (VMCIDatagramProcess_Create(&dgmProc, &createInfo) < VMCI_SUCCESS) { retval = -EINVAL; break; } retval = copy_to_user((void *)arg, &createInfo, sizeof createInfo); if (retval != 0) { VMCIDatagramProcess_Destroy(dgmProc); printk("VMCI: Failed to create datagram process.\n"); retval = -EFAULT; break; } devHndl->obj = dgmProc; devHndl->objType = VMCIOBJ_DATAGRAM_PROCESS; break; } case IOCTL_VMCI_DATAGRAM_SEND: { VMCIDatagramSendRecvInfo sendInfo; VMCIDatagram *dg = NULL; if (devHndl->objType != VMCIOBJ_DATAGRAM_PROCESS) { printk("VMCI: Ioctl %d only valid for process datagram handle.\n", cmd); retval = -EINVAL; break; } retval = copy_from_user(&sendInfo, (void *) arg, sizeof sendInfo); if (retval) { printk("VMCI: copy_from_user failed.\n"); retval = -EFAULT; break; } if (sendInfo.len > VMCI_MAX_DG_SIZE) { printk("VMCI: datagram size too big.\n"); retval = -EINVAL; break; } dg = VMCI_AllocKernelMem(sendInfo.len, VMCI_MEMORY_NORMAL); if (dg == NULL) { printk("VMCI: Cannot allocate memory to dispatch datagram.\n"); retval = -ENOMEM; break; } retval = copy_from_user(dg, (char *)(VA)sendInfo.addr, sendInfo.len); if (retval != 0) { printk("VMCI: Error getting datagram: %d\n", retval); VMCI_FreeKernelMem(dg, sendInfo.len); retval = -EFAULT; break; } DEBUG_ONLY(printk("VMCI: Datagram dst handle 0x%x:0x%x, src handle " "0x%x:0x%x, payload size %"FMT64"u.\n", dg->dst.context, dg->dst.resource, dg->src.context, dg->src.resource, dg->payloadSize)); sendInfo.result = VMCIDatagram_Send(dg); VMCI_FreeKernelMem(dg, sendInfo.len); retval = copy_to_user((void *)arg, &sendInfo, sizeof sendInfo); break; } case IOCTL_VMCI_DATAGRAM_RECEIVE: { VMCIDatagramSendRecvInfo recvInfo; VMCIDatagram *dg = NULL; if (devHndl->objType != VMCIOBJ_DATAGRAM_PROCESS) { printk("VMCI: Ioctl %d only valid for process datagram handle.\n", cmd); retval = -EINVAL; break; } retval = copy_from_user(&recvInfo, (void *) arg, sizeof recvInfo); if (retval) { printk("VMCI: copy_from_user failed.\n"); retval = -EFAULT; break; } ASSERT(devHndl->obj); recvInfo.result = VMCIDatagramProcess_ReadCall((VMCIDatagramProcess *)devHndl->obj, recvInfo.len, &dg); if (recvInfo.result < VMCI_SUCCESS) { retval = -EINVAL; break; } ASSERT(dg); retval = 
copy_to_user((void *) ((uintptr_t) recvInfo.addr), dg, VMCI_DG_SIZE(dg)); VMCI_FreeKernelMem(dg, VMCI_DG_SIZE(dg)); if (retval != 0) { break; } retval = copy_to_user((void *)arg, &recvInfo, sizeof recvInfo); break; } case IOCTL_VMCI_GET_CONTEXT_ID: { VMCIId cid = VMCI_GetContextID(); retval = copy_to_user((void *)arg, &cid, sizeof cid); break; } default: printk(KERN_DEBUG "vmci_ioctl(): unknown ioctl 0x%x.\n", cmd); retval = -EINVAL; break; } return retval; #endif } /* *----------------------------------------------------------------------------- * * vmci_poll -- * * vmci poll function * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static unsigned int vmci_poll(struct file *file, // IN poll_table *wait) // IN { VMCILockFlags flags; unsigned int mask = 0; VMCIGuestDeviceHandle *devHndl = (VMCIGuestDeviceHandle *) file->private_data; /* * Check for call to this VMCI process. */ if (!devHndl) { return mask; } if (devHndl->objType == VMCIOBJ_DATAGRAM_PROCESS) { VMCIDatagramProcess *dgmProc = (VMCIDatagramProcess *) devHndl->obj; ASSERT(dgmProc); if (wait != NULL) { poll_wait(file, &dgmProc->host.waitQueue, wait); } VMCI_GrabLock_BH(&dgmProc->datagramQueueLock, &flags); if (dgmProc->pendingDatagrams > 0) { mask = POLLIN; } VMCI_ReleaseLock_BH(&dgmProc->datagramQueueLock, flags); } return mask; } /* *----------------------------------------------------------------------------- * * vmci_interrupt -- * * Interrupt handler. * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) static compat_irqreturn_t vmci_interrupt(int irq, // IN void *clientdata, // IN struct pt_regs *regs) // IN #else static compat_irqreturn_t vmci_interrupt(int irq, // IN void *clientdata) // IN #endif { vmci_device *dev = clientdata; unsigned int icr = 0; if (dev == NULL) { printk (KERN_DEBUG "vmci_interrupt(): irq %d for unknown device.\n", irq); return COMPAT_IRQ_NONE; } /* Acknowledge interrupt and determine what needs doing. */ icr = inl(dev->ioaddr + VMCI_ICR_ADDR); if (icr == 0) { return COMPAT_IRQ_NONE; } if (icr & VMCI_ICR_DATAGRAM) { tasklet_schedule(&vmci_tasklet); icr &= ~VMCI_ICR_DATAGRAM; } if (icr != 0) { printk(KERN_INFO LGPFX"Ignoring unknown interrupt cause (%d).\n", icr); } return COMPAT_IRQ_HANDLED; } /* *----------------------------------------------------------------------------- * * VMCI_DeviceEnabled -- * * Checks whether the VMCI device is enabled. * * Results: * TRUE if device is enabled, FALSE otherwise. * * Side effects: * None. * *----------------------------------------------------------------------------- */ Bool VMCI_DeviceEnabled(void) { Bool retval; down(&vmci_dev.lock); retval = vmci_dev.enabled; up(&vmci_dev.lock); return retval; } /* *----------------------------------------------------------------------------- * * VMCI_SendDatagram -- * * VM to hypervisor call mechanism. We use the standard VMware naming * convention since shared code is calling this function as well. * * Results: * The result of the hypercall. * * Side effects: * None. * *----------------------------------------------------------------------------- */ int VMCI_SendDatagram(VMCIDatagram *dg) { unsigned long flags; int result; /* Check args. 
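 *
 * Example (editorial annotation, not part of the original source): the
 * shape of a datagram a caller might pass to this function, sketched
 * with a small payload placed immediately after the header. The
 * destination resource id and payload contents are illustrative
 * assumptions; VMCI_DG_SIZE(dg) covers header plus payload.
 *
 *    char buf[sizeof(VMCIDatagram) + 4];
 *    VMCIDatagram *dg = (VMCIDatagram *)buf;
 *    dg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID, EXAMPLE_RID);
 *    dg->src = VMCI_ANON_SRC_HANDLE;
 *    dg->payloadSize = 4;                  // Bytes following the header.
 *    memcpy(buf + sizeof *dg, "ping", 4);
 *    result = VMCI_SendDatagram(dg);
 *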
*/ if (dg == NULL) { return VMCI_ERROR_INVALID_ARGS; } /* * Need to acquire spinlock on the device because * the datagram data may be spread over multiple pages and the monitor may * interleave device user rpc calls from multiple VCPUs. Acquiring the * spinlock precludes that possibility. We disable interrupts to avoid * incoming datagrams during a "rep out" and possibly re-entering this * function. */ spin_lock_irqsave(&vmci_dev.dev_spinlock, flags); /* * Send the datagram and retrieve the return value from the result register. */ __asm__ __volatile__( "cld\n\t" "rep outsb\n\t" : /* No output. */ : "d"(vmci_dev.ioaddr + VMCI_DATA_OUT_ADDR), "c"(VMCI_DG_SIZE(dg)), "S"(dg) ); /* * XXX Should read result high port as well when updating handlers to * return 64bit. */ result = inl(vmci_dev.ioaddr + VMCI_RESULT_LOW_ADDR); spin_unlock_irqrestore(&vmci_dev.dev_spinlock, flags); return result; } /* *----------------------------------------------------------------------------- * * dispatch_datagrams -- * * Reads and dispatches incoming datagrams. * * Results: * None. * * Side effects: * Reads data from the device. * *----------------------------------------------------------------------------- */ void dispatch_datagrams(unsigned long data) { vmci_device *dev = (vmci_device *)data; if (dev == NULL) { printk(KERN_DEBUG "vmci: dispatch_datagrams(): no vmci device " "present.\n"); return; } if (data_buffer == NULL) { printk(KERN_DEBUG "vmci: dispatch_datagrams(): no buffer present.\n"); return; } VMCI_ReadDatagramsFromPort((VMCIIoHandle) 0, dev->ioaddr + VMCI_DATA_IN_ADDR, data_buffer, data_buffer_size); } module_init(vmci_init); module_exit(vmci_exit); MODULE_DEVICE_TABLE(pci, vmci_ids); /* Module information. */ MODULE_AUTHOR("VMware, Inc."); MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface"); MODULE_VERSION(VMCI_DRIVER_VERSION_STRING); MODULE_LICENSE("GPL v2"); /* * Starting with SLE10sp2, Novell requires that IHVs sign a support agreement * with them and mark their kernel modules as externally supported via a * change to the module header. If this isn't done, the module will not load * by default (i.e., neither mkinitrd nor modprobe will accept it). */ MODULE_INFO(supported, "external"); vmci-only/vmci_version.h0000444000000000000000000000220312025726724014347 0ustar rootroot/********************************************************* * Copyright (C) 2007 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmci_version.h -- * * Version definitions for the Linux vmci driver.
*/ #ifndef _VMCI_VERSION_H_ #define _VMCI_VERSION_H_ #define VMCI_DRIVER_VERSION 1.0.15.0 #define VMCI_DRIVER_VERSION_COMMAS 1,0,15,0 #define VMCI_DRIVER_VERSION_STRING "1.0.15.0" #endif /* _VMCI_VERSION_H_ */ vmci-only/Makefile.kernel0000444000000000000000000000336412025726725014424 0ustar rootroot#!/usr/bin/make -f ########################################################## # Copyright (C) 2005 VMware, Inc. All rights reserved. # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation version 2 and no later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License # for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # ########################################################## #### #### VMware vmci Makefile to be distributed externally #### INCLUDE := -I. EXTRA_CFLAGS := $(CC_OPTS) $(INCLUDE) EXTRA_CFLAGS += $(call vm_check_build, $(SRCROOT)/autoconf/epoll.c, -DVMW_HAVE_EPOLL, ) EXTRA_CFLAGS += -DVMX86_TOOLS obj-m += $(DRIVER).o $(DRIVER)-y := $(subst $(SRCROOT)/, , $(patsubst %.c, %.o, $(wildcard $(SRCROOT)/*.c))) T := /tmp MODPOST_VMCI_SYMVERS := $(T)/VMwareVMCIModule.symvers clean: rm -rf $(wildcard $(DRIVER).mod.c $(DRIVER).ko .tmp_versions \ Module.symvers Modules.symvers Module.markers modules.order \ $(MODPOST_VMCI_SYMVERS) \ $(foreach dir,./,$(addprefix $(dir),.*.cmd .*.o.flags *.o))) # # If this build generated a Module.symvers, copy it to a public place where # the VMCI Sockets build will be able to find it. # postbuild:: ifeq ($(call vm_check_file,$(SRCROOT)/Module.symvers), yes) cp -f $(SRCROOT)/Module.symvers $(MODPOST_VMCI_SYMVERS) endif vmci-only/vmciKernelIf.c0000444000000000000000000010044212025726724014221 0ustar rootroot/********************************************************* * Copyright (C) 2007 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmciKernelIf.c -- * * This file implements defines and helper functions for VMCI * host _and_ guest kernel code. This is the linux specific * implementation. */ /* Must come before any kernel header file */ #include "driver-config.h" #if !defined(linux) || defined(VMKERNEL) #error "Wrong platform." 
#endif #define EXPORT_SYMTAB #define __NO_VERSION__ #include "compat_module.h" #include "compat_version.h" #include "compat_wait.h" #include "compat_interrupt.h" #include "compat_spinlock.h" #include "compat_slab.h" #include "compat_semaphore.h" #include "compat_page.h" #include "compat_mm.h" #include "compat_highmem.h" #include "vm_basic_types.h" #include "pgtbl.h" #include #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) # include /* For vmalloc_to_page() */ #endif #include /* For memcpy_{to,from}iovec(). */ #include "vm_assert.h" #include "vmci_kernel_if.h" #include "vmci_queue_pair.h" /* * In Linux 2.6.25 kernels and onwards, the symbol init_mm is no * longer exported. This affects the function PgtblKVa2MPN, as it * calls pgd_offset_k which in turn is a macro referencing init_mm. * * We can avoid using PgtblKVa2MPN on more recent kernels by instead * using the function vmalloc_to_page followed by * page_to_pfn. vmalloc_to_page was introduced in the 2.5 kernels and * backported to some 2.4.x kernels. We use vmalloc_to_page on all * 2.6.x kernels, where it is present for sure, and use PgtblKVa2MPN * on older kernels where it works just fine. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) # define VMCIKVaToMPN(__VA) page_to_pfn(vmalloc_to_page((void *)(__VA))) #else # define VMCIKVaToMPN(__VA) PgtblKVa2MPN(__VA) #endif /* *----------------------------------------------------------------------------- * * VMCI_InitLock * * Initializes the lock. Must be called before use. * * Results: * None * * Side effects: * Thread can block. * *----------------------------------------------------------------------------- */ void VMCI_InitLock(VMCILock *lock, // IN: char *name, // IN: Unused on Linux VMCILockRank rank) // IN: Unused on Linux { spin_lock_init(lock); } /* *----------------------------------------------------------------------------- * * VMCI_CleanupLock * * Cleanup the lock. Must be called before deallocating lock. * * Results: * None * * Side effects: * Deletes kernel lock state * *----------------------------------------------------------------------------- */ void VMCI_CleanupLock(VMCILock *lock) { } /* *----------------------------------------------------------------------------- * * VMCI_GrabLock * * Grabs the given lock. XXX Fill in specific lock requirements. XXX Move * locking code into hostif if VMCI stays in vmmon. * * Results: * None * * Side effects: * Thread can block. * *----------------------------------------------------------------------------- */ void VMCI_GrabLock(VMCILock *lock, // IN VMCILockFlags *flags) // OUT: used to restore irql on windows { spin_lock(lock); } /* *----------------------------------------------------------------------------- * * VMCI_ReleaseLock * * Releases the given lock. XXX Move locking code into hostif if VMCI * stays in vmmon. * * Results: * None * * Side effects: * A thread blocked on this lock may wake up. * *----------------------------------------------------------------------------- */ void VMCI_ReleaseLock(VMCILock *lock, // IN VMCILockFlags flags) // IN { spin_unlock(lock); } /* *----------------------------------------------------------------------------- * * VMCI_GrabLock_BH * * Grabs the given lock and for linux kernels disables bottom half execution. * This should be used with locks accessed both from bottom half/tasklet * contexts, ie. guestcall handlers, and from process contexts to avoid * deadlocks where the process has the lock and gets descheduled due to a * bh/tasklet coming in.
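 *
 * Example (editorial annotation, not part of the original source): the
 * deadlock this avoids, sketched as correct usage. With a plain
 * spin_lock, a tasklet firing on the same CPU while process context
 * holds the lock would spin forever against an owner that cannot run;
 * the _BH variant keeps bottom halves off this CPU until release. The
 * lock and function names are illustrative.
 *
 *    static VMCILock exampleLock;                 // Hypothetical shared lock.
 *
 *    static void
 *    ExampleProcessSide(void)                     // Process context.
 *    {
 *       VMCILockFlags flags;
 *       VMCI_GrabLock_BH(&exampleLock, &flags);   // BHs disabled here...
 *       // ... touch state shared with a tasklet/guestcall handler ...
 *       VMCI_ReleaseLock_BH(&exampleLock, flags); // ...pending BHs run now.
 *    }
 *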
* * Results: * None * * Side effects: * None. * *----------------------------------------------------------------------------- */ void VMCI_GrabLock_BH(VMCILock *lock, // IN VMCILockFlags *flags) // OUT: used to restore { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 4) spin_lock_bh(lock); #else /* * Before 2.3.4 linux kernels spin_unlock_bh didn't exist so we are using * spin_lock_irqsave/restore instead. I wanted to define spin_[un]lock_bh * functions in compat_spinlock.h as local_bh_disable;spin_lock(lock) and * so on, but local_bh_disable/enable does not exist on 2.2.26. */ spin_lock_irqsave(lock, *flags); #endif // LINUX_VERSION_CODE } /* *----------------------------------------------------------------------------- * * VMCI_ReleaseLock_BH * * Releases the given lock and for linux kernels reenables bottom half * execution. * This should be used with locks accessed both from bottom half/tasklet * contexts, ie. guestcall handlers, and from process contexts to avoid * deadlocks where the process has the lock and gets descheduled due to a * bh/tasklet coming in. * * Results: * None * * Side effects: * None. * *----------------------------------------------------------------------------- */ void VMCI_ReleaseLock_BH(VMCILock *lock, // IN VMCILockFlags flags) // IN { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 4) spin_unlock_bh(lock); #else /* * Before 2.3.4 linux kernels spin_unlock_bh didn't exist so we are using * spin_lock_irqsave/restore instead. I wanted to define spin_[un]lock_bh * functions in compat_spinlock.h as local_bh_disable;spin_lock(lock) and * so on, but local_bh_disable/enable does not exist on 2.2.26. */ spin_unlock_irqrestore(lock, flags); #endif // LINUX_VERSION_CODE } /* *---------------------------------------------------------------------- * * VMCIHost_InitContext -- * * Host-specific initialization of VMCI context state. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ void VMCIHost_InitContext(VMCIHost *hostContext, // IN uintptr_t eventHnd) // IN { init_waitqueue_head(&hostContext->waitQueue); } /* *---------------------------------------------------------------------- * * VMCIHost_ReleaseContext -- * * Host-specific release of state allocated by * VMCIHost_InitContext. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ void VMCIHost_ReleaseContext(VMCIHost *hostContext) // IN { } /* *---------------------------------------------------------------------- * * VMCIHost_SignalCall -- * * Signal to userlevel that a VMCI call is waiting. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ void VMCIHost_SignalCall(VMCIHost *hostContext) // IN { wake_up(&hostContext->waitQueue); } /* *---------------------------------------------------------------------- * * VMCIHost_WaitForCallLocked -- * * Wait until a VMCI call is pending or the waiting thread is * interrupted. It is assumed that a lock is held prior to * calling this function. The lock will be released during the * wait. The correctness of this function depends on the same * lock being held when the call is signalled. * * Results: * TRUE on success * FALSE if the wait was interrupted. * * Side effects: * The call may block.
* *---------------------------------------------------------------------- */ Bool VMCIHost_WaitForCallLocked(VMCIHost *hostContext, // IN VMCILock *lock, // IN VMCILockFlags *flags, // IN Bool useBH) // IN { DECLARE_WAITQUEUE(wait, current); /* * The thread must be added to the wait queue and have its state * changed while holding the lock - otherwise a signal may change * the state in between and have it overwritten causing a loss of * the event. */ add_wait_queue(&hostContext->waitQueue, &wait); current->state = TASK_INTERRUPTIBLE; if (useBH) { VMCI_ReleaseLock_BH(lock, *flags); } else { VMCI_ReleaseLock(lock, *flags); } schedule(); if (useBH) { VMCI_GrabLock_BH(lock, flags); } else { VMCI_GrabLock(lock, flags); } current->state = TASK_RUNNING; remove_wait_queue(&hostContext->waitQueue, &wait); if (signal_pending(current)) { return FALSE; } return TRUE; } /* *---------------------------------------------------------------------- * * VMCIHost_ClearCall -- * * Clear the pending call signal. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ void VMCIHost_ClearCall(VMCIHost *hostContext) // IN { } /* *---------------------------------------------------------------------- * * VMCI_AllocKernelMem * * Allocate some kernel memory for the VMCI driver. * * Results: * The address allocated or NULL on error. * * * Side effects: * memory is malloced *---------------------------------------------------------------------- */ void * VMCI_AllocKernelMem(size_t size, int flags) { void *ptr; if ((flags & VMCI_MEMORY_ATOMIC) != 0) { ptr = kmalloc(size, GFP_ATOMIC); } else { ptr = kmalloc(size, GFP_KERNEL); } return ptr; } /* *---------------------------------------------------------------------- * * VMCI_FreeKernelMem * * Free kernel memory allocated for the VMCI driver. * * Results: * None. * * Side effects: * memory is freed. *---------------------------------------------------------------------- */ void VMCI_FreeKernelMem(void *ptr, // IN: size_t size) // IN: Unused on Linux { kfree(ptr); } /* *---------------------------------------------------------------------- * * VMCI_AllocBuffer * * Allocate some kernel memory for the VMCI driver. The memory is * not guaranteed to have a mapping in the virtual address * space. Use VMCI_MapBuffer to get a VA mapping for the memory. * * Results: * A reference to the allocated memory or VMCI_BUFFER_INVALID * on error. * * * Side effects: * memory is allocated. *---------------------------------------------------------------------- */ VMCIBuffer VMCI_AllocBuffer(size_t size, int flags) { return VMCI_AllocKernelMem(size, flags); } /* *---------------------------------------------------------------------- * * VMCI_MapBuffer * * Ensures that the kernel memory allocated with VMCI_AllocBuffer * has a mapping in the virtual address space. * * Results: * None. * * Side effects: * virtual address mapping of kernel memory is established. *---------------------------------------------------------------------- */ void * VMCI_MapBuffer(VMCIBuffer buf) { return buf; } /* *---------------------------------------------------------------------- * * VMCI_ReleaseBuffer * * Releases the VA mapping of kernel memory allocated with * VMCI_AllocBuffer. * * Results: * None. * * Side effects: * virtual address mapping of kernel memory is released. 
*---------------------------------------------------------------------- */ void VMCI_ReleaseBuffer(void *ptr) // IN: The VA of the mapped memory { } /* *---------------------------------------------------------------------- * * VMCI_FreeBuffer * * Free temporary kernel memory allocated for the VMCI driver. * * Results: * None. * * Side effects: * memory is freed. *---------------------------------------------------------------------- */ void VMCI_FreeBuffer(VMCIBuffer buf, // IN: size_t size) // IN: Unused on Linux { VMCI_FreeKernelMem(buf, size); } /* *----------------------------------------------------------------------------- * * VMCI_CopyToUser -- * * Copy memory to the user application from a kernel buffer. This * function may block, so don't call it while holding any kind of * lock. * * Results: * 0 on success. * Nonzero on failure. * * Side effects: * None * *----------------------------------------------------------------------------- */ int VMCI_CopyToUser(void *dst, // OUT const void *src, // IN unsigned int len) // IN { return copy_to_user(dst, src, len) ? -EFAULT : 0; } /* *----------------------------------------------------------------------------- * * VMCI_CopyFromUser -- * * Copy memory from the user application to a kernel buffer. This * function may block, so don't call it while holding any kind of * lock. * * Results: * 0 on success. * Nonzero on failure. * * Side effects: * None. * *----------------------------------------------------------------------------- */ int VMCI_CopyFromUser(void *dst, // OUT const void *src, // IN size_t len) // IN { return copy_from_user(dst, src, len); } /* *----------------------------------------------------------------------------- * * VMCI_CreateEvent -- * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ void VMCI_CreateEvent(VMCIEvent *event) // IN: { init_waitqueue_head(event); } /* *----------------------------------------------------------------------------- * * VMCI_DestroyEvent -- * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ void VMCI_DestroyEvent(VMCIEvent *event) // IN: { /* Nothing to do. */ } /* *----------------------------------------------------------------------------- * * VMCI_SignalEvent -- * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ void VMCI_SignalEvent(VMCIEvent *event) // IN: { wake_up(event); } /* *----------------------------------------------------------------------------- * * VMCI_WaitOnEvent -- * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ void VMCI_WaitOnEvent(VMCIEvent *event, // IN: VMCIEventReleaseCB releaseCB, // IN: void *clientData) // IN: { DECLARE_WAITQUEUE(wait, current); if (event == NULL || releaseCB == NULL) { return; } add_wait_queue(event, &wait); current->state = TASK_INTERRUPTIBLE; /* * Release the lock or other primitive that makes it possible for us to * put the current thread on the wait queue without missing the signal. * Ie. on Linux we need to put ourselves on the wait queue and set our * state to TASK_INTERRUPTIBLE without another thread signalling us in between. * The releaseCB is used to synchronize this.
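 *
 * Example (editorial annotation, not part of the original source): a
 * sketch of the releaseCB contract. The waiter queues itself first and
 * only then drops its lock -- inside the callback -- so a concurrent
 * VMCI_SignalEvent() cannot slip between the condition check and the
 * sleep. 'ExampleState' and its fields are illustrative assumptions;
 * on Linux the plain VMCI_ReleaseLock() ignores the flags argument.
 *
 *    static void
 *    ExampleRelease(void *clientData)       // Matches VMCIEventReleaseCB.
 *    {
 *       ExampleState *s = clientData;       // Hypothetical waiter state.
 *       VMCI_ReleaseLock(&s->lock, s->flags);
 *    }
 *
 *    // Waiter: grab s->lock, find the condition false, then:
 *    VMCI_WaitOnEvent(&s->event, ExampleRelease, s);
 *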
    */
   releaseCB(clientData);

   schedule();
   current->state = TASK_RUNNING;
   remove_wait_queue(event, &wait);
}


/*
 *-----------------------------------------------------------------------------
 *
 * VMCIMutex_Init --
 *
 *      Initializes the mutex. Must be called before use.
 *
 * Results:
 *      VMCI_SUCCESS.
 *
 * Side effects:
 *      None.
 *
 *-----------------------------------------------------------------------------
 */

int
VMCIMutex_Init(VMCIMutex *mutex)  // IN:
{
   sema_init(mutex, 1);
   return VMCI_SUCCESS;
}


/*
 *-----------------------------------------------------------------------------
 *
 * VMCIMutex_Destroy --
 *
 *      Destroys the mutex. Does nothing on Linux.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *-----------------------------------------------------------------------------
 */

void
VMCIMutex_Destroy(VMCIMutex *mutex)  // IN: Unused
{
}


/*
 *-----------------------------------------------------------------------------
 *
 * VMCIMutex_Acquire --
 *
 *      Acquires the mutex.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Thread may block.
 *
 *-----------------------------------------------------------------------------
 */

void
VMCIMutex_Acquire(VMCIMutex *mutex)  // IN:
{
   down(mutex);
}


/*
 *-----------------------------------------------------------------------------
 *
 * VMCIMutex_Release --
 *
 *      Releases the mutex.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      May wake up the thread blocking on this mutex.
 *
 *-----------------------------------------------------------------------------
 */

void
VMCIMutex_Release(VMCIMutex *mutex)  // IN:
{
   up(mutex);
}


/*
 *-----------------------------------------------------------------------------
 *
 * VMCI_AllocQueueKVA --
 *
 *      Allocates kernel memory for the queue header (1 page) plus the
 *      translation structure for offset -> page mappings. Allocates physical
 *      pages for the queue (buffer area), and initializes the translation
 *      structure.
 *
 * Results:
 *      The VA on success, NULL otherwise.
 *
 * Side effects:
 *      Memory is allocated.
 *
 *-----------------------------------------------------------------------------
 */

VA
VMCI_AllocQueueKVA(uint64 size) // IN: size of queue (not including header)
{
   const uint64 numPages = CEILING(size, PAGE_SIZE);
   VMCIQueue *queue;

   queue = vmalloc(sizeof *queue + numPages * sizeof queue->page[0]);
   if (queue) {
      uint64 i;

      /*
       * Allocate physical pages; they will be mapped/unmapped on demand.
       */
      for (i = 0; i < numPages; i++) {
         queue->page[i] = alloc_pages(GFP_KERNEL, 0); /* One page. */
         if (!queue->page[i]) {
            /*
             * Free all pages allocated so far.
             */
            while (i) {
               __free_page(queue->page[--i]);
            }
            vfree(queue);
            queue = NULL;
            break;
         }
      }
   }

   return (VA)queue;
}


/*
 *-----------------------------------------------------------------------------
 *
 * VMCI_FreeQueueKVA --
 *
 *      Frees kernel memory for a given queue (header plus translation
 *      structure). Frees all physical pages that held the buffers for this
 *      queue.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Memory is freed.
 *
 *-----------------------------------------------------------------------------
 */

void
VMCI_FreeQueueKVA(VA va,       // IN:
                  uint64 size) // IN: size of queue (not including header)
{
   VMCIQueue *queue = (VMCIQueue *)va;

   if (queue) {
      uint64 i;

      for (i = 0; i < CEILING(size, PAGE_SIZE); i++) {
         __free_page(queue->page[i]);
      }
      vfree(queue);
   }
}


/*
 *-----------------------------------------------------------------------------
 *
 * VMCI_AllocPPNSet --
 *
 *      Allocates two lists of PPNs --- one for the pages in the produce
 *      queue, and the other for the pages in the consume queue.
 *      Initializes the lists of PPNs with the page frame numbers of the
 *      KVA for the two queues (and the queue headers).
 *
 * Results:
 *      Success or failure.
 *
 * Side effects:
 *      Memory may be allocated.
 *
 *-----------------------------------------------------------------------------
 */

int
VMCI_AllocPPNSet(VA produceVA,           // IN:
                 uint64 numProducePages, // IN: for queue plus header
                 VA consumeVA,           // IN:
                 uint64 numConsumePages, // IN: for queue plus header
                 PPNSet *ppnSet)         // OUT:
{
   VMCIPpnList producePPNs;
   VMCIPpnList consumePPNs;
   uint64 i;
   VMCIQueue *produceQ = (VMCIQueue *)produceVA;
   VMCIQueue *consumeQ = (VMCIQueue *)consumeVA;

   if (!produceVA || !numProducePages || !consumeVA || !numConsumePages ||
       !ppnSet) {
      return VMCI_ERROR_INVALID_ARGS;
   }

   if (ppnSet->initialized) {
      return VMCI_ERROR_ALREADY_EXISTS;
   }

   producePPNs = VMCI_AllocKernelMem(numProducePages * sizeof *producePPNs,
                                     VMCI_MEMORY_NORMAL);
   if (!producePPNs) {
      return VMCI_ERROR_NO_MEM;
   }

   consumePPNs = VMCI_AllocKernelMem(numConsumePages * sizeof *consumePPNs,
                                     VMCI_MEMORY_NORMAL);
   if (!consumePPNs) {
      VMCI_FreeKernelMem(producePPNs, numProducePages * sizeof *producePPNs);
      return VMCI_ERROR_NO_MEM;
   }

   producePPNs[0] = VMCIKVaToMPN(produceVA);
   for (i = 1; i < numProducePages; i++) {
      unsigned long pfn;

      producePPNs[i] = pfn = page_to_pfn(produceQ->page[i - 1]);

      /*
       * Fail allocation if PFN isn't supported by hypervisor.
       */
      if (sizeof pfn > sizeof *producePPNs && pfn != producePPNs[i]) {
         goto ppnError;
      }
   }

   consumePPNs[0] = VMCIKVaToMPN(consumeVA);
   for (i = 1; i < numConsumePages; i++) {
      unsigned long pfn;

      consumePPNs[i] = pfn = page_to_pfn(consumeQ->page[i - 1]);

      /*
       * Fail allocation if PFN isn't supported by hypervisor.
       */
      if (sizeof pfn > sizeof *consumePPNs && pfn != consumePPNs[i]) {
         goto ppnError;
      }
   }

   ppnSet->numProducePages = numProducePages;
   ppnSet->numConsumePages = numConsumePages;
   ppnSet->producePPNs = producePPNs;
   ppnSet->consumePPNs = consumePPNs;
   ppnSet->initialized = TRUE;
   return VMCI_SUCCESS;

ppnError:
   VMCI_FreeKernelMem(producePPNs, numProducePages * sizeof *producePPNs);
   VMCI_FreeKernelMem(consumePPNs, numConsumePages * sizeof *consumePPNs);
   return VMCI_ERROR_INVALID_ARGS;
}


/*
 *-----------------------------------------------------------------------------
 *
 * VMCI_FreePPNSet --
 *
 *      Frees the two lists of PPNs for a queue pair.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *-----------------------------------------------------------------------------
 */

void
VMCI_FreePPNSet(PPNSet *ppnSet) // IN:
{
   ASSERT(ppnSet);
   if (ppnSet->initialized) {
      /* Do not call these functions on NULL inputs. */
      ASSERT(ppnSet->producePPNs && ppnSet->consumePPNs);
      VMCI_FreeKernelMem(ppnSet->producePPNs,
                         ppnSet->numProducePages * sizeof *ppnSet->producePPNs);
      VMCI_FreeKernelMem(ppnSet->consumePPNs,
                         ppnSet->numConsumePages * sizeof *ppnSet->consumePPNs);
   }
   memset(ppnSet, 0, sizeof *ppnSet);
}


/*
 *-----------------------------------------------------------------------------
 *
 * VMCI_PopulatePPNList --
 *
 *      Populates the list of PPNs in the hypercall structure with the PPNs
 *      of the produce queue and the consume queue.
 *
 * Results:
 *      VMCI_SUCCESS.
 *
 * Side effects:
 *      None.
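 *
 *      As the memcpy calls below show, the resulting layout of callBuf
 *      is simply the produce PPNs followed immediately by the consume
 *      PPNs, so the caller must size callBuf for at least
 *      (numProducePages + numConsumePages) * sizeof *ppnSet->producePPNs
 *      bytes.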
 *
 *-----------------------------------------------------------------------------
 */

int
VMCI_PopulatePPNList(uint8 *callBuf,       // OUT:
                     const PPNSet *ppnSet) // IN:
{
   ASSERT(callBuf && ppnSet && ppnSet->initialized);
   memcpy(callBuf, ppnSet->producePPNs,
          ppnSet->numProducePages * sizeof *ppnSet->producePPNs);
   memcpy(callBuf + ppnSet->numProducePages * sizeof *ppnSet->producePPNs,
          ppnSet->consumePPNs,
          ppnSet->numConsumePages * sizeof *ppnSet->consumePPNs);

   return VMCI_SUCCESS;
}


#ifdef __KERNEL__

/*
 *-----------------------------------------------------------------------------
 *
 * __VMCIMemcpyToQueue --
 *
 *      Copies from a given buffer or iovector to a VMCI Queue. Uses
 *      kmap()/kunmap() to dynamically map/unmap required portions of the queue
 *      by traversing the offset -> page translation structure for the queue.
 *      Assumes that offset + size does not wrap around in the queue.
 *
 * Results:
 *      Zero on success, negative error code on failure.
 *
 * Side effects:
 *      None.
 *
 *-----------------------------------------------------------------------------
 */

int
__VMCIMemcpyToQueue(VMCIQueue *queue,   // OUT:
                    uint64 queueOffset, // IN:
                    const void *src,    // IN:
                    size_t size,        // IN:
                    Bool isIovec)       // IN: if src is a struct iovec *
{
   size_t bytesCopied = 0;

   while (bytesCopied < size) {
      uint64 pageIndex = (queueOffset + bytesCopied) / PAGE_SIZE;
      size_t pageOffset = (queueOffset + bytesCopied) & (PAGE_SIZE - 1);
      void *va = kmap(queue->page[pageIndex]);
      size_t toCopy;

      ASSERT(va);
      if (size - bytesCopied > PAGE_SIZE - pageOffset) {
         /* Enough payload to fill up from this page. */
         toCopy = PAGE_SIZE - pageOffset;
      } else {
         toCopy = size - bytesCopied;
      }

      if (isIovec) {
         struct iovec *iov = (struct iovec *)src;
         int err;

         /* The iovec will track bytesCopied internally. */
         err = memcpy_fromiovec((uint8 *)va + pageOffset, iov, toCopy);
         if (err != 0) {
            kunmap(queue->page[pageIndex]);
            return err;
         }
      } else {
         memcpy((uint8 *)va + pageOffset, (uint8 *)src + bytesCopied, toCopy);
      }

      bytesCopied += toCopy;
      kunmap(queue->page[pageIndex]);
   }

   return 0;
}


/*
 *-----------------------------------------------------------------------------
 *
 * __VMCIMemcpyFromQueue --
 *
 *      Copies to a given buffer or iovector from a VMCI Queue. Uses
 *      kmap()/kunmap() to dynamically map/unmap required portions of the queue
 *      by traversing the offset -> page translation structure for the queue.
 *      Assumes that offset + size does not wrap around in the queue.
 *
 * Results:
 *      Zero on success, negative error code on failure.
 *
 * Side effects:
 *      None.
 *
 *-----------------------------------------------------------------------------
 */

int
__VMCIMemcpyFromQueue(void *dest,             // OUT:
                      const VMCIQueue *queue, // IN:
                      uint64 queueOffset,     // IN:
                      size_t size,            // IN:
                      Bool isIovec)           // IN: if dest is a struct iovec *
{
   size_t bytesCopied = 0;

   while (bytesCopied < size) {
      uint64 pageIndex = (queueOffset + bytesCopied) / PAGE_SIZE;
      size_t pageOffset = (queueOffset + bytesCopied) & (PAGE_SIZE - 1);
      void *va = kmap(queue->page[pageIndex]);
      size_t toCopy;

      ASSERT(va);
      if (size - bytesCopied > PAGE_SIZE - pageOffset) {
         /* Enough payload to fill up this page. */
         toCopy = PAGE_SIZE - pageOffset;
      } else {
         toCopy = size - bytesCopied;
      }

      if (isIovec) {
         struct iovec *iov = (struct iovec *)dest;
         int err;

         /* The iovec will track bytesCopied internally.
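          * (memcpy_toiovec advances the iovec's iov_base/iov_len fields
          * as it copies, so each loop iteration resumes where the
          * previous one stopped.)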
          */
         err = memcpy_toiovec(iov, (uint8 *)va + pageOffset, toCopy);
         if (err != 0) {
            kunmap(queue->page[pageIndex]);
            return err;
         }
      } else {
         memcpy((uint8 *)dest + bytesCopied, (uint8 *)va + pageOffset, toCopy);
      }

      bytesCopied += toCopy;
      kunmap(queue->page[pageIndex]);
   }

   return 0;
}


/*
 *-----------------------------------------------------------------------------
 *
 * VMCIMemcpyToQueue --
 *
 *      Copies from a given buffer to a VMCI Queue.
 *
 * Results:
 *      Zero on success, negative error code on failure.
 *
 * Side effects:
 *      None.
 *
 *-----------------------------------------------------------------------------
 */

EXPORT_SYMBOL(VMCIMemcpyToQueue);

int
VMCIMemcpyToQueue(VMCIQueue *queue,   // OUT:
                  uint64 queueOffset, // IN:
                  const void *src,    // IN:
                  size_t srcOffset,   // IN:
                  size_t size)        // IN:
{
   return __VMCIMemcpyToQueue(queue, queueOffset,
                              (uint8 *)src + srcOffset, size, FALSE);
}


/*
 *-----------------------------------------------------------------------------
 *
 * VMCIMemcpyFromQueue --
 *
 *      Copies to a given buffer from a VMCI Queue.
 *
 * Results:
 *      Zero on success, negative error code on failure.
 *
 * Side effects:
 *      None.
 *
 *-----------------------------------------------------------------------------
 */

EXPORT_SYMBOL(VMCIMemcpyFromQueue);

int
VMCIMemcpyFromQueue(void *dest,             // OUT:
                    size_t destOffset,      // IN:
                    const VMCIQueue *queue, // IN:
                    uint64 queueOffset,     // IN:
                    size_t size)            // IN:
{
   return __VMCIMemcpyFromQueue((uint8 *)dest + destOffset,
                                queue, queueOffset, size, FALSE);
}


/*
 *----------------------------------------------------------------------------
 *
 * VMCIMemcpyToQueueV --
 *
 *      Copies from a given iovec to a VMCI Queue.
 *
 * Results:
 *      Zero on success, negative error code on failure.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------------
 */

EXPORT_SYMBOL(VMCIMemcpyToQueueV);

int
VMCIMemcpyToQueueV(VMCIQueue *queue,   // OUT:
                   uint64 queueOffset, // IN:
                   const void *src,    // IN: iovec
                   size_t srcOffset,   // IN: ignored
                   size_t size)        // IN:
{
   /*
    * We ignore srcOffset because src is really a struct iovec * and will
    * maintain offset internally.
    */
   return __VMCIMemcpyToQueue(queue, queueOffset, src, size, TRUE);
}


/*
 *----------------------------------------------------------------------------
 *
 * VMCIMemcpyFromQueueV --
 *
 *      Copies to a given iovec from a VMCI Queue.
 *
 * Results:
 *      Zero on success, negative error code on failure.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------------
 */

EXPORT_SYMBOL(VMCIMemcpyFromQueueV);

int
VMCIMemcpyFromQueueV(void *dest,             // OUT: iovec
                     size_t destOffset,      // IN: ignored
                     const VMCIQueue *queue, // IN:
                     uint64 queueOffset,     // IN:
                     size_t size)            // IN:
{
   /*
    * We ignore destOffset because dest is really a struct iovec * and will
    * maintain offset internally.
    */
   return __VMCIMemcpyFromQueue(dest, queue, queueOffset, size, TRUE);
}

#endif


/*
 *-----------------------------------------------------------------------------
 *
 * VMCIWellKnownID_AllowMap --
 *
 *      Checks whether the calling context is allowed to register for the given
 *      well known service ID. Currently returns FALSE if the service ID is
 *      within the reserved range and VMCI_PRIVILEGE_FLAG_TRUSTED is not
 *      provided as the input privilege flags. Otherwise returns TRUE.
 *      XXX TODO: access control based on host configuration information; this
 *      will be a platform-specific implementation.
 *
 * Results:
 *      Boolean value indicating access granted or denied.
 *
 * Side effects:
 *      None.
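 *
 *      For example, a request to map a well-known ID below
 *      VMCI_RESERVED_RESOURCE_ID_MAX is denied unless privFlags includes
 *      VMCI_PRIVILEGE_FLAG_TRUSTED; any ID at or above that bound is
 *      always granted.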
 *
 *-----------------------------------------------------------------------------
 */

Bool
VMCIWellKnownID_AllowMap(VMCIId wellKnownID,           // IN:
                         VMCIPrivilegeFlags privFlags) // IN:
{
   if (wellKnownID < VMCI_RESERVED_RESOURCE_ID_MAX &&
       !(privFlags & VMCI_PRIVILEGE_FLAG_TRUSTED)) {
      return FALSE;
   }
   return TRUE;
}
vmci-only/Makefile.normal0000444000000000000000000000547512025726725014435 0ustar rootroot
#!/usr/bin/make -f
##########################################################
# Copyright (C) 2005 VMware, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation version 2 and no later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
##########################################################

####
#### VMware vmci Makefile to be distributed externally
####

vm_check_build = $(shell if $(CC) $(CC_OPTS) $(INCLUDE) -Werror -S -o /dev/null -xc $(1) \
	> /dev/null 2>&1; then echo "$(2)"; else echo "$(3)"; fi)

DRIVERNAME = $(DRIVER)-$(VM_UNAME)

ifneq (,$(filter x86_64%, $(shell $(CC) -dumpmachine)))
MACHINE := x86_64
else
MACHINE := x386
endif

####
#### You must compile with at least -O level of optimization
#### or the module won't load.
#### If desperate, I think that bringing in might
#### suffice.
####

CC_WARNINGS := -Wall -Wstrict-prototypes
# Don't use -pipe or egcs-2.91.66 (shipped with RedHat) will die
CC_KFLAGS := -D__KERNEL__ -fno-strength-reduce -fno-omit-frame-pointer \
	-fno-common -DKBUILD_MODNAME=$(DRIVER)
CC_KFLAGS += $(call vm_check_gcc,-falign-loops=2 -falign-jumps=2 -falign-functions=2, \
	-malign-loops=2 -malign-jumps=2 -malign-functions=2)
CC_KFLAGS += $(call vm_check_gcc,-fno-strict-aliasing,)
CC_KFLAGS += -DVMX86_TOOLS

ifeq ($(MACHINE),x86_64)
CC_KFLAGS += -mno-red-zone -mcmodel=kernel
else
# Gcc 3.0 deprecates -m486 --hpreg
CC_KFLAGS += -DCPU=586 $(call vm_check_gcc,-march=i586,-m486)
endif

CC_OPTS := -g3 -O2 -DMODULE $(GLOBAL_DEFS) $(CC_KFLAGS) $(CC_WARNINGS)

INCLUDE := -I. -I$(HEADER_DIR)
INCLUDE += $(shell $(CC) $(CC_OPTS) $(INCLUDE) \
	-E $(SRCROOT)/autoconf/geninclude.c \
	| sed -n -e 's!^APATH!-I$(HEADER_DIR)/asm!p')

CC_OPTS += $(call vm_check_build, $(SRCROOT)/autoconf/epoll.c, -DVMW_HAVE_EPOLL, )

OBJS := vmci_drv.o
OBJS += vmciDatagram.o
OBJS += vmciEvent.o
OBJS += vmciProcess.o
OBJS += vmciUtil.o
OBJS += vmciKernelIf.o
OBJS += vmciGuestKernelIf.o
OBJS += vmciQueuePair.o
OBJS += kernelStubsLinux.o
OBJS += vmciGuestDs.o

CFLAGS := $(CC_OPTS) $(INCLUDE)
LIBS :=

default: all

all: $(DRIVER).o

$(DRIVERNAME): $(OBJS)
	$(LD) -r -o $@ $^

$(DRIVER) $(DRIVER).o ../$(DRIVER).o: $(DRIVERNAME)
	cp -f $< $@

auto-build: ../$(DRIVER).o

clean:
	rm -f $(DRIVERNAME) ../$(DRIVERNAME) $(DRIVER) $(OBJS)

.SILENT:
vmci-only/Makefile0000444000000000000000000000726212025726725013146 0ustar rootroot
#!/usr/bin/make -f
##########################################################
# Copyright (C) 1998 VMware, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation version 2 and no later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
##########################################################

####
#### VMware kernel module Makefile to be distributed externally
####

####
#### SRCROOT _must_ be a relative path.
####
SRCROOT = .

VM_UNAME = $(shell uname -r)

# Header directory for the running kernel
HEADER_DIR = /lib/modules/$(VM_UNAME)/build/include

BUILD_DIR = $(HEADER_DIR)/..

DRIVER := vmci
PRODUCT := @PRODUCT@

# Grep program
GREP = /bin/grep

vm_check_gcc = $(shell if $(CC) $(1) -S -o /dev/null -xc /dev/null \
	> /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi)
vm_check_file = $(shell if test -f $(1); then echo "yes"; else echo "no"; fi)

ifndef VM_KBUILD
VM_KBUILD := no
ifeq ($(call vm_check_file,$(BUILD_DIR)/Makefile), yes)
ifneq ($(call vm_check_file,$(BUILD_DIR)/Rules.make), yes)
VM_KBUILD := 26
endif
endif
export VM_KBUILD
endif

ifndef VM_KBUILD_SHOWN
ifeq ($(VM_KBUILD), no)
VM_DUMMY := $(shell echo >&2 "Using standalone build system.")
else
ifeq ($(VM_KBUILD), 24)
VM_DUMMY := $(shell echo >&2 "Using 2.4.x kernel build system.")
else
VM_DUMMY := $(shell echo >&2 "Using 2.6.x kernel build system.")
endif
endif
VM_KBUILD_SHOWN := yes
export VM_KBUILD_SHOWN
endif

ifneq ($(VM_KBUILD), no)

VMCCVER := $(shell $(CC) -dumpversion)

# If there is no version defined, we are in toplevel pass, not yet in kernel makefiles...
ifeq ($(VERSION),)

ifeq ($(VM_KBUILD), 24)
DRIVER_KO := $(DRIVER).o
else
DRIVER_KO := $(DRIVER).ko
endif

.PHONY: $(DRIVER_KO)

auto-build: $(DRIVER_KO)
	cp -f $< $(SRCROOT)/../$(DRIVER).o

# $(DRIVER_KO) is a phony target, so compare file times explicitly
$(DRIVER): $(DRIVER_KO)
	if [ $< -nt $@ ] || [ ! -e $@ ] ; then cp -f $< $@; fi

# Pass gcc version down the chain, so we can detect if kernel attempts to use unapproved compiler
VM_CCVER := $(VMCCVER)
export VM_CCVER
VM_CC := $(CC)
export VM_CC

MAKEOVERRIDES := $(filter-out CC=%,$(MAKEOVERRIDES))

#
# Define a setup target that gets built before the actual driver.
# This target may not be used at all, but if it is then it will be defined
# in Makefile.kernel
#
prebuild:: ;
postbuild:: ;

$(DRIVER_KO): prebuild
	make -C $(BUILD_DIR) SUBDIRS=$$PWD SRCROOT=$$PWD/$(SRCROOT) modules
	make -C $$PWD SRCROOT=$$PWD/$(SRCROOT) postbuild

endif

vm_check_build = $(shell if $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) \
	$(CPPFLAGS) $(CFLAGS) $(CFLAGS_KERNEL) $(LINUXINCLUDE) \
	$(EXTRA_CFLAGS) -Iinclude2/asm/mach-default \
	-DKBUILD_BASENAME=\"$(DRIVER)\" \
	-Werror -S -o /dev/null -xc $(1) \
	> /dev/null 2>&1; then echo "$(2)"; else echo "$(3)"; fi)

CC_WARNINGS := -Wall -Wstrict-prototypes
CC_OPTS := $(GLOBAL_DEFS) $(CC_WARNINGS) -DVMW_USING_KBUILD
ifdef VMX86_DEVEL
CC_OPTS += -DVMX86_DEVEL
endif
ifdef VMX86_DEBUG
CC_OPTS += -DVMX86_DEBUG
endif

include $(SRCROOT)/Makefile.kernel

ifdef TOPDIR
ifeq ($(VM_KBUILD), 24)

O_TARGET := $(DRIVER).o

obj-y := $($(DRIVER)-y)

include $(TOPDIR)/Rules.make
endif
endif

else

include $(SRCROOT)/Makefile.normal

endif

#.SILENT:
vmci-only/COPYING0000444000000000000000000004310312025726724012532 0ustar rootroot

		    GNU GENERAL PUBLIC LICENSE
		       Version 2, June 1991

 Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

			    Preamble

  The licenses for most software are designed to take away your
freedom to share and change it.  By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users.  This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it.  (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.)  You can apply it to
your programs, too.

  When we speak of free software, we are referring to freedom, not
price.  Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.

  To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.

  For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have.  You must make sure that they, too, receive or can get the
source code.  And you must show them these terms so they know their
rights.

  We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.

  Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software.  If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.

  Finally, any free program is threatened constantly by software
patents.
We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. 
If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. 
You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. 
If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. 
If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License.