==> vmci-only/shared/vm_basic_types.h <==
/*********************************************************
* Copyright (C) 1998-2009 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
*
* vm_basic_types.h --
*
* basic data types.
*/
#ifndef _VM_BASIC_TYPES_H_
#define _VM_BASIC_TYPES_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_VMKDRIVERS
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMIROM
#include "includeCheck.h"
/* Under strict ANSI (i.e. the Xserver build), X defines Bool itself, so don't redefine it. */
#if !defined(_XTYPEDEF_BOOL) && \
(!defined(__STRICT_ANSI__) || defined(__FreeBSD__) || defined(__MINGW32__))
#define _XTYPEDEF_BOOL
typedef char Bool;
#endif
#ifndef FALSE
#define FALSE 0
#endif
#ifndef TRUE
#define TRUE 1
#endif
#define IsBool(x) (((x) & ~1) == 0)
#define IsBool2(x, y) ((((x) | (y)) & ~1) == 0)
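/*
 * Illustrative sketch: IsBool accepts only the canonical truth values,
 * so IsBool(0) and IsBool(1) hold while IsBool(2) does not, and
 * IsBool2(x, y) checks two values at once by OR-ing them first.
 */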
/*
* Macros __i386__ and __ia64 are intrinsically defined by GCC
*/
#if defined _MSC_VER && defined _M_X64
# define __x86_64__
#elif defined _MSC_VER && defined _M_IX86
# define __i386__
#endif
#ifdef __i386__
#define VM_I386
#endif
#ifdef __x86_64__
#define VM_X86_64
#define VM_I386
#define vm_x86_64 (1)
#else
#define vm_x86_64 (0)
#endif
#ifdef _MSC_VER
#pragma warning (3 :4505) // unreferenced local function
#pragma warning (disable :4018) // signed/unsigned mismatch
#pragma warning (disable :4761) // integral size mismatch in argument; conversion supplied
#pragma warning (disable :4305) // truncation from 'const int' to 'short'
#pragma warning (disable :4244) // conversion from 'unsigned short' to 'unsigned char'
#pragma warning (disable :4267) // truncation of 'size_t'
#pragma warning (disable :4146) // unary minus operator applied to unsigned type, result still unsigned
#pragma warning (disable :4142) // benign redefinition of type
#endif
#if defined(__APPLE__) || defined(HAVE_STDINT_H)
/*
* TODO: This is a C99 standard header. We should be able to test for
* #if __STDC_VERSION__ >= 199901L, but that breaks the Netware build
* (which doesn't have stdint.h).
*/
#include <stdint.h>
typedef uint64_t uint64;
typedef int64_t int64;
typedef uint32_t uint32;
typedef int32_t int32;
typedef uint16_t uint16;
typedef int16_t int16;
typedef uint8_t uint8;
typedef int8_t int8;
/*
* Note: C does not specify whether char is signed or unsigned, and
 * both gcc and msvc implement processor-specific signedness. As a
 * result, there are three distinct types:
* typeof(char) != typeof(signed char) != typeof(unsigned char)
*
* Be careful here, because gcc (4.0.1 and others) likes to warn about
* conversions between signed char * and char *.
*/
#else /* !HAVE_STDINT_H */
#ifdef _MSC_VER
typedef unsigned __int64 uint64;
typedef signed __int64 int64;
#elif __GNUC__
/* The Xserver source compiles with -ansi -pedantic */
# if !defined(__STRICT_ANSI__) || defined(__FreeBSD__)
# if defined(VM_X86_64)
typedef unsigned long uint64;
typedef long int64;
# else
typedef unsigned long long uint64;
typedef long long int64;
# endif
# endif
#else
# error - Need compiler define for int64/uint64
#endif /* _MSC_VER */
typedef unsigned int uint32;
typedef unsigned short uint16;
typedef unsigned char uint8;
typedef int int32;
typedef short int16;
typedef signed char int8;
#endif /* HAVE_STDINT_H */
/*
* FreeBSD (for the tools build) unconditionally defines these in
* sys/inttypes.h so don't redefine them if this file has already
* been included. [greg]
*
* This applies to Solaris as well.
*/
/*
* Before trying to do the includes based on OS defines, see if we can use
* feature-based defines to get as much functionality as possible
*/
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_INTTYPES_H
#include <sys/inttypes.h>
#endif
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#ifdef __FreeBSD__
#include <sys/param.h> /* For __FreeBSD_version */
#endif
#if !defined(USING_AUTOCONF)
# if defined(__FreeBSD__) || defined(sun)
# ifdef KLD_MODULE
# include <sys/types.h>
# else
# if __FreeBSD_version >= 500043
# if !defined(VMKERNEL)
# include <inttypes.h>
# endif
# include <sys/types.h>
# else
# include <sys/inttypes.h>
# endif
# endif
# elif defined __APPLE__
# if KERNEL
# include <sys/unistd.h>
# include <sys/types.h> /* mostly for size_t */
# include <stdint.h>
# else
# include <unistd.h>
# include <inttypes.h>
# include <stdlib.h>
# include <stdint.h>
# endif
# elif defined __ANDROID__
# include <stdint.h>
# else
# if !defined(__intptr_t_defined) && !defined(intptr_t)
# ifdef VM_I386
# define __intptr_t_defined
# ifdef VM_X86_64
typedef int64 intptr_t;
# else
typedef int32 intptr_t;
# endif
# elif defined(__arm__)
typedef int32 intptr_t;
# endif
# endif
# ifndef _STDINT_H
# ifdef VM_I386
# ifdef VM_X86_64
typedef uint64 uintptr_t;
# else
typedef uint32 uintptr_t;
# endif
# elif defined(__arm__)
typedef uint32 uintptr_t;
# endif
# endif
# endif
#endif
/*
* Time
* XXX These should be cleaned up. -- edward
*/
typedef int64 VmTimeType; /* Time in microseconds */
typedef int64 VmTimeRealClock; /* Real clock kept in microseconds */
typedef int64 VmTimeVirtualClock; /* Virtual Clock kept in CPU cycles */
/*
* Printf format specifiers for size_t and 64-bit number.
* Use them like this:
* printf("%"FMT64"d\n", big);
*
* FMTH is for handles/fds.
*/
#ifdef _MSC_VER
#define FMT64 "I64"
#ifdef VM_X86_64
#define FMTSZ "I64"
#define FMTPD "I64"
#define FMTH "I64"
#else
#define FMTSZ "I"
#define FMTPD "I"
#define FMTH "I"
#endif
#elif defined __APPLE__
/* Mac OS hosts use the same formatters for 32- and 64-bit. */
#define FMT64 "ll"
#if KERNEL
#define FMTSZ "l"
#else
#define FMTSZ "z"
#endif
#define FMTPD "l"
#define FMTH ""
#elif __GNUC__
#define FMTH ""
#if defined(N_PLAT_NLM) || defined(sun) || \
(defined(__FreeBSD__) && (__FreeBSD__ + 0) && ((__FreeBSD__ + 0) < 5))
/*
* Why (__FreeBSD__ + 0)? See bug 141008.
* Yes, we really need to test both (__FreeBSD__ + 0) and
* ((__FreeBSD__ + 0) < 5). No, we can't remove "+ 0" from
* ((__FreeBSD__ + 0) < 5).
*/
#ifdef VM_X86_64
#define FMTSZ "l"
#define FMTPD "l"
#else
#define FMTSZ ""
#define FMTPD ""
#endif
#elif defined(__linux__) \
|| (defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE >= 200112L) \
|| (defined(_POSIX_VERSION) && _POSIX_VERSION >= 200112L) \
|| (defined(_POSIX2_VERSION) && _POSIX2_VERSION >= 200112L)
/* BSD, Linux */
#define FMTSZ "z"
#if defined(VM_X86_64)
#define FMTPD "l"
#else
#define FMTPD ""
#endif
#else
/* Systems with a pre-C99 libc */
#define FMTSZ "Z"
#ifdef VM_X86_64
#define FMTPD "l"
#else
#define FMTPD ""
#endif
#endif
#ifdef VM_X86_64
#define FMT64 "l"
#elif defined(sun) || defined(__FreeBSD__)
#define FMT64 "ll"
#else
#define FMT64 "L"
#endif
#else
#error - Need compiler define for FMT64 and FMTSZ
#endif
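/*
 * Illustrative usage sketch (assumes a hosted libc with printf):
 *
 *    uint64 big = 1234567890123456789ULL;
 *    size_t len = sizeof big;
 *    printf("big = %"FMT64"u, len = %"FMTSZ"u bytes\n", big, len);
 */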
/*
* Suffix for 64-bit constants. Use it like this:
* CONST64(0x7fffffffffffffff) for signed or
* CONST64U(0x7fffffffffffffff) for unsigned.
*
* 2004.08.30(thutt):
* The vmcore/asm64/gen* programs are compiled as 32-bit
* applications, but must handle 64 bit constants. If the
* 64-bit-constant defining macros are already defined, the
* definition will not be overwritten.
*/
#if !defined(CONST64) || !defined(CONST64U)
#ifdef _MSC_VER
#define CONST64(c) c##I64
#define CONST64U(c) c##uI64
#elif defined __APPLE__
#define CONST64(c) c##LL
#define CONST64U(c) c##uLL
#elif __GNUC__
#ifdef VM_X86_64
#define CONST64(c) c##L
#define CONST64U(c) c##uL
#else
#define CONST64(c) c##LL
#define CONST64U(c) c##uLL
#endif
#else
#error - Need compiler define for CONST64
#endif
#endif
/*
* Use CONST3264/CONST3264U if you want a constant to be
* treated as a 32-bit number on 32-bit compiles and
* a 64-bit number on 64-bit compiles. Useful in the case
* of shifts, like (CONST3264U(1) << x), where x could be
* more than 31 on a 64-bit compile.
*/
#ifdef VM_X86_64
#define CONST3264(a) CONST64(a)
#define CONST3264U(a) CONST64U(a)
#else
#define CONST3264(a) (a)
#define CONST3264U(a) (a)
#endif
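/*
 * Illustrative sketch: building a low-bit mask that stays correct on
 * both 32- and 64-bit compiles, where "bits" (a hypothetical variable)
 * may exceed 31 only on 64-bit builds:
 *
 *    uintptr_t mask = (CONST3264U(1) << bits) - 1;
 */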
#define MIN_INT8 ((int8)0x80)
#define MAX_INT8 ((int8)0x7f)
#define MIN_UINT8 ((uint8)0)
#define MAX_UINT8 ((uint8)0xff)
#define MIN_INT16 ((int16)0x8000)
#define MAX_INT16 ((int16)0x7fff)
#define MIN_UINT16 ((uint16)0)
#define MAX_UINT16 ((uint16)0xffff)
#define MIN_INT32 ((int32)0x80000000)
#define MAX_INT32 ((int32)0x7fffffff)
#define MIN_UINT32 ((uint32)0)
#define MAX_UINT32 ((uint32)0xffffffff)
#define MIN_INT64 (CONST64(0x8000000000000000))
#define MAX_INT64 (CONST64(0x7fffffffffffffff))
#define MIN_UINT64 (CONST64U(0))
#define MAX_UINT64 (CONST64U(0xffffffffffffffff))
typedef uint8 *TCA; /* Pointer into TC (usually). */
/*
* Type big enough to hold an integer between 0..100
*/
typedef uint8 Percent;
#define AsPercent(v) ((Percent)(v))
typedef uintptr_t VA;
typedef uintptr_t VPN;
typedef uint64 PA;
typedef uint32 PPN;
typedef uint64 PhysMemOff;
typedef uint64 PhysMemSize;
/* The Xserver source compiles with -ansi -pedantic */
#ifndef __STRICT_ANSI__
typedef uint64 BA;
#endif
typedef uint32 BPN;
typedef uint32 PageNum;
typedef unsigned MemHandle;
typedef unsigned int IoHandle;
typedef int32 World_ID;
/* !! Do not alter the definition of INVALID_WORLD_ID without ensuring
 * that the values defined in both bora/public/vm_basic_types.h and
 * lib/vprobe/vm_basic_types.h are the same. Additionally,
 * VMK_INVALID_WORLD_ID in vmkapi_world.h must be defined with the
 * same value.
 */
#define INVALID_WORLD_ID ((World_ID)0)
typedef World_ID User_CartelID;
#define INVALID_CARTEL_ID INVALID_WORLD_ID
typedef User_CartelID User_SessionID;
#define INVALID_SESSION_ID INVALID_CARTEL_ID
typedef User_CartelID User_CartelGroupID;
#define INVALID_CARTELGROUP_ID INVALID_CARTEL_ID
typedef uint32 Worldlet_ID;
#define INVALID_WORLDLET_ID ((Worldlet_ID)-1)
/* The Xserver source compiles with -ansi -pedantic */
#ifndef __STRICT_ANSI__
typedef uint64 MA;
typedef uint32 MPN;
#endif
/*
* This type should be used for variables that contain sector
* position/quantity.
*/
typedef uint64 SectorType;
/*
* Linear address
*/
typedef uintptr_t LA;
typedef uintptr_t LPN;
#define LA_2_LPN(_la) ((_la) >> PAGE_SHIFT)
#define LPN_2_LA(_lpn) ((_lpn) << PAGE_SHIFT)
#define LAST_LPN ((((LA) 1) << (8 * sizeof(LA) - PAGE_SHIFT)) - 1)
#define LAST_LPN32 ((((LA32)1) << (8 * sizeof(LA32) - PAGE_SHIFT)) - 1)
#define LAST_LPN64 ((((LA64)1) << (8 * sizeof(LA64) - PAGE_SHIFT)) - 1)
/* Valid bits in a LPN. */
#define LPN_MASK LAST_LPN
#define LPN_MASK32 LAST_LPN32
#define LPN_MASK64 LAST_LPN64
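/*
 * Illustrative round trip, assuming the conventional PAGE_SHIFT of 12
 * (PAGE_SHIFT is defined elsewhere):
 *
 *    LA la = 0x12345678;
 *    LPN lpn = LA_2_LPN(la);    // 0x12345
 *    LA back = LPN_2_LA(lpn);   // 0x12345000, offset bits dropped
 */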
/*
 * On 64-bit platforms, address and page number types default
 * to 64 bits. When we need to represent a 32-bit address, we use
 * the types defined below.
 *
 * On 32-bit platforms, the following types are the same as the
 * default types.
*/
typedef uint32 VA32;
typedef uint32 VPN32;
typedef uint32 LA32;
typedef uint32 LPN32;
typedef uint32 PA32;
typedef uint32 PPN32;
typedef uint32 MA32;
typedef uint32 MPN32;
/*
 * On 64-bit platforms, the following types are the same as the
* default types.
*/
typedef uint64 VA64;
typedef uint64 VPN64;
typedef uint64 LA64;
typedef uint64 LPN64;
typedef uint64 PA64;
typedef uint64 PPN64;
typedef uint64 MA64;
typedef uint64 MPN64;
/*
* VA typedefs for user world apps.
*/
typedef VA32 UserVA32;
typedef VA64 UserVA64;
typedef UserVA64 UserVAConst; /* Userspace ptr to data that we may only read. */
typedef UserVA32 UserVA32Const; /* Userspace ptr to data that we may only read. */
typedef UserVA64 UserVA64Const; /* Used by 64-bit syscalls until conversion is finished. */
#ifdef VMKERNEL
typedef UserVA64 UserVA;
#else
typedef void * UserVA;
#endif
/*
 * Maximal possible PPN value (including error values) that PhysMem can
 * handle. Must be at least as large as MAX_PPN, which is the maximum PPN
* for any region other than buserror.
*/
#define PHYSMEM_MAX_PPN ((PPN)0xffffffff)
#define MAX_PPN ((PPN)0x3fffffff) /* Maximal observable PPN value. */
#define INVALID_PPN ((PPN)0xffffffff)
#define APIC_INVALID_PPN ((PPN)0xfffffffe)
#define INVALID_BPN ((BPN)0x3fffffff)
#define RESERVED_MPN ((MPN) 0)
#define INVALID_MPN ((MPN)-1)
#define MEMREF_MPN ((MPN)-2)
#define RELEASED_MPN ((MPN)-3)
/* 0xfffffffc to account for special MPNs defined above. */
#define MAX_MPN ((MPN)0xfffffffc) /* 44 bits of address space. */
#define INVALID_LPN ((LPN)-1)
#define INVALID_VPN ((VPN)-1)
#define INVALID_LPN64 ((LPN64)-1)
#define INVALID_PAGENUM ((PageNum)-1)
#define INVALID_MPN64 ((MPN64)-1)
/*
* Format modifier for printing VA, LA, and VPN.
* Use them like this: Log("%#"FMTLA"x\n", laddr)
*/
#if defined(VMM) || defined(FROBOS64) || vm_x86_64 || defined __APPLE__
# define FMTLA "l"
# define FMTVA "l"
# define FMTVPN "l"
#else
# define FMTLA ""
# define FMTVA ""
# define FMTVPN ""
#endif
#ifndef EXTERN
#define EXTERN extern
#endif
#define CONST const
#ifndef INLINE
# ifdef _MSC_VER
# define INLINE __inline
# else
# define INLINE inline
# endif
#endif
/*
* Annotation for data that may be exported into a DLL and used by other
* apps that load that DLL and import the data.
*/
#if defined(_WIN32) && defined(VMX86_IMPORT_DLLDATA)
# define VMX86_EXTERN_DATA extern __declspec(dllimport)
#else // !_WIN32
# define VMX86_EXTERN_DATA extern
#endif
#if defined(_WIN32) && !defined(VMX86_NO_THREADS)
#define THREADSPECIFIC __declspec(thread)
#else
#define THREADSPECIFIC
#endif
/*
* Due to the wonderful "registry redirection" feature introduced in
* 64-bit Windows, if you access any key under HKLM\Software in 64-bit
* code, you need to open/create/delete that key with
 * VMW_KEY_WOW64_32KEY if you want a consistent view with 32-bit code.
*/
#ifdef _WIN32
#ifdef _WIN64
#define VMW_KEY_WOW64_32KEY KEY_WOW64_32KEY
#else
#define VMW_KEY_WOW64_32KEY 0x0
#endif
#endif
/*
* At present, we effectively require a compiler that is at least
 * gcc-3.3 (circa 2003). Enforce this here; various things below
* this line depend upon it.
*
* In practice, most things presently compile with gcc-4.1 or gcc-4.4.
* The various linux kernel modules may use older (gcc-3.3) compilers.
*/
#if defined __GNUC__ && (__GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3))
#error "gcc version is to old to compile assembly, need gcc-3.3 or better"
#endif
/*
* Consider the following reasons functions are inlined:
*
* 1) inlined for performance reasons
* 2) inlined because it's a single-use function
*
* Functions which meet only condition 2 should be marked with this
 * inline macro; it is not critical to inline them (though doing so
 * saves code space and runtime), so when other callers are added,
 * the inline-ness should be removed.
*/
#if defined __GNUC__
/*
* Starting at version 3.3, gcc does not always inline functions marked
* 'inline' (it depends on their size and other factors). To force gcc
* to inline a function, one must use the __always_inline__ attribute.
* This attribute should be used sparingly and with care. It is usually
 * preferable to let gcc make its own inlining decisions.
*/
# define INLINE_ALWAYS INLINE __attribute__((__always_inline__))
#else
# define INLINE_ALWAYS INLINE
#endif
#define INLINE_SINGLE_CALLER INLINE_ALWAYS
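/*
 * Illustrative sketch with a hypothetical single-use helper (assumes
 * memset is available):
 *
 *    static INLINE_SINGLE_CALLER void
 *    FooReset(Foo *f)   // called from exactly one place
 *    {
 *       memset(f, 0, sizeof *f);
 *    }
 */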
/*
 * Used when a hard guarantee of no inlining is needed. Very few
* instances need this since the absence of INLINE is a good hint
* that gcc will not do inlining.
*/
#if defined(__GNUC__) && (defined(VMM) || defined (VMKERNEL) || defined (VMKBOOT))
#define ABSOLUTELY_NOINLINE __attribute__((__noinline__))
#endif
/*
* Used when a function has no effects except the return value and the
 * return value depends only on the parameters and/or global variables.
* Such a function can be subject to common subexpression elimination
* and loop optimization just as an arithmetic operator would be.
*/
#if defined(__GNUC__) && (defined(VMM) || defined (VMKERNEL))
#define SIDE_EFFECT_FREE __attribute__((__pure__))
#else
#define SIDE_EFFECT_FREE
#endif
/*
* Attributes placed on function declarations to tell the compiler
* that the function never returns.
*/
#ifdef _MSC_VER
#define NORETURN __declspec(noreturn)
#elif defined __GNUC__
#define NORETURN __attribute__((__noreturn__))
#else
#define NORETURN
#endif
/*
* Static profiling hints for functions.
* A function can be either hot, cold, or neither.
* It is an error to specify both hot and cold for the same function.
* Note that there is no annotation for "neither."
*/
#if defined __GNUC__ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define HOT __attribute__((hot))
#define COLD __attribute__((cold))
#else
#define HOT
#define COLD
#endif
/*
* Branch prediction hints:
* LIKELY(exp) - Expression exp is likely TRUE.
* UNLIKELY(exp) - Expression exp is likely FALSE.
* Usage example:
* if (LIKELY(excCode == EXC_NONE)) {
* or
* if (UNLIKELY(REAL_MODE(vc))) {
*
 * We know how to provide branch prediction hints on gcc3 and later
 * (hopefully); for all other compilers we do nothing.
*/
#if defined __GNUC__
/*
* gcc3 uses __builtin_expect() to inform the compiler of an expected value.
* We use this to inform the static branch predictor. The '!!' in LIKELY
* will convert any !=0 to a 1.
*/
#define LIKELY(_exp) __builtin_expect(!!(_exp), 1)
#define UNLIKELY(_exp) __builtin_expect((_exp), 0)
#else
#define LIKELY(_exp) (_exp)
#define UNLIKELY(_exp) (_exp)
#endif
/*
* GCC's argument checking for printf-like functions
* This is conditional until we have replaced all `"%x", void *'
* with `"0x%08x", (uint32) void *'. Note that %p prints different things
* on different platforms. Argument checking is enabled for the
* vmkernel, which has already been cleansed.
*
* fmtPos is the position of the format string argument, beginning at 1
* varPos is the position of the variable argument, beginning at 1
*/
#if defined(__GNUC__)
# define PRINTF_DECL(fmtPos, varPos) __attribute__((__format__(__printf__, fmtPos, varPos)))
#else
# define PRINTF_DECL(fmtPos, varPos)
#endif
#if defined(__GNUC__)
# define SCANF_DECL(fmtPos, varPos) __attribute__((__format__(__scanf__, fmtPos, varPos)))
#else
# define SCANF_DECL(fmtPos, varPos)
#endif
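/*
 * Illustrative declaration of a hypothetical logging function: the
 * format string is argument 2 and the variable arguments begin at 3,
 * so gcc can type-check the argument list against the format.
 *
 *    void LogWrite(int level, const char *fmt, ...) PRINTF_DECL(2, 3);
 */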
/*
* UNUSED_PARAM should surround the parameter name and type declaration,
* e.g. "int MyFunction(int var1, UNUSED_PARAM(int var2))"
*
*/
#ifndef UNUSED_PARAM
# if defined(__GNUC__)
# define UNUSED_PARAM(_parm) _parm __attribute__((__unused__))
# else
# define UNUSED_PARAM(_parm) _parm
# endif
#endif
#ifndef UNUSED_TYPE
// XXX _Pragma would be better but doesn't always work right now.
# define UNUSED_TYPE(_parm) UNUSED_PARAM(_parm)
#endif
#ifndef UNUSED_VARIABLE
// XXX is there a better way?
# define UNUSED_VARIABLE(_var) (void)_var
#endif
/*
* gcc can warn us if we're ignoring returns
*/
#if defined(__GNUC__)
# define MUST_CHECK_RETURN __attribute__((warn_unused_result))
#else
# define MUST_CHECK_RETURN
#endif
/*
* ALIGNED specifies minimum alignment in "n" bytes.
*/
#ifdef __GNUC__
#define ALIGNED(n) __attribute__((__aligned__(n)))
#else
#define ALIGNED(n)
#endif
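/*
 * Illustrative use, e.g. aligning a hypothetical buffer to a 64-byte
 * cache line:
 *
 *    static uint8 buffer[256] ALIGNED(64);
 */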
/*
* Once upon a time, this was used to silence compiler warnings that
* get generated when the compiler thinks that a function returns
* when it is marked noreturn. Don't do it. Use NOT_REACHED().
*/
#define INFINITE_LOOP() do { } while (1)
/*
* On FreeBSD (for the tools build), size_t is typedef'd if _BSD_SIZE_T_
* is defined. Use the same logic here so we don't define it twice. [greg]
*/
#ifdef __FreeBSD__
# ifdef _BSD_SIZE_T_
# undef _BSD_SIZE_T_
# ifdef VM_I386
# ifdef VM_X86_64
typedef uint64 size_t;
# else
typedef uint32 size_t;
# endif
# endif /* VM_I386 */
# endif
# ifdef _BSD_SSIZE_T_
# undef _BSD_SSIZE_T_
# ifdef VM_I386
# ifdef VM_X86_64
typedef int64 ssize_t;
# else
typedef int32 ssize_t;
# endif
# endif /* VM_I386 */
# endif
#else
# if !defined(_SIZE_T) && !defined(_SIZE_T_DEFINED)
# ifdef VM_I386
# define _SIZE_T
# ifdef VM_X86_64
typedef uint64 size_t;
# else
typedef uint32 size_t;
# endif
# elif defined(__arm__)
# define _SIZE_T
typedef uint32 size_t;
# endif
# endif
# if !defined(FROBOS) && !defined(_SSIZE_T) && !defined(_SSIZE_T_) && \
!defined(ssize_t) && !defined(__ssize_t_defined) && \
!defined(_SSIZE_T_DECLARED) && !defined(_SSIZE_T_DEFINED) && \
!defined(_SSIZE_T_DEFINED_)
# ifdef VM_I386
# define _SSIZE_T
# define __ssize_t_defined
# define _SSIZE_T_DECLARED
# ifdef VM_X86_64
typedef int64 ssize_t;
# else
typedef int32 ssize_t;
# endif
# elif defined(__arm__)
# define _SSIZE_T
# define __ssize_t_defined
# define _SSIZE_T_DECLARED
typedef int32 ssize_t;
# endif
# endif
#endif
/*
* Format modifier for printing pid_t. On sun the pid_t is a ulong, but on
* Linux it's an int.
* Use this like this: printf("The pid is %"FMTPID".\n", pid);
*/
#ifdef sun
# ifdef VM_X86_64
# define FMTPID "d"
# else
# define FMTPID "lu"
# endif
#else
# define FMTPID "d"
#endif
/*
* Format modifier for printing uid_t. On Solaris 10 and earlier, uid_t
* is a ulong, but on other platforms it's an unsigned int.
* Use this like this: printf("The uid is %"FMTUID".\n", uid);
*/
#if defined(sun) && !defined(SOL11)
# ifdef VM_X86_64
# define FMTUID "u"
# else
# define FMTUID "lu"
# endif
#else
# define FMTUID "u"
#endif
/*
* Format modifier for printing mode_t. On sun the mode_t is a ulong, but on
* Linux it's an int.
* Use this like this: printf("The mode is %"FMTMODE".\n", mode);
*/
#ifdef sun
# ifdef VM_X86_64
# define FMTMODE "o"
# else
# define FMTMODE "lo"
# endif
#else
# define FMTMODE "o"
#endif
/*
* Format modifier for printing time_t. Most platforms define a time_t to be
* a long int, but on FreeBSD (as of 5.0, it seems), the time_t is a signed
* size quantity. Refer to the definition of FMTSZ to see why we need silly
* preprocessor arithmetic.
* Use this like this: printf("The mode is %"FMTTIME".\n", time);
*/
#if defined(__FreeBSD__) && (__FreeBSD__ + 0) && ((__FreeBSD__ + 0) >= 5)
# define FMTTIME FMTSZ"d"
#else
# if defined(_MSC_VER)
# ifndef _SAFETIME_H_
# if (_MSC_VER < 1400) || defined(_USE_32BIT_TIME_T)
# define FMTTIME "ld"
# else
# define FMTTIME FMT64"d"
# endif
# else
# ifndef FMTTIME
# error "safetime.h did not define FMTTIME"
# endif
# endif
# else
# define FMTTIME "ld"
# endif
#endif
#ifdef __APPLE__
/*
* Format specifier for all these annoying types such as {S,U}Int32
* which are 'long' in 32-bit builds
* and 'int' in 64-bit builds.
*/
# ifdef __LP64__
# define FMTLI ""
# else
# define FMTLI "l"
# endif
/*
* Format specifier for all these annoying types such as NS[U]Integer
* which are 'int' in 32-bit builds
* and 'long' in 64-bit builds.
*/
# ifdef __LP64__
# define FMTIL "l"
# else
# define FMTIL ""
# endif
#endif
/*
* Define MXSemaHandle here so both vmmon and vmx see this definition.
*/
#ifdef _WIN32
typedef uintptr_t MXSemaHandle;
#else
typedef int MXSemaHandle;
#endif
/*
* Define type for poll device handles.
*/
typedef int64 PollDevHandle;
/*
* Define the utf16_t type.
*/
#if defined(_WIN32) && defined(_NATIVE_WCHAR_T_DEFINED)
typedef wchar_t utf16_t;
#else
typedef uint16 utf16_t;
#endif
/*
* Define for point and rectangle types. Defined here so they
* can be used by other externally facing headers in bora/public.
*/
typedef struct VMPoint {
int x, y;
} VMPoint;
#if defined _WIN32 && defined USERLEVEL
struct tagRECT;
typedef struct tagRECT VMRect;
#else
typedef struct VMRect {
int left;
int top;
int right;
int bottom;
} VMRect;
#endif
/*
* ranked locks "everywhere"
*/
typedef uint32 MX_Rank;
#endif /* _VM_BASIC_TYPES_H_ */
==> vmci-only/shared/x86cpuid_asm.h <==
/*********************************************************
* Copyright (C) 2003-2009 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* x86cpuid_asm.h
*
* CPUID-related assembly functions.
*/
#ifndef _X86CPUID_ASM_H_
#define _X86CPUID_ASM_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMCORE
#include "includeCheck.h"
#include "vm_basic_asm.h"
#include "x86cpuid.h"
/*
 * x86-64 Windows doesn't support inline asm, so we have to use these
 * intrinsic functions defined in the compiler. Not all of these are well
 * documented. The compiler dll (c1.dll) contains an array of the names
 * of all the intrinsics minus the leading underscore. Searching around
 * in the ntddk.h file can also be helpful.
 *
 * The declarations for the intrinsic functions were taken from the DDK.
 * Our declarations must match the DDK's, otherwise the 64-bit C++ compiler
 * will complain about second linkage of the intrinsic functions.
* We define the intrinsic using the basic types corresponding to the
* Windows typedefs. This avoids having to include windows header files
* to get to the windows types.
*/
#ifdef _MSC_VER
#ifdef __cplusplus
extern "C" {
#endif
#ifdef VM_X86_64
/*
* intrinsic functions only supported by x86-64 windows as of 2k3sp1
*/
void __cpuid(unsigned int*, unsigned int);
#pragma intrinsic(__cpuid)
#endif /* VM_X86_64 */
#ifdef __cplusplus
}
#endif
#endif /* _MSC_VER */
#ifdef __GNUC__ // {
/*
* Checked against the Intel manual and GCC --hpreg
*
* Need __volatile__ and "memory" since CPUID has a synchronizing effect.
 * Its results may also change at runtime (APIC flag, etc).
*
*/
/*
* %ebx is reserved on i386 PIC. Apple's gcc-5493 (gcc 4.0) compiling
* for x86_64 incorrectly errors out saying %ebx is reserved. This is
* Apple bug 7304232.
*/
#if vm_x86_64 ? (defined __APPLE_CC__ && __APPLE_CC__ == 5493) : defined __PIC__
#if vm_x86_64
/*
* Note that this generates movq %rbx,%rbx; cpuid; xchgq %rbx,%rbx ...
* Unfortunately Apple's assembler does not have .ifnes, and I cannot
* figure out how to do that with .if. If we ever enable this code
* on other 64bit systems, both movq & xchgq should be surrounded by
* .ifnes \"%%rbx\", \"%q1\" & .endif
*/
#define VM_CPUID_BLOCK "movq %%rbx, %q1\n\t" \
"cpuid\n\t" \
"xchgq %%rbx, %q1\n\t"
#define VM_EBX_OUT(reg) "=&r"(reg)
#else
#define VM_CPUID_BLOCK "movl %%ebx, %1\n\t" \
"cpuid\n\t" \
"xchgl %%ebx, %1\n\t"
#define VM_EBX_OUT(reg) "=&rm"(reg)
#endif
#else
#define VM_CPUID_BLOCK "cpuid"
#define VM_EBX_OUT(reg) "=b"(reg)
#endif
static INLINE void
__GET_CPUID(int eax, // IN
CPUIDRegs *regs) // OUT
{
__asm__ __volatile__(
VM_CPUID_BLOCK
: "=a" (regs->eax), VM_EBX_OUT(regs->ebx), "=c" (regs->ecx), "=d" (regs->edx)
: "a" (eax)
: "memory"
);
}
static INLINE void
__GET_CPUID2(int eax, // IN
int ecx, // IN
CPUIDRegs *regs) // OUT
{
__asm__ __volatile__(
VM_CPUID_BLOCK
: "=a" (regs->eax), VM_EBX_OUT(regs->ebx), "=c" (regs->ecx), "=d" (regs->edx)
: "a" (eax), "c" (ecx)
: "memory"
);
}
static INLINE uint32
__GET_EAX_FROM_CPUID(int eax) // IN
{
uint32 ebx;
__asm__ __volatile__(
VM_CPUID_BLOCK
: "=a" (eax), VM_EBX_OUT(ebx)
: "a" (eax)
: "memory", "%ecx", "%edx"
);
return eax;
}
static INLINE uint32
__GET_EBX_FROM_CPUID(int eax) // IN
{
uint32 ebx;
__asm__ __volatile__(
VM_CPUID_BLOCK
: "=a" (eax), VM_EBX_OUT(ebx)
: "a" (eax)
: "memory", "%ecx", "%edx"
);
return ebx;
}
static INLINE uint32
__GET_ECX_FROM_CPUID(int eax) // IN
{
uint32 ecx;
uint32 ebx;
__asm__ __volatile__(
VM_CPUID_BLOCK
: "=a" (eax), VM_EBX_OUT(ebx), "=c" (ecx)
: "a" (eax)
: "memory", "%edx"
);
return ecx;
}
static INLINE uint32
__GET_EDX_FROM_CPUID(int eax) // IN
{
uint32 edx;
uint32 ebx;
__asm__ __volatile__(
VM_CPUID_BLOCK
: "=a" (eax), VM_EBX_OUT(ebx), "=d" (edx)
: "a" (eax)
: "memory", "%ecx"
);
return edx;
}
static INLINE uint32
__GET_EAX_FROM_CPUID4(int ecx) // IN
{
uint32 eax;
uint32 ebx;
__asm__ __volatile__(
VM_CPUID_BLOCK
: "=a" (eax), VM_EBX_OUT(ebx), "=c" (ecx)
: "a" (4), "c" (ecx)
: "memory", "%edx"
);
return eax;
}
#undef VM_CPUID_BLOCK
#undef VM_EBX_OUT
#elif defined(_MSC_VER) // } {
static INLINE void
__GET_CPUID(int input, CPUIDRegs *regs)
{
#ifdef VM_X86_64
__cpuid((unsigned int *)regs, input);
#else
__asm push esi
__asm push ebx
__asm push ecx
__asm push edx
__asm mov eax, input
__asm mov esi, regs
__asm _emit 0x0f __asm _emit 0xa2
__asm mov 0x0[esi], eax
__asm mov 0x4[esi], ebx
__asm mov 0x8[esi], ecx
__asm mov 0xC[esi], edx
__asm pop edx
__asm pop ecx
__asm pop ebx
__asm pop esi
#endif
}
#ifdef VM_X86_64
/*
* No inline assembly in Win64. Implemented in bora/lib/misc in
* cpuidMasm64.asm.
*/
extern void
__GET_CPUID2(int inputEax, int inputEcx, CPUIDRegs *regs);
#else // VM_X86_64
static INLINE void
__GET_CPUID2(int inputEax, int inputEcx, CPUIDRegs *regs)
{
__asm push esi
__asm push ebx
__asm push ecx
__asm push edx
__asm mov eax, inputEax
__asm mov ecx, inputEcx
__asm mov esi, regs
__asm _emit 0x0f __asm _emit 0xa2
__asm mov 0x0[esi], eax
__asm mov 0x4[esi], ebx
__asm mov 0x8[esi], ecx
__asm mov 0xC[esi], edx
__asm pop edx
__asm pop ecx
__asm pop ebx
__asm pop esi
}
#endif
static INLINE uint32
__GET_EAX_FROM_CPUID(int input)
{
#ifdef VM_X86_64
CPUIDRegs regs;
__cpuid((unsigned int *)&regs, input);
return regs.eax;
#else
uint32 output;
//NOT_TESTED();
__asm push ebx
__asm push ecx
__asm push edx
__asm mov eax, input
__asm _emit 0x0f __asm _emit 0xa2
__asm mov output, eax
__asm pop edx
__asm pop ecx
__asm pop ebx
return output;
#endif
}
static INLINE uint32
__GET_EBX_FROM_CPUID(int input)
{
#ifdef VM_X86_64
CPUIDRegs regs;
__cpuid((unsigned int *)&regs, input);
return regs.ebx;
#else
uint32 output;
//NOT_TESTED();
__asm push ebx
__asm push ecx
__asm push edx
__asm mov eax, input
__asm _emit 0x0f __asm _emit 0xa2
__asm mov output, ebx
__asm pop edx
__asm pop ecx
__asm pop ebx
return output;
#endif
}
static INLINE uint32
__GET_ECX_FROM_CPUID(int input)
{
#ifdef VM_X86_64
CPUIDRegs regs;
__cpuid((unsigned int *)&regs, input);
return regs.ecx;
#else
uint32 output;
//NOT_TESTED();
__asm push ebx
__asm push ecx
__asm push edx
__asm mov eax, input
__asm _emit 0x0f __asm _emit 0xa2
__asm mov output, ecx
__asm pop edx
__asm pop ecx
__asm pop ebx
return output;
#endif
}
static INLINE uint32
__GET_EDX_FROM_CPUID(int input)
{
#ifdef VM_X86_64
CPUIDRegs regs;
__cpuid((unsigned int *)&regs, input);
return regs.edx;
#else
uint32 output;
//NOT_TESTED();
__asm push ebx
__asm push ecx
__asm push edx
__asm mov eax, input
__asm _emit 0x0f __asm _emit 0xa2
__asm mov output, edx
__asm pop edx
__asm pop ecx
__asm pop ebx
return output;
#endif
}
#ifdef VM_X86_64
/*
* No inline assembly in Win64. Implemented in bora/lib/misc in
* cpuidMasm64.asm.
*/
extern uint32
__GET_EAX_FROM_CPUID4(int inputEcx);
#else // VM_X86_64
static INLINE uint32
__GET_EAX_FROM_CPUID4(int inputEcx)
{
uint32 output;
//NOT_TESTED();
__asm push ebx
__asm push ecx
__asm push edx
__asm mov eax, 4
__asm mov ecx, inputEcx
__asm _emit 0x0f __asm _emit 0xa2
__asm mov output, eax
__asm pop edx
__asm pop ecx
__asm pop ebx
return output;
}
#endif // VM_X86_64
#else // }
#error
#endif
#define CPUID_FOR_SIDE_EFFECTS() ((void)__GET_EAX_FROM_CPUID(0))
static INLINE void
__GET_CPUID4(int inputEcx, CPUIDRegs *regs)
{
__GET_CPUID2(4, inputEcx, regs);
}
/* The first parameter is used as an rvalue and then as an lvalue. */
#define GET_CPUID(_ax, _bx, _cx, _dx) { \
CPUIDRegs regs; \
__GET_CPUID(_ax, &regs); \
_ax = regs.eax; \
_bx = regs.ebx; \
_cx = regs.ecx; \
_dx = regs.edx; \
}
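/*
 * Illustrative sketch: fetching the feature-information leaf. The
 * first argument is read as the input leaf number, then all four
 * variables receive the CPUID outputs.
 *
 *    uint32 eax = 1, ebx, ecx, edx;
 *    GET_CPUID(eax, ebx, ecx, edx);
 *    // edx now holds the leaf-1 feature bits (FPU, TSC, ...)
 */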
#endif
==> vmci-only/shared/compat_kernel.h <==
/*********************************************************
* Copyright (C) 2004 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_KERNEL_H__
# define __COMPAT_KERNEL_H__
#include <linux/version.h>
#include <linux/kernel.h>
/*
* container_of was introduced in 2.5.28 but it's easier to check like this.
*/
#ifndef container_of
#define container_of(ptr, type, member) ({ \
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - offsetof(type,member) );})
#endif
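/*
 * Illustrative use with a hypothetical structure: recover the outer
 * object from a pointer to one of its members.
 *
 *    struct Outer { int x; struct timer_list timer; };
 *    struct Outer *o = container_of(timerPtr, struct Outer, timer);
 */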
/*
* vsnprintf became available in 2.4.10. For older kernels, just fall back on
* vsprintf.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 10)
#define vsnprintf(str, size, fmt, args) vsprintf(str, fmt, args)
#endif
#endif /* __COMPAT_KERNEL_H__ */
==> vmci-only/shared/compat_mm.h <==
/*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_MM_H__
# define __COMPAT_MM_H__
#include <linux/mm.h>
/* The get_page() API appeared in 2.3.7 --hpreg */
/* Sometime during development it became a function instead of a macro --petr */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) && !defined(get_page)
# define get_page(_page) atomic_inc(&(_page)->count)
/* The __free_page() API is exported in 2.1.67 --hpreg */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 67)
# define put_page __free_page
# else
# include "compat_page.h"
# define page_to_phys(_page) (page_to_pfn(_page) << PAGE_SHIFT)
# define put_page(_page) free_page(page_to_phys(_page))
# endif
#endif
/* page_count() is a 2.4.0 invention. Unfortunately it is unavailable in some
 * RedHat kernels (for example 2.4.21-4-RHEL3). */
/* It has been a function since 2.6.0, and hopefully RedHat will not play silly
 * games with mm_inline.h again... */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && !defined(page_count)
# define page_count(page) atomic_read(&(page)->count)
#endif
/* 2.2.x uses 0 instead of some define */
#ifndef NOPAGE_SIGBUS
#define NOPAGE_SIGBUS (0)
#endif
/* 2.2.x does not have HIGHMEM support */
#ifndef GFP_HIGHUSER
#define GFP_HIGHUSER (GFP_USER)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
#include "compat_page.h"
static inline struct page * alloc_pages(unsigned int gfp_mask, unsigned int order)
{
unsigned long addr;
addr = __get_free_pages(gfp_mask, order);
if (!addr) {
return NULL;
}
return virt_to_page(addr);
}
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#endif
/*
* In 2.4.14, the logic behind the UnlockPage macro was moved to the
* unlock_page() function. Later (in 2.5.12), the UnlockPage macro was removed
* altogether, and nowadays everyone uses unlock_page().
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 14)
#define compat_unlock_page(page) UnlockPage(page)
#else
#define compat_unlock_page(page) unlock_page(page)
#endif
/*
* In 2.4.10, vmtruncate was changed from returning void to returning int.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 10)
#define compat_vmtruncate(inode, size) \
({ \
int result = 0; \
vmtruncate(inode, size); \
result; \
})
#else
#define compat_vmtruncate(inode, size) vmtruncate(inode, size)
#endif
#endif /* __COMPAT_MM_H__ */
==> vmci-only/shared/compat_workqueue.h <==
/*********************************************************
* Copyright (C) 2007 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_WORKQUEUE_H__
# define __COMPAT_WORKQUEUE_H__
#include <linux/version.h>
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41)
# include <linux/workqueue.h>
#endif
/*
*
* Work queues and delayed work queues.
*
* Prior to 2.5.41, the notion of work queues did not exist. Taskqueues are
* used for work queues and timers are used for delayed work queues.
*
* After 2.6.20, normal work structs ("work_struct") and delayed work
* ("delayed_work") structs were separated so that the work_struct could be
* slimmed down. The interface was also changed such that the address of the
* work_struct itself is passed in as the argument to the work function. This
* requires that one embed the work struct in the larger struct containing the
* information necessary to complete the work and use container_of() to obtain
* the address of the containing structure.
*
* Users of these macros should embed a compat_work or compat_delayed_work in
* a larger structure, then specify the larger structure as the _data argument
* for the initialization functions, specify the work function to take
* a compat_work_arg or compat_delayed_work_arg, then use the appropriate
* _GET_DATA macro to obtain the reference to the structure passed in as _data.
* An example is below.
*
*
* typedef struct WorkData {
* int data;
* compat_work work;
* } WorkData;
*
*
* void
* WorkFunc(compat_work_arg data)
* {
* WorkData *workData = COMPAT_WORK_GET_DATA(data, WorkData, work);
*
* ...
* }
*
*
* {
* WorkData *workData = kmalloc(sizeof *workData, GFP_EXAMPLE);
* if (!workData) {
* return -ENOMEM;
* }
*
* COMPAT_INIT_WORK(&workData->work, WorkFunc, workData);
* compat_schedule_work(&workData->work);
* }
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 41) /* { */
typedef struct tq_struct compat_work;
typedef struct compat_delayed_work {
struct tq_struct work;
struct timer_list timer;
} compat_delayed_work;
typedef void * compat_work_arg;
typedef void * compat_delayed_work_arg;
/*
* Delayed work queues need to run at some point in the future in process
* context, but task queues don't support delaying the task one is scheduling.
* Timers allow us to delay the execution of our work queue until the future,
* but timer handlers run in bottom-half context. As such, we use both a timer
* and task queue and use the timer handler below to schedule the task in
* process context immediately. The timer lets us delay execution, and the
* task queue lets us run in process context.
*
* Note that this is similar to how delayed_work is implemented with work
* queues in later kernel versions.
*/
static inline void
__compat_delayed_work_timer(unsigned long arg)
{
compat_delayed_work *dwork = (compat_delayed_work *)arg;
if (dwork) {
schedule_task(&dwork->work);
}
}
# define COMPAT_INIT_WORK(_work, _func, _data) \
INIT_LIST_HEAD(&(_work)->list); \
(_work)->sync = 0; \
(_work)->routine = _func; \
(_work)->data = _data
# define COMPAT_INIT_DELAYED_WORK(_work, _func, _data) \
COMPAT_INIT_WORK(&(_work)->work, _func, _data); \
init_timer(&(_work)->timer); \
(_work)->timer.expires = 0; \
(_work)->timer.function = __compat_delayed_work_timer; \
(_work)->timer.data = (unsigned long)_work
# define compat_schedule_work(_work) \
schedule_task(_work)
# define compat_schedule_delayed_work(_work, _delay) \
(_work)->timer.expires = jiffies + _delay; \
add_timer(&(_work)->timer)
# define COMPAT_WORK_GET_DATA(_p, _type, _member) \
(_type *)(_p)
# define COMPAT_DELAYED_WORK_GET_DATA(_p, _type, _member) \
(_type *)(_p)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) \
&& !defined(__VMKLNX__) /* } { */
typedef struct work_struct compat_work;
typedef struct work_struct compat_delayed_work;
typedef void * compat_work_arg;
typedef void * compat_delayed_work_arg;
# define COMPAT_INIT_WORK(_work, _func, _data) \
INIT_WORK(_work, _func, _data)
# define COMPAT_INIT_DELAYED_WORK(_work, _func, _data) \
INIT_WORK(_work, _func, _data)
# define compat_schedule_work(_work) \
schedule_work(_work)
# define compat_schedule_delayed_work(_work, _delay) \
schedule_delayed_work(_work, _delay)
# define COMPAT_WORK_GET_DATA(_p, _type, _member) \
(_type *)(_p)
# define COMPAT_DELAYED_WORK_GET_DATA(_p, _type, _member) \
(_type *)(_p)
#else /* } Linux >= 2.6.20 { */
typedef struct work_struct compat_work;
typedef struct delayed_work compat_delayed_work;
typedef struct work_struct * compat_work_arg;
typedef struct work_struct * compat_delayed_work_arg;
# define COMPAT_INIT_WORK(_work, _func, _data) \
INIT_WORK(_work, _func)
# define COMPAT_INIT_DELAYED_WORK(_work, _func, _data) \
INIT_DELAYED_WORK(_work, _func)
# define compat_schedule_work(_work) \
schedule_work(_work)
# define compat_schedule_delayed_work(_work, _delay) \
schedule_delayed_work(_work, _delay)
# define COMPAT_WORK_GET_DATA(_p, _type, _member) \
container_of(_p, _type, _member)
# define COMPAT_DELAYED_WORK_GET_DATA(_p, _type, _member) \
container_of(_p, _type, _member.work)
#endif /* } */
#endif /* __COMPAT_WORKQUEUE_H__ */
==> vmci-only/shared/driverLog.h <==
/*********************************************************
* Copyright (C) 2007 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* driverLog.h --
*
* Logging functions for Linux kernel modules.
*/
#ifndef __DRIVERLOG_H__
#define __DRIVERLOG_H__
/*
* The definitions of Warning(), Log(), and Panic() come from vm_assert.h for
* consistency.
*/
#include "vm_assert.h"
void DriverLog_Init(const char *prefix);
#endif /* __DRIVERLOG_H__ */
==> vmci-only/shared/vmware_pack_end.h <==
/*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmware_pack_end.h --
*
* End of structure packing. See vmware_pack_init.h for details.
*
* Note that we do not use the following construct in this include file,
* because we want to emit the code every time the file is included --hpreg
*
* #ifndef foo
* # define foo
* ...
* #endif
*
*/
#include "vmware_pack_init.h"
#ifdef _MSC_VER
# pragma pack(pop)
#elif __GNUC__
__attribute__((__packed__))
#else
# error Compiler packing...
#endif
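/*
 * Illustrative use of the begin/end pair (see, e.g., CPUIDReply in
 * x86cpuid.h for a real instance): the pragma or attribute emitted
 * here closes the packed region opened by vmware_pack_begin.h.
 *
 *    typedef
 *    #include "vmware_pack_begin.h"
 *    struct Foo {
 *       uint8  a;
 *       uint32 b;
 *    }
 *    #include "vmware_pack_end.h"
 *    Foo;
 */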
==> vmci-only/shared/guest_msg_def.h <==
/*********************************************************
* Copyright (C) 1998 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* guest_msg_def.h --
*
* Second layer of the internal communication channel between guest
 * applications and vmware.
*
*/
#ifndef _GUEST_MSG_DEF_H_
#define _GUEST_MSG_DEF_H_
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_USERLEVEL
#include "includeCheck.h"
/* Basic request types */
typedef enum {
MESSAGE_TYPE_OPEN,
MESSAGE_TYPE_SENDSIZE,
MESSAGE_TYPE_SENDPAYLOAD,
MESSAGE_TYPE_RECVSIZE,
MESSAGE_TYPE_RECVPAYLOAD,
MESSAGE_TYPE_RECVSTATUS,
MESSAGE_TYPE_CLOSE,
} MessageType;
/* Reply statuses */
/* The basic request succeeded */
#define MESSAGE_STATUS_SUCCESS 0x0001
/* vmware has a message available for its party */
#define MESSAGE_STATUS_DORECV 0x0002
/* The channel has been closed */
#define MESSAGE_STATUS_CLOSED 0x0004
/* vmware removed the message before the party fetched it */
#define MESSAGE_STATUS_UNSENT 0x0008
/* A checkpoint occurred */
#define MESSAGE_STATUS_CPT 0x0010
/* An underlying device is powering off */
#define MESSAGE_STATUS_POWEROFF 0x0020
/* vmware has detected a timeout on the channel */
#define MESSAGE_STATUS_TIMEOUT 0x0040
/* vmware supports high-bandwidth for sending and receiving the payload */
#define MESSAGE_STATUS_HB 0x0080
/*
* This mask defines the status bits that the guest is allowed to set;
* we use this to mask out all other bits when receiving the status
* from the guest. Otherwise, the guest can manipulate VMX state by
* setting status bits that are only supposed to be changed by the
* VMX. See bug 45385.
*/
#define MESSAGE_STATUS_GUEST_MASK MESSAGE_STATUS_SUCCESS
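/*
 * Illustrative sketch of the masking described above, as the VMX side
 * might apply it (guestStatus is a hypothetical variable):
 *
 *    guestStatus &= MESSAGE_STATUS_GUEST_MASK;
 */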
/*
* Max number of channels.
* Unfortunately this has to be public because the monitor part
* of the backdoor needs it for its trivial-case optimization. [greg]
*/
#define GUESTMSG_MAX_CHANNEL 8
/* Flags to open a channel. --hpreg */
#define GUESTMSG_FLAG_COOKIE 0x80000000
#define GUESTMSG_FLAG_ALL GUESTMSG_FLAG_COOKIE
/*
 * Maximum size of incoming message. This is to prevent denial-of-service
 * attacks against the host from guest applications.
*/
#define GUESTMSG_MAX_IN_SIZE (64 * 1024)
#endif /* _GUEST_MSG_DEF_H_ */
==> vmci-only/shared/x86cpuid.h <==
/*********************************************************
* Copyright (C) 1998-2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef _X86CPUID_H_
#define _X86CPUID_H_
/* http://www.sandpile.org/ia32/cpuid.htm */
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_VMX
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMMON
#include "includeCheck.h"
#include "vm_basic_types.h"
#include "community_source.h"
#include "x86vendor.h"
#include "vm_assert.h"
/*
* The linux kernel's ptrace.h stupidly defines the bare
 * EAX/EBX/ECX/EDX, which wreaks havoc with our preprocessor tricks.
*/
#undef EAX
#undef EBX
#undef ECX
#undef EDX
typedef struct CPUIDRegs {
uint32 eax, ebx, ecx, edx;
} CPUIDRegs;
typedef union CPUIDRegsUnion {
uint32 array[4];
CPUIDRegs regs;
} CPUIDRegsUnion;
/*
 * Results of calling cpuid(eax, ecx) on all host logical CPUs.
*/
#ifdef _MSC_VER
#pragma warning (disable :4200) // non-std extension: zero-sized array in struct
#endif
typedef
#include "vmware_pack_begin.h"
struct CPUIDReply {
/*
* Unique host logical CPU identifier. It does not change across queries, so
* we use it to correlate the replies of multiple queries.
*/
uint64 tag; // OUT
CPUIDRegs regs; // OUT
}
#include "vmware_pack_end.h"
CPUIDReply;
typedef
#include "vmware_pack_begin.h"
struct CPUIDQuery {
uint32 eax; // IN
uint32 ecx; // IN
uint32 numLogicalCPUs; // IN/OUT
CPUIDReply logicalCPUs[0]; // OUT
}
#include "vmware_pack_end.h"
CPUIDQuery;
/*
* CPUID levels the monitor caches and ones that are not cached, but
* have fields defined below (short name and actual value).
*
* The first parameter defines whether the level has its default masks
* generated from the values in this file. Any level which is marked
* as FALSE here *must* have all monitor support types set to NA. A
* static assert in lib/cpuidcompat/cpuidcompat.c will check this.
*/
#define CPUID_CACHED_LEVELS \
CPUIDLEVEL(TRUE, 0, 0) \
CPUIDLEVEL(TRUE, 1, 1) \
CPUIDLEVEL(FALSE, 5, 5) \
CPUIDLEVEL(TRUE, 7, 7) \
CPUIDLEVEL(FALSE, A, 0xA) \
CPUIDLEVEL(TRUE, D, 0xD) \
CPUIDLEVEL(FALSE,400, 0x40000000) \
CPUIDLEVEL(FALSE,401, 0x40000001) \
CPUIDLEVEL(FALSE,402, 0x40000002) \
CPUIDLEVEL(FALSE,403, 0x40000003) \
CPUIDLEVEL(FALSE,404, 0x40000004) \
CPUIDLEVEL(FALSE,405, 0x40000005) \
CPUIDLEVEL(FALSE,406, 0x40000006) \
CPUIDLEVEL(FALSE,410, 0x40000010) \
CPUIDLEVEL(FALSE, 80, 0x80000000) \
CPUIDLEVEL(TRUE, 81, 0x80000001) \
CPUIDLEVEL(FALSE, 87, 0x80000007) \
CPUIDLEVEL(FALSE, 88, 0x80000008) \
CPUIDLEVEL(TRUE, 8A, 0x8000000A)
#define CPUID_UNCACHED_LEVELS \
CPUIDLEVEL(FALSE, 4, 4) \
CPUIDLEVEL(FALSE, 6, 6) \
CPUIDLEVEL(FALSE, B, 0xB) \
CPUIDLEVEL(FALSE, 85, 0x80000005) \
CPUIDLEVEL(FALSE, 86, 0x80000006) \
CPUIDLEVEL(FALSE, 819, 0x80000019) \
CPUIDLEVEL(FALSE, 81A, 0x8000001A) \
CPUIDLEVEL(FALSE, 81B, 0x8000001B) \
CPUIDLEVEL(FALSE, 81C, 0x8000001C) \
CPUIDLEVEL(FALSE, 81D, 0x8000001D) \
CPUIDLEVEL(FALSE, 81E, 0x8000001E)
#define CPUID_ALL_LEVELS \
CPUID_CACHED_LEVELS \
CPUID_UNCACHED_LEVELS
/* Define cached CPUID levels in the form: CPUID_LEVEL_ */
typedef enum {
#define CPUIDLEVEL(t, s, v) CPUID_LEVEL_##s,
CPUID_CACHED_LEVELS
#undef CPUIDLEVEL
CPUID_NUM_CACHED_LEVELS
} CpuidCachedLevel;
/* Enum to translate between shorthand name and actual CPUID level value. */
enum {
#define CPUIDLEVEL(t, s, v) CPUID_LEVEL_VAL_##s = v,
CPUID_ALL_LEVELS
#undef CPUIDLEVEL
};
/* Named feature leaves */
#define CPUID_FEATURE_INFORMATION 0x01
#define CPUID_PROCESSOR_TOPOLOGY 4
#define CPUID_MWAIT_FEATURES 5
#define CPUID_XSAVE_FEATURES 0xd
#define CPUID_HYPERVISOR_LEVEL_0 0x40000000
#define CPUID_SVM_FEATURES 0x8000000a
/*
* CPUID result registers
*/
#define CPUID_REGS \
CPUIDREG(EAX, eax) \
CPUIDREG(EBX, ebx) \
CPUIDREG(ECX, ecx) \
CPUIDREG(EDX, edx)
typedef enum {
#define CPUIDREG(uc, lc) CPUID_REG_##uc,
CPUID_REGS
#undef CPUIDREG
CPUID_NUM_REGS
} CpuidReg;
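/*
 * Note: the CPU vendor strings below look scrambled because they are
 * laid out in CPUIDRegs field order (ebx, ecx, edx), while the vendor
 * string is returned in EBX, EDX, ECX order; the _FIXED variants are
 * the human-readable forms.
 */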
#define CPUID_INTEL_VENDOR_STRING "GenuntelineI"
#define CPUID_AMD_VENDOR_STRING "AuthcAMDenti"
#define CPUID_CYRIX_VENDOR_STRING "CyriteadxIns"
#define CPUID_VIA_VENDOR_STRING "CentaulsaurH"
#define CPUID_HYPERV_HYPERVISOR_VENDOR_STRING "Microsoft Hv"
#define CPUID_KVM_HYPERVISOR_VENDOR_STRING "KVMKVMKVM\0\0\0"
#define CPUID_VMWARE_HYPERVISOR_VENDOR_STRING "VMwareVMware"
#define CPUID_XEN_HYPERVISOR_VENDOR_STRING "XenVMMXenVMM"
#define CPUID_INTEL_VENDOR_STRING_FIXED "GenuineIntel"
#define CPUID_AMD_VENDOR_STRING_FIXED "AuthenticAMD"
#define CPUID_CYRIX_VENDOR_STRING_FIXED "CyrixInstead"
#define CPUID_VIA_VENDOR_STRING_FIXED "CentaurHauls"
/*
* FIELD can be defined to process the CPUID information provided
* in the following CPUID_FIELD_DATA macro. The first parameter is
* the CPUID level of the feature (must be defined in
 * CPUID_ALL_LEVELS, above). The second parameter is the CPUID result
* register in which the field is returned (defined in CPUID_REGS).
* The third field is the vendor(s) this feature applies to. "COMMON"
* means all vendors apply. UNKNOWN may not be used here. The fourth
* and fifth parameters are the bit position of the field and the
* width, respectively. The sixth is the text name of the field.
*
 * The seventh parameter specifies the monitor support
* characteristics for this field. The value must be a valid
* CpuidFieldSupported value (omitting CPUID_FIELD_SUPPORT_ for
* convenience). The meaning of those values are described below.
*
* The eighth parameter describes whether the feature is capable of
* being used by usermode code (TRUE), or just CPL0 kernel code
* (FALSE).
*
* FLAG is defined identically to FIELD, but its accessors are more
* appropriate for 1-bit flags, and compile-time asserts enforce that
* the size is 1 bit wide.
*/
/*
* CpuidFieldSupported is made up of the following values:
*
* NO: A feature/field that IS NOT SUPPORTED by the monitor. Even
* if the host supports this feature, we will never expose it to
* the guest.
*
* YES: A feature/field that IS SUPPORTED by the monitor. If the
* host supports this feature, we will expose it to the guest. If
* not, then we will not set the feature.
*
* ANY: A feature/field that IS ALWAYS SUPPORTED by the monitor.
* Even if the host does not support the feature, the monitor can
* expose the feature to the guest.
*
* NA: Only legal for levels not masked/tested by default (see
* above for this definition). Such fields must always be marked
* as NA.
*
* These distinctions, when combined with the feature's CPL3
* properties can be translated into a common CPUID mask string as
* follows:
*
* NO + CPL3 --> "R" (Reserved). We don't support the feature,
* but we can't properly hide this from applications when using
* direct execution or HV with apps that do try/catch/fail, so we
* must still perform compatibility checks.
*
* NO + !CPL3 --> "0" (Masked). We can hide this from the guest.
*
* YES --> "H" (Host). We support the feature, so show it to the
* guest if the host has the feature.
*
* ANY/NA --> "X" (Ignore). By default, don't perform checks for
* this feature bit. Per-GOS masks may choose to set this bit in
* the guest. (e.g. the APIC feature bit is always set to 1.)
*
* See lib/cpuidcompat/cpuidcompat.c for any possible overrides to
* these defaults.
*/
typedef enum {
CPUID_FIELD_SUPPORTED_NO,
CPUID_FIELD_SUPPORTED_YES,
CPUID_FIELD_SUPPORTED_ANY,
CPUID_FIELD_SUPPORTED_NA,
CPUID_NUM_FIELD_SUPPORTEDS
} CpuidFieldSupported;
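/*
 * Illustrative sketch of consuming the FIELD/FLAG tables below as
 * X-macros: defining FIELD/FLAG before expanding a level's data macro
 * stamps out one enumerator per entry, e.g. the bit positions:
 *
 *    #define FIELD(lvl, sub, reg, pos, size, name, s, c3) \
 *            CPUID_##name##_SHIFT = pos,
 *    #define FLAG FIELD
 *    enum { CPUID_FIELD_DATA_LEVEL_1 };
 *    #undef FLAG
 *    #undef FIELD
 */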
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_0 \
FIELD( 0, 0, EAX, 0, 32, NUMLEVELS, ANY, FALSE) \
FIELD( 0, 0, EBX, 0, 32, VENDOR1, YES, TRUE) \
FIELD( 0, 0, ECX, 0, 32, VENDOR3, YES, TRUE) \
FIELD( 0, 0, EDX, 0, 32, VENDOR2, YES, TRUE)
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_1 \
FIELD( 1, 0, EAX, 0, 4, STEPPING, ANY, FALSE) \
FIELD( 1, 0, EAX, 4, 4, MODEL, ANY, FALSE) \
FIELD( 1, 0, EAX, 8, 4, FAMILY, YES, FALSE) \
FIELD( 1, 0, EAX, 12, 2, TYPE, ANY, FALSE) \
FIELD( 1, 0, EAX, 16, 4, EXTENDED_MODEL, ANY, FALSE) \
FIELD( 1, 0, EAX, 20, 8, EXTENDED_FAMILY, YES, FALSE) \
FIELD( 1, 0, EBX, 0, 8, BRAND_ID, ANY, FALSE) \
FIELD( 1, 0, EBX, 8, 8, CLFL_SIZE, ANY, FALSE) \
FIELD( 1, 0, EBX, 16, 8, LCPU_COUNT, ANY, FALSE) \
FIELD( 1, 0, EBX, 24, 8, APICID, ANY, FALSE) \
FLAG( 1, 0, ECX, 0, 1, SSE3, YES, TRUE) \
FLAG( 1, 0, ECX, 1, 1, PCLMULQDQ, YES, TRUE) \
FLAG( 1, 0, ECX, 2, 1, DTES64, NO, FALSE) \
FLAG( 1, 0, ECX, 3, 1, MWAIT, YES, FALSE) \
FLAG( 1, 0, ECX, 4, 1, DSCPL, NO, FALSE) \
FLAG( 1, 0, ECX, 5, 1, VMX, YES, FALSE) \
FLAG( 1, 0, ECX, 6, 1, SMX, NO, FALSE) \
FLAG( 1, 0, ECX, 7, 1, EIST, NO, FALSE) \
FLAG( 1, 0, ECX, 8, 1, TM2, NO, FALSE) \
FLAG( 1, 0, ECX, 9, 1, SSSE3, YES, TRUE) \
FLAG( 1, 0, ECX, 10, 1, CNXTID, NO, FALSE) \
FLAG( 1, 0, ECX, 11, 1, NDA11, NO, FALSE) \
FLAG( 1, 0, ECX, 12, 1, FMA, YES, TRUE) \
FLAG( 1, 0, ECX, 13, 1, CMPXCHG16B, YES, TRUE) \
FLAG( 1, 0, ECX, 14, 1, xTPR, NO, FALSE) \
FLAG( 1, 0, ECX, 15, 1, PDCM, NO, FALSE) \
FLAG( 1, 0, ECX, 17, 1, PCID, YES, FALSE) \
FLAG( 1, 0, ECX, 18, 1, DCA, NO, FALSE) \
FLAG( 1, 0, ECX, 19, 1, SSE41, YES, TRUE) \
FLAG( 1, 0, ECX, 20, 1, SSE42, YES, TRUE) \
FLAG( 1, 0, ECX, 21, 1, x2APIC, ANY, FALSE) \
FLAG( 1, 0, ECX, 22, 1, MOVBE, YES, TRUE) \
FLAG( 1, 0, ECX, 23, 1, POPCNT, YES, TRUE) \
FLAG( 1, 0, ECX, 24, 1, TSC_DEADLINE, NO, FALSE) \
FLAG( 1, 0, ECX, 25, 1, AES, YES, TRUE) \
FLAG( 1, 0, ECX, 26, 1, XSAVE, YES, FALSE) \
FLAG( 1, 0, ECX, 27, 1, OSXSAVE, ANY, FALSE) \
FLAG( 1, 0, ECX, 28, 1, AVX, YES, FALSE) \
FLAG( 1, 0, ECX, 29, 1, F16C, YES, TRUE) \
FLAG( 1, 0, ECX, 30, 1, RDRAND, YES, TRUE) \
FLAG( 1, 0, ECX, 31, 1, HYPERVISOR, ANY, TRUE) \
FLAG( 1, 0, EDX, 0, 1, FPU, YES, TRUE) \
FLAG( 1, 0, EDX, 1, 1, VME, YES, FALSE) \
FLAG( 1, 0, EDX, 2, 1, DE, YES, FALSE) \
FLAG( 1, 0, EDX, 3, 1, PSE, YES, FALSE) \
FLAG( 1, 0, EDX, 4, 1, TSC, YES, TRUE) \
FLAG( 1, 0, EDX, 5, 1, MSR, YES, FALSE) \
FLAG( 1, 0, EDX, 6, 1, PAE, YES, FALSE) \
FLAG( 1, 0, EDX, 7, 1, MCE, YES, FALSE) \
FLAG( 1, 0, EDX, 8, 1, CX8, YES, TRUE) \
FLAG( 1, 0, EDX, 9, 1, APIC, ANY, FALSE) \
FLAG( 1, 0, EDX, 11, 1, SEP, YES, TRUE) \
FLAG( 1, 0, EDX, 12, 1, MTRR, YES, FALSE) \
FLAG( 1, 0, EDX, 13, 1, PGE, YES, FALSE) \
FLAG( 1, 0, EDX, 14, 1, MCA, YES, FALSE) \
FLAG( 1, 0, EDX, 15, 1, CMOV, YES, TRUE) \
FLAG( 1, 0, EDX, 16, 1, PAT, YES, FALSE) \
FLAG( 1, 0, EDX, 17, 1, PSE36, YES, FALSE) \
FLAG( 1, 0, EDX, 18, 1, PSN, YES, FALSE) \
FLAG( 1, 0, EDX, 19, 1, CLFSH, YES, TRUE) \
FLAG( 1, 0, EDX, 21, 1, DS, YES, FALSE) \
FLAG( 1, 0, EDX, 22, 1, ACPI, ANY, FALSE) \
FLAG( 1, 0, EDX, 23, 1, MMX, YES, TRUE) \
FLAG( 1, 0, EDX, 24, 1, FXSR, YES, TRUE) \
FLAG( 1, 0, EDX, 25, 1, SSE, YES, TRUE) \
FLAG( 1, 0, EDX, 26, 1, SSE2, YES, TRUE) \
FLAG( 1, 0, EDX, 27, 1, SS, YES, FALSE) \
FLAG( 1, 0, EDX, 28, 1, HTT, ANY, FALSE) \
FLAG( 1, 0, EDX, 29, 1, TM, NO, FALSE) \
FLAG( 1, 0, EDX, 30, 1, IA64, NO, FALSE) \
FLAG( 1, 0, EDX, 31, 1, PBE, NO, FALSE)
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_4 \
FIELD( 4, 0, EAX, 0, 5, LEAF4_CACHE_TYPE, NA, FALSE) \
FIELD( 4, 0, EAX, 5, 3, LEAF4_CACHE_LEVEL, NA, FALSE) \
FLAG( 4, 0, EAX, 8, 1, LEAF4_CACHE_SELF_INIT, NA, FALSE) \
FLAG( 4, 0, EAX, 9, 1, LEAF4_CACHE_FULLY_ASSOC, NA, FALSE) \
FIELD( 4, 0, EAX, 14, 12, LEAF4_CACHE_NUMHT_SHARING, NA, FALSE) \
FIELD( 4, 0, EAX, 26, 6, LEAF4_CORE_COUNT, NA, FALSE) \
FIELD( 4, 0, EBX, 0, 12, LEAF4_CACHE_LINE, NA, FALSE) \
FIELD( 4, 0, EBX, 12, 10, LEAF4_CACHE_PART, NA, FALSE) \
FIELD( 4, 0, EBX, 22, 10, LEAF4_CACHE_WAYS, NA, FALSE) \
FIELD( 4, 0, ECX, 0, 32, LEAF4_CACHE_SETS, NA, FALSE) \
FLAG( 4, 0, EDX, 0, 1, LEAF4_CACHE_WBINVD_NOT_GUARANTEED, NA, FALSE) \
FLAG( 4, 0, EDX, 1, 1, LEAF4_CACHE_IS_INCLUSIVE, NA, FALSE) \
FLAG( 4, 0, EDX, 2, 1, LEAF4_CACHE_COMPLEX_INDEXING, NA, FALSE)
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_5 \
FIELD( 5, 0, EAX, 0, 16, MWAIT_MIN_SIZE, NA, FALSE) \
FIELD( 5, 0, EBX, 0, 16, MWAIT_MAX_SIZE, NA, FALSE) \
FLAG( 5, 0, ECX, 0, 1, MWAIT_EXTENSIONS, NA, FALSE) \
FLAG( 5, 0, ECX, 1, 1, MWAIT_INTR_BREAK, NA, FALSE) \
FIELD( 5, 0, EDX, 0, 4, MWAIT_C0_SUBSTATE, NA, FALSE) \
FIELD( 5, 0, EDX, 4, 4, MWAIT_C1_SUBSTATE, NA, FALSE) \
FIELD( 5, 0, EDX, 8, 4, MWAIT_C2_SUBSTATE, NA, FALSE) \
FIELD( 5, 0, EDX, 12, 4, MWAIT_C3_SUBSTATE, NA, FALSE) \
FIELD( 5, 0, EDX, 16, 4, MWAIT_C4_SUBSTATE, NA, FALSE)
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_6 \
FLAG( 6, 0, EAX, 0, 1, THERMAL_SENSOR, NA, FALSE) \
FLAG( 6, 0, EAX, 1, 1, TURBO_MODE, NA, FALSE) \
FLAG( 6, 0, EAX, 2, 1, APIC_INVARIANT, NA, FALSE) \
FIELD( 6, 0, EBX, 0, 4, NUM_INTR_THRESHOLDS, NA, FALSE) \
FLAG( 6, 0, ECX, 0, 1, HW_COORD_FEEDBACK, NA, FALSE) \
FLAG( 6, 0, ECX, 3, 1, ENERGY_PERF_BIAS, NA, FALSE)
#define CPUID_7_EBX_13
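/*
 * CPUID_7_EBX_13 (and CPUID_81_ECX_17 below) deliberately expand to
 * nothing here; they are placeholders where additional bit definitions
 * can be spliced into the tables, presumably in non-Community-Source
 * builds (compare the empty INTEL_CPUID_FIELD_DATA and
 * AMD_CPUID_FIELD_DATA definitions at the end of the tables).
 */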
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_7 \
FLAG( 7, 0, EBX, 0, 1, FSGSBASE, YES, FALSE) \
FLAG( 7, 0, EBX, 3, 1, BMI1, YES, TRUE) \
FLAG( 7, 0, EBX, 4, 1, HLE, NO, TRUE) \
FLAG( 7, 0, EBX, 5, 1, AVX2, NO, TRUE) \
FLAG( 7, 0, EBX, 7, 1, SMEP, YES, FALSE) \
FLAG( 7, 0, EBX, 8, 1, BMI2, NO, TRUE) \
FLAG( 7, 0, EBX, 9, 1, ENFSTRG, YES, FALSE) \
FLAG( 7, 0, EBX, 10, 1, INVPCID, NO, FALSE) \
FLAG( 7, 0, EBX, 11, 1, RTM, NO, TRUE) \
CPUID_7_EBX_13
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_A \
FIELD( A, 0, EAX, 0, 8, PMC_VERSION, NA, FALSE) \
FIELD( A, 0, EAX, 8, 8, PMC_NUM_GEN, NA, FALSE) \
FIELD( A, 0, EAX, 16, 8, PMC_WIDTH_GEN, NA, FALSE) \
FIELD( A, 0, EAX, 24, 8, PMC_EBX_LENGTH, NA, FALSE) \
FLAG( A, 0, EBX, 0, 1, PMC_CORE_CYCLES, NA, FALSE) \
FLAG( A, 0, EBX, 1, 1, PMC_INSTR_RETIRED, NA, FALSE) \
FLAG( A, 0, EBX, 2, 1, PMC_REF_CYCLES, NA, FALSE) \
FLAG( A, 0, EBX, 3, 1, PMC_LAST_LVL_CREF, NA, FALSE) \
FLAG( A, 0, EBX, 4, 1, PMC_LAST_LVL_CMISS, NA, FALSE) \
FLAG( A, 0, EBX, 5, 1, PMC_BR_INST_RETIRED, NA, FALSE) \
FLAG( A, 0, EBX, 6, 1, PMC_BR_MISS_RETIRED, NA, FALSE) \
FIELD( A, 0, EDX, 0, 5, PMC_NUM_FIXED, NA, FALSE) \
FIELD( A, 0, EDX, 5, 8, PMC_WIDTH_FIXED, NA, FALSE)
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_B \
FIELD( B, 0, EAX, 0, 5, TOPOLOGY_MASK_WIDTH, NA, FALSE) \
FIELD( B, 0, EBX, 0, 16, TOPOLOGY_CPUS_SHARING_LEVEL, NA, FALSE) \
FIELD( B, 0, ECX, 0, 8, TOPOLOGY_LEVEL_NUMBER, NA, FALSE) \
FIELD( B, 0, ECX, 8, 8, TOPOLOGY_LEVEL_TYPE, NA, FALSE) \
FIELD( B, 0, EDX, 0, 32, TOPOLOGY_X2APIC_ID, NA, FALSE)
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_D \
FLAG( D, 0, EAX, 0, 1, XCR0_MASTER_LEGACY_FP, YES, FALSE) \
FLAG( D, 0, EAX, 1, 1, XCR0_MASTER_SSE, YES, FALSE) \
FLAG( D, 0, EAX, 2, 1, XCR0_MASTER_YMM_H, YES, FALSE) \
FIELD( D, 0, EAX, 3, 29, XCR0_MASTER_LOWER, NO, FALSE) \
FIELD( D, 0, EBX, 0, 32, XSAVE_ENABLED_SIZE, ANY, FALSE) \
FIELD( D, 0, ECX, 0, 32, XSAVE_MAX_SIZE, YES, FALSE) \
FIELD( D, 0, EDX, 0, 29, XCR0_MASTER_UPPER, NO, FALSE) \
FLAG( D, 0, EDX, 30, 1, XCR0_MASTER_LWP, NO, FALSE) \
FLAG( D, 0, EDX, 31, 1, XCR0_MASTER_EXTENDED_XSAVE, NO, FALSE) \
FLAG( D, 1, EAX, 0, 1, XSAVEOPT, YES, FALSE) \
FIELD( D, 2, EAX, 0, 32, XSAVE_YMM_SIZE, YES, FALSE) \
FIELD( D, 2, EBX, 0, 32, XSAVE_YMM_OFFSET, YES, FALSE) \
FIELD( D, 2, ECX, 0, 32, XSAVE_YMM_RSVD1, YES, FALSE) \
FIELD( D, 2, EDX, 0, 32, XSAVE_YMM_RSVD2, YES, FALSE) \
FIELD( D, 62, EAX, 0, 32, XSAVE_LWP_SIZE, NO, FALSE) \
FIELD( D, 62, EBX, 0, 32, XSAVE_LWP_OFFSET, NO, FALSE) \
FIELD( D, 62, ECX, 0, 32, XSAVE_LWP_RSVD1, NO, FALSE) \
FIELD( D, 62, EDX, 0, 32, XSAVE_LWP_RSVD2, NO, FALSE)
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_400 \
FIELD(400, 0, EAX, 0, 32, NUM_HYP_LEVELS, NA, FALSE) \
FIELD(400, 0, EBX, 0, 32, HYPERVISOR1, NA, FALSE) \
FIELD(400, 0, ECX, 0, 32, HYPERVISOR2, NA, FALSE) \
FIELD(400, 0, EDX, 0, 32, HYPERVISOR3, NA, FALSE)
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_410 \
FIELD(410, 0, EAX, 0, 32, TSC_HZ, NA, FALSE) \
FIELD(410, 0, EBX, 0, 32, ACPIBUS_HZ, NA, FALSE)
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_80 \
FIELD( 80, 0, EAX, 0, 32, NUM_EXT_LEVELS, NA, FALSE) \
FIELD( 80, 0, EBX, 0, 32, LEAF80_VENDOR1, NA, FALSE) \
FIELD( 80, 0, ECX, 0, 32, LEAF80_VENDOR3, NA, FALSE) \
FIELD( 80, 0, EDX, 0, 32, LEAF80_VENDOR2, NA, FALSE)
#define CPUID_81_ECX_17
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_81 \
FIELD( 81, 0, EAX, 0, 32, UNKNOWN81EAX, ANY, FALSE) \
FIELD( 81, 0, EAX, 0, 4, LEAF81_STEPPING, ANY, FALSE) \
FIELD( 81, 0, EAX, 4, 4, LEAF81_MODEL, ANY, FALSE) \
FIELD( 81, 0, EAX, 8, 4, LEAF81_FAMILY, ANY, FALSE) \
FIELD( 81, 0, EAX, 12, 2, LEAF81_TYPE, ANY, FALSE) \
FIELD( 81, 0, EAX, 16, 4, LEAF81_EXTENDED_MODEL, ANY, FALSE) \
FIELD( 81, 0, EAX, 20, 8, LEAF81_EXTENDED_FAMILY, ANY, FALSE) \
FIELD( 81, 0, EBX, 0, 32, UNKNOWN81EBX, ANY, FALSE) \
FIELD( 81, 0, EBX, 0, 16, LEAF81_BRAND_ID, ANY, FALSE) \
FIELD( 81, 0, EBX, 16, 16, UNDEF, ANY, FALSE) \
FLAG( 81, 0, ECX, 0, 1, LAHF64, YES, TRUE) \
FLAG( 81, 0, ECX, 1, 1, CMPLEGACY, ANY, FALSE) \
FLAG( 81, 0, ECX, 2, 1, SVM, YES, FALSE) \
FLAG( 81, 0, ECX, 3, 1, EXTAPICSPC, YES, FALSE) \
FLAG( 81, 0, ECX, 4, 1, CR8AVAIL, YES, FALSE) \
FLAG( 81, 0, ECX, 5, 1, ABM, YES, TRUE) \
FLAG( 81, 0, ECX, 6, 1, SSE4A, YES, TRUE) \
FLAG( 81, 0, ECX, 7, 1, MISALIGNED_SSE, YES, TRUE) \
FLAG( 81, 0, ECX, 8, 1, 3DNPREFETCH, YES, TRUE) \
FLAG( 81, 0, ECX, 9, 1, OSVW, ANY, FALSE) \
FLAG( 81, 0, ECX, 10, 1, IBS, NO, FALSE) \
FLAG( 81, 0, ECX, 11, 1, XOP, YES, TRUE) \
FLAG( 81, 0, ECX, 12, 1, SKINIT, NO, FALSE) \
FLAG( 81, 0, ECX, 13, 1, WATCHDOG, NO, FALSE) \
FLAG( 81, 0, ECX, 15, 1, LWP, NO, FALSE) \
FLAG( 81, 0, ECX, 16, 1, FMA4, YES, TRUE) \
CPUID_81_ECX_17 \
FLAG( 81, 0, ECX, 19, 1, NODEID_MSR, NO, FALSE) \
FLAG( 81, 0, ECX, 21, 1, TBM, YES, TRUE) \
FLAG( 81, 0, ECX, 22, 1, TOPOLOGY, NO, FALSE) \
FLAG( 81, 0, ECX, 23, 1, PERFCORE, ANY, TRUE) \
FLAG( 81, 0, EDX, 0, 1, LEAF81_FPU, YES, TRUE) \
FLAG( 81, 0, EDX, 1, 1, LEAF81_VME, YES, FALSE) \
FLAG( 81, 0, EDX, 2, 1, LEAF81_DE, YES, FALSE) \
FLAG( 81, 0, EDX, 3, 1, LEAF81_PSE, YES, FALSE) \
FLAG( 81, 0, EDX, 4, 1, LEAF81_TSC, YES, TRUE) \
FLAG( 81, 0, EDX, 5, 1, LEAF81_MSR, YES, FALSE) \
FLAG( 81, 0, EDX, 6, 1, LEAF81_PAE, YES, FALSE) \
FLAG( 81, 0, EDX, 7, 1, LEAF81_MCE, YES, FALSE) \
FLAG( 81, 0, EDX, 8, 1, LEAF81_CX8, YES, TRUE) \
FLAG( 81, 0, EDX, 9, 1, LEAF81_APIC, ANY, FALSE) \
FLAG( 81, 0, EDX, 11, 1, SYSC, ANY, TRUE) \
FLAG( 81, 0, EDX, 12, 1, LEAF81_MTRR, YES, FALSE) \
FLAG( 81, 0, EDX, 13, 1, LEAF81_PGE, YES, FALSE) \
FLAG( 81, 0, EDX, 14, 1, LEAF81_MCA, YES, FALSE) \
FLAG( 81, 0, EDX, 15, 1, LEAF81_CMOV, YES, TRUE) \
FLAG( 81, 0, EDX, 16, 1, LEAF81_PAT, YES, FALSE) \
FLAG( 81, 0, EDX, 17, 1, LEAF81_PSE36, YES, FALSE) \
FLAG( 81, 0, EDX, 20, 1, NX, YES, FALSE) \
FLAG( 81, 0, EDX, 22, 1, MMXEXT, YES, TRUE) \
FLAG( 81, 0, EDX, 23, 1, LEAF81_MMX, YES, TRUE) \
FLAG( 81, 0, EDX, 24, 1, LEAF81_FXSR, YES, TRUE) \
FLAG( 81, 0, EDX, 25, 1, FFXSR, YES, FALSE) \
FLAG( 81, 0, EDX, 26, 1, PDPE1GB, YES, FALSE) \
FLAG( 81, 0, EDX, 27, 1, RDTSCP, YES, TRUE) \
FLAG( 81, 0, EDX, 29, 1, LM, YES, FALSE) \
FLAG( 81, 0, EDX, 30, 1, 3DNOWPLUS, YES, TRUE) \
FLAG( 81, 0, EDX, 31, 1, 3DNOW, YES, TRUE)
#define CPUID_8A_EDX_11 \
FLAG( 8A, 0, EDX, 11, 1, SVMEDX_RSVD1, NO, FALSE)
#define CPUID_8A_EDX_13_31 \
FIELD( 8A, 0, EDX, 13, 19, SVMEDX_RSVD2, NO, FALSE)
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_8x \
FIELD( 85, 0, EAX, 0, 8, ITLB_ENTRIES_2M4M_PGS, NA, FALSE) \
FIELD( 85, 0, EAX, 8, 8, ITLB_ASSOC_2M4M_PGS, NA, FALSE) \
FIELD( 85, 0, EAX, 16, 8, DTLB_ENTRIES_2M4M_PGS, NA, FALSE) \
FIELD( 85, 0, EAX, 24, 8, DTLB_ASSOC_2M4M_PGS, NA, FALSE) \
FIELD( 85, 0, EBX, 0, 8, ITLB_ENTRIES_4K_PGS, NA, FALSE) \
FIELD( 85, 0, EBX, 8, 8, ITLB_ASSOC_4K_PGS, NA, FALSE) \
FIELD( 85, 0, EBX, 16, 8, DTLB_ENTRIES_4K_PGS, NA, FALSE) \
FIELD( 85, 0, EBX, 24, 8, DTLB_ASSOC_4K_PGS, NA, FALSE) \
FIELD( 85, 0, ECX, 0, 8, L1_DCACHE_LINE_SIZE, NA, FALSE) \
FIELD( 85, 0, ECX, 8, 8, L1_DCACHE_LINES_PER_TAG, NA, FALSE) \
FIELD( 85, 0, ECX, 16, 8, L1_DCACHE_ASSOC, NA, FALSE) \
FIELD( 85, 0, ECX, 24, 8, L1_DCACHE_SIZE, NA, FALSE) \
FIELD( 85, 0, EDX, 0, 8, L1_ICACHE_LINE_SIZE, NA, FALSE) \
FIELD( 85, 0, EDX, 8, 8, L1_ICACHE_LINES_PER_TAG, NA, FALSE) \
FIELD( 85, 0, EDX, 16, 8, L1_ICACHE_ASSOC, NA, FALSE) \
FIELD( 85, 0, EDX, 24, 8, L1_ICACHE_SIZE, NA, FALSE) \
FIELD( 86, 0, EAX, 0, 12, L2_ITLB_ENTRIES_2M4M_PGS, NA, FALSE) \
FIELD( 86, 0, EAX, 12, 4, L2_ITLB_ASSOC_2M4M_PGS, NA, FALSE) \
FIELD( 86, 0, EAX, 16, 12, L2_DTLB_ENTRIES_2M4M_PGS, NA, FALSE) \
FIELD( 86, 0, EAX, 28, 4, L2_DTLB_ASSOC_2M4M_PGS, NA, FALSE) \
FIELD( 86, 0, EBX, 0, 12, L2_ITLB_ENTRIES_4K_PGS, NA, FALSE) \
FIELD( 86, 0, EBX, 12, 4, L2_ITLB_ASSOC_4K_PGS, NA, FALSE) \
FIELD( 86, 0, EBX, 16, 12, L2_DTLB_ENTRIES_4K_PGS, NA, FALSE) \
FIELD( 86, 0, EBX, 28, 4, L2_DTLB_ASSOC_4K_PGS, NA, FALSE) \
FIELD( 86, 0, ECX, 0, 8, L2CACHE_LINE, NA, FALSE) \
FIELD( 86, 0, ECX, 8, 4, L2CACHE_LINE_PER_TAG, NA, FALSE) \
FIELD( 86, 0, ECX, 12, 4, L2CACHE_WAYS, NA, FALSE) \
FIELD( 86, 0, ECX, 16, 16, L2CACHE_SIZE, NA, FALSE) \
FIELD( 86, 0, EDX, 0, 8, L3CACHE_LINE, NA, FALSE) \
FIELD( 86, 0, EDX, 8, 4, L3CACHE_LINE_PER_TAG, NA, FALSE) \
FIELD( 86, 0, EDX, 12, 4, L3CACHE_WAYS, NA, FALSE) \
FIELD( 86, 0, EDX, 18, 14, L3CACHE_SIZE, NA, FALSE) \
FLAG( 87, 0, EDX, 0, 1, TS, NA, FALSE) \
FLAG( 87, 0, EDX, 1, 1, FID, NA, FALSE) \
FLAG( 87, 0, EDX, 2, 1, VID, NA, FALSE) \
FLAG( 87, 0, EDX, 3, 1, TTP, NA, FALSE) \
FLAG( 87, 0, EDX, 4, 1, LEAF87_TM, NA, FALSE) \
FLAG( 87, 0, EDX, 5, 1, STC, NA, FALSE) \
FLAG( 87, 0, EDX, 6, 1, 100MHZSTEPS, NA, FALSE) \
FLAG( 87, 0, EDX, 7, 1, HWPSTATE, NA, FALSE) \
FLAG( 87, 0, EDX, 8, 1, TSC_INVARIANT, NA, FALSE) \
FLAG( 87, 0, EDX, 9, 1, CORE_PERF_BOOST, NA, FALSE) \
FIELD( 88, 0, EAX, 0, 8, PHYS_BITS, NA, FALSE) \
FIELD( 88, 0, EAX, 8, 8, VIRT_BITS, NA, FALSE) \
FIELD( 88, 0, EAX, 16, 8, GUEST_PHYS_ADDR_SZ, NA, FALSE) \
FIELD( 88, 0, ECX, 0, 8, LEAF88_CORE_COUNT, NA, FALSE) \
FIELD( 88, 0, ECX, 12, 4, APICID_COREID_SIZE, NA, FALSE) \
FIELD( 8A, 0, EAX, 0, 8, SVM_REVISION, YES, FALSE) \
FLAG( 8A, 0, EAX, 8, 1, SVM_HYPERVISOR, NO, FALSE) \
FIELD( 8A, 0, EAX, 9, 23, SVMEAX_RSVD, NO, FALSE) \
FIELD( 8A, 0, EBX, 0, 32, SVM_NUM_ASIDS, YES, FALSE) \
FIELD( 8A, 0, ECX, 0, 32, SVMECX_RSVD, NO, FALSE) \
FLAG( 8A, 0, EDX, 0, 1, SVM_NPT, YES, FALSE) \
FLAG( 8A, 0, EDX, 1, 1, SVM_LBR, NO, FALSE) \
FLAG( 8A, 0, EDX, 2, 1, SVM_LOCK, ANY, FALSE) \
FLAG( 8A, 0, EDX, 3, 1, SVM_NRIP, YES, FALSE) \
FLAG( 8A, 0, EDX, 4, 1, SVM_TSC_RATE_MSR, NO, FALSE) \
FLAG( 8A, 0, EDX, 5, 1, SVM_VMCB_CLEAN, YES, FALSE) \
FLAG( 8A, 0, EDX, 6, 1, SVM_FLUSH_BY_ASID, YES, FALSE) \
FLAG( 8A, 0, EDX, 7, 1, SVM_DECODE_ASSISTS, YES, FALSE) \
FIELD( 8A, 0, EDX, 8, 2, SVMEDX_RSVD0, NO, FALSE) \
FLAG( 8A, 0, EDX, 10, 1, SVM_PAUSE_FILTER, NO, FALSE) \
CPUID_8A_EDX_11 \
FLAG( 8A, 0, EDX, 12, 1, SVM_PAUSE_THRESHOLD, NO, FALSE) \
CPUID_8A_EDX_13_31
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, CPL3 */
#define CPUID_FIELD_DATA_LEVEL_81x \
FIELD(819, 0, EAX, 0, 12, L1_ITLB_ENTRIES_1G_PGS, NA, FALSE) \
FIELD(819, 0, EAX, 12, 4, L1_ITLB_ASSOC_1G_PGS, NA, FALSE) \
FIELD(819, 0, EAX, 16, 12, L1_DTLB_ENTRIES_1G_PGS, NA, FALSE) \
FIELD(819, 0, EAX, 28, 4, L1_DTLB_ASSOC_1G_PGS, NA, FALSE) \
FIELD(819, 0, EBX, 0, 12, L2_ITLB_ENTRIES_1G_PGS, NA, FALSE) \
FIELD(819, 0, EBX, 12, 4, L2_ITLB_ASSOC_1G_PGS, NA, FALSE) \
FIELD(819, 0, EBX, 16, 12, L2_DTLB_ENTRIES_1G_PGS, NA, FALSE) \
FIELD(819, 0, EBX, 28, 4, L2_DTLB_ASSOC_1G_PGS, NA, FALSE) \
FLAG( 81A, 0, EAX, 0, 1, FP128, NA, FALSE) \
FLAG( 81A, 0, EAX, 1, 1, MOVU, NA, FALSE) \
FLAG( 81B, 0, EAX, 0, 1, IBS_FFV, NA, FALSE) \
FLAG( 81B, 0, EAX, 1, 1, IBS_FETCHSAM, NA, FALSE) \
FLAG( 81B, 0, EAX, 2, 1, IBS_OPSAM, NA, FALSE) \
FLAG( 81B, 0, EAX, 3, 1, RW_OPCOUNT, NA, FALSE) \
FLAG( 81B, 0, EAX, 4, 1, OPCOUNT, NA, FALSE) \
FLAG( 81B, 0, EAX, 5, 1, BRANCH_TARGET_ADDR, NA, FALSE) \
FLAG( 81B, 0, EAX, 6, 1, OPCOUNT_EXT, NA, FALSE) \
FLAG( 81B, 0, EAX, 7, 1, RIP_INVALID_CHECK, NA, FALSE) \
FLAG( 81C, 0, EAX, 0, 1, LWP_AVAIL, NA, FALSE) \
FLAG( 81C, 0, EAX, 1, 1, LWP_VAL_AVAIL, NA, FALSE) \
FLAG( 81C, 0, EAX, 2, 1, LWP_IRE_AVAIL, NA, FALSE) \
FLAG( 81C, 0, EAX, 3, 1, LWP_BRE_AVAIL, NA, FALSE) \
FLAG( 81C, 0, EAX, 4, 1, LWP_DME_AVAIL, NA, FALSE) \
FLAG( 81C, 0, EAX, 5, 1, LWP_CNH_AVAIL, NA, FALSE) \
FLAG( 81C, 0, EAX, 6, 1, LWP_RNH_AVAIL, NA, FALSE) \
FLAG( 81C, 0, EAX, 31, 1, LWP_INT_AVAIL, NA, FALSE) \
FIELD(81C, 0, EBX, 0, 8, LWP_CB_SIZE, NA, FALSE) \
FIELD(81C, 0, EBX, 8, 8, LWP_EVENT_SIZE, NA, FALSE) \
FIELD(81C, 0, EBX, 16, 8, LWP_MAX_EVENTS, NA, FALSE) \
FIELD(81C, 0, EBX, 24, 8, LWP_EVENT_OFFSET, NA, FALSE) \
FIELD(81C, 0, ECX, 0, 4, LWP_LATENCY_MAX, NA, FALSE) \
FLAG( 81C, 0, ECX, 5, 1, LWP_DATA_ADDR_VALID, NA, FALSE) \
FIELD(81C, 0, ECX, 6, 3, LWP_LATENCY_ROUND, NA, FALSE) \
FIELD(81C, 0, ECX, 9, 7, LWP_VERSION, NA, FALSE) \
FIELD(81C, 0, ECX, 16, 8, LWP_MIN_BUF_SIZE, NA, FALSE) \
FLAG( 81C, 0, ECX, 28, 1, LWP_BRANCH_PRED, NA, FALSE) \
FLAG( 81C, 0, ECX, 29, 1, LWP_IP_FILTERING, NA, FALSE) \
FLAG( 81C, 0, ECX, 30, 1, LWP_CACHE_LEVEL, NA, FALSE) \
FLAG( 81C, 0, ECX, 31, 1, LWP_CACHE_LATENCY, NA, FALSE) \
FLAG( 81C, 0, EDX, 0, 1, LWP_SUPPORTED, NA, FALSE) \
FLAG( 81C, 0, EDX, 1, 1, LWP_VAL_SUPPORTED, NA, FALSE) \
FLAG( 81C, 0, EDX, 2, 1, LWP_IRE_SUPPORTED, NA, FALSE) \
FLAG( 81C, 0, EDX, 3, 1, LWP_BRE_SUPPORTED, NA, FALSE) \
FLAG( 81C, 0, EDX, 4, 1, LWP_DME_SUPPORTED, NA, FALSE) \
FLAG( 81C, 0, EDX, 5, 1, LWP_CNH_SUPPORTED, NA, FALSE) \
FLAG( 81C, 0, EDX, 6, 1, LWP_RNH_SUPPORTED, NA, FALSE) \
FLAG( 81C, 0, EDX, 31, 1, LWP_INT_SUPPORTED, NA, FALSE) \
FIELD(81D, 0, EAX, 0, 5, LEAF81D_CACHE_TYPE, NA, FALSE) \
FIELD(81D, 0, EAX, 5, 3, LEAF81D_CACHE_LEVEL, NA, FALSE) \
FLAG( 81D, 0, EAX, 8, 1, LEAF81D_CACHE_SELF_INIT, NA, FALSE) \
FLAG( 81D, 0, EAX, 9, 1, LEAF81D_CACHE_FULLY_ASSOC, NA, FALSE) \
FIELD(81D, 0, EAX, 14, 12, LEAF81D_NUM_SHARING_CACHE, NA, FALSE) \
FIELD(81D, 0, EBX, 0, 12, LEAF81D_CACHE_LINE_SIZE, NA, FALSE) \
FIELD(81D, 0, EBX, 12, 10, LEAF81D_CACHE_PHYS_PARTITIONS, NA, FALSE) \
FIELD(81D, 0, EBX, 22, 10, LEAF81D_CACHE_WAYS, NA, FALSE) \
FIELD(81D, 0, ECX, 0, 32, LEAF81D_CACHE_NUM_SETS, NA, FALSE) \
FLAG( 81D, 0, EDX, 0, 1, LEAF81D_CACHE_WBINVD, NA, FALSE) \
FLAG( 81D, 0, EDX, 1, 1, LEAF81D_CACHE_INCLUSIVE, NA, FALSE) \
FIELD(81E, 0, EAX, 0, 32, EXTENDED_APICID, NA, FALSE) \
FIELD(81E, 0, EBX, 0, 8, COMPUTE_UNIT_ID, NA, FALSE) \
FIELD(81E, 0, EBX, 8, 2, CORES_PER_COMPUTE_UNIT, NA, FALSE) \
FIELD(81E, 0, ECX, 0, 8, NODEID_VAL, NA, FALSE) \
FIELD(81E, 0, ECX, 8, 3, NODES_PER_PKG, NA, FALSE)
#define INTEL_CPUID_FIELD_DATA
#define AMD_CPUID_FIELD_DATA
#define CPUID_FIELD_DATA \
CPUID_FIELD_DATA_LEVEL_0 \
CPUID_FIELD_DATA_LEVEL_1 \
CPUID_FIELD_DATA_LEVEL_4 \
CPUID_FIELD_DATA_LEVEL_5 \
CPUID_FIELD_DATA_LEVEL_6 \
CPUID_FIELD_DATA_LEVEL_7 \
CPUID_FIELD_DATA_LEVEL_A \
CPUID_FIELD_DATA_LEVEL_B \
CPUID_FIELD_DATA_LEVEL_D \
CPUID_FIELD_DATA_LEVEL_400 \
CPUID_FIELD_DATA_LEVEL_410 \
CPUID_FIELD_DATA_LEVEL_80 \
CPUID_FIELD_DATA_LEVEL_81 \
CPUID_FIELD_DATA_LEVEL_8x \
CPUID_FIELD_DATA_LEVEL_81x \
INTEL_CPUID_FIELD_DATA \
AMD_CPUID_FIELD_DATA
/*
 * Define all field and flag values as an enum. The result is a full
 * set of values taken from the table above in the form:
 *
 * CPUID_FEATURE_ID<level><reg>_<name>  == mask for feature
 * CPUID_ID<level><reg>_<name>_MASK     == mask for field
 * CPUID_ID<level><reg>_<name>_SHIFT    == offset of field
 *
 * e.g. - CPUID_FEATURE_ID1EDX_FPU      = 0x1
 *      - CPUID_ID88EAX_VIRT_BITS_MASK  = 0xff00
 *      - CPUID_ID88EAX_VIRT_BITS_SHIFT = 8
 *
 * Note: The FEATURE/MASK definitions must use some gymnastics to get
 * around a warning when shifting left by 32.
 */
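/*
 * VMW_BIT_MASK(size) yields the low "size" bits set.  The two-step
 * shift avoids shifting a 32-bit value by 32 when size == 32, which
 * would be undefined behavior and is the warning referred to above.
 */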
#define VMW_BIT_MASK(shift) (((1 << (shift - 1)) << 1) - 1)
#define FIELD(lvl, ecxIn, reg, bitpos, size, name, s, c3) \
CPUID_ID##lvl##reg##_##name##_SHIFT = bitpos, \
CPUID_ID##lvl##reg##_##name##_MASK = VMW_BIT_MASK(size) << bitpos, \
CPUID_FEATURE_ID##lvl##reg##_##name = CPUID_ID##lvl##reg##_##name##_MASK, \
CPUID_INTERNAL_SHIFT_##name = bitpos, \
CPUID_INTERNAL_MASK_##name = VMW_BIT_MASK(size) << bitpos, \
CPUID_INTERNAL_REG_##name = CPUID_REG_##reg, \
CPUID_INTERNAL_EAXIN_##name = CPUID_LEVEL_VAL_##lvl, \
CPUID_INTERNAL_ECXIN_##name = ecxIn,
#define FLAG FIELD
enum {
/* Define data for every CPUID field we have */
CPUID_FIELD_DATA
};
#undef VMW_BIT_MASK
#undef FIELD
#undef FLAG
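/*
 * For illustration, the table entry FLAG(1, 0, EDX, 0, 1, FPU, YES, TRUE)
 * expands inside the enum above to:
 *
 *    CPUID_ID1EDX_FPU_SHIFT   = 0,
 *    CPUID_ID1EDX_FPU_MASK    = 0x1,
 *    CPUID_FEATURE_ID1EDX_FPU = 0x1,
 *    CPUID_INTERNAL_SHIFT_FPU = 0,
 *    CPUID_INTERNAL_MASK_FPU  = 0x1,
 *    CPUID_INTERNAL_REG_FPU   = CPUID_REG_EDX,
 *    CPUID_INTERNAL_EAXIN_FPU = CPUID_LEVEL_VAL_1,
 *    CPUID_INTERNAL_ECXIN_FPU = 0,
 *
 * The CPUID_INTERNAL_* names drive the accessor macros defined below.
 */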
/* Level D subleaf 1 eax XSAVEOPT */
#define CPUID_COMMON_IDDsub1EAX_XSAVEOPT 1
/*
* Legal CPUID config file mask characters. For a description of the
* cpuid masking system, please see:
*
* http://vmweb.vmware.com/~mts/cgi-bin/view.cgi/Apps/CpuMigrationChecks
*/
#define CPUID_MASK_HIDE_CHR '0'
#define CPUID_MASK_HIDE_STR "0"
#define CPUID_MASK_FORCE_CHR '1'
#define CPUID_MASK_FORCE_STR "1"
#define CPUID_MASK_PASS_CHR '-'
#define CPUID_MASK_PASS_STR "-"
#define CPUID_MASK_TRUE_CHR 'T'
#define CPUID_MASK_TRUE_STR "T"
#define CPUID_MASK_FALSE_CHR 'F'
#define CPUID_MASK_FALSE_STR "F"
#define CPUID_MASK_IGNORE_CHR 'X'
#define CPUID_MASK_IGNORE_STR "X"
#define CPUID_MASK_HOST_CHR 'H'
#define CPUID_MASK_HOST_STR "H"
#define CPUID_MASK_RSVD_CHR 'R'
#define CPUID_MASK_RSVD_STR "R"
#define CPUID_MASK_INSTALL_CHR 'I'
#define CPUID_MASK_INSTALL_STR "I"
/*
* When LM is disabled, we overlay the following masks onto the
* guest's default masks. Any level that is not defined below should
* be treated as all "-"s
*/
#define CPT_ID1ECX_LM_DISABLED "----:----:----:----:--0-:----:----:----"
#define CPT_ID81EDX_LM_DISABLED "--0-:----:----:----:----:----:----:----"
#define CPT_ID81ECX_LM_DISABLED "----:----:----:----:----:----:----:---0"
#define CPT_GET_LM_DISABLED_MASK(lvl, reg) \
((lvl == 1 && reg == CPUID_REG_ECX) ? CPT_ID1ECX_LM_DISABLED : \
(lvl == 0x80000001 && reg == CPUID_REG_ECX) ? CPT_ID81ECX_LM_DISABLED : \
(lvl == 0x80000001 && reg == CPUID_REG_EDX) ? CPT_ID81EDX_LM_DISABLED : \
NULL)
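/*
 * Mask strings are read most-significant bit first: the leftmost
 * character is bit 31 and ':' separates nibbles.  The overlays above
 * therefore clear CMPXCHG16B (ID1ECX bit 13), LM (ID81EDX bit 29) and
 * LAHF64 (ID81ECX bit 0), passing every other bit through unchanged.
 */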
/*
* CPUID_MASK --
* CPUID_SHIFT --
* CPUID_ISSET --
* CPUID_GET --
* CPUID_SET --
* CPUID_CLEAR --
* CPUID_SETTO --
*
* Accessor macros for all CPUID consts/fields/flags. Level and reg are not
* required, but are used to force compile-time asserts which help verify that
* the flag is being used on the right CPUID input and result register.
*
* Note: ASSERT_ON_COMPILE is duplicated rather than factored into its own
* macro, because token concatenation does not work as expected if an input is
* #defined (e.g. APIC) when macros are nested. Also, compound statements
* within parentheses are a GCC extension, so we must use runtime asserts with
* other compilers.
*/
#if defined(__GNUC__) && !defined(__clang__)
#define CPUID_MASK(eaxIn, reg, flag) \
({ \
ASSERT_ON_COMPILE(eaxIn == CPUID_INTERNAL_EAXIN_##flag && \
CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##flag); \
CPUID_INTERNAL_MASK_##flag; \
})
#define CPUID_SHIFT(eaxIn, reg, flag) \
({ \
ASSERT_ON_COMPILE(eaxIn == CPUID_INTERNAL_EAXIN_##flag && \
CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##flag); \
CPUID_INTERNAL_SHIFT_##flag; \
})
#define CPUID_ISSET(eaxIn, reg, flag, data) \
({ \
ASSERT_ON_COMPILE(eaxIn == CPUID_INTERNAL_EAXIN_##flag && \
CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##flag); \
(((data) & CPUID_INTERNAL_MASK_##flag) != 0); \
})
#define CPUID_GET(eaxIn, reg, field, data) \
({ \
ASSERT_ON_COMPILE(eaxIn == CPUID_INTERNAL_EAXIN_##field && \
CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##field); \
(((uint32)(data) & CPUID_INTERNAL_MASK_##field) >> \
CPUID_INTERNAL_SHIFT_##field); \
})
#else
/*
* CPUIDCheck --
*
* Return val after verifying parameters.
*/
static INLINE uint32
CPUIDCheck(uint32 eaxIn, uint32 eaxInCheck,
CpuidReg reg, CpuidReg regCheck, uint32 val)
{
ASSERT(eaxIn == eaxInCheck && reg == regCheck);
return val;
}
#define CPUID_MASK(eaxIn, reg, flag) \
CPUIDCheck((uint32)eaxIn, CPUID_INTERNAL_EAXIN_##flag, \
CPUID_REG_##reg, (CpuidReg)CPUID_INTERNAL_REG_##flag, \
CPUID_INTERNAL_MASK_##flag)
#define CPUID_SHIFT(eaxIn, reg, flag) \
CPUIDCheck((uint32)eaxIn, CPUID_INTERNAL_EAXIN_##flag, \
CPUID_REG_##reg, (CpuidReg)CPUID_INTERNAL_REG_##flag, \
CPUID_INTERNAL_SHIFT_##flag)
#define CPUID_ISSET(eaxIn, reg, flag, data) \
(CPUIDCheck((uint32)eaxIn, CPUID_INTERNAL_EAXIN_##flag, \
CPUID_REG_##reg, (CpuidReg)CPUID_INTERNAL_REG_##flag, \
CPUID_INTERNAL_MASK_##flag & (data)) != 0)
#define CPUID_GET(eaxIn, reg, field, data) \
CPUIDCheck((uint32)eaxIn, CPUID_INTERNAL_EAXIN_##field, \
CPUID_REG_##reg, (CpuidReg)CPUID_INTERNAL_REG_##field, \
((uint32)(data) & CPUID_INTERNAL_MASK_##field) >> \
CPUID_INTERNAL_SHIFT_##field)
#endif
#define CPUID_SET(eaxIn, reg, flag, dataPtr) \
do { \
ASSERT_ON_COMPILE( \
(uint32)eaxIn == (uint32)CPUID_INTERNAL_EAXIN_##flag && \
CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##flag); \
*(dataPtr) |= CPUID_INTERNAL_MASK_##flag; \
} while (0)
#define CPUID_CLEAR(eaxIn, reg, flag, dataPtr) \
do { \
ASSERT_ON_COMPILE( \
(uint32)eaxIn == (uint32)CPUID_INTERNAL_EAXIN_##flag && \
CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##flag); \
*(dataPtr) &= ~CPUID_INTERNAL_MASK_##flag; \
} while (0)
#define CPUID_SETTO(eaxIn, reg, field, dataPtr, val) \
do { \
uint32 _v = val; \
uint32 *_d = dataPtr; \
ASSERT_ON_COMPILE( \
(uint32)eaxIn == (uint32)CPUID_INTERNAL_EAXIN_##field && \
CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##field); \
*_d = (*_d & ~CPUID_INTERNAL_MASK_##field) | \
(_v << CPUID_INTERNAL_SHIFT_##field); \
ASSERT(_v == (*_d & CPUID_INTERNAL_MASK_##field) >> \
CPUID_INTERNAL_SHIFT_##field); \
} while (0)
#define CPUID_SETTO_SAFE(eaxIn, reg, field, dataPtr, val) \
do { \
uint32 _v = val & \
(CPUID_INTERNAL_MASK_##field >> CPUID_INTERNAL_SHIFT_##field); \
uint32 *_d = dataPtr; \
ASSERT_ON_COMPILE( \
(uint32)eaxIn == (uint32)CPUID_INTERNAL_EAXIN_##field && \
CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##field); \
*_d = (*_d & ~CPUID_INTERNAL_MASK_##field) | \
(_v << CPUID_INTERNAL_SHIFT_##field); \
} while (0)
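/*
 * Usage sketch (illustrative; GetCpuidLeaf() is a hypothetical helper
 * that executes CPUID for the given leaf and fills a CPUIDRegs):
 *
 *    CPUIDRegs regs;
 *    uint32 family;
 *    GetCpuidLeaf(1, &regs);
 *    if (CPUID_ISSET(1, ECX, SSE3, regs.ecx)) {
 *       // SSE3 is available
 *    }
 *    family = CPUID_GET(1, EAX, FAMILY, regs.eax);
 *    CPUID_CLEAR(1, ECX, VMX, &regs.ecx);   // hide VMX in this view
 *
 * The leaf and register arguments are redundant with the flag name,
 * but the compile-time asserts reject mismatches such as
 * CPUID_ISSET(1, EDX, SSE3, ...).
 */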
/*
* Definitions of various fields' values and more complicated
* macros/functions for reading cpuid fields.
*/
#define CPUID_FAMILY_EXTENDED 15
/* Effective Intel CPU Families */
#define CPUID_FAMILY_486 4
#define CPUID_FAMILY_P5 5
#define CPUID_FAMILY_P6 6
#define CPUID_FAMILY_P4 15
/* Effective AMD CPU Families */
#define CPUID_FAMILY_5x86 4
#define CPUID_FAMILY_K5 5
#define CPUID_FAMILY_K6 5
#define CPUID_FAMILY_K7 6
#define CPUID_FAMILY_K8 15
#define CPUID_FAMILY_K8L 16
#define CPUID_FAMILY_K8MOBILE 17
#define CPUID_FAMILY_LLANO 18
#define CPUID_FAMILY_BOBCAT 20
#define CPUID_FAMILY_BULLDOZER 21 // Bulldozer Piledriver Steamroller
#define CPUID_FAMILY_KYOTO 22
/* Effective VIA CPU Families */
#define CPUID_FAMILY_C7 6
/* Intel model information */
#define CPUID_MODEL_PPRO 1
#define CPUID_MODEL_PII_03 3
#define CPUID_MODEL_PII_05 5
#define CPUID_MODEL_CELERON_06 6
#define CPUID_MODEL_PM_09 9
#define CPUID_MODEL_PM_0D 13
#define CPUID_MODEL_PM_0E 14 // Yonah / Sossaman
#define CPUID_MODEL_CORE_0F 15 // Conroe / Merom
#define CPUID_MODEL_CORE_17 0x17 // Penryn
#define CPUID_MODEL_NEHALEM_1A 0x1a // Nehalem / Gainestown
#define CPUID_MODEL_ATOM_1C 0x1c // Silverthorne / Diamondville
#define CPUID_MODEL_CORE_1D 0x1d // Dunnington
#define CPUID_MODEL_NEHALEM_1E 0x1e // Lynnfield
#define CPUID_MODEL_NEHALEM_1F 0x1f // Havendale
#define CPUID_MODEL_NEHALEM_25 0x25 // Westmere / Clarkdale
#define CPUID_MODEL_SANDYBRIDGE_2A 0x2a // Sandybridge (desktop/mobile)
#define CPUID_MODEL_SANDYBRIDGE_2D 0x2d // Sandybridge-EP
#define CPUID_MODEL_NEHALEM_2C 0x2c // Westmere-EP
#define CPUID_MODEL_NEHALEM_2E 0x2e // Nehalem-EX
#define CPUID_MODEL_NEHALEM_2F 0x2f // Westmere-EX
#define CPUID_MODEL_SANDYBRIDGE_3A 0x3a // Ivy Bridge
#define CPUID_MODEL_SANDYBRIDGE_3E 0x3e // Ivy Bridge-EP
#define CPUID_MODEL_BROADWELL_3D 0x3d // Broadwell-Ult
#define CPUID_MODEL_HASWELL_3F 0x3f // Haswell EN/EP/EX
#define CPUID_MODEL_HASWELL_46 0x46 // CrystalWell
#define CPUID_MODEL_ATOM_4D 0x4d // Avoton
#define CPUID_MODEL_HASWELL_3C 0x3c // Haswell DT
#define CPUID_MODEL_HASWELL_45 0x45 // Haswell Ultrathin
#define CPUID_MODEL_PIII_07 7
#define CPUID_MODEL_PIII_08 8
#define CPUID_MODEL_PIII_0A 10
/* AMD model information */
#define CPUID_MODEL_BARCELONA_02 0x02 // Barcelona (Opteron & Phenom)
#define CPUID_MODEL_SHANGHAI_04 0x04 // Shanghai RB
#define CPUID_MODEL_SHANGHAI_05 0x05 // Shanghai BL
#define CPUID_MODEL_SHANGHAI_06 0x06 // Shanghai DA
#define CPUID_MODEL_ISTANBUL_MAGNY_08 0x08 // Istanbul (6 core) & Magny-cours (12) HY
#define CPUID_MODEL_ISTANBUL_MAGNY_09 0x09 // HY - G34 package
#define CPUID_MODEL_PHAROAH_HOUND_0A 0x0A // Pharoah Hound
#define CPUID_MODEL_PILEDRIVER_1F 0x1F // Max piledriver model defined in BKDG
#define CPUID_MODEL_PILEDRIVER_10 0x10 // family == CPUID_FAMILY_BULLDOZER
#define CPUID_MODEL_PILEDRIVER_02 0x02 // family == CPUID_FAMILY_BULLDOZER
#define CPUID_MODEL_OPTERON_REVF_41 0x41 // family == CPUID_FAMILY_K8
#define CPUID_MODEL_KYOTO_00 0x00 // family == CPUID_FAMILY_KYOTO
/* VIA model information */
#define CPUID_MODEL_NANO 15 // Isaiah
/*
*----------------------------------------------------------------------
*
* CPUID_IsVendor{AMD,Intel,VIA} --
*
* Determines if the vendor string in cpuid id0 is from {AMD,Intel,VIA}.
*
* Results:
* True iff vendor string is CPUID_{AMD,INTEL,VIA}_VENDOR_STRING
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static INLINE Bool
CPUID_IsRawVendor(CPUIDRegs *id0, const char* vendor)
{
// hard to get strcmp() in some environments, so do it in the raw
return (id0->ebx == *(const uint32 *) (vendor + 0) &&
id0->ecx == *(const uint32 *) (vendor + 4) &&
id0->edx == *(const uint32 *) (vendor + 8));
}
static INLINE Bool
CPUID_IsVendorAMD(CPUIDRegs *id0)
{
return CPUID_IsRawVendor(id0, CPUID_AMD_VENDOR_STRING);
}
static INLINE Bool
CPUID_IsVendorIntel(CPUIDRegs *id0)
{
return CPUID_IsRawVendor(id0, CPUID_INTEL_VENDOR_STRING);
}
static INLINE Bool
CPUID_IsVendorVIA(CPUIDRegs *id0)
{
return CPUID_IsRawVendor(id0, CPUID_VIA_VENDOR_STRING);
}
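/*
 * Usage sketch (illustrative; GetCpuidLeaf() is a hypothetical helper
 * that executes CPUID with %eax = 0 and fills a CPUIDRegs):
 *
 *    CPUIDRegs id0;
 *    GetCpuidLeaf(0, &id0);
 *    if (CPUID_IsVendorAMD(&id0)) {
 *       // apply AMD-specific handling, e.g. the fence checks below
 *    }
 */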
static INLINE uint32
CPUID_EFFECTIVE_FAMILY(uint32 v) /* %eax from CPUID with %eax=1. */
{
uint32 f = CPUID_GET(1, EAX, FAMILY, v);
return f != CPUID_FAMILY_EXTENDED ? f : f +
CPUID_GET(1, EAX, EXTENDED_FAMILY, v);
}
/* Normally only used when FAMILY==CPUID_FAMILY_EXTENDED, but Intel is
* now using the extended model field for FAMILY==CPUID_FAMILY_P6 to
* refer to the newer Core2 CPUs
*/
static INLINE uint32
CPUID_EFFECTIVE_MODEL(uint32 v) /* %eax from CPUID with %eax=1. */
{
uint32 m = CPUID_GET(1, EAX, MODEL, v);
uint32 em = CPUID_GET(1, EAX, EXTENDED_MODEL, v);
return m + (em << 4);
}
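/*
 * Worked example: %eax = 0x000206c2 from leaf 1 (a Westmere-EP part)
 * decodes as stepping 2, model 0xc, family 0x6, extended model 0x2,
 * extended family 0.  Since the family is not CPUID_FAMILY_EXTENDED,
 * the effective family is 6; the effective model is
 * 0xc + (0x2 << 4) = 0x2c, i.e. CPUID_MODEL_NEHALEM_2C.
 */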
/*
* Notice that CPUID families for Intel and AMD overlap. The following macros
* should only be used AFTER the manufacturer has been established (through
* the use of CPUID standard function 0).
*/
static INLINE Bool
CPUID_FAMILY_IS_486(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_486;
}
static INLINE Bool
CPUID_FAMILY_IS_P5(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_P5;
}
static INLINE Bool
CPUID_FAMILY_IS_P6(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_P6;
}
static INLINE Bool
CPUID_FAMILY_IS_PENTIUM4(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_P4;
}
/*
* Intel Pentium M processors are Yonah/Sossaman or an older P-M
*/
static INLINE Bool
CPUID_UARCH_IS_PENTIUM_M(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is Intel. */
return CPUID_FAMILY_IS_P6(v) &&
(CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_PM_09 ||
CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_PM_0D ||
CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_PM_0E);
}
/*
* Intel Core processors are Merom, Conroe, Woodcrest, Clovertown,
* Penryn, Dunnington, Kentsfield, Yorktown, Harpertown, ........
*/
static INLINE Bool
CPUID_UARCH_IS_CORE(uint32 v) // IN: %eax from CPUID with %eax=1.
{
uint32 model = CPUID_EFFECTIVE_MODEL(v);
/* Assumes the CPU manufacturer is Intel. */
return CPUID_FAMILY_IS_P6(v) &&
model >= CPUID_MODEL_CORE_0F &&
(model < CPUID_MODEL_NEHALEM_1A ||
model == CPUID_MODEL_CORE_1D);
}
/*
* Intel Nehalem processors are: Nehalem, Gainestown, Lynnfield, Clarkdale.
*/
static INLINE Bool
CPUID_UARCH_IS_NEHALEM(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is Intel. */
uint32 effectiveModel = CPUID_EFFECTIVE_MODEL(v);
return CPUID_FAMILY_IS_P6(v) &&
(effectiveModel == CPUID_MODEL_NEHALEM_1A ||
effectiveModel == CPUID_MODEL_NEHALEM_1E ||
effectiveModel == CPUID_MODEL_NEHALEM_1F ||
effectiveModel == CPUID_MODEL_NEHALEM_25 ||
effectiveModel == CPUID_MODEL_NEHALEM_2C ||
effectiveModel == CPUID_MODEL_NEHALEM_2E ||
effectiveModel == CPUID_MODEL_NEHALEM_2F);
}
static INLINE Bool
CPUID_UARCH_IS_SANDYBRIDGE(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is Intel. */
uint32 effectiveModel = CPUID_EFFECTIVE_MODEL(v);
return CPUID_FAMILY_IS_P6(v) &&
(effectiveModel == CPUID_MODEL_SANDYBRIDGE_2A ||
effectiveModel == CPUID_MODEL_SANDYBRIDGE_2D ||
effectiveModel == CPUID_MODEL_SANDYBRIDGE_3E ||
effectiveModel == CPUID_MODEL_SANDYBRIDGE_3A);
}
static INLINE Bool
CPUID_MODEL_IS_BROADWELL(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is Intel. */
uint32 effectiveModel = CPUID_EFFECTIVE_MODEL(v);
return CPUID_FAMILY_IS_P6(v) &&
(effectiveModel == CPUID_MODEL_BROADWELL_3D
);
}
static INLINE Bool
CPUID_MODEL_IS_HASWELL(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is Intel. */
uint32 effectiveModel = CPUID_EFFECTIVE_MODEL(v);
return CPUID_FAMILY_IS_P6(v) &&
(effectiveModel == CPUID_MODEL_HASWELL_3C ||
effectiveModel == CPUID_MODEL_HASWELL_3F ||
effectiveModel == CPUID_MODEL_HASWELL_45 ||
effectiveModel == CPUID_MODEL_HASWELL_46);
}
static INLINE Bool
CPUID_UARCH_IS_HASWELL(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is Intel. */
return CPUID_FAMILY_IS_P6(v) &&
(CPUID_MODEL_IS_BROADWELL(v) || CPUID_MODEL_IS_HASWELL(v));
}
static INLINE Bool
CPUID_MODEL_IS_CENTERTON(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is Intel. */
return CPUID_FAMILY_IS_P6(v) &&
CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_ATOM_1C;
}
static INLINE Bool
CPUID_MODEL_IS_AVOTON(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is Intel. */
return CPUID_FAMILY_IS_P6(v) &&
CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_ATOM_4D;
}
static INLINE Bool
CPUID_MODEL_IS_WESTMERE(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is Intel. */
uint32 effectiveModel = CPUID_EFFECTIVE_MODEL(v);
return CPUID_FAMILY_IS_P6(v) &&
(effectiveModel == CPUID_MODEL_NEHALEM_25 || // Clarkdale
effectiveModel == CPUID_MODEL_NEHALEM_2C || // Westmere-EP
effectiveModel == CPUID_MODEL_NEHALEM_2F); // Westmere-EX
}
static INLINE Bool
CPUID_MODEL_IS_SANDYBRIDGE(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is Intel. */
uint32 effectiveModel = CPUID_EFFECTIVE_MODEL(v);
return CPUID_FAMILY_IS_P6(v) &&
(effectiveModel == CPUID_MODEL_SANDYBRIDGE_2A ||
effectiveModel == CPUID_MODEL_SANDYBRIDGE_2D);
}
static INLINE Bool
CPUID_MODEL_IS_IVYBRIDGE(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is Intel. */
uint32 effectiveModel = CPUID_EFFECTIVE_MODEL(v);
return CPUID_FAMILY_IS_P6(v) && (
effectiveModel == CPUID_MODEL_SANDYBRIDGE_3E ||
effectiveModel == CPUID_MODEL_SANDYBRIDGE_3A);
}
static INLINE Bool
CPUID_FAMILY_IS_K7(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_K7;
}
static INLINE Bool
CPUID_FAMILY_IS_K8(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_K8;
}
static INLINE Bool
CPUID_FAMILY_IS_K8EXT(uint32 eax)
{
/*
* We check for this pattern often enough that it's
* worth a separate function, for syntactic sugar.
*/
return CPUID_FAMILY_IS_K8(eax) &&
CPUID_GET(1, EAX, EXTENDED_MODEL, eax) != 0;
}
static INLINE Bool
CPUID_FAMILY_IS_K8L(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_K8L ||
CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_LLANO;
}
static INLINE Bool
CPUID_FAMILY_IS_LLANO(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_LLANO;
}
static INLINE Bool
CPUID_FAMILY_IS_K8MOBILE(uint32 eax)
{
/* Essentially a K8 (not K8L) part, but with mobile features. */
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_K8MOBILE;
}
static INLINE Bool
CPUID_FAMILY_IS_K8STAR(uint32 eax)
{
/*
* Read function name as "K8*", as in wildcard.
* Matches K8 or K8L or K8MOBILE
*/
return CPUID_FAMILY_IS_K8(eax) || CPUID_FAMILY_IS_K8L(eax) ||
CPUID_FAMILY_IS_K8MOBILE(eax);
}
static INLINE Bool
CPUID_FAMILY_IS_BOBCAT(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_BOBCAT;
}
static INLINE Bool
CPUID_FAMILY_IS_BULLDOZER(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_BULLDOZER;
}
static INLINE Bool
CPUID_FAMILY_IS_KYOTO(uint32 eax)
{
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_KYOTO;
}
/*
* AMD Barcelona (of either Opteron or Phenom kind).
*/
static INLINE Bool
CPUID_MODEL_IS_BARCELONA(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is AMD. */
return CPUID_EFFECTIVE_FAMILY(v) == CPUID_FAMILY_K8L &&
CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_BARCELONA_02;
}
static INLINE Bool
CPUID_MODEL_IS_SHANGHAI(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is AMD. */
return CPUID_EFFECTIVE_FAMILY(v) == CPUID_FAMILY_K8L &&
(CPUID_MODEL_SHANGHAI_04 <= CPUID_EFFECTIVE_MODEL(v) &&
CPUID_EFFECTIVE_MODEL(v) <= CPUID_MODEL_SHANGHAI_06);
}
static INLINE Bool
CPUID_MODEL_IS_ISTANBUL_MAGNY(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is AMD. */
return CPUID_EFFECTIVE_FAMILY(v) == CPUID_FAMILY_K8L &&
(CPUID_MODEL_ISTANBUL_MAGNY_08 <= CPUID_EFFECTIVE_MODEL(v) &&
CPUID_EFFECTIVE_MODEL(v) <= CPUID_MODEL_ISTANBUL_MAGNY_09);
}
static INLINE Bool
CPUID_MODEL_IS_PHAROAH_HOUND(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is AMD. */
return CPUID_EFFECTIVE_FAMILY(v) == CPUID_FAMILY_K8L &&
CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_PHAROAH_HOUND_0A;
}
static INLINE Bool
CPUID_MODEL_IS_BULLDOZER(uint32 eax)
{
/*
* Bulldozer comprises the models of family 0x15 below 0x10, excluding
* Piledriver model 0x02.
*/
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_BULLDOZER &&
CPUID_EFFECTIVE_MODEL(eax) < CPUID_MODEL_PILEDRIVER_10 &&
CPUID_EFFECTIVE_MODEL(eax) != CPUID_MODEL_PILEDRIVER_02;
}
static INLINE Bool
CPUID_MODEL_IS_PILEDRIVER(uint32 eax)
{
/* Piledriver is models 0x02 & 0x10 of family 0x15 (so far). */
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_BULLDOZER &&
((CPUID_EFFECTIVE_MODEL(eax) >= CPUID_MODEL_PILEDRIVER_10 &&
CPUID_EFFECTIVE_MODEL(eax) <= CPUID_MODEL_PILEDRIVER_1F) ||
CPUID_EFFECTIVE_MODEL(eax) == CPUID_MODEL_PILEDRIVER_02);
}
static INLINE Bool
CPUID_MODEL_IS_KYOTO(uint32 eax)
{
/* Kyoto is models 0x00 of family 0x16 (so far). */
return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_KYOTO &&
CPUID_EFFECTIVE_MODEL(eax) == CPUID_MODEL_KYOTO_00;
}
#define CPUID_TYPE_PRIMARY 0
#define CPUID_TYPE_OVERDRIVE 1
#define CPUID_TYPE_SECONDARY 2
#define CPUID_INTEL_ID4EAX_LEAF4_CACHE_TYPE_NULL 0
#define CPUID_INTEL_ID4EAX_LEAF4_CACHE_TYPE_DATA 1
#define CPUID_INTEL_ID4EAX_LEAF4_CACHE_TYPE_INST 2
#define CPUID_INTEL_ID4EAX_LEAF4_CACHE_TYPE_UNIF 3
#define CPUID_LEAF4_CACHE_TYPE_NULL 0
#define CPUID_LEAF4_CACHE_TYPE_DATA 1
#define CPUID_LEAF4_CACHE_TYPE_INST 2
#define CPUID_LEAF4_CACHE_TYPE_UNIF 3
#define CPUID_LEAF4_CACHE_INDEXING_DIRECT 0
#define CPUID_LEAF4_CACHE_INDEXING_COMPLEX 1
#define CPUID_INTEL_ID4EAX_LEAF4_CACHE_SELF_INIT 0x00000100
#define CPUID_INTEL_ID4EAX_LEAF4_CACHE_FULLY_ASSOC 0x00000200
#define CPUID_LEAF4_CACHE_SELF_INIT 0x00000100
#define CPUID_LEAF4_CACHE_FULLY_ASSOC 0x00000200
#define CPUID_INTEL_IDBECX_LEVEL_TYPE_INVALID 0
#define CPUID_INTEL_IDBECX_LEVEL_TYPE_SMT 1
#define CPUID_INTEL_IDBECX_LEVEL_TYPE_CORE 2
#define CPUID_TOPOLOGY_LEVEL_TYPE_INVALID 0
#define CPUID_TOPOLOGY_LEVEL_TYPE_SMT 1
#define CPUID_TOPOLOGY_LEVEL_TYPE_CORE 2
/*
* For certain AMD processors, an lfence instruction is necessary at various
* places to ensure ordering.
*/
static INLINE Bool
CPUID_VendorRequiresFence(CpuidVendor vendor)
{
return vendor == CPUID_VENDOR_AMD;
}
static INLINE Bool
CPUID_VersionRequiresFence(uint32 version)
{
return CPUID_EFFECTIVE_FAMILY(version) == CPUID_FAMILY_K8 &&
CPUID_EFFECTIVE_MODEL(version) < 0x40;
}
static INLINE Bool
CPUID_ID0RequiresFence(CPUIDRegs *id0)
{
if (id0->eax == 0) {
return FALSE;
}
return CPUID_IsVendorAMD(id0);
}
static INLINE Bool
CPUID_ID1RequiresFence(CPUIDRegs *id1)
{
return CPUID_VersionRequiresFence(id1->eax);
}
static INLINE Bool
CPUID_RequiresFence(CpuidVendor vendor, // IN
uint32 version) // IN: %eax from CPUID with %eax=1.
{
return CPUID_VendorRequiresFence(vendor) &&
CPUID_VersionRequiresFence(version);
}
/*
* The following low-level functions compute the number of
* cores per cpu. They should be used cautiously because
* they do not necessarily work on all types of CPUs.
* High-level functions that are correct for all CPUs are
* available elsewhere: see lib/cpuidInfo/cpuidInfo.c.
*/
static INLINE uint32
CPUID_IntelCoresPerPackage(uint32 v) /* %eax from CPUID with %eax=4 and %ecx=0. */
{
// Note: This is not guaranteed to work on older Intel CPUs.
return 1 + CPUID_GET(4, EAX, LEAF4_CORE_COUNT, v);
}
static INLINE uint32
CPUID_AMDCoresPerPackage(uint32 v) /* %ecx from CPUID with %eax=0x80000008. */
{
// Note: This is not guaranteed to work on older AMD CPUs.
return 1 + CPUID_GET(0x80000008, ECX, LEAF88_CORE_COUNT, v);
}
/*
* Hypervisor CPUID space is 0x400000XX.
*/
static INLINE Bool
CPUID_IsHypervisorLevel(uint32 level)
{
return (level & 0xffffff00) == 0x40000000;
}
/*
*----------------------------------------------------------------------
*
* CPUID_LevelUsesEcx --
*
* Returns TRUE for leaves that support input ECX != 0 (subleaves).
*
*----------------------------------------------------------------------
*/
static INLINE Bool
CPUID_LevelUsesEcx(uint32 level)
{
return level == 4 || level == 7 || level == 0xb || level == 0xd ||
level == 0x8000001d;
}
/*
*----------------------------------------------------------------------
*
* CPUID_IsValid*Subleaf --
*
* Functions to determine whether a subleaf is valid for the level specified.
*
*----------------------------------------------------------------------
*/
static INLINE Bool
CPUID_IsValidBSubleaf(uint32 ebx) // IN: %ebx = cpuid.b.sublevel.ebx
{
return ebx != 0;
}
static INLINE Bool
CPUID_IsValid4Subleaf(uint32 eax) // IN: %eax = cpuid.4.sublevel.eax
{
return eax != 0;
}
static INLINE Bool
CPUID_IsValid7Subleaf(uint32 eax, uint32 subleaf) // IN: %eax = cpuid.7.0.eax
{
/*
* cpuid.7.0.eax is the max ecx (subleaf) index
*/
return subleaf <= eax;
}
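/*
 * Enumeration sketch (illustrative; GetCpuidSubleaf() is a hypothetical
 * helper that executes CPUID with the given %eax and %ecx inputs):
 * walking the leaf 4 cache descriptors until the first invalid subleaf.
 *
 *    uint32 subleaf;
 *    CPUIDRegs regs;
 *    for (subleaf = 0; ; subleaf++) {
 *       GetCpuidSubleaf(4, subleaf, &regs);
 *       if (!CPUID_IsValid4Subleaf(regs.eax)) {
 *          break;
 *       }
 *       // regs now describes one cache (type, level, ways, sets, ...)
 *    }
 */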
/*
*----------------------------------------------------------------------
*
* CPUID_IsValidDSubleaf --
*
* It is the caller's responsibility to determine if the processor
* supports XSAVE and therefore has D sub-leaves.
*
*----------------------------------------------------------------------
*/
static INLINE Bool
CPUID_IsValidDSubleaf(uint32 subleaf) // IN: subleaf to check
{
return subleaf <= 63;
}
/*
*----------------------------------------------------------------------
*
* CPUID_SupportsMsrPlatformInfo --
*
* Uses vendor and cpuid.1.0.eax to determine if the processor
* supports MSR_PLATFORM_INFO.
*
*----------------------------------------------------------------------
*/
static INLINE Bool
CPUID_SupportsMsrPlatformInfo(CpuidVendor vendor, uint32 version)
{
return vendor == CPUID_VENDOR_INTEL &&
(CPUID_UARCH_IS_NEHALEM(version) ||
CPUID_UARCH_IS_HASWELL(version) ||
CPUID_MODEL_IS_AVOTON(version) ||
CPUID_UARCH_IS_SANDYBRIDGE(version));
}
#endif
vmci-only/shared/compat_interrupt.h 0000444 0000000 0000000 00000003573 12522066073 016520 0 ustar root root /*********************************************************
* Copyright (C) 2003 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_INTERRUPT_H__
# define __COMPAT_INTERRUPT_H__
#include <linux/interrupt.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 69)
/*
* We cannot just define irqreturn_t, as some 2.4.x kernels have
* typedef void irqreturn_t; for "increasing" backward compatibility.
*/
typedef void compat_irqreturn_t;
#define COMPAT_IRQ_NONE
#define COMPAT_IRQ_HANDLED
#define COMPAT_IRQ_RETVAL(x)
#else
typedef irqreturn_t compat_irqreturn_t;
#define COMPAT_IRQ_NONE IRQ_NONE
#define COMPAT_IRQ_HANDLED IRQ_HANDLED
#define COMPAT_IRQ_RETVAL(x) IRQ_RETVAL(x)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
#define COMPAT_IRQF_DISABLED SA_INTERRUPT
#define COMPAT_IRQF_SHARED SA_SHIRQ
#else
#define COMPAT_IRQF_DISABLED IRQF_DISABLED
#define COMPAT_IRQF_SHARED IRQF_SHARED
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
#define COMPAT_IRQ_HANDLER_ARGS(irq, devp) (int irq, void *devp, struct pt_regs *regs)
#else
#define COMPAT_IRQ_HANDLER_ARGS(irq, devp) (int irq, void *devp)
#endif
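/*
 * Usage sketch (illustrative; MyDeviceISR() and DeviceRaisedIntr() are
 * hypothetical): a handler written against these wrappers compiles both
 * before and after the 2.6.20 prototype change, and before 2.5.69 the
 * empty COMPAT_IRQ_* definitions turn the returns into plain "return;".
 *
 *    static compat_irqreturn_t
 *    MyDeviceISR COMPAT_IRQ_HANDLER_ARGS(irq, devp)
 *    {
 *       if (!DeviceRaisedIntr(devp)) {
 *          return COMPAT_IRQ_NONE;
 *       }
 *       // acknowledge and handle the interrupt ...
 *       return COMPAT_IRQ_HANDLED;
 *    }
 */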
#endif /* __COMPAT_INTERRUPT_H__ */
vmci-only/shared/vmci_call_defs.h 0000444 0000000 0000000 00000024063 12522066074 016051 0 ustar root root /*********************************************************
* Copyright (C) 2006-2007 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef _VMCI_CALL_DEFS_H_
#define _VMCI_CALL_DEFS_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMKMOD
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_DISTRIBUTE
#include "includeCheck.h"
#include "vm_basic_types.h"
#include "vmci_defs.h"
/*
* All structs here are an integral size of their largest member, i.e. a
* struct with at least one 8-byte member has a size that is a multiple of
* 8, and a struct whose largest member is 4 bytes wide has a size that is
* a multiple of 4. This is because the Windows compiler (CL) enforces
* this rule, while 32-bit gcc does not: e.g. 32-bit gcc can misalign an
* 8-byte member if it is preceded by a 4-byte member.
*/
/*
* Base struct for vmci datagrams.
*/
typedef struct VMCIDatagram {
VMCIHandle dst;
VMCIHandle src;
uint64 payloadSize;
} VMCIDatagram;
/*
* VMCI_FLAG_WELLKNOWN_DG_HND creates a well-known handle instead of a
* per-context handle. VMCI_FLAG_DG_DELAYED_CB defers datagram delivery,
* so that the datagram callback is invoked in a delayed context (not
* interrupt context).
*/
#define VMCI_FLAG_DG_NONE 0
#define VMCI_FLAG_WELLKNOWN_DG_HND 0x1
#define VMCI_FLAG_ANYCID_DG_HND 0x2
#define VMCI_FLAG_DG_DELAYED_CB 0x4
/* Event callback should fire in a delayed context (not interrupt context.) */
#define VMCI_FLAG_EVENT_NONE 0
#define VMCI_FLAG_EVENT_DELAYED_CB 0x1
/*
* Maximum supported size of a VMCI datagram for routable datagrams.
* Datagrams going to the hypervisor are allowed to be larger.
*/
#define VMCI_MAX_DG_SIZE (17 * 4096)
#define VMCI_MAX_DG_PAYLOAD_SIZE (VMCI_MAX_DG_SIZE - sizeof(VMCIDatagram))
#define VMCI_DG_PAYLOAD(_dg) (void *)((char *)(_dg) + sizeof(VMCIDatagram))
#define VMCI_DG_HEADERSIZE sizeof(VMCIDatagram)
#define VMCI_DG_SIZE(_dg) (VMCI_DG_HEADERSIZE + (size_t)(_dg)->payloadSize)
#define VMCI_DG_SIZE_ALIGNED(_dg) ((VMCI_DG_SIZE(_dg) + 7) & (size_t)CONST64U(0xfffffffffffffff8))
#define VMCI_MAX_DATAGRAM_QUEUE_SIZE (VMCI_MAX_DG_SIZE * 2)
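/*
 * Worked example: a datagram with payloadSize = 10 occupies
 * VMCI_DG_SIZE = 24 + 10 = 34 bytes (assuming the usual 8-byte
 * VMCIHandle, sizeof(VMCIDatagram) is 24), and VMCI_DG_SIZE_ALIGNED
 * rounds that up to the next multiple of 8, i.e. 40 bytes.
 */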
/*
* We allow at least 1024 more event datagrams from the hypervisor past the
* normally allowed datagrams pending for a given context. We define this
* limit on event datagrams from the hypervisor to guard against a DoS attack
* from a malicious VM which could repeatedly attach to and detach from a queue
* pair, causing events to be queued at the destination VM. However, the rate
* at which such events can be generated is small since it requires a VM exit
* and handling of queue pair attach/detach call at the hypervisor. Event
* datagrams may be queued up at the destination VM if it has interrupts
* disabled or if it is not draining events for some other reason. 1024
* datagrams is a grossly conservative estimate of the time for which
* interrupts may be disabled in the destination VM, but at the same time does
* not exacerbate the memory pressure problem on the host by much (size of each
* event datagram is small).
*/
#define VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE \
(VMCI_MAX_DATAGRAM_QUEUE_SIZE + \
1024 * (sizeof(VMCIDatagram) + sizeof(VMCIEventData_Max)))
/*
* Struct for sending VMCI_DATAGRAM_REQUEST_MAP and
* VMCI_DATAGRAM_REMOVE_MAP datagrams. Struct size is 32 bytes. All
* fields in struct are aligned to their natural alignment. These
* datagrams are obsoleted by the removal of VM to VM communication.
*/
typedef struct VMCIDatagramWellKnownMapMsg {
VMCIDatagram hdr;
VMCIId wellKnownID;
uint32 _pad;
} VMCIDatagramWellKnownMapMsg;
/*
* Struct used for querying, via VMCI_RESOURCES_QUERY, the availability of
* hypervisor resources.
* Struct size is 16 bytes. All fields in struct are aligned to their natural
* alignment.
*/
typedef struct VMCIResourcesQueryHdr {
VMCIDatagram hdr;
uint32 numResources;
uint32 _padding;
} VMCIResourcesQueryHdr;
/*
* Convenience struct for negotiating vectors. Must match layout of
* VMCIResourceQueryHdr minus the VMCIDatagram header.
*/
typedef struct VMCIResourcesQueryMsg {
uint32 numResources;
uint32 _padding;
VMCI_Resource resources[1];
} VMCIResourcesQueryMsg;
/*
* The maximum number of resources that can be queried using
* VMCI_RESOURCE_QUERY is 31, as the result is encoded in the lower 31
* bits of a positive return value. Negative values are reserved for
* errors.
*/
#define VMCI_RESOURCE_QUERY_MAX_NUM 31
/* Maximum size for the VMCI_RESOURCE_QUERY request. */
#define VMCI_RESOURCE_QUERY_MAX_SIZE (sizeof(VMCIResourcesQueryHdr) \
 + VMCI_RESOURCE_QUERY_MAX_NUM * sizeof(VMCI_Resource))
/*
* Struct used for setting the notification bitmap. All fields in
* struct are aligned to their natural alignment.
*/
typedef struct VMCINotifyBitmapSetMsg {
VMCIDatagram hdr;
PPN bitmapPPN;
uint32 _pad;
} VMCINotifyBitmapSetMsg;
/*
* Struct used for linking a doorbell handle with an index in the
* notify bitmap. All fields in struct are aligned to their natural
* alignment.
*/
typedef struct VMCIDoorbellLinkMsg {
VMCIDatagram hdr;
VMCIHandle handle;
uint64 notifyIdx;
} VMCIDoorbellLinkMsg;
/*
* Struct used for unlinking a doorbell handle from an index in the
* notify bitmap. All fields in struct are aligned to their natural
* alignment.
*/
typedef struct VMCIDoorbellUnlinkMsg {
VMCIDatagram hdr;
VMCIHandle handle;
} VMCIDoorbellUnlinkMsg;
/*
* Struct used for generating a notification on a doorbell handle. All
* fields in struct are aligned to their natural alignment.
*/
typedef struct VMCIDoorbellNotifyMsg {
VMCIDatagram hdr;
VMCIHandle handle;
} VMCIDoorbellNotifyMsg;
/*
* This struct is used to contain data for events. Size of this struct is a
* multiple of 8 bytes, and all fields are aligned to their natural alignment.
*/
typedef struct VMCI_EventData {
VMCI_Event event; /* 4 bytes. */
uint32 _pad;
/*
* Event payload is put here.
*/
} VMCI_EventData;
/* Callback needed for correctly waiting on events. */
typedef int
(*VMCIDatagramRecvCB)(void *clientData, // IN: client data for handler
VMCIDatagram *msg); // IN:
/*
* We use the following inline function to access the payload data associated
* with an event data.
*/
static INLINE void *
VMCIEventDataPayload(VMCI_EventData *evData) // IN:
{
return (void *)((char *)evData + sizeof *evData);
}
/*
* Define the different VMCI_EVENT payload data types here. All structs must
* be a multiple of 8 bytes, and fields must be aligned to their natural
* alignment.
*/
typedef struct VMCIEventPayload_Context {
VMCIId contextID; /* 4 bytes. */
uint32 _pad;
} VMCIEventPayload_Context;
typedef struct VMCIEventPayload_QP {
VMCIHandle handle; /* QueuePair handle. */
VMCIId peerId; /* Context id of attaching/detaching VM. */
uint32 _pad;
} VMCIEventPayload_QP;
/*
* We define the following struct to get the size of the maximum event data
* the hypervisor may send to the guest. If adding a new event payload type
* above, add it to the following struct too (inside the union).
*/
typedef struct VMCIEventData_Max {
VMCI_EventData eventData;
union {
VMCIEventPayload_Context contextPayload;
VMCIEventPayload_QP qpPayload;
} evDataPayload;
} VMCIEventData_Max;
/*
* Struct used for VMCI_EVENT_SUBSCRIBE/UNSUBSCRIBE and VMCI_EVENT_HANDLER
* messages. Struct size is 32 bytes. All fields in struct are aligned to
* their natural alignment.
*/
typedef struct VMCIEventMsg {
VMCIDatagram hdr;
VMCI_EventData eventData; /* Has event type and payload. */
/*
* Payload gets put here.
*/
} VMCIEventMsg;
/*
* We use the following inline function to access the payload data associated
* with an event message.
*/
static INLINE void *
VMCIEventMsgPayload(VMCIEventMsg *eMsg) // IN:
{
return VMCIEventDataPayload(&eMsg->eventData);
}
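/*
 * Usage sketch (illustrative; the event constant is assumed to come
 * from vmci_defs.h): a recv callback can reach the typed payload of an
 * event message like this.
 *
 *    VMCIEventMsg *eMsg = (VMCIEventMsg *)dg;   // dg: incoming datagram
 *    if (eMsg->eventData.event == VMCI_EVENT_QP_PEER_ATTACH) {
 *       VMCIEventPayload_QP *qp = VMCIEventMsgPayload(eMsg);
 *       // qp->handle and qp->peerId identify the attaching peer
 *    }
 */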
/* Flags for VMCI QueuePair API. */
#define VMCI_QPFLAG_ATTACH_ONLY 0x1 /* Fail alloc if QP not created by peer. */
#define VMCI_QPFLAG_LOCAL 0x2 /* Only allow attaches from local context. */
#define VMCI_QPFLAG_NONBLOCK 0x4 /* Host won't block when guest is quiesced. */
#define VMCI_QPFLAG_PINNED 0x8 /* Keep all data pages pinned. This flag */
/* must be combined with NONBLOCK. */
/* For asymmetric queuepairs, update as new flags are added. */
#define VMCI_QP_ASYMM (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)
#define VMCI_QP_ASYMM_PEER (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QP_ASYMM)
/* Update the following (bitwise OR flags) while adding new flags. */
#define VMCI_QP_ALL_FLAGS (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QPFLAG_LOCAL | \
VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)
/*
* Structs used for QueuePair alloc and detach messages. We align fields of
* these structs to 64bit boundaries.
*/
typedef struct VMCIQueuePairAllocMsg {
VMCIDatagram hdr;
VMCIHandle handle;
VMCIId peer; /* 32bit field. */
uint32 flags;
uint64 produceSize;
uint64 consumeSize;
uint64 numPPNs;
/* List of PPNs placed here. */
} VMCIQueuePairAllocMsg;
typedef struct VMCIQueuePairDetachMsg {
VMCIDatagram hdr;
VMCIHandle handle;
} VMCIQueuePairDetachMsg;
#endif
vmci-only/shared/community_source.h 0000444 0000000 0000000 00000003742 12522066074 016524 0 ustar root root /*********************************************************
* Copyright (C) 2009 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* community_source.h --
*
* Macros for excluding source code from community.
*/
#ifndef _COMMUNITY_SOURCE_H_
#define _COMMUNITY_SOURCE_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_VMKDRIVERS
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMIROM
#include "includeCheck.h"
/*
* Convenience macro for COMMUNITY_SOURCE
*/
#undef EXCLUDE_COMMUNITY_SOURCE
#ifdef COMMUNITY_SOURCE
#define EXCLUDE_COMMUNITY_SOURCE(x)
#else
#define EXCLUDE_COMMUNITY_SOURCE(x) x
#endif
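/*
 * Usage sketch (function name hypothetical): code wrapped in
 * EXCLUDE_COMMUNITY_SOURCE() is compiled only in non-Community Source
 * builds and expands to nothing otherwise:
 *
 *    EXCLUDE_COMMUNITY_SOURCE(InternalOnlyInit();)
 */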
#undef COMMUNITY_SOURCE_AMD_SECRET
#if !defined(COMMUNITY_SOURCE) || defined(AMD_SOURCE)
/*
* It's ok to include AMD_SECRET source code for non-Community Source,
* or for drops directed at AMD.
*/
#define COMMUNITY_SOURCE_AMD_SECRET
#endif
#undef COMMUNITY_SOURCE_INTEL_SECRET
#if !defined(COMMUNITY_SOURCE) || defined(INTEL_SOURCE)
/*
* It's ok to include INTEL_SECRET source code for non-Community Source,
* or for drops directed at Intel.
*/
#define COMMUNITY_SOURCE_INTEL_SECRET
#endif
#endif
vmci-only/shared/compat_spinlock.h 0000444 0000000 0000000 00000003377 12522066073 016310 0 ustar root root /*********************************************************
* Copyright (C) 2005 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_SPINLOCK_H__
# define __COMPAT_SPINLOCK_H__
#include <linux/spinlock.h>
/*
 * Preempt support was added during the 2.5.x development cycle and later
 * backported to 2.4.x. In the 2.4.x backport these definitions live in
 * linux/spinlock.h, which is why we put them here (in 2.6.x they are
 * defined in linux/preempt.h, which is included by linux/spinlock.h).
 */
#ifdef CONFIG_PREEMPT
#define compat_preempt_disable() preempt_disable()
#define compat_preempt_enable() preempt_enable()
#else
#define compat_preempt_disable() do { } while (0)
#define compat_preempt_enable() do { } while (0)
#endif
/* Some older kernels - 2.6.10 and earlier - lack DEFINE_SPINLOCK */
#ifndef DEFINE_SPINLOCK
#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
#endif
/* Same goes for DEFINE_RWLOCK */
#ifndef DEFINE_RWLOCK
#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
#endif
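/*
 * Usage sketch: a caller that must not be preempted (for example while
 * touching per-CPU state) can bracket the work with the compat wrappers
 * above; on kernels without CONFIG_PREEMPT they compile to nothing. The
 * helper and callback names are hypothetical.
 */
static inline void
compat_preempt_demo(void (*workFn)(void *data), void *data)
{
   compat_preempt_disable();
   workFn(data); /* Runs without being migrated to another CPU. */
   compat_preempt_enable();
}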
#endif /* __COMPAT_SPINLOCK_H__ */
vmci-only/shared/driver-config.h 0000444 0000000 0000000 00000004314 12522066073 015651 0 ustar root root /*********************************************************
* Copyright (C) 1998 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* Sets the proper defines from the Linux header files
*
* This file must be included before the inclusion of any kernel header file,
* with the exception of linux/autoconf.h and linux/version.h --hpreg
*/
#ifndef __VMX_CONFIG_H__
#define __VMX_CONFIG_H__
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMKDRIVERS
#include "includeCheck.h"
#include "compat_version.h"
#include "compat_autoconf.h"
/*
* We rely on Kernel Module support. Check here.
*/
#ifndef CONFIG_MODULES
# error "No Module support in this kernel. Please configure with CONFIG_MODULES"
#endif
/*
* 2.2 kernels still use __SMP__ (derived from CONFIG_SMP
* in the main Makefile), so we do it here.
*/
#ifdef CONFIG_SMP
# define __SMP__ 1
#endif
#if defined(CONFIG_MODVERSIONS) && defined(KERNEL_2_1)
# if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,60)
/*
* MODVERSIONS might be already defined when using kernel's Makefiles.
*/
# ifndef MODVERSIONS
# define MODVERSIONS
# endif
# include <linux/modversions.h>
# endif
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
/*
* Force the uintptr_t definition to come from linux/types.h instead of vm_basic_types.h.
*/
# include <linux/types.h>
# define _STDINT_H 1
#endif
#ifndef __KERNEL__
# define __KERNEL__
#endif
#endif
vmci-only/shared/compat_scsi.h 0000444 0000000 0000000 00000003024 12522066073 015414 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_SCSI_H__
# define __COMPAT_SCSI_H__
/* The scsi_bufflen() API appeared somewhere in time --hpreg */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)
# define scsi_bufflen(cmd) ((cmd)->request_bufflen)
# define scsi_sg_count(cmd) ((cmd)->use_sg)
# define scsi_sglist(cmd) ((struct scatterlist *)(cmd)->request_buffer)
# define scsi_set_resid(cmd, _resid) ((cmd)->resid = _resid)
#endif
/*
 * Using scsi_sglist to access the request buffer looks strange, so we
 * define this macro instead. Later kernels put all SCSI data in sglists,
 * since that simplifies passing buffers.
 */
#define scsi_request_buffer(cmd) scsi_sglist(cmd)
#endif /* __COMPAT_SCSI_H__ */
vmci-only/shared/backdoor_types.h 0000444 0000000 0000000 00000006727 12522066074 016136 0 ustar root root /*********************************************************
* Copyright (C) 1999 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* backdoor_types.h --
*
* Type definitions for backdoor interaction code.
*/
#ifndef _BACKDOOR_TYPES_H_
#define _BACKDOOR_TYPES_H_
#ifndef VM_I386
#error The backdoor protocol is only supported on x86 architectures.
#endif
/*
* These #defines are intended for defining register structs as part of
* existing named unions. If the union should encapsulate the register
* (and nothing else), use DECLARE_REG_NAMED_STRUCT defined below.
*/
#define DECLARE_REG32_STRUCT \
struct { \
uint16 low; \
uint16 high; \
} halfs; \
uint32 word
#define DECLARE_REG64_STRUCT \
DECLARE_REG32_STRUCT; \
struct { \
uint32 low; \
uint32 high; \
} words; \
uint64 quad
#ifndef VM_X86_64
#define DECLARE_REG_STRUCT DECLARE_REG32_STRUCT
#else
#define DECLARE_REG_STRUCT DECLARE_REG64_STRUCT
#endif
#define DECLARE_REG_NAMED_STRUCT(_r) \
union { DECLARE_REG_STRUCT; } _r
/*
* Some of the registers are expressed by semantic name, because if they were
* expressed as register structs declared above, we could only address them
* by fixed size (half-word, word, quad, etc.) instead of by varying size
* (size_t, uintptr_t).
*
* To be cleaner, these registers are expressed ONLY by semantic name,
* rather than by a union of the semantic name and a register struct.
*/
typedef union {
struct {
DECLARE_REG_NAMED_STRUCT(ax);
size_t size; /* Register bx. */
DECLARE_REG_NAMED_STRUCT(cx);
DECLARE_REG_NAMED_STRUCT(dx);
DECLARE_REG_NAMED_STRUCT(si);
DECLARE_REG_NAMED_STRUCT(di);
} in;
struct {
DECLARE_REG_NAMED_STRUCT(ax);
DECLARE_REG_NAMED_STRUCT(bx);
DECLARE_REG_NAMED_STRUCT(cx);
DECLARE_REG_NAMED_STRUCT(dx);
DECLARE_REG_NAMED_STRUCT(si);
DECLARE_REG_NAMED_STRUCT(di);
} out;
} Backdoor_proto;
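/*
 * Illustrative sketch: a register declared via DECLARE_REG_NAMED_STRUCT can
 * be addressed at several widths through the union members, e.g.:
 *
 *    Backdoor_proto bp;
 *    bp.in.ax.word = BDOOR_MAGIC;       // 32-bit view ("VMXh").
 *    low = bp.in.ax.halfs.low;          // Same storage, 16-bit view.
 *
 * (BDOOR_MAGIC is defined in backdoor_def.h, not here; on x86-64 the quad
 * member is available as well.)
 */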
typedef union {
struct {
DECLARE_REG_NAMED_STRUCT(ax);
DECLARE_REG_NAMED_STRUCT(bx);
size_t size; /* Register cx. */
DECLARE_REG_NAMED_STRUCT(dx);
uintptr_t srcAddr; /* Register si. */
uintptr_t dstAddr; /* Register di. */
DECLARE_REG_NAMED_STRUCT(bp);
} in;
struct {
DECLARE_REG_NAMED_STRUCT(ax);
DECLARE_REG_NAMED_STRUCT(bx);
DECLARE_REG_NAMED_STRUCT(cx);
DECLARE_REG_NAMED_STRUCT(dx);
DECLARE_REG_NAMED_STRUCT(si);
DECLARE_REG_NAMED_STRUCT(di);
DECLARE_REG_NAMED_STRUCT(bp);
} out;
} Backdoor_proto_hb;
MY_ASSERTS(BACKDOOR_STRUCT_SIZES,
ASSERT_ON_COMPILE(sizeof(Backdoor_proto) == 6 * sizeof(uintptr_t));
ASSERT_ON_COMPILE(sizeof(Backdoor_proto_hb) == 7 * sizeof(uintptr_t));
)
#undef DECLARE_REG_STRUCT
#endif /* _BACKDOOR_TYPES_H_ */
vmci-only/shared/vm_basic_asm_x86_64.h 0000444 0000000 0000000 00000035733 12522066074 016566 0 ustar root root /*********************************************************
* Copyright (C) 1998-2004 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vm_basic_asm_x86_64.h
*
* Basic x86_64 asm macros.
*/
#ifndef _VM_BASIC_ASM_X86_64_H_
#define _VM_BASIC_ASM_X86_64_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMCORE
#include "includeCheck.h"
#ifndef VM_X86_64
#error "This file is x86-64 only!"
#endif
#ifdef _MSC_VER
#ifdef __cplusplus
extern "C" {
#endif
uint64 _umul128(uint64 multiplier, uint64 multiplicand,
uint64 *highProduct);
int64 _mul128(int64 multiplier, int64 multiplicand,
int64 *highProduct);
uint64 __shiftright128(uint64 lowPart, uint64 highPart, uint8 shift);
#ifdef __cplusplus
}
#endif
#pragma intrinsic(_umul128, _mul128, __shiftright128)
#endif // _MSC_VER
/*
* GET_CURRENT_RIP
*
* Return an approximation of the current instruction pointer. For example for a
* function call
* foo.c
* L123: Foo(GET_CURRENT_RIP())
*
* The return value from GET_CURRENT_RIP will point a debugger to L123.
*/
#if defined(__GNUC__)
#define GET_CURRENT_RIP() ({ \
void *__rip; \
asm("lea 0(%%rip), %0;\n\t" \
: "=r" (__rip)); \
__rip; \
})
#endif
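/*
 * Usage sketch (logging function hypothetical): capture the approximate
 * call site for later symbolization by a debugger.
 *
 *    void *here = GET_CURRENT_RIP();
 *    ExampleLog("checkpoint at %p\n", here);
 */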
/*
* FXSAVE/FXRSTOR
* save/restore SIMD/MMX fpu state
*
* The pointer passed in must be 16-byte aligned.
*
* Intel and AMD processors behave differently w.r.t. fxsave/fxrstor. Intel
* processors unconditionally save the exception pointer state (instruction
* ptr., data ptr., and error instruction opcode). FXSAVE_ES1 and FXRSTOR_ES1
* work correctly for Intel processors.
*
* AMD processors only save the exception pointer state if ES=1. This leads to a
 * security hole whereby one process/VM can inspect the state of another
 * process/VM. The AMD-recommended workaround involves clobbering the exception pointer
* state unconditionally, and this is implemented in FXRSTOR_AMD_ES0. Note that
* FXSAVE_ES1 will only save the exception pointer state for AMD processors if
* ES=1.
*
* The workaround (FXRSTOR_AMD_ES0) only costs 1 cycle more than just doing an
* fxrstor, on both AMD Opteron and Intel Core CPUs.
*/
#if defined(__GNUC__)
static INLINE void
FXSAVE_ES1(void *save)
{
__asm__ __volatile__ ("fxsaveq %0 \n" : "=m" (*(uint8 *)save) : : "memory");
}
static INLINE void
FXSAVE_COMPAT_ES1(void *save)
{
__asm__ __volatile__ ("fxsave %0 \n" : "=m" (*(uint8 *)save) : : "memory");
}
static INLINE void
FXRSTOR_ES1(const void *load)
{
__asm__ __volatile__ ("fxrstorq %0 \n"
: : "m" (*(const uint8 *)load) : "memory");
}
static INLINE void
FXRSTOR_COMPAT_ES1(const void *load)
{
__asm__ __volatile__ ("fxrstor %0 \n"
: : "m" (*(const uint8 *)load) : "memory");
}
static INLINE void
FXRSTOR_AMD_ES0(const void *load)
{
uint64 dummy = 0;
__asm__ __volatile__
("fnstsw %%ax \n" // Grab x87 ES bit
"bt $7,%%ax \n" // Test ES bit
"jnc 1f \n" // Jump if ES=0
"fnclex \n" // ES=1. Clear it so fild doesn't trap
"1: \n"
"ffree %%st(7) \n" // Clear tag bit - avoid poss. stack overflow
"fildl %0 \n" // Dummy Load from "safe address" changes all
// x87 exception pointers.
"fxrstorq %1 \n"
:
: "m" (dummy), "m" (*(const uint8 *)load)
: "ax", "memory");
}
#endif /* __GNUC__ */
/*
* XSAVE/XRSTOR
* save/restore GSSE/SIMD/MMX fpu state
*
* The pointer passed in must be 64-byte aligned.
* See above comment for more information.
*/
#if defined(__GNUC__) && (defined(VMM) || defined(VMKERNEL) || defined(FROBOS))
static INLINE void
XSAVE_ES1(void *save, uint64 mask)
{
#if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ == 1
__asm__ __volatile__ (
".byte 0x48, 0x0f, 0xae, 0x21 \n"
:
: "c" ((uint8 *)save), "a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
#else
__asm__ __volatile__ (
"xsaveq %0 \n"
: "=m" (*(uint8 *)save)
: "a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
#endif
}
static INLINE void
XSAVE_COMPAT_ES1(void *save, uint64 mask)
{
#if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ == 1
__asm__ __volatile__ (
".byte 0x0f, 0xae, 0x21 \n"
:
: "c" ((uint8 *)save), "a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
#else
__asm__ __volatile__ (
"xsave %0 \n"
: "=m" (*(uint8 *)save)
: "a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
#endif
}
static INLINE void
XSAVEOPT_ES1(void *save, uint64 mask)
{
__asm__ __volatile__ (
".byte 0x48, 0x0f, 0xae, 0x31 \n"
:
: "c" ((uint8 *)save), "a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
}
static INLINE void
XRSTOR_ES1(const void *load, uint64 mask)
{
#if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ == 1
__asm__ __volatile__ (
".byte 0x48, 0x0f, 0xae, 0x29 \n"
:
: "c" ((const uint8 *)load),
"a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
#else
__asm__ __volatile__ (
"xrstorq %0 \n"
:
: "m" (*(const uint8 *)load),
"a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
#endif
}
static INLINE void
XRSTOR_COMPAT_ES1(const void *load, uint64 mask)
{
#if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ == 1
__asm__ __volatile__ (
".byte 0x0f, 0xae, 0x29 \n"
:
: "c" ((const uint8 *)load),
"a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
#else
__asm__ __volatile__ (
"xrstor %0 \n"
:
: "m" (*(const uint8 *)load),
"a" ((uint32)mask), "d" ((uint32)(mask >> 32))
: "memory");
#endif
}
static INLINE void
XRSTOR_AMD_ES0(const void *load, uint64 mask)
{
uint64 dummy = 0;
__asm__ __volatile__
("fnstsw %%ax \n" // Grab x87 ES bit
"bt $7,%%ax \n" // Test ES bit
"jnc 1f \n" // Jump if ES=0
"fnclex \n" // ES=1. Clear it so fild doesn't trap
"1: \n"
"ffree %%st(7) \n" // Clear tag bit - avoid poss. stack overflow
"fildl %0 \n" // Dummy Load from "safe address" changes all
// x87 exception pointers.
"mov %%ebx, %%eax \n"
#if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ == 1
".byte 0x48, 0x0f, 0xae, 0x29 \n"
:
: "m" (dummy), "c" ((const uint8 *)load),
"b" ((uint32)mask), "d" ((uint32)(mask >> 32))
#else
"xrstorq %1 \n"
:
: "m" (dummy), "m" (*(const uint8 *)load),
"b" ((uint32)mask), "d" ((uint32)(mask >> 32))
#endif
: "eax", "memory");
}
#endif /* __GNUC__ */
/*
* XTEST
* Return TRUE if processor is in transaction region.
*
*/
#if defined(__GNUC__) && (defined(VMM) || defined(VMKERNEL) || defined(FROBOS))
static INLINE Bool
xtest(void)
{
uint8 al;
__asm__ __volatile__(".byte 0x0f, 0x01, 0xd6 # xtest \n"
"setnz %%al\n"
: "=a"(al) : : "cc");
return al;
}
#endif /* __GNUC__ */
/*
*-----------------------------------------------------------------------------
*
* Mul64x6464 --
*
* Unsigned integer by fixed point multiplication:
* result = multiplicand * multiplier >> shift
*
* Unsigned 64-bit integer multiplicand.
* Unsigned 64-bit fixed point multiplier, represented as
* multiplier >> shift, where shift < 64.
* Unsigned 64-bit integer product.
*
* Implementation:
* Multiply 64x64 bits to yield a full 128-bit product.
* Shift result in RDX:RAX right by "shift".
* Return the low-order 64 bits of the above.
*
* Result:
* Product
*
*-----------------------------------------------------------------------------
*/
#if defined(__GNUC__)
static INLINE uint64
Mul64x6464(uint64 multiplicand,
uint64 multiplier,
uint32 shift)
{
uint64 result, dummy;
__asm__("mulq %3 \n\t"
"shrdq %b4, %1, %0 \n\t"
: "=a" (result),
"=d" (dummy)
: "0" (multiplier),
"rm" (multiplicand),
"c" (shift)
: "cc");
return result;
}
#elif defined(_MSC_VER)
static INLINE uint64
Mul64x6464(uint64 multiplicand, uint64 multiplier, uint32 shift)
{
uint64 tmplo, tmphi;
tmplo = _umul128(multiplicand, multiplier, &tmphi);
return __shiftright128(tmplo, tmphi, (uint8) shift);
}
#endif
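/*
 * Worked example: Mul64x6464(1ULL << 62, 5, 2) forms the 65-bit product
 * 5 * 2^62 in RDX:RAX, shifts it right by 2, and returns 5 * 2^60
 * (0x5000000000000000); a plain 64-bit multiply would have overflowed.
 */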
/*
*-----------------------------------------------------------------------------
*
* Muls64x64s64 --
*
* Signed integer by fixed point multiplication:
* result = multiplicand * multiplier >> shift
*
* Signed 64-bit integer multiplicand.
* Unsigned 64-bit fixed point multiplier, represented as
* multiplier >> shift, where shift < 64.
* Signed 64-bit integer product.
*
* Implementation:
* Multiply 64x64 bits to yield a full 128-bit product.
* Shift result in RDX:RAX right by "shift".
* Return the low-order 64 bits of the above.
*
* Note: using an unsigned shift instruction is correct because
* shift < 64 and we return only the low 64 bits of the shifted
* result.
*
* Result:
* Product
*
*-----------------------------------------------------------------------------
*/
#if defined(__GNUC__)
static inline int64
Muls64x64s64(int64 multiplicand, int64 multiplier, uint32 shift)
{
int64 result, dummy;
__asm__("imulq %3 \n\t"
"shrdq %b4, %1, %0 \n\t"
: "=a" (result),
"=d" (dummy)
: "0" (multiplier),
"rm" (multiplicand),
"c" (shift)
: "cc");
return result;
}
#elif defined(_MSC_VER)
static INLINE int64
Muls64x64s64(int64 multiplicand, int64 multiplier, uint32 shift)
{
int64 tmplo, tmphi;
tmplo = _mul128(multiplicand, multiplier, &tmphi);
return __shiftright128(tmplo, tmphi, (uint8) shift);
}
#endif
/*
*-----------------------------------------------------------------------------
*
* Mul64x3264 --
*
* Unsigned integer by fixed point multiplication:
* result = multiplicand * multiplier >> shift
*
* Unsigned 64-bit integer multiplicand.
* Unsigned 32-bit fixed point multiplier, represented as
* multiplier >> shift, where shift < 64.
* Unsigned 64-bit integer product.
*
* Implementation:
* Multiply 64x64 bits to yield a full 128-bit product.
* Shift result in RDX:RAX right by "shift".
* Return the low-order 64 bits of the above.
*
* Result:
* Return the low-order 64 bits of ((multiplicand * multiplier) >> shift)
*
*-----------------------------------------------------------------------------
*/
static INLINE uint64
Mul64x3264(uint64 multiplicand, uint32 multiplier, uint32 shift)
{
return Mul64x6464(multiplicand, multiplier, shift);
}
/*
*-----------------------------------------------------------------------------
*
* Muls64x32s64 --
*
* Signed integer by fixed point multiplication:
* result = (multiplicand * multiplier) >> shift
*
* Signed 64-bit integer multiplicand.
* Unsigned 32-bit fixed point multiplier, represented as
* multiplier >> shift, where shift < 64.
* Signed 64-bit integer product.
*
* Implementation:
* Multiply 64x64 bits to yield a full 128-bit product.
* Shift result in RDX:RAX right by "shift".
* Return the low-order 64 bits of the above.
*
* Result:
* Return the low-order 64 bits of ((multiplicand * multiplier) >> shift)
*
*-----------------------------------------------------------------------------
*/
static INLINE int64
Muls64x32s64(int64 multiplicand, uint32 multiplier, uint32 shift)
{
return Muls64x64s64(multiplicand, multiplier, shift);
}
#if defined(__GNUC__)
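/*
 * uint64set --
 *
 *    Fill "count" consecutive 64-bit words starting at "dst" with "val",
 *    using "rep stosq". Returns dst, memset-style.
 */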
static INLINE void *
uint64set(void *dst, uint64 val, uint64 count)
{
int dummy0;
int dummy1;
__asm__ __volatile__("\t"
"cld" "\n\t"
"rep ; stosq" "\n"
: "=c" (dummy0), "=D" (dummy1)
: "0" (count), "1" (dst), "a" (val)
: "memory", "cc");
return dst;
}
#endif
/*
*-----------------------------------------------------------------------------
*
* Div643232 --
*
* Unsigned integer division:
* The dividend is 64-bit wide
* The divisor is 32-bit wide
* The quotient is 32-bit wide
*
 * Use this function only if you are certain that the quotient will fit in
 * 32 bits. If it does not, the 32-bit version raises a #DE exception, but
 * this 64-bit version silently truncates. So please be careful.
*
* Results:
* Quotient and remainder
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
#if defined(__GNUC__) || defined(_MSC_VER)
static INLINE void
Div643232(uint64 dividend, // IN
uint32 divisor, // IN
uint32 *quotient, // OUT
uint32 *remainder) // OUT
{
*quotient = (uint32)(dividend / divisor);
*remainder = (uint32)(dividend % divisor);
}
#endif
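/*
 * Usage sketch (values hypothetical): split a nanosecond count into seconds
 * and leftover nanoseconds; the quotient fits in 32 bits as long as
 * ns < 2^32 * 10^9 (roughly 2^62):
 *
 *    uint32 sec, nsRem;
 *    Div643232(ns, 1000000000, &sec, &nsRem);
 */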
/*
*-----------------------------------------------------------------------------
*
* Div643264 --
*
* Unsigned integer division:
* The dividend is 64-bit wide
* The divisor is 32-bit wide
* The quotient is 64-bit wide
*
* Results:
* Quotient and remainder
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
#if defined(__GNUC__)
static INLINE void
Div643264(uint64 dividend, // IN
uint32 divisor, // IN
uint64 *quotient, // OUT
uint32 *remainder) // OUT
{
*quotient = dividend / divisor;
*remainder = dividend % divisor;
}
#endif
#endif // _VM_BASIC_ASM_X86_64_H_
vmci-only/shared/vmci_infrastructure.h 0000444 0000000 0000000 00000007766 12522066074 017230 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmci_infrastructure.h --
*
* This file implements the VMCI infrastructure.
*/
#ifndef _VMCI_INFRASTRUCTURE_H_
#define _VMCI_INFRASTRUCTURE_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_DISTRIBUTE
#include "includeCheck.h"
#include "vmware.h"
#include "vmci_defs.h"
typedef enum {
VMCIOBJ_VMX_VM = 10,
VMCIOBJ_CONTEXT,
VMCIOBJ_SOCKET,
VMCIOBJ_NOT_SET,
} VMCIObjType;
/* For storing VMCI structures in file handles. */
typedef struct VMCIObj {
void *ptr;
VMCIObjType type;
} VMCIObj;
/* Guestcalls currently support a maximum of 8 uint64 arguments. */
#define VMCI_GUESTCALL_MAX_ARGS_SIZE 64
/*
* Structure used for checkpointing the doorbell mappings. It is
* written to the checkpoint as is, so changing this structure will
* break checkpoint compatibility.
*/
typedef struct VMCIDoorbellCptState {
VMCIHandle handle;
uint64 bitmapIdx;
} VMCIDoorbellCptState;
/* Used to determine what checkpoint state to get and set. */
#define VMCI_NOTIFICATION_CPT_STATE 0x1
#define VMCI_WELLKNOWN_CPT_STATE 0x2
#define VMCI_DG_OUT_STATE 0x3
#define VMCI_DG_IN_STATE 0x4
#define VMCI_DG_IN_SIZE_STATE 0x5
#define VMCI_DOORBELL_CPT_STATE 0x6
/* Used to control the VMCI device in the vmkernel */
#define VMCI_DEV_RESET 0x01
#define VMCI_DEV_QP_RESET 0x02 // DEPRECATED
#define VMCI_DEV_QUIESCE 0x03
#define VMCI_DEV_UNQUIESCE 0x04
#define VMCI_DEV_QP_BREAK_SHARING 0x05 // DEPRECATED
#define VMCI_DEV_RESTORE_SYNC 0x06
#define VMCI_DEV_BMASTER_OFF 0x07
#define VMCI_DEV_BMASTER_ON 0x08
/*
*-------------------------------------------------------------------------
*
* VMCI_Hash --
*
* Hash function used by the Simple Datagram API. Based on the djb2
* hash function by Dan Bernstein.
*
* Result:
 * Returns the hash table index (bucket) for the given handle.
*
* Side effects:
* None.
*
*-------------------------------------------------------------------------
*/
static INLINE int
VMCI_Hash(VMCIHandle handle, // IN
unsigned size) // IN
{
unsigned i;
int hash = 5381;
const uint64 handleValue = QWORD(handle.resource, handle.context);
for (i = 0; i < sizeof handle; i++) {
hash = ((hash << 5) + hash) + (uint8)(handleValue >> (i * 8));
}
return hash & (size - 1);
}
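/*
 * Usage sketch (table size hypothetical): the "hash & (size - 1)" reduction
 * above distributes entries evenly only when size is a power of two.
 *
 *    #define HT_SIZE 256 // Must be a power of two.
 *    int bucket = VMCI_Hash(handle, HT_SIZE);
 */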
/*
*-------------------------------------------------------------------------
*
* VMCI_HashId --
*
* Hash function used by the Simple Datagram API. Hashes only a VMCI id
* (not the full VMCI handle) Based on the djb2
* hash function by Dan Bernstein.
*
* Result:
* Returns guest call size.
*
* Side effects:
* None.
*
*-------------------------------------------------------------------------
*/
static INLINE int
VMCI_HashId(VMCIId id, // IN
unsigned size) // IN
{
unsigned i;
int hash = 5381;
for (i = 0; i < sizeof id; i++) {
hash = ((hash << 5) + hash) + (uint8)(id >> (i * 8));
}
return hash & (size - 1);
}
#endif // _VMCI_INFRASTRUCTURE_H_
vmci-only/shared/compat_skbuff.h 0000444 0000000 0000000 00000015575 12522066073 015751 0 ustar root root /*********************************************************
* Copyright (C) 2007 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_SKBUFF_H__
# define __COMPAT_SKBUFF_H__
#include <linux/skbuff.h>
/*
 * When the transition from the mac/nh/h fields to the skb_* accessors was
 * made, SKB_WITH_OVERHEAD was also introduced.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) || \
(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 21) && defined(SKB_WITH_OVERHEAD))
#define compat_skb_mac_header(skb) skb_mac_header(skb)
#define compat_skb_network_header(skb) skb_network_header(skb)
#define compat_skb_network_offset(skb) skb_network_offset(skb)
#define compat_skb_transport_header(skb) skb_transport_header(skb)
#define compat_skb_transport_offset(skb) skb_transport_offset(skb)
#define compat_skb_network_header_len(skb) skb_network_header_len(skb)
#define compat_skb_tail_pointer(skb) skb_tail_pointer(skb)
#define compat_skb_end_pointer(skb) skb_end_pointer(skb)
#define compat_skb_ip_header(skb) ((struct iphdr *)skb_network_header(skb))
#define compat_skb_ipv6_header(skb) ((struct ipv6hdr *)skb_network_header(skb))
#define compat_skb_tcp_header(skb) ((struct tcphdr *)skb_transport_header(skb))
#define compat_skb_reset_mac_header(skb) skb_reset_mac_header(skb)
#define compat_skb_reset_network_header(skb) skb_reset_network_header(skb)
#define compat_skb_reset_transport_header(skb) skb_reset_transport_header(skb)
#define compat_skb_set_network_header(skb, off) skb_set_network_header(skb, off)
#define compat_skb_set_transport_header(skb, off) skb_set_transport_header(skb, off)
#else
#define compat_skb_mac_header(skb) (skb)->mac.raw
#define compat_skb_network_header(skb) (skb)->nh.raw
#define compat_skb_network_offset(skb) ((skb)->nh.raw - (skb)->data)
#define compat_skb_transport_header(skb) (skb)->h.raw
#define compat_skb_transport_offset(skb) ((skb)->h.raw - (skb)->data)
#define compat_skb_network_header_len(skb) ((skb)->h.raw - (skb)->nh.raw)
#define compat_skb_tail_pointer(skb) (skb)->tail
#define compat_skb_end_pointer(skb) (skb)->end
#define compat_skb_ip_header(skb) (skb)->nh.iph
#define compat_skb_ipv6_header(skb) (skb)->nh.ipv6h
#define compat_skb_tcp_header(skb) (skb)->h.th
#define compat_skb_reset_mac_header(skb) ((skb)->mac.raw = (skb)->data)
#define compat_skb_reset_network_header(skb) ((skb)->nh.raw = (skb)->data)
#define compat_skb_reset_transport_header(skb) ((skb)->h.raw = (skb)->data)
#define compat_skb_set_network_header(skb, off) ((skb)->nh.raw = (skb)->data + (off))
#define compat_skb_set_transport_header(skb, off) ((skb)->h.raw = (skb)->data + (off))
#endif
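/*
 * Usage sketch: with the wrappers above, header-parsing code stays
 * version-independent across the 2.6.22 accessor transition, e.g. (assuming
 * <linux/ip.h> is included and the packet is known to be IPv4):
 *
 *    struct iphdr *ip = compat_skb_ip_header(skb);
 *    u8 proto = ip->protocol;
 */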
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18) || defined(VMW_SKB_LINEARIZE_2618)
# define compat_skb_linearize(skb) skb_linearize((skb))
#else
# if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 0)
# define compat_skb_linearize(skb) __skb_linearize((skb), GFP_ATOMIC)
# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 4)
# define compat_skb_linearize(skb) skb_linearize((skb), GFP_ATOMIC)
# else
static inline int
compat_skb_linearize(struct sk_buff *skb)
{
return 0;
}
# endif
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
#define compat_skb_csum_offset(skb) (skb)->csum_offset
#else
#define compat_skb_csum_offset(skb) (skb)->csum
#endif
/*
 * Note that compat_skb_csum_start() has semantics different from the kernel's
 * csum_start: the kernel's skb->csum_start is the offset between the start of
 * the checksummed area and the start of the complete skb buffer, while our
 * compat_skb_csum_start(skb) is the offset from the start of the packet
 * itself.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
#define compat_skb_csum_start(skb) ((skb)->csum_start - skb_headroom(skb))
#else
#define compat_skb_csum_start(skb) compat_skb_transport_offset(skb)
#endif
#if defined(NETIF_F_GSO) /* 2.6.18 and upwards */
#define compat_skb_mss(skb) (skb_shinfo(skb)->gso_size)
#else
#define compat_skb_mss(skb) (skb_shinfo(skb)->tso_size)
#endif
/* used by both received pkts and outgoing ones */
#define VM_CHECKSUM_UNNECESSARY CHECKSUM_UNNECESSARY
/* csum status of received pkts */
#if defined(CHECKSUM_COMPLETE)
# define VM_RX_CHECKSUM_PARTIAL CHECKSUM_COMPLETE
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) && defined(CHECKSUM_HW)
# define VM_RX_CHECKSUM_PARTIAL CHECKSUM_HW
#else
# define VM_RX_CHECKSUM_PARTIAL CHECKSUM_PARTIAL
#endif
/* csum status of outgoing pkts */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) && defined(CHECKSUM_HW)
# define VM_TX_CHECKSUM_PARTIAL CHECKSUM_HW
#else
# define VM_TX_CHECKSUM_PARTIAL CHECKSUM_PARTIAL
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,1,0))
# define compat_kfree_skb(skb, type) kfree_skb(skb, type)
# define compat_dev_kfree_skb(skb, type) dev_kfree_skb(skb, type)
# define compat_dev_kfree_skb_any(skb, type) dev_kfree_skb(skb, type)
# define compat_dev_kfree_skb_irq(skb, type) dev_kfree_skb(skb, type)
#else
# define compat_kfree_skb(skb, type) kfree_skb(skb)
# define compat_dev_kfree_skb(skb, type) dev_kfree_skb(skb)
# if (LINUX_VERSION_CODE < KERNEL_VERSION(2,3,43))
# define compat_dev_kfree_skb_any(skb, type) dev_kfree_skb(skb)
# define compat_dev_kfree_skb_irq(skb, type) dev_kfree_skb(skb)
# else
# define compat_dev_kfree_skb_any(skb, type) dev_kfree_skb_any(skb)
# define compat_dev_kfree_skb_irq(skb, type) dev_kfree_skb_irq(skb)
# endif
#endif
#ifndef NET_IP_ALIGN
# define COMPAT_NET_IP_ALIGN 2
#else
# define COMPAT_NET_IP_ALIGN NET_IP_ALIGN
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 4)
# define compat_skb_headlen(skb) skb_headlen(skb)
# define compat_pskb_may_pull(skb, len) pskb_may_pull(skb, len)
# define compat_skb_is_nonlinear(skb) skb_is_nonlinear(skb)
#else
# define compat_skb_headlen(skb) (skb)->len
# define compat_pskb_may_pull(skb, len) 1
# define compat_skb_is_nonlinear(skb) 0
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 12)
# define compat_skb_header_cloned(skb) skb_header_cloned(skb)
#else
# define compat_skb_header_cloned(skb) 0
#endif
#endif /* __COMPAT_SKBUFF_H__ */
vmci-only/shared/compat_netdevice.h 0000444 0000000 0000000 00000023744 12522066073 016434 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_NETDEVICE_H__
# define __COMPAT_NETDEVICE_H__
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/pci.h>
#include <linux/if_vlan.h>
/*
* The enet_statistics structure moved from linux/if_ether.h to
* linux/netdevice.h and is renamed net_device_stats in 2.1.25 --hpreg
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 1, 25)
# include <linux/if_ether.h>
# define net_device_stats enet_statistics
#endif
/* The netif_rx_ni() API appeared in 2.4.8 --hpreg */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 8)
# define netif_rx_ni netif_rx
#endif
/* The device struct was renamed net_device in 2.3.14 --hpreg */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14)
# define net_device device
#endif
/*
 * SET_MODULE_OWNER appeared sometime during 2.3.x. It set
 * dev->owner = THIS_MODULE until 2.5.70, where netdevice refcounting
 * was completely changed. SET_MODULE_OWNER was a no-op for the whole
 * 2.6.x series, and finally disappeared in 2.6.24.
 *
 * The MOD_xxx_USE_COUNT wrappers are here, as they must be mutually
 * exclusive with the SET_MODULE_OWNER call.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
# define COMPAT_SET_MODULE_OWNER(dev) do {} while (0)
# define COMPAT_NETDEV_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
# define COMPAT_NETDEV_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
#else
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
# define COMPAT_SET_MODULE_OWNER(dev) SET_MODULE_OWNER(dev)
# else
# define COMPAT_SET_MODULE_OWNER(dev) do {} while (0)
# endif
# define COMPAT_NETDEV_MOD_INC_USE_COUNT do {} while (0)
# define COMPAT_NETDEV_MOD_DEC_USE_COUNT do {} while (0)
#endif
/*
 * SET_NETDEV_DEV appeared sometime during 2.5.x, and was later
 * backported to various 2.4.x kernels (as a dummy macro).
 */
#ifdef SET_NETDEV_DEV
# define COMPAT_SET_NETDEV_DEV(dev, pdev) SET_NETDEV_DEV(dev, pdev)
#else
# define COMPAT_SET_NETDEV_DEV(dev, pdev) do {} while (0)
#endif
/*
* Build alloc_etherdev API on the top of init_etherdev. For 2.0.x kernels
* we must provide dummy init method, otherwise register_netdev does
* nothing.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 0)
int
vmware_dummy_init(struct net_device *dev)
{
return 0;
}
#endif
static inline struct net_device*
compat_alloc_etherdev(int priv_size)
{
struct net_device* dev;
int size = sizeof *dev + priv_size;
/*
* The name is dynamically allocated before 2.4.0, but
* is an embedded array in later kernels.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
size += sizeof("ethXXXXXXX");
#endif
dev = kmalloc(size, GFP_KERNEL);
if (dev) {
memset(dev, 0, size);
if (priv_size) {
dev->priv = dev + 1;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
dev->name = (char *)(dev + 1) + priv_size;
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 0)
dev->init = vmware_dummy_init;
#endif
if (init_etherdev(dev, 0) != dev) {
kfree(dev);
dev = NULL;
}
}
return dev;
}
#else
#define compat_alloc_etherdev(sz) alloc_etherdev(sz)
#endif
/*
* alloc_netdev and free_netdev are there since 2.4.23. Their use is mandatory
* since 2.6.24.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 23)
static inline struct net_device *
compat_alloc_netdev(int priv_size,
const char *mask,
void (*setup)(struct net_device *))
{
struct net_device *dev;
int netdev_size = sizeof *dev;
int alloc_size;
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
netdev_size += IFNAMSIZ;
# endif
alloc_size = netdev_size + priv_size;
dev = kmalloc(alloc_size, GFP_KERNEL);
if (dev) {
memset(dev, 0, alloc_size);
dev->priv = (char*)dev + netdev_size;
setup(dev);
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
dev->name = (char*)(dev + 1);
# endif
strcpy(dev->name, mask);
}
return dev;
}
# define compat_free_netdev(dev) kfree(dev)
#else
# define compat_alloc_netdev(size, mask, setup) alloc_netdev(size, mask, setup)
# define compat_free_netdev(dev) free_netdev(dev)
#endif
/* netdev_priv() appeared in 2.6.3 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 3)
# define compat_netdev_priv(netdev) (netdev)->priv
#else
# define compat_netdev_priv(netdev) netdev_priv(netdev)
#endif
/*
 * In the 3.1 merge window feature macros were removed from mainline,
 * so let's add back the ones we care about.
 */
#if !defined(HAVE_NET_DEVICE_OPS) && \
LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
# define HAVE_NET_DEVICE_OPS 1
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 9)
# define COMPAT_NETDEV_TX_OK NETDEV_TX_OK
# define COMPAT_NETDEV_TX_BUSY NETDEV_TX_BUSY
#else
# define COMPAT_NETDEV_TX_OK 0
# define COMPAT_NETDEV_TX_BUSY 1
#endif
/* unregister_netdevice_notifier was not safe prior to 2.6.17 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17) && \
!defined(ATOMIC_NOTIFIER_INIT)
/* pre 2.6.17 and not patched */
static inline int compat_unregister_netdevice_notifier(struct notifier_block *nb) {
int err;
rtnl_lock();
err = unregister_netdevice_notifier(nb);
rtnl_unlock();
return err;
}
#else
/* post 2.6.17 or patched */
#define compat_unregister_netdevice_notifier(_nb) \
unregister_netdevice_notifier(_nb);
#endif
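/*
 * Usage sketch (callback and block names hypothetical): pair a plain
 * register_netdevice_notifier() with the compat unregister wrapper above,
 * which adds the rtnl locking needed on pre-2.6.17 kernels.
 *
 *    static struct notifier_block demoNb = { .notifier_call = DemoNetdevEvent };
 *    register_netdevice_notifier(&demoNb);
 *    ...
 *    compat_unregister_netdevice_notifier(&demoNb);
 */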
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) || defined(__VMKLNX__)
# define compat_netif_napi_add(dev, napi, poll, quota) \
netif_napi_add(dev, napi, poll, quota)
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) || \
defined VMW_NETIF_SINGLE_NAPI_PARM
# define compat_napi_complete(dev, napi) napi_complete(napi)
# define compat_napi_schedule(dev, napi) napi_schedule(napi)
# else
# define compat_napi_complete(dev, napi) netif_rx_complete(dev, napi)
# define compat_napi_schedule(dev, napi) netif_rx_schedule(dev, napi)
# endif
# define compat_napi_enable(dev, napi) napi_enable(napi)
# define compat_napi_disable(dev, napi) napi_disable(napi)
#else
# define compat_napi_complete(dev, napi) netif_rx_complete(dev)
# define compat_napi_schedule(dev, napi) netif_rx_schedule(dev)
# define compat_napi_enable(dev, napi) netif_poll_enable(dev)
# define compat_napi_disable(dev, napi) netif_poll_disable(dev)
/* RedHat ported GRO to 2.6.18 bringing new napi_struct with it */
# if defined NETIF_F_GRO
# define compat_netif_napi_add(netdev, napi, pollcb, quota) \
do { \
(netdev)->poll = (pollcb); \
(netdev)->weight = (quota);\
(napi)->dev = (netdev); \
} while (0)
# else
struct napi_struct {
int dummy;
};
# define compat_netif_napi_add(dev, napi, pollcb, quota) \
do { \
(dev)->poll = (pollcb); \
(dev)->weight = (quota);\
} while (0)
# endif
#endif
#ifdef NETIF_F_TSO6
# define COMPAT_NETIF_F_TSO (NETIF_F_TSO6 | NETIF_F_TSO)
#else
# define COMPAT_NETIF_F_TSO (NETIF_F_TSO)
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
# define compat_netif_tx_lock(dev) netif_tx_lock(dev)
# define compat_netif_tx_unlock(dev) netif_tx_unlock(dev)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16)
# define compat_netif_tx_lock(dev) spin_lock(&dev->xmit_lock)
# define compat_netif_tx_unlock(dev) spin_unlock(&dev->xmit_lock)
#else
/* Vendor backporting (SLES 10) has muddled the tx_lock situation. Pick whichever
* of the above works for you. */
# define compat_netif_tx_lock(dev) do {} while (0)
# define compat_netif_tx_unlock(dev) do {} while (0)
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
# define COMPAT_VLAN_GROUP_ARRAY_LEN VLAN_N_VID
# define compat_flush_scheduled_work(work) cancel_work_sync(work)
#else
# define COMPAT_VLAN_GROUP_ARRAY_LEN VLAN_GROUP_ARRAY_LEN
# define compat_flush_scheduled_work(work) flush_scheduled_work()
#endif
/*
 * For kernel versions older than 2.6.29, where pci_msi_enabled is not
 * available, check if
 * 1. CONFIG_PCI_MSI is present
 * 2. kernel version is newer than 2.6.25 (because multiqueue is not
 *    supported in kernels older than that)
 * 3. msi can be enabled. If it fails it means that MSI is not available.
 * When all the above are true, return non-zero so that multiple queues will
 * be allowed in the driver.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
# define compat_multiqueue_allowed(dev) pci_msi_enabled()
#else
# if defined CONFIG_PCI_MSI && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
static inline int
compat_multiqueue_allowed(struct pci_dev *dev)
{
int ret;
if (!pci_enable_msi(dev))
ret = 1;
else
ret = 0;
pci_disable_msi(dev);
return ret;
}
# else
# define compat_multiqueue_allowed(dev) (0)
# endif
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
# define compat_vlan_get_protocol(skb) vlan_get_protocol(skb)
#else
# define compat_vlan_get_protocol(skb) (skb->protocol)
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
typedef netdev_features_t compat_netdev_features_t;
#else
typedef u32 compat_netdev_features_t;
#endif
#endif /* __COMPAT_NETDEVICE_H__ */
vmci-only/shared/autoconf/ 0000755 0000000 0000000 00000000000 12522066670 014563 5 ustar root root vmci-only/shared/autoconf/statfs1.c 0000444 0000000 0000000 00000002671 12522066073 016315 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#include "compat_version.h"
#include "compat_autoconf.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
#include <linux/fs.h>
/*
* Around 2.6.18, the super_block pointer in statfs was changed to a dentry
* pointer. Red Hat backported this behavior into a 2.6.17 kernel.
*
* This test will fail on a kernel with such a patch.
*/
static int LinuxDriverStatFs(struct super_block *sb,
struct kstatfs *stat)
{
return 0;
}
struct super_operations super_ops = {
.statfs = LinuxDriverStatFs
};
#else
#error "This test intentionally fails on 2.6.19 and newer kernels."
#endif
vmci-only/shared/autoconf/cachector.c 0000444 0000000 0000000 00000003270 12522066073 016657 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#include "compat_version.h"
#include "compat_autoconf.h"
/*
* Between 2.6.23 and 2.6.24-rc1 ctor prototype was changed from
* ctor(ptr, cache, flags) to ctor(cache, ptr). Unfortunately there
* is no typedef for ctor, so we have to redefine kmem_cache_create
* to find out ctor prototype. This assumes that kmem_cache_create
 * takes 5 arguments and not 6 - that change occurred between
* 2.6.22 and 2.6.23-rc1. If prototype matches, then this is old
* kernel.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
#error "This test intentionally fails on 2.6.24 and newer kernels."
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
#include <linux/slab.h>
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
unsigned long,
void (*)(void *, struct kmem_cache *, unsigned long));
#endif
vmci-only/shared/autoconf/skblin.c 0000444 0000000 0000000 00000002572 12522066073 016212 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* Detect whether skb_linearize takes one or two arguments.
*/
#include "compat_version.h"
#include "compat_autoconf.h"
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 17)
/*
* Since 2.6.18 all kernels have single-argument skb_linearize. For
* older kernels use autodetection. Not using autodetection on newer
* kernels saves us from compile failure on some post 2.6.18 kernels
 * which do not have a self-contained skbuff.h.
*/
#include <linux/skbuff.h>
int test_skb_linearize(struct sk_buff *skb)
{
return skb_linearize(skb);
}
#endif
vmci-only/shared/autoconf/inode1.c 0000444 0000000 0000000 00000002703 12522066073 016103 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#include "compat_version.h"
#include "compat_autoconf.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
#include <linux/fs.h>
#include <linux/stddef.h> /* NULL */
/*
* After 2.6.18, inodes were "slimmed". This involved removing the union
* that encapsulates inode private data (and using i_private instead), as well
* as removing i_blksize. Red Hat backported this behavior into a 2.6.17
* kernel.
*
* This test will fail on a kernel with such a patch.
*/
void test(void)
{
struct inode inode;
inode.u.generic_ip = NULL;
}
#else
#error "This test intentionally fails on 2.6.20 and newer kernels."
#endif
vmci-only/shared/autoconf/netif_num_params.c 0000444 0000000 0000000 00000003352 12522066073 020254 0 ustar root root /*********************************************************
* Copyright (C) 2009 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
 * Detect whether netif_rx_complete (and netif_rx_schedule) take a single
 * napi_struct argument. The foundation was laid with the introduction of the
 * Generic Receive Offload infrastructure, but dropping the unneeded
 * net_device argument did not happen until a few commits later, so we can't
 * simply test for the presence of NETIF_F_GRO.
 *
 * The test succeeds if netif_rx_complete takes dev & napi arguments, or if
 * it takes the dev argument only (kernels before 2.6.24). The test fails if
 * netif_rx_complete takes only the single napi argument.
 */
#include "compat_version.h"
#include "compat_autoconf.h"
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
# error This compile test intentionally fails.
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
#include <linux/netdevice.h>
#ifdef NETIF_F_GRO
void test_netif_rx_complete(struct net_device *dev, struct napi_struct *napi)
{
netif_rx_complete(dev, napi);
}
#endif
#endif
vmci-only/shared/autoconf/netcreate_num_params.c 0000444 0000000 0000000 00000003160 12522066073 021116 0 ustar root root /*********************************************************
* Copyright (C) 2010 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
 * During the 2.6.33 merge window the net_proto_family->create() method was
 * changed - a new 'kern' field, signalling whether the socket is being created
 * by the kernel or by a userspace application, was added to it. Unfortunately, some
* distributions, such as RHEL 6, have backported the change to earlier
* kernels, so we can't rely solely on kernel version to determine number of
* arguments.
*/
#include "compat_version.h"
#include "compat_autoconf.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
# error This compile test intentionally fails.
#else
#include <linux/net.h>
static int TestCreate(struct net *net,
struct socket *sock, int protocol,
int kern)
{
return 0;
}
struct net_proto_family testFamily = {
.create = TestCreate,
};
#endif
vmci-only/shared/autoconf/cachector1.c 0000444 0000000 0000000 00000003075 12522066073 016743 0 ustar root root /*********************************************************
* Copyright (C) 2008 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#include "compat_version.h"
#include "compat_autoconf.h"
/*
* Between 2.6.27-rc1 and 2.6.27-rc2 ctor prototype was changed from
* ctor(cache, ptr) to ctor(ptr). Unfortunately there
* is no typedef for ctor, so we have to redefine kmem_cache_create
* to find out ctor prototype. If prototype matches, then this is old
* kernel.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
#error "This test intentionally fails on 2.6.28 and newer kernels."
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
#include <linux/slab.h>
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
unsigned long,
void (*)(struct kmem_cache *, void *));
#endif
vmci-only/shared/autoconf/cachecreate.c 0000444 0000000 0000000 00000003210 12522066073 017145 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#include "compat_version.h"
#include "compat_autoconf.h"
/*
* All kernels before 2.6.22 take 6 arguments. All kernels since
* 2.6.23-rc1 take 5 arguments. Only kernels between 2.6.22 and
 * 2.6.23-rc1 are questionable - we could ignore them if we wanted;
 * nobody cares about them even now. But unfortunately RedHat is
* re-releasing 2.6.X-rc kernels under 2.6.(X-1) name, so they
* are releasing 2.6.23-rc1 as 2.6.22-5055-something, so we have
* to do autodetection for them.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22)
/* Success... */
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
#error "This test intentionally fails on 2.6.23 and newer kernels."
#else
#include <linux/slab.h>
struct kmem_cache *kmemtest(void) {
return kmem_cache_create("test", 12, 0, 0, NULL, NULL);
}
#endif
vmci-only/shared/autoconf/file_operations_fsync.c 0000444 0000000 0000000 00000002746 12522066073 021317 0 ustar root root /*********************************************************
* Copyright (C) 2011 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* Linux v3.1 added two parameters to fsync for fine-grained locking
* control. But SLES11 SP2 backported the change to its 3.0 kernel,
* so we can't rely solely on the kernel version to determine the
* number of arguments.
*/
#include "compat_version.h"
#include "compat_autoconf.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
# error This compile test intentionally fails.
#else
#include <linux/fs.h>
#include <linux/types.h> /* loff_t */
static int TestFsync(struct file *file,
loff_t start, loff_t end,
int datasync)
{
return 0;
}
struct file_operations testFO = {
.fsync = TestFsync,
};
#endif
vmci-only/shared/autoconf/geninclude.c 0000444 0000000 0000000 00000002321 12522066073 017035 0 ustar root root /*********************************************************
* Copyright (C) 2003 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#include "compat_version.h"
#include "compat_autoconf.h"
#ifdef CONFIG_X86_VOYAGER
APATH/mach-voyager
#endif
#ifdef CONFIG_X86_VISWS
APATH/mach-visws
#endif
#ifdef CONFIG_X86_NUMAQ
APATH/mach-numaq
#endif
#ifdef CONFIG_X86_BIGSMP
APATH/mach-bigsmp
#endif
#ifdef CONFIG_X86_SUMMIT
APATH/mach-summit
#endif
#ifdef CONFIG_X86_GENERICARCH
APATH/mach-generic
#endif
APATH/mach-default
vmci-only/shared/autoconf/getsb1.c 0000444 0000000 0000000 00000003076 12522066073 016115 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#include "compat_version.h"
#include "compat_autoconf.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
#include <linux/fs.h>
/*
* Around 2.6.18, a pointer to a vfsmount was added to get_sb. Red Hat
* backported this behavior into a 2.6.17 kernel.
*
* This test will fail on a kernel with such a patch.
*/
static struct super_block * LinuxDriverGetSb(struct file_system_type *fs_type,
int flags,
const char *dev_name,
void *rawData)
{
return 0;
}
struct file_system_type fs_type = {
.get_sb = LinuxDriverGetSb
};
#else
#error "This test intentionally fails on 2.6.19 or newer kernels."
#endif
vmci-only/shared/autoconf/dcount.c 0000444 0000000 0000000 00000002603 12522066073 016217 0 ustar root root /*********************************************************
* Copyright (C) 2014 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#include "compat_version.h"
#include "compat_autoconf.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)
#include <linux/dcache.h>
/*
* As of 3.11.0, the dentry d_count field was removed. Red Hat
* backported this behavior into a 3.10.0 kernel.
*
* This test will fail on a kernel with such a patch.
*/
void test(void)
{
struct dentry dentry;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
dentry.d_count = 1;
#else
atomic_set(&dentry.d_count, 1);
#endif
}
#else
#error "This test intentionally fails on 3.11.0 or newer kernels."
#endif
vmci-only/shared/autoconf/filldir1.c 0000444 0000000 0000000 00000003260 12522066073 016431 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#include "compat_version.h"
#include "compat_autoconf.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
#include <linux/fs.h>
#include <linux/types.h> /* loff_t */
#include <linux/stddef.h> /* NULL */
/*
* After 2.6.18, filldir and statfs were changed to send 64-bit inode
* numbers to user space. Red Hat backported this behavior into a 2.6.17
* kernel.
*
* This test will fail on a kernel with such a patch.
*/
static int LinuxDriverFilldir(void *buf,
const char *name,
int namelen,
loff_t offset,
ino_t ino,
unsigned int d_type)
{
return 0;
}
void test(void)
{
vfs_readdir(NULL, LinuxDriverFilldir, NULL);
}
#else
#error "This test intentionally fails on 2.6.20 and newer kernels."
#endif
vmci-only/shared/compat_autoconf.h 0000444 0000000 0000000 00000002641 12522066073 016275 0 ustar root root /*********************************************************
* Copyright (C) 2009 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_AUTOCONF_H__
# define __COMPAT_AUTOCONF_H__
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMKDRIVERS
#include "includeCheck.h"
#ifndef LINUX_VERSION_CODE
# error "Include compat_version.h before compat_autoconf.h"
#endif
/* autoconf.h moved from linux/autoconf.h to generated/autoconf.h in 2.6.33-rc1. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
# include <linux/autoconf.h>
#else
# include <generated/autoconf.h>
#endif
#endif /* __COMPAT_AUTOCONF_H__ */
vmci-only/shared/vmware_pack_init.h 0000444 0000000 0000000 00000003644 12522066074 016443 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __VMWARE_PACK_INIT_H__
# define __VMWARE_PACK_INIT_H__
/*
* vmware_pack_init.h --
*
* Platform-independent code to make the compiler pack structure
* definitions (i.e. have them occupy the smallest possible space).
* The following constructs are known to work --hpreg
*
* #include "vmware_pack_begin.h"
* struct foo {
* ...
* }
* #include "vmware_pack_end.h"
* ;
*
* typedef
* #include "vmware_pack_begin.h"
* struct foo {
* ...
* }
* #include "vmware_pack_end.h"
* foo;
*/
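/*
 * A plausible sketch of what the begin/end headers expand to, assuming the
 * conventional pragma/attribute implementation (not quoted from this
 * package):
 *
 *    // vmware_pack_begin.h
 *    #ifdef _MSC_VER
 *    #   pragma pack(push, 1)
 *    #endif
 *
 *    // vmware_pack_end.h
 *    #ifdef _MSC_VER
 *    #   pragma pack(pop)
 *    #elif __GNUC__
 *    __attribute__((__packed__))
 *    #endif
 *
 * This also explains why the trailing ';' follows the include of
 * vmware_pack_end.h in the constructs above: for gcc, the attribute must
 * sit between the struct's closing brace and the semicolon.
 */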
#ifdef _MSC_VER
/*
* MSVC 6.0 emits warning 4103 when the pack push and pop pragma pairing is
* not balanced within 1 included file. That is annoying because our scheme
* is based on the pairing being balanced between 2 included files.
*
* So we disable this warning, but this is safe because the compiler will also
* emit warning 4161 when there are more pops than pushes within one main
* file --hpreg
*/
# pragma warning(disable:4103)
#elif __GNUC__
#else
# error Compiler packing...
#endif
#endif /* __VMWARE_PACK_INIT_H__ */
vmci-only/shared/compat_statfs.h 0000444 0000000 0000000 00000002306 12522066073 015761 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_STATFS_H__
# define __COMPAT_STATFS_H__
/* vfs.h simply includes statfs.h, but it knows which directory statfs.h is in. */
#include <linux/vfs.h>
/* 2.5.74 renamed struct statfs to kstatfs. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 74)
#define compat_kstatfs kstatfs
#else
#define compat_kstatfs statfs
#endif
#endif /* __COMPAT_STATFS_H__ */
vmci-only/shared/vmci_defs.h 0000444 0000000 0000000 00000065447 12522066074 015071 0 ustar root root /*********************************************************
* Copyright (C) 2005-2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef _VMCI_DEF_H_
#define _VMCI_DEF_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_VMMEXT
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_DISTRIBUTE
#include "includeCheck.h"
#include "vm_basic_types.h"
#include "vm_atomic.h"
#include "vm_assert.h"
/* Register offsets. */
#define VMCI_STATUS_ADDR 0x00
#define VMCI_CONTROL_ADDR 0x04
#define VMCI_ICR_ADDR 0x08
#define VMCI_IMR_ADDR 0x0c
#define VMCI_DATA_OUT_ADDR 0x10
#define VMCI_DATA_IN_ADDR 0x14
#define VMCI_CAPS_ADDR 0x18
#define VMCI_RESULT_LOW_ADDR 0x1c
#define VMCI_RESULT_HIGH_ADDR 0x20
/* Max number of devices. */
#define VMCI_MAX_DEVICES 1
/* Status register bits. */
#define VMCI_STATUS_INT_ON 0x1
/* Control register bits. */
#define VMCI_CONTROL_RESET 0x1
#define VMCI_CONTROL_INT_ENABLE 0x2
#define VMCI_CONTROL_INT_DISABLE 0x4
/* Capabilities register bits. */
#define VMCI_CAPS_HYPERCALL 0x1
#define VMCI_CAPS_GUESTCALL 0x2
#define VMCI_CAPS_DATAGRAM 0x4
#define VMCI_CAPS_NOTIFICATIONS 0x8
/* Interrupt Cause register bits. */
#define VMCI_ICR_DATAGRAM 0x1
#define VMCI_ICR_NOTIFICATION 0x2
/* Interrupt Mask register bits. */
#define VMCI_IMR_DATAGRAM 0x1
#define VMCI_IMR_NOTIFICATION 0x2
/* Interrupt type. */
typedef enum VMCIIntrType {
VMCI_INTR_TYPE_INTX = 0,
VMCI_INTR_TYPE_MSI = 1,
VMCI_INTR_TYPE_MSIX = 2
} VMCIIntrType;
/*
* Maximum MSI/MSI-X interrupt vectors in the device.
*/
#define VMCI_MAX_INTRS 2
/*
* Supported interrupt vectors. There is one for each ICR value above,
* but here they indicate the position in the vector array/message ID.
*/
#define VMCI_INTR_DATAGRAM 0
#define VMCI_INTR_NOTIFICATION 1
/*
* A single VMCI device has an upper limit of 128 MiB on the amount of
* memory that can be used for queue pairs.
*/
#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)
/*
* Queues with pre-mapped data pages must be small, so that we don't pin
* too much kernel memory (especially on vmkernel). We limit a queuepair to
* 32 KB, or 16 KB per queue for symmetrical pairs.
*
* XXX, we are raising this limit to 4MB to support high-throughput workloads
* with vioi-filter. Once we switch to rings instead of queuepairs for the
* page channel, we will drop this limit again. See PR 852983.
*/
#define VMCI_MAX_PINNED_QP_MEMORY (4 * 1024 * 1024)
/*
* We have a fixed set of resource IDs available in the VMX.
* This allows us to have a very simple implementation since we statically
* know how many callers will create datagram handles. If a new caller
* arrives and we have run out of slots we can manually increment the
* maximum size of available resource IDs.
*/
typedef uint32 VMCI_Resource;
/* VMCI reserved hypervisor datagram resource IDs. */
#define VMCI_RESOURCES_QUERY 0
#define VMCI_GET_CONTEXT_ID 1
#define VMCI_SET_NOTIFY_BITMAP 2
#define VMCI_DOORBELL_LINK 3
#define VMCI_DOORBELL_UNLINK 4
#define VMCI_DOORBELL_NOTIFY 5
/*
* VMCI_DATAGRAM_REQUEST_MAP and VMCI_DATAGRAM_REMOVE_MAP are
* obsoleted by the removal of VM to VM communication.
*/
#define VMCI_DATAGRAM_REQUEST_MAP 6
#define VMCI_DATAGRAM_REMOVE_MAP 7
#define VMCI_EVENT_SUBSCRIBE 8
#define VMCI_EVENT_UNSUBSCRIBE 9
#define VMCI_QUEUEPAIR_ALLOC 10
#define VMCI_QUEUEPAIR_DETACH 11
/*
* VMCI_VSOCK_VMX_LOOKUP was assigned to 12 for Fusion 3.0/3.1,
* WS 7.0/7.1 and ESX 4.1
*/
#define VMCI_HGFS_TRANSPORT 13
#define VMCI_UNITY_PBRPC_REGISTER 14
/*
* The next two resources are for RPC calls from guest Tools, to replace the
* backdoor calls we used previously. Privileged is for admin/root RPCs,
* unprivileged is for RPCs from any user.
*/
#define VMCI_RPC_PRIVILEGED 15
#define VMCI_RPC_UNPRIVILEGED 16
#define VMCI_RESOURCE_MAX 17
/*
* The core VMCI device functionality only requires the resource IDs of
* VMCI_QUEUEPAIR_DETACH and below.
*/
#define VMCI_CORE_DEVICE_RESOURCE_MAX VMCI_QUEUEPAIR_DETACH
/*
* VMCI reserved host datagram resource IDs.
* vsock control channel has resource id 1.
*/
#define VMCI_DVFILTER_DATA_PATH_DATAGRAM 2
/* VMCI Ids. */
typedef uint32 VMCIId;
typedef struct VMCIIdRange {
int8 action; // VMCI_FA_X, for use in filters.
VMCIId begin; // Beginning of range
VMCIId end; // End of range
} VMCIIdRange;
typedef struct VMCIHandle {
VMCIId context;
VMCIId resource;
} VMCIHandle;
static INLINE VMCIHandle
VMCI_MAKE_HANDLE(VMCIId cid, // IN:
VMCIId rid) // IN:
{
VMCIHandle h;
h.context = cid;
h.resource = rid;
return h;
}
/*
*----------------------------------------------------------------------
*
* VMCI_HANDLE_TO_UINT64 --
*
* Helper for VMCI handle to uint64 conversion.
*
* Results:
* The uint64 value.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static INLINE uint64
VMCI_HANDLE_TO_UINT64(VMCIHandle handle) // IN:
{
uint64 handle64;
handle64 = handle.context;
handle64 <<= 32;
handle64 |= handle.resource;
return handle64;
}
/*
*----------------------------------------------------------------------
*
* VMCI_UINT64_TO_HANDLE --
*
* Helper for uint64 to VMCI handle conversion.
*
* Results:
* The VMCI handle value.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static INLINE VMCIHandle
VMCI_UINT64_TO_HANDLE(uint64 handle64) // IN:
{
VMCIId context = (VMCIId)(handle64 >> 32);
VMCIId resource = (VMCIId)handle64;
return VMCI_MAKE_HANDLE(context, resource);
}
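/*
 * Worked example: the two helpers above are inverses. VMCI_MAKE_HANDLE(3, 10)
 * yields {context = 3, resource = 10}; VMCI_HANDLE_TO_UINT64 packs that as
 * (3 << 32) | 10 == 0x30000000A, and VMCI_UINT64_TO_HANDLE(0x30000000A)
 * recovers {3, 10}.
 */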
#define VMCI_HANDLE_TO_CONTEXT_ID(_handle) ((_handle).context)
#define VMCI_HANDLE_TO_RESOURCE_ID(_handle) ((_handle).resource)
#define VMCI_HANDLE_EQUAL(_h1, _h2) ((_h1).context == (_h2).context && \
(_h1).resource == (_h2).resource)
#define VMCI_INVALID_ID 0xFFFFFFFF
static const VMCIHandle VMCI_INVALID_HANDLE = {VMCI_INVALID_ID,
VMCI_INVALID_ID};
#define VMCI_HANDLE_INVALID(_handle) \
VMCI_HANDLE_EQUAL((_handle), VMCI_INVALID_HANDLE)
/*
* The defines below can be used to send anonymous requests.
* This also indicates that no response is expected.
*/
#define VMCI_ANON_SRC_CONTEXT_ID VMCI_INVALID_ID
#define VMCI_ANON_SRC_RESOURCE_ID VMCI_INVALID_ID
#define VMCI_ANON_SRC_HANDLE VMCI_MAKE_HANDLE(VMCI_ANON_SRC_CONTEXT_ID, \
VMCI_ANON_SRC_RESOURCE_ID)
/* The lowest 16 context ids are reserved for internal use. */
#define VMCI_RESERVED_CID_LIMIT 16
/*
* Hypervisor context id, used for calling into hypervisor
* supplied services from the VM.
*/
#define VMCI_HYPERVISOR_CONTEXT_ID 0
/*
* Well-known context id, a logical context that contains a set of
* well-known services. This context ID is now obsolete.
*/
#define VMCI_WELL_KNOWN_CONTEXT_ID 1
/*
* Context ID used by host endpoints.
*/
#define VMCI_HOST_CONTEXT_ID 2
#define VMCI_CONTEXT_IS_VM(_cid) (VMCI_INVALID_ID != _cid && \
_cid > VMCI_HOST_CONTEXT_ID)
/*
* The VMCI_CONTEXT_RESOURCE_ID is used together with VMCI_MAKE_HANDLE to make
* handles that refer to a specific context.
*/
#define VMCI_CONTEXT_RESOURCE_ID 0
/*
*-----------------------------------------------------------------------------
*
* VMCI error codes.
*
*-----------------------------------------------------------------------------
*/
#define VMCI_SUCCESS_QUEUEPAIR_ATTACH 5
#define VMCI_SUCCESS_QUEUEPAIR_CREATE 4
#define VMCI_SUCCESS_LAST_DETACH 3
#define VMCI_SUCCESS_ACCESS_GRANTED 2
#define VMCI_SUCCESS_ENTRY_DEAD 1
#define VMCI_SUCCESS 0LL
#define VMCI_ERROR_INVALID_RESOURCE (-1)
#define VMCI_ERROR_INVALID_ARGS (-2)
#define VMCI_ERROR_NO_MEM (-3)
#define VMCI_ERROR_DATAGRAM_FAILED (-4)
#define VMCI_ERROR_MORE_DATA (-5)
#define VMCI_ERROR_NO_MORE_DATAGRAMS (-6)
#define VMCI_ERROR_NO_ACCESS (-7)
#define VMCI_ERROR_NO_HANDLE (-8)
#define VMCI_ERROR_DUPLICATE_ENTRY (-9)
#define VMCI_ERROR_DST_UNREACHABLE (-10)
#define VMCI_ERROR_PAYLOAD_TOO_LARGE (-11)
#define VMCI_ERROR_INVALID_PRIV (-12)
#define VMCI_ERROR_GENERIC (-13)
#define VMCI_ERROR_PAGE_ALREADY_SHARED (-14)
#define VMCI_ERROR_CANNOT_SHARE_PAGE (-15)
#define VMCI_ERROR_CANNOT_UNSHARE_PAGE (-16)
#define VMCI_ERROR_NO_PROCESS (-17)
#define VMCI_ERROR_NO_DATAGRAM (-18)
#define VMCI_ERROR_NO_RESOURCES (-19)
#define VMCI_ERROR_UNAVAILABLE (-20)
#define VMCI_ERROR_NOT_FOUND (-21)
#define VMCI_ERROR_ALREADY_EXISTS (-22)
#define VMCI_ERROR_NOT_PAGE_ALIGNED (-23)
#define VMCI_ERROR_INVALID_SIZE (-24)
#define VMCI_ERROR_REGION_ALREADY_SHARED (-25)
#define VMCI_ERROR_TIMEOUT (-26)
#define VMCI_ERROR_DATAGRAM_INCOMPLETE (-27)
#define VMCI_ERROR_INCORRECT_IRQL (-28)
#define VMCI_ERROR_EVENT_UNKNOWN (-29)
#define VMCI_ERROR_OBSOLETE (-30)
#define VMCI_ERROR_QUEUEPAIR_MISMATCH (-31)
#define VMCI_ERROR_QUEUEPAIR_NOTSET (-32)
#define VMCI_ERROR_QUEUEPAIR_NOTOWNER (-33)
#define VMCI_ERROR_QUEUEPAIR_NOTATTACHED (-34)
#define VMCI_ERROR_QUEUEPAIR_NOSPACE (-35)
#define VMCI_ERROR_QUEUEPAIR_NODATA (-36)
#define VMCI_ERROR_BUSMEM_INVALIDATION (-37)
#define VMCI_ERROR_MODULE_NOT_LOADED (-38)
#define VMCI_ERROR_DEVICE_NOT_FOUND (-39)
#define VMCI_ERROR_QUEUEPAIR_NOT_READY (-40)
#define VMCI_ERROR_WOULD_BLOCK (-41)
/* VMCI clients should return error codes within this range */
#define VMCI_ERROR_CLIENT_MIN (-500)
#define VMCI_ERROR_CLIENT_MAX (-550)
/* Internal error codes. */
#define VMCI_SHAREDMEM_ERROR_BAD_CONTEXT (-1000)
#define VMCI_PATH_MAX 256
/* VMCI reserved events. */
typedef uint32 VMCI_Event;
#define VMCI_EVENT_CTX_ID_UPDATE 0 // Only applicable to guest endpoints
#define VMCI_EVENT_CTX_REMOVED 1 // Applicable to guest and host
#define VMCI_EVENT_QP_RESUMED 2 // Only applicable to guest endpoints
#define VMCI_EVENT_QP_PEER_ATTACH 3 // Applicable to guest and host
#define VMCI_EVENT_QP_PEER_DETACH 4 // Applicable to guest and host
#define VMCI_EVENT_MEM_ACCESS_ON 5 // Applicable to VMX and vmk. On vmk,
// this event has the Context payload type.
#define VMCI_EVENT_MEM_ACCESS_OFF 6 // Applicable to VMX and vmk. Same as
// above for the payload type.
#define VMCI_EVENT_MAX 7
/*
* Of the above events, a few are reserved for use in the VMX, and
* other endpoints (guest and host kernel) should not use them. For
* the rest of the events, we allow both host and guest endpoints to
* subscribe to them, to maintain the same API for host and guest
* endpoints.
*/
#define VMCI_EVENT_VALID_VMX(_event) (_event == VMCI_EVENT_MEM_ACCESS_ON || \
_event == VMCI_EVENT_MEM_ACCESS_OFF)
#if defined(VMX86_SERVER)
#define VMCI_EVENT_VALID(_event) (_event < VMCI_EVENT_MAX)
#else // VMX86_SERVER
#define VMCI_EVENT_VALID(_event) (_event < VMCI_EVENT_MAX && \
!VMCI_EVENT_VALID_VMX(_event))
#endif // VMX86_SERVER
/* Reserved guest datagram resource ids. */
#define VMCI_EVENT_HANDLER 0
/* VMCI privileges. */
typedef enum VMCIResourcePrivilegeType {
VMCI_PRIV_CH_PRIV,
VMCI_PRIV_DESTROY_RESOURCE,
VMCI_PRIV_ASSIGN_CLIENT,
VMCI_PRIV_DG_CREATE,
VMCI_PRIV_DG_SEND,
VMCI_PRIV_NOTIFY,
VMCI_NUM_PRIVILEGES,
} VMCIResourcePrivilegeType;
/*
* VMCI coarse-grained privileges (per context or host
* process/endpoint). An entity with the restricted flag is only
* allowed to interact with the hypervisor and trusted entities.
*/
typedef uint32 VMCIPrivilegeFlags;
#define VMCI_PRIVILEGE_FLAG_RESTRICTED 0x01
#define VMCI_PRIVILEGE_FLAG_TRUSTED 0x02
#define VMCI_PRIVILEGE_ALL_FLAGS (VMCI_PRIVILEGE_FLAG_RESTRICTED | \
VMCI_PRIVILEGE_FLAG_TRUSTED)
#define VMCI_NO_PRIVILEGE_FLAGS 0x00
#define VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS VMCI_NO_PRIVILEGE_FLAGS
#define VMCI_LEAST_PRIVILEGE_FLAGS VMCI_PRIVILEGE_FLAG_RESTRICTED
#define VMCI_MAX_PRIVILEGE_FLAGS VMCI_PRIVILEGE_FLAG_TRUSTED
#define VMCI_PUBLIC_GROUP_NAME "vmci public group"
/* 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved. */
#define VMCI_RESERVED_RESOURCE_ID_MAX 1023
#define VMCI_DOMAIN_NAME_MAXLEN 32
#define VMCI_LGPFX "VMCI: "
/*
* VMCIQueueHeader
*
* A Queue cannot stand by itself as designed. Each Queue's header
* contains a pointer into itself (the producerTail) and into its peer
* (consumerHead). The reason for the separation is one of
* accessibility: Each end-point can modify two things: where the next
* location to enqueue is within its produceQ (producerTail); and
* where the next dequeue location is in its consumeQ (consumerHead).
*
* An end-point cannot modify the pointers of its peer (guest to
* guest; NOTE that in the host both queue headers are mapped r/w).
* But, each end-point needs read access to both Queue header
* structures in order to determine how much space is used (or left)
* in the Queue. This is because for an end-point to know how full
* its produceQ is, it needs to use the consumerHead that points into
* the produceQ but -that- consumerHead is in the Queue header for
* that end-point's consumeQ.
*
* Thoroughly confused? Sorry.
*
* producerTail: the point to enqueue new entrants. When you approach
* a line in a store, for example, you walk up to the tail.
*
* consumerHead: the point in the queue from which the next element is
* dequeued. In other words, who is next in line is he who is at the
* head of the line.
*
* Also, producerTail points to an empty byte in the Queue, whereas
* consumerHead points to a valid byte of data (unless producerTail ==
* consumerHead in which case consumerHead does not point to a valid
* byte of data).
*
* For a queue of buffer 'size' bytes, the tail and head pointers will be in
* the range [0, size-1].
*
* If produceQHeader->producerTail == consumeQHeader->consumerHead
* then the produceQ is empty.
*/
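/*
 * A concrete restatement of the above: call the endpoints A and B, so that
 * A's produceQ is B's consumeQ. The header of A's produceQ holds A's
 * producerTail (an offset into that produceQ), while the consumerHead that
 * also points into that produceQ lives in the header of A's consumeQ, where
 * B advances it as B dequeues. That is why VMCIQueueHeader_FreeSpace() below
 * takes both headers: the tail comes from one and the head from the other.
 */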
typedef struct VMCIQueueHeader {
/* All fields are 64bit and aligned. */
VMCIHandle handle; /* Identifier. */
Atomic_uint64 producerTail; /* Offset in this queue. */
Atomic_uint64 consumerHead; /* Offset in peer queue. */
} VMCIQueueHeader;
/*
* If one client of a QueuePair is a 32bit entity, we restrict the QueuePair
* size to be less than 4GB, and use 32bit atomic operations on the head and
* tail pointers. 64bit atomic read on a 32bit entity involves cmpxchg8b which
* is an atomic read-modify-write. This will cause traces to fire when a 32bit
* consumer tries to read the producer's tail pointer, for example, because the
* consumer has read-only access to the producer's tail pointer.
*
* We provide the following macros to invoke 32bit or 64bit atomic operations
* based on the architecture the code is being compiled on.
*/
/* Architecture independent maximum queue size. */
#define QP_MAX_QUEUE_SIZE_ARCH_ANY CONST64U(0xffffffff)
#ifdef __x86_64__
# define QP_MAX_QUEUE_SIZE_ARCH CONST64U(0xffffffffffffffff)
# define QPAtomic_ReadOffset(x) Atomic_Read64(x)
# define QPAtomic_WriteOffset(x, y) Atomic_Write64(x, y)
#else
/*
* Wrappers below are being used to call Atomic_Read32 because of the
* 'type punned' compilation warning received when Atomic_Read32 is
* called with a Atomic_uint64 pointer typecasted to Atomic_uint32
* pointer from QPAtomic_ReadOffset. Ditto with QPAtomic_WriteOffset.
*/
static INLINE uint32
TypeSafe_Atomic_Read32(void *var) // IN:
{
return Atomic_Read32((Atomic_uint32 *)(var));
}
static INLINE void
TypeSafe_Atomic_Write32(void *var, uint32 val) // IN:
{
Atomic_Write32((Atomic_uint32 *)(var), (uint32)(val));
}
# define QP_MAX_QUEUE_SIZE_ARCH CONST64U(0xffffffff)
# define QPAtomic_ReadOffset(x) TypeSafe_Atomic_Read32((void *)(x))
# define QPAtomic_WriteOffset(x, y) \
TypeSafe_Atomic_Write32((void *)(x), (uint32)(y))
#endif /* __x86_64__ */
/*
*-----------------------------------------------------------------------------
*
* QPAddPointer --
*
* Helper to add a given offset to a head or tail pointer. Wraps the value
* of the pointer around the max size of the queue.
*
* Results:
* None.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE void
QPAddPointer(Atomic_uint64 *var, // IN/OUT:
size_t add, // IN:
uint64 size) // IN:
{
uint64 newVal = QPAtomic_ReadOffset(var);
if (newVal >= size - add) {
newVal -= size;
}
newVal += add;
QPAtomic_WriteOffset(var, newVal);
}
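/*
 * Worked example: with size == 8, var == 6 and add == 3, the test
 * 6 >= 8 - 3 fires, so newVal becomes 6 - 8 (an intentional unsigned
 * underflow) and the subsequent += 3 wraps it back to 1, i.e.
 * (6 + 3) % 8. The underflow and the addition cancel modulo 2^64.
 */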
/*
*-----------------------------------------------------------------------------
*
* VMCIQueueHeader_ProducerTail() --
*
* Helper routine to get the Producer Tail from the supplied queue.
*
* Results:
* The contents of the queue's producer tail.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE uint64
VMCIQueueHeader_ProducerTail(const VMCIQueueHeader *qHeader) // IN:
{
VMCIQueueHeader *qh = (VMCIQueueHeader *)qHeader;
return QPAtomic_ReadOffset(&qh->producerTail);
}
/*
*-----------------------------------------------------------------------------
*
* VMCIQueueHeader_ConsumerHead() --
*
* Helper routine to get the Consumer Head from the supplied queue.
*
* Results:
* The contents of the queue's consumer head.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE uint64
VMCIQueueHeader_ConsumerHead(const VMCIQueueHeader *qHeader) // IN:
{
VMCIQueueHeader *qh = (VMCIQueueHeader *)qHeader;
return QPAtomic_ReadOffset(&qh->consumerHead);
}
/*
*-----------------------------------------------------------------------------
*
* VMCIQueueHeader_AddProducerTail() --
*
* Helper routine to increment the Producer Tail. Fundamentally,
* QPAddPointer() is used to manipulate the tail itself.
*
* Results:
* None.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE void
VMCIQueueHeader_AddProducerTail(VMCIQueueHeader *qHeader, // IN/OUT:
size_t add, // IN:
uint64 queueSize) // IN:
{
QPAddPointer(&qHeader->producerTail, add, queueSize);
}
/*
*-----------------------------------------------------------------------------
*
* VMCIQueueHeader_AddConsumerHead() --
*
* Helper routine to increment the Consumer Head. Fundamentally,
* QPAddPointer() is used to manipulate the head itself.
*
* Results:
* None.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE void
VMCIQueueHeader_AddConsumerHead(VMCIQueueHeader *qHeader, // IN/OUT:
size_t add, // IN:
uint64 queueSize) // IN:
{
QPAddPointer(&qHeader->consumerHead, add, queueSize);
}
/*
*-----------------------------------------------------------------------------
*
* VMCIQueueHeader_CheckAlignment --
*
* Checks if the given queue is aligned to a page boundary. Returns TRUE if
* the alignment is good.
*
* Results:
* TRUE or FALSE.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE Bool
VMCIQueueHeader_CheckAlignment(const VMCIQueueHeader *qHeader) // IN:
{
uintptr_t hdr, offset;
hdr = (uintptr_t) qHeader;
offset = hdr & (PAGE_SIZE - 1);
return offset == 0;
}
/*
*-----------------------------------------------------------------------------
*
* VMCIQueueHeader_GetPointers --
*
* Helper routine for getting the head and the tail pointer for a queue.
* Both the VMCIQueues are needed to get both the pointers for one queue.
*
* Results:
* None.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE void
VMCIQueueHeader_GetPointers(const VMCIQueueHeader *produceQHeader, // IN:
const VMCIQueueHeader *consumeQHeader, // IN:
uint64 *producerTail, // OUT:
uint64 *consumerHead) // OUT:
{
if (producerTail) {
*producerTail = VMCIQueueHeader_ProducerTail(produceQHeader);
}
if (consumerHead) {
*consumerHead = VMCIQueueHeader_ConsumerHead(consumeQHeader);
}
}
/*
*-----------------------------------------------------------------------------
*
* VMCIQueueHeader_ResetPointers --
*
* Reset the tail pointer (of "this" queue) and the head pointer (of
* "peer" queue).
*
* Results:
* None.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE void
VMCIQueueHeader_ResetPointers(VMCIQueueHeader *qHeader) // IN/OUT:
{
QPAtomic_WriteOffset(&qHeader->producerTail, CONST64U(0));
QPAtomic_WriteOffset(&qHeader->consumerHead, CONST64U(0));
}
/*
*-----------------------------------------------------------------------------
*
* VMCIQueueHeader_Init --
*
* Initializes a queue's state (head & tail pointers).
*
* Results:
* None.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE void
VMCIQueueHeader_Init(VMCIQueueHeader *qHeader, // IN/OUT:
const VMCIHandle handle) // IN:
{
qHeader->handle = handle;
VMCIQueueHeader_ResetPointers(qHeader);
}
/*
*-----------------------------------------------------------------------------
*
* VMCIQueueHeader_FreeSpace --
*
* Finds available free space in a produce queue to enqueue more
* data or reports an error if queue pair corruption is detected.
*
* Results:
* Free space size in bytes or an error code.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE int64
VMCIQueueHeader_FreeSpace(const VMCIQueueHeader *produceQHeader, // IN:
const VMCIQueueHeader *consumeQHeader, // IN:
const uint64 produceQSize) // IN:
{
uint64 tail;
uint64 head;
uint64 freeSpace;
tail = VMCIQueueHeader_ProducerTail(produceQHeader);
head = VMCIQueueHeader_ConsumerHead(consumeQHeader);
if (tail >= produceQSize || head >= produceQSize) {
return VMCI_ERROR_INVALID_SIZE;
}
/*
* Deduct 1 to avoid tail becoming equal to head, which would be ambiguous:
* if head and tail are equal, the queue is empty.
*/
if (tail >= head) {
freeSpace = produceQSize - (tail - head) - 1;
} else {
freeSpace = head - tail - 1;
}
return freeSpace;
}
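/*
 * Worked example: for produceQSize == 8, tail == 5 and head == 2 give
 * 8 - (5 - 2) - 1 == 4 free bytes; tail == 2 and head == 5 (wrapped) give
 * 5 - 2 - 1 == 2. The reserved byte keeps a full queue (tail one byte
 * behind head, modulo size) distinct from an empty one (tail == head).
 */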
/*
*-----------------------------------------------------------------------------
*
* VMCIQueueHeader_BufReady --
*
* VMCIQueueHeader_FreeSpace() does all the heavy lifting of
* determining the number of free bytes in a Queue. This routine
* then subtracts that size from the full size of the Queue so
* the caller knows how many bytes are ready to be dequeued.
*
* Results:
* On success, available data size in bytes (up to MAX_INT64).
* On failure, appropriate error code.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
static INLINE int64
VMCIQueueHeader_BufReady(const VMCIQueueHeader *consumeQHeader, // IN:
const VMCIQueueHeader *produceQHeader, // IN:
const uint64 consumeQSize) // IN:
{
int64 freeSpace;
freeSpace = VMCIQueueHeader_FreeSpace(consumeQHeader,
produceQHeader,
consumeQSize);
if (freeSpace < VMCI_SUCCESS) {
return freeSpace;
} else {
return consumeQSize - freeSpace - 1;
}
}
/*
* Defines for the VMCI traffic filter:
* - VMCI_FA_ defines the filter action values
* - VMCI_FP_ defines the filter protocol values
* - VMCI_FD_ defines the direction values (guest or host)
* - VMCI_FT_ are the type values (allow or deny)
*/
#define VMCI_FA_INVALID -1
#define VMCI_FA_ALLOW 0
#define VMCI_FA_DENY (VMCI_FA_ALLOW + 1)
#define VMCI_FA_MAX (VMCI_FA_DENY + 1)
#define VMCI_FP_INVALID -1
#define VMCI_FP_HYPERVISOR 0
#define VMCI_FP_QUEUEPAIR (VMCI_FP_HYPERVISOR + 1)
#define VMCI_FP_DOORBELL (VMCI_FP_QUEUEPAIR + 1)
#define VMCI_FP_DATAGRAM (VMCI_FP_DOORBELL + 1)
#define VMCI_FP_STREAMSOCK (VMCI_FP_DATAGRAM + 1)
#define VMCI_FP_ANY (VMCI_FP_STREAMSOCK + 1)
#define VMCI_FP_MAX (VMCI_FP_ANY + 1)
#define VMCI_FD_INVALID -1
#define VMCI_FD_GUEST 0
#define VMCI_FD_HOST (VMCI_FD_GUEST + 1)
#define VMCI_FD_ANY (VMCI_FD_HOST + 1)
#define VMCI_FD_MAX (VMCI_FD_ANY + 1)
/*
* The filter list tracks VMCI Id ranges for a given filter.
*/
typedef struct {
uint32 len;
VMCIIdRange *list;
} VMCIFilterList;
/*
* The filter info is used to communicate the filter configuration
* from the VMX to the host kernel.
*/
typedef struct {
VA64 list; // List of VMCIIdRange
uint32 len; // Length of list
uint8 dir; // VMCI_FD_X
uint8 proto; // VMCI_FP_X
} VMCIFilterInfo;
/*
* In the host kernel, the incoming and outgoing filters are
* separated. The VMCIProtoFilters type captures all filters in one
* direction. The VMCIFilters type captures all filters.
*/
typedef VMCIFilterList VMCIProtoFilters[VMCI_FP_MAX];
typedef VMCIProtoFilters VMCIFilters[VMCI_FD_MAX];
#endif // _VMCI_DEF_H_
vmci-only/shared/includeCheck.h 0000444 0000000 0000000 00000010063 12522066074 015473 0 ustar root root /*********************************************************
* Copyright (C) 1998 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* includeCheck.h --
*
* Restrict include file use.
*
* In every .h file, define one or more of these
*
* INCLUDE_ALLOW_VMX
* INCLUDE_ALLOW_USERLEVEL
* INCLUDE_ALLOW_VMCORE
* INCLUDE_ALLOW_MODULE
* INCLUDE_ALLOW_VMKERNEL
* INCLUDE_ALLOW_DISTRIBUTE
* INCLUDE_ALLOW_VMK_MODULE
* INCLUDE_ALLOW_VMKDRIVERS
* INCLUDE_ALLOW_VMIROM
* INCLUDE_ALLOW_MKS
*
* Then include this file.
*
* Any file that has INCLUDE_ALLOW_DISTRIBUTE defined will potentially
* be distributed in source form along with GPLed code. Ensure
* that this is acceptable.
*/
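/*
 * Illustrative sketch of the rule above: a header meant for userlevel code
 * and for kernel modules would begin with
 *
 *    #define INCLUDE_ALLOW_USERLEVEL
 *    #define INCLUDE_ALLOW_MODULE
 *    #include "includeCheck.h"
 *
 * Including that header from any other environment then trips one of the
 * #error directives below.
 */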
/*
* Declare a VMCORE-only variable to help classify object
* files. The variable goes in the common block and does
* not create multiple definition link-time conflicts.
*/
#if defined VMCORE && defined VMX86_DEVEL && defined VMX86_DEBUG && \
defined linux && !defined MODULE && \
!defined COMPILED_WITH_VMCORE
#define COMPILED_WITH_VMCORE compiled_with_vmcore
#ifdef ASM
.comm compiled_with_vmcore, 0
#else
asm(".comm compiled_with_vmcore, 0");
#endif /* ASM */
#endif
#if defined VMCORE && \
!(defined VMX86_VMX || defined VMM || \
defined MONITOR_APP || defined VMMON)
#error "Makefile problem: VMCORE without VMX86_VMX or \
VMM or MONITOR_APP or VMMON."
#endif
#if defined VMCORE && !defined INCLUDE_ALLOW_VMCORE
#error "The surrounding include file is not allowed in vmcore."
#endif
#undef INCLUDE_ALLOW_VMCORE
#if defined VMX86_VMX && !defined VMCORE && \
!defined INCLUDE_ALLOW_VMX && !defined INCLUDE_ALLOW_USERLEVEL && \
!defined INCLUDE_ALLOW_MKS
#error "The surrounding include file is not allowed in the VMX."
#endif
#undef INCLUDE_ALLOW_VMX
#if defined USERLEVEL && !defined VMX86_VMX && !defined VMCORE && \
!defined INCLUDE_ALLOW_USERLEVEL && !defined INCLUDE_ALLOW_MKS
#error "The surrounding include file is not allowed at userlevel."
#endif
#undef INCLUDE_ALLOW_USERLEVEL
#if defined MODULE && !defined VMKERNEL_MODULE && \
!defined VMMON && !defined INCLUDE_ALLOW_MODULE
#error "The surrounding include file is not allowed in driver modules."
#endif
#undef INCLUDE_ALLOW_MODULE
#if defined VMMON && !defined INCLUDE_ALLOW_VMMON
#error "The surrounding include file is not allowed in vmmon."
#endif
#undef INCLUDE_ALLOW_VMMON
#if defined VMKERNEL && !defined INCLUDE_ALLOW_VMKERNEL
#error "The surrounding include file is not allowed in the vmkernel."
#endif
#undef INCLUDE_ALLOW_VMKERNEL
#if defined GPLED_CODE && !defined INCLUDE_ALLOW_DISTRIBUTE
#error "The surrounding include file is not allowed in GPL code."
#endif
#undef INCLUDE_ALLOW_DISTRIBUTE
#if defined VMKERNEL_MODULE && !defined VMKERNEL && \
!defined INCLUDE_ALLOW_VMK_MODULE && !defined INCLUDE_ALLOW_VMKDRIVERS
#error "The surrounding include file is not allowed in vmkernel modules."
#endif
#undef INCLUDE_ALLOW_VMK_MODULE
#undef INCLUDE_ALLOW_VMKDRIVERS
#if defined VMIROM && ! defined INCLUDE_ALLOW_VMIROM
#error "The surrounding include file is not allowed in vmirom."
#endif
#undef INCLUDE_ALLOW_VMIROM
#if defined INCLUDE_ALLOW_MKS && \
!(defined LOCALMKS || defined REMOTEMKS || \
defined SERVERMKS || defined CLIENTMKS)
#error "The surrounding include file is not allowed outside of the MKS."
#endif
#undef INCLUDE_ALLOW_MKS
vmci-only/shared/vm_assert.h 0000444 0000000 0000000 00000025722 12522066074 015125 0 ustar root root /*********************************************************
* Copyright (C) 1998-2004 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vm_assert.h --
*
* The basic assertion facility for all VMware code.
*
* For proper use, see
* http://vmweb.vmware.com/~mts/WebSite/guide/programming/asserts.html
*/
#ifndef _VM_ASSERT_H_
#define _VM_ASSERT_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_VMKDRIVERS
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMIROM
#include "includeCheck.h"
// XXX not necessary except some places include vm_assert.h improperly
#include "vm_basic_types.h"
#include "vm_basic_defs.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* Some bits of vmcore are used in VMKernel code and cannot have
* the VMKERNEL define due to other header dependencies.
*/
#if defined(VMKERNEL) && !defined(VMKPANIC)
#define VMKPANIC 1
#endif
/*
* Internal macros, functions, and strings
*
* The monitor wants to save space at call sites, so it has specialized
* functions for each situation. User level wants to save on implementation
* so it uses generic functions.
*/
#if !defined VMM || defined MONITOR_APP // {
#if defined VMKPANIC
// vmkernel Panic() function does not want a trailing newline.
#define _ASSERT_PANIC(name) \
Panic(_##name##Fmt, __FILE__, __LINE__)
#define _ASSERT_PANIC_BUG(bug, name) \
Panic(_##name##Fmt " bugNr=%d", __FILE__, __LINE__, bug)
#define _ASSERT_PANIC_NORETURN(name) \
Panic_NoReturn(_##name##Fmt, __FILE__, __LINE__)
#else /* !VMKPANIC */
#define _ASSERT_PANIC(name) \
Panic(_##name##Fmt "\n", __FILE__, __LINE__)
#define _ASSERT_PANIC_BUG(bug, name) \
Panic(_##name##Fmt " bugNr=%d\n", __FILE__, __LINE__, bug)
#endif /* VMKPANIC */
#ifdef VMX86_DEVEL
# define _ASSERT_WARNING(name) \
Warning(_##name##Fmt "\n", __FILE__, __LINE__)
#else
# define _ASSERT_WARNING(name) \
Log(_##name##Fmt "\n", __FILE__, __LINE__)
#endif
#endif // }
// These don't have a trailing newline so a bug number can be tacked on.
#define _AssertPanicFmt "PANIC %s:%d"
#define _AssertAssertFmt "ASSERT %s:%d"
#define _AssertNotImplementedFmt "NOT_IMPLEMENTED %s:%d"
#define _AssertNotReachedFmt "NOT_REACHED %s:%d"
#define _AssertMemAllocFmt "MEM_ALLOC %s:%d"
#define _AssertNotTestedFmt "NOT_TESTED %s:%d"
/*
* Panic and log functions
*/
void Log(const char *fmt, ...) PRINTF_DECL(1, 2);
void Warning(const char *fmt, ...) PRINTF_DECL(1, 2);
#if defined VMKPANIC
void Panic_SaveRegs(void);
#ifdef VMX86_DEBUG
void Panic_NoSave(const char *fmt, ...) PRINTF_DECL(1, 2);
#else
NORETURN void Panic_NoSave(const char *fmt, ...) PRINTF_DECL(1, 2);
#endif
NORETURN void Panic_NoSaveNoReturn(const char *fmt, ...)
PRINTF_DECL(1, 2);
#define Panic(fmt...) do { \
Panic_SaveRegs(); \
Panic_NoSave(fmt); \
} while(0)
#define Panic_NoReturn(fmt...) do { \
Panic_SaveRegs(); \
Panic_NoSaveNoReturn(fmt); \
} while(0)
#else
NORETURN void Panic(const char *fmt, ...) PRINTF_DECL(1, 2);
#endif
void LogThrottled(uint32 *count, const char *fmt, ...)
PRINTF_DECL(2, 3);
void WarningThrottled(uint32 *count, const char *fmt, ...)
PRINTF_DECL(2, 3);
/*
* Stress testing: redefine ASSERT_IFNOT() to taste
*/
#ifndef ASSERT_IFNOT
/*
* PR 271512: When compiling with gcc, catch assignments inside an ASSERT.
*
* 'UNLIKELY' is defined with __builtin_expect, which does not warn when
* passed an assignment (gcc bug 36050). To get around this, we put 'cond'
* in an 'if' statement and make sure it never gets executed by putting
* that inside of 'if (0)'. We use gcc's statement expression syntax to
* make ASSERT an expression because some code uses it that way.
*
* Since statement expression syntax is a gcc extension and since it's
* not clear if this is a problem with other compilers, the ASSERT
* definition was not changed for them. Using a bare 'cond' with the
* ternary operator may provide a solution.
*/
#ifdef __GNUC__
#define ASSERT_IFNOT(cond, panic) \
({if (UNLIKELY(!(cond))) { panic; if (0) { if (cond) { ; } } } (void)0;})
#else
#define ASSERT_IFNOT(cond, panic) \
(UNLIKELY(!(cond)) ? (panic) : (void)0)
#endif
#endif
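/*
 * Illustrative consequence of the gcc form above (the variable name is
 * hypothetical): a typo such as ASSERT(rv = VMCI_SUCCESS) places the
 * assignment inside the dead 'if (cond)', so gcc's parentheses warning can
 * flag it, whereas a bare ternary would accept the assignment silently.
 */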
/*
* Assert, panic, and log macros
*
* Some of these are redefined below under !VMX86_DEBUG.
* ASSERT() is special cased because of interaction with Windows DDK.
*/
#if defined VMX86_DEBUG || defined ASSERT_ALWAYS_AVAILABLE
#undef ASSERT
#define ASSERT(cond) \
ASSERT_IFNOT(cond, _ASSERT_PANIC(AssertAssert))
#endif
#define ASSERT_BUG(bug, cond) \
ASSERT_IFNOT(cond, _ASSERT_PANIC_BUG(bug, AssertAssert))
#define ASSERT_BUG_DEBUGONLY(bug, cond) ASSERT_BUG(bug, cond)
#define PANIC() _ASSERT_PANIC(AssertPanic)
#define PANIC_BUG(bug) _ASSERT_PANIC_BUG(bug, AssertPanic)
#ifdef VMKPANIC
#define ASSERT_OR_IN_PANIC(cond) ASSERT((cond) || Panic_IsSystemInPanic())
#endif
#define ASSERT_NOT_IMPLEMENTED(cond) \
ASSERT_IFNOT(cond, NOT_IMPLEMENTED())
#define ASSERT_NOT_IMPLEMENTED_BUG(bug, cond) \
ASSERT_IFNOT(cond, NOT_IMPLEMENTED_BUG(bug))
#if defined VMKPANIC && defined VMX86_DEBUG
#define NOT_IMPLEMENTED() _ASSERT_PANIC_NORETURN(AssertNotImplemented)
#else
#define NOT_IMPLEMENTED() _ASSERT_PANIC(AssertNotImplemented)
#endif
#define NOT_IMPLEMENTED_BUG(bug) _ASSERT_PANIC_BUG(bug, AssertNotImplemented)
#if defined VMKPANIC && defined VMX86_DEBUG
#define NOT_REACHED() _ASSERT_PANIC_NORETURN(AssertNotReached)
#else
#define NOT_REACHED() _ASSERT_PANIC(AssertNotReached)
#endif
#define NOT_REACHED_BUG(bug) _ASSERT_PANIC_BUG(bug, AssertNotReached)
#define ASSERT_MEM_ALLOC(cond) \
ASSERT_IFNOT(cond, _ASSERT_PANIC(AssertMemAlloc))
#ifdef VMX86_DEVEL
#define ASSERT_DEVEL(cond) ASSERT(cond)
#else
#define ASSERT_DEVEL(cond) ((void) 0)
#endif
#define ASSERT_NO_INTERRUPTS() ASSERT(!INTERRUPTS_ENABLED())
#define ASSERT_HAS_INTERRUPTS() ASSERT(INTERRUPTS_ENABLED())
#define NOT_TESTED() _ASSERT_WARNING(AssertNotTested)
#define ASSERT_NOT_TESTED(cond) (UNLIKELY(!(cond)) ? NOT_TESTED() : (void)0)
#define NOT_TESTED_ONCE() DO_ONCE(NOT_TESTED())
#define NOT_TESTED_1024() \
do { \
static uint16 count = 0; \
if (UNLIKELY(count == 0)) { NOT_TESTED(); } \
count = (count + 1) & 1023; \
} while (0)
#define LOG_ONCE(_s) DO_ONCE(Log _s)
#ifdef VMX86_DEVEL
#define DEPRECATED(_fix) DO_ONCE( \
Warning("%s:%d: %s is DEPRECATED; %s\n", \
__FILE__, __LINE__, __FUNCTION__, \
_fix))
#else
#define DEPRECATED(_fix) do {} while (0)
#endif
/*
* Redefine macros that are only in debug versions
*/
#if !defined VMX86_DEBUG && !defined ASSERT_ALWAYS_AVAILABLE // {
#undef ASSERT
#define ASSERT(cond) ((void) 0)
#undef ASSERT_BUG_DEBUGONLY
#define ASSERT_BUG_DEBUGONLY(bug, cond) ((void) 0)
#undef ASSERT_LENGTH
#define ASSERT_LENGTH(real, expected) ((void) 0)
/*
* Expand NOT_REACHED() as appropriate for each situation.
*
* Mainly, we want the compiler to infer the same control-flow
* information as it would from Panic(). Otherwise, different
* compilation options will lead to different control-flow-derived
* errors, causing some make targets to fail while others succeed.
*
* VC++ has the __assume() built-in function which we don't trust
* (see bug 43485); gcc has no such construct; we just panic in
* userlevel code. The monitor doesn't want to pay the size penalty
* (measured at 212 bytes for the release vmm for a minimal infinite
* loop; panic would cost even more) so it does without and lives
* with the inconsistency.
*/
#ifdef VMM
#undef NOT_REACHED
#define NOT_REACHED() ((void) 0)
#else
// keep debug definition
#endif
#undef ASSERT_LOG_UNEXPECTED
#define ASSERT_LOG_UNEXPECTED(bug, cond) ((void) 0)
#undef LOG_UNEXPECTED
#define LOG_UNEXPECTED(bug) ((void) 0)
#undef ASSERT_NOT_TESTED
#define ASSERT_NOT_TESTED(cond) ((void) 0)
#undef NOT_TESTED
#define NOT_TESTED() ((void) 0)
#undef NOT_TESTED_ONCE
#define NOT_TESTED_ONCE() ((void) 0)
#undef NOT_TESTED_1024
#define NOT_TESTED_1024() ((void) 0)
#endif // !VMX86_DEBUG }
/*
* Compile-time assertions.
*
* ASSERT_ON_COMPILE does not use the common
* switch (0) { case 0: case (e): ; } trick because some compilers (e.g. MSVC)
* generate code for it.
*
* The implementation uses both enum and typedef because the typedef alone is
* insufficient; gcc allows arrays to be declared with non-constant expressions
* (even in typedefs, where it makes no sense).
*
* NOTE: if GCC ever changes so that it ignores unused types altogether, this
* assert might not fire! We explicitly mark it as unused because GCC 4.8+
* uses -Wunused-local-typedefs as part of -Wall, which means the typedef will
* generate a warning.
*/
#define ASSERT_ON_COMPILE(e) \
do { \
enum { AssertOnCompileMisused = ((e) ? 1 : -1) }; \
UNUSED_TYPE(typedef char AssertOnCompileFailed[AssertOnCompileMisused]); \
} while (0)
/*
* To put an ASSERT_ON_COMPILE() outside a function, wrap it
* in MY_ASSERTS(). The first parameter must be unique in
* each .c file where it appears. For example,
*
* MY_ASSERTS(FS3_INT,
* ASSERT_ON_COMPILE(sizeof(FS3_DiskLock) == 128);
* ASSERT_ON_COMPILE(sizeof(FS3_DiskLockReserved) == DISK_BLOCK_SIZE);
* ASSERT_ON_COMPILE(sizeof(FS3_DiskBlock) == DISK_BLOCK_SIZE);
* ASSERT_ON_COMPILE(sizeof(Hardware_DMIUUID) == 16);
* )
*
* Caution: ASSERT() within MY_ASSERTS() is silently ignored.
* The same goes for anything else not evaluated at compile time.
*/
#define MY_ASSERTS(name, assertions) \
static INLINE void name(void) { \
assertions \
}
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* ifndef _VM_ASSERT_H_ */
vmci-only/shared/dbllnklst.h 0000444 0000000 0000000 00000014744 12522066072 015113 0 ustar root root /*********************************************************
* Copyright (C) 1998 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* dbllnklst.h --
*
* Doubly linked lists
*/
#ifndef _DBLLNKLST_H_
#define _DBLLNKLST_H_
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_USERLEVEL
#include "includeCheck.h"
#include "vm_basic_types.h"
#define DblLnkLst_OffsetOf(type, field) ((intptr_t)&((type *)0)->field)
#define DblLnkLst_Container(addr, type, field) \
((type *)((char *)(addr) - DblLnkLst_OffsetOf(type, field)))
#define DblLnkLst_ForEach(curr, head) \
for (curr = (head)->next; curr != (head); curr = (curr)->next)
/* Safe from list element removal within loop body. */
#define DblLnkLst_ForEachSafe(curr, nextElem, head) \
for (curr = (head)->next, nextElem = (curr)->next; \
curr != (head); \
curr = nextElem, nextElem = (curr)->next)
typedef struct DblLnkLst_Links {
struct DblLnkLst_Links *prev;
struct DblLnkLst_Links *next;
} DblLnkLst_Links;
/*
* Functions
*
* DblLnkLst_LinkFirst, DblLnkLst_LinkLast, and DblLnkLst_Swap are specific
* to anchored lists. The rest are for both circular and anchored lists.
*/
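/*
 * Illustrative usage sketch (Foo and its fields are hypothetical): embed the
 * links in a struct, put it on an anchored list, and recover the container
 * while iterating.
 *
 *    typedef struct Foo {
 *       int value;
 *       DblLnkLst_Links links;
 *    } Foo;
 *
 *    DblLnkLst_Links head, *curr;
 *    Foo foo;
 *
 *    DblLnkLst_Init(&head);
 *    DblLnkLst_Init(&foo.links);
 *    DblLnkLst_LinkLast(&head, &foo.links);
 *    DblLnkLst_ForEach(curr, &head) {
 *       Foo *f = DblLnkLst_Container(curr, Foo, links);
 *       f->value++;
 *    }
 */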
/*
*----------------------------------------------------------------------
*
* DblLnkLst_Init --
*
* Initialize a member of a doubly linked list
*
* Result
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE void
DblLnkLst_Init(DblLnkLst_Links *l) // IN
{
l->prev = l->next = l;
}
/*
*----------------------------------------------------------------------
*
* DblLnkLst_Link --
*
* Merge two doubly linked lists into one
*
* The operation is commutative
* The operation is invertible (its inverse is DblLnkLst_Unlink)
*
* Result
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE void
DblLnkLst_Link(DblLnkLst_Links *l1, // IN
DblLnkLst_Links *l2) // IN
{
DblLnkLst_Links *tmp;
(tmp = l1->prev)->next = l2;
(l1->prev = l2->prev)->next = l1;
l2->prev = tmp ;
}
/*
*----------------------------------------------------------------------
*
* DblLnkLst_Unlink --
*
* Split one doubly linked list into two
*
* No check is performed: the caller must ensure that both members
* belong to the same doubly linked list
*
* The operation is commutative
* The operation is invertible (its inverse is DblLnkLst_Link)
*
* Result
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE void
DblLnkLst_Unlink(DblLnkLst_Links *l1, // IN
DblLnkLst_Links *l2) // IN
{
DblLnkLst_Links *tmp;
tmp = l1->prev ;
(l1->prev = l2->prev)->next = l1;
(l2->prev = tmp )->next = l2;
}
/*
*----------------------------------------------------------------------
*
* DblLnkLst_Unlink1 --
*
* Unlink an element from its list.
*
* Result
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE void
DblLnkLst_Unlink1(DblLnkLst_Links *l) // IN
{
DblLnkLst_Unlink(l, l->next);
}
/*
*----------------------------------------------------------------------------
*
* DblLnkLst_IsLinked --
*
* Determines whether an element is linked with any other elements.
*
* Results:
* TRUE if link is linked, FALSE otherwise.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------------
*/
static INLINE Bool
DblLnkLst_IsLinked(DblLnkLst_Links const *l) // IN
{
/*
* A DblLnkLst_Links is either linked to itself (not linked) or linked to
* other elements in a list (linked).
*/
return l->prev != l;
}
/*
*----------------------------------------------------------------------
*
* DblLnkLst_LinkFirst --
*
* Insert 'l' at the beginning of the list anchored at 'head'
*
* Result
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE void
DblLnkLst_LinkFirst(DblLnkLst_Links *head, // IN
DblLnkLst_Links *l) // IN
{
DblLnkLst_Link(head->next, l);
}
/*
*----------------------------------------------------------------------
*
* DblLnkLst_LinkLast --
*
* Insert 'l' at the end of the list anchored at 'head'
*
* Result
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE void
DblLnkLst_LinkLast(DblLnkLst_Links *head, // IN
DblLnkLst_Links *l) // IN
{
DblLnkLst_Link(head, l);
}
/*
*----------------------------------------------------------------------
*
* DblLnkLst_Swap --
*
* Swap all entries between the list anchored at 'head1' and the list
* anchored at 'head2'.
*
* The operation is commutative
* The operation is invertible (its inverse is itself)
*
* Result
* None
*
* Side effects:
* None
*
*----------------------------------------------------------------------
*/
static INLINE void
DblLnkLst_Swap(DblLnkLst_Links *head1, // IN/OUT
DblLnkLst_Links *head2) // IN/OUT
{
DblLnkLst_Links const tmp = *head1;
if (DblLnkLst_IsLinked(head2)) {
(head1->prev = head2->prev)->next = head1;
(head1->next = head2->next)->prev = head1;
} else {
DblLnkLst_Init(head1);
}
if (tmp.prev != head1) {
(head2->prev = tmp.prev)->next = head2;
(head2->next = tmp.next)->prev = head2;
} else {
DblLnkLst_Init(head2);
}
}
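/*
 * Illustrative usage sketch (compiled out; not part of the driver proper):
 * build a two-element list on a dedicated head node, walk it, then empty it.
 * Traversal stops when the cursor wraps back to the head, since the list is
 * circular.
 */
#if 0
static void
DblLnkLstExample(void)
{
   DblLnkLst_Links head, a, b;
   DblLnkLst_Links *cur;

   DblLnkLst_Init(&head);
   DblLnkLst_Init(&a);
   DblLnkLst_Init(&b);

   DblLnkLst_LinkLast(&head, &a);            /* head -> a           */
   DblLnkLst_LinkLast(&head, &b);            /* head -> a -> b      */

   for (cur = head.next; cur != &head; cur = cur->next) {
      /* Visit 'cur'; real callers recover the enclosing struct here. */
   }

   DblLnkLst_Unlink1(&a);                    /* head -> b           */
   DblLnkLst_Unlink1(&b);                    /* head is empty again */
   /* DblLnkLst_IsLinked(&head) is now FALSE. */
}
#endif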
#endif /* _DBLLNKLST_H_ */
vmci-only/shared/vmciKernelAPI2.h 0000444 0000000 0000000 00000004104 12522066074 015624 0 ustar root root /*********************************************************
* Copyright (C) 2010 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmciKernelAPI2.h --
*
* Kernel API (v2) exported from the VMCI host and guest drivers.
*/
#ifndef __VMCI_KERNELAPI_2_H__
#define __VMCI_KERNELAPI_2_H__
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#include "includeCheck.h"
#include "vmciKernelAPI1.h"
/* Define version 2. */
#undef VMCI_KERNEL_API_VERSION
#define VMCI_KERNEL_API_VERSION_2 2
#define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_2
/* VMCI Doorbell API. */
#define VMCI_FLAG_DELAYED_CB 0x01
typedef void (*VMCICallback)(void *clientData);
int vmci_doorbell_create(VMCIHandle *handle, uint32 flags,
VMCIPrivilegeFlags privFlags, VMCICallback notifyCB,
void *clientData);
int vmci_doorbell_destroy(VMCIHandle handle);
int vmci_doorbell_notify(VMCIHandle handle, VMCIPrivilegeFlags privFlags);
/* Typedefs for all of the above, used by the IOCTLs and the kernel library. */
typedef int (VMCIDoorbell_CreateFct)(VMCIHandle *, uint32, VMCIPrivilegeFlags,
VMCICallback, void *);
typedef int (VMCIDoorbell_DestroyFct)(VMCIHandle);
typedef int (VMCIDoorbell_NotifyFct)(VMCIHandle, VMCIPrivilegeFlags);
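/*
 * Illustrative usage sketch (compiled out): create a doorbell, ring it once,
 * and tear it down. VMCI_INVALID_HANDLE, VMCI_SUCCESS and
 * VMCI_NO_PRIVILEGE_FLAGS are assumed to come from vmci_defs.h (pulled in
 * via vmciKernelAPI1.h).
 */
#if 0
static void
DoorbellFired(void *clientData) // runs (possibly delayed) when rung
{
   (void)clientData;
}

static int
DoorbellExample(void)
{
   VMCIHandle handle = VMCI_INVALID_HANDLE;
   int rv;

   rv = vmci_doorbell_create(&handle, VMCI_FLAG_DELAYED_CB,
                             VMCI_NO_PRIVILEGE_FLAGS, DoorbellFired, NULL);
   if (rv != VMCI_SUCCESS) {
      return rv;
   }
   rv = vmci_doorbell_notify(handle, VMCI_NO_PRIVILEGE_FLAGS);
   (void)vmci_doorbell_destroy(handle);
   return rv;
}
#endif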
#endif /* !__VMCI_KERNELAPI_2_H__ */
vmci-only/shared/circList.h 0000444 0000000 0000000 00000023650 12522066072 014672 0 ustar root root /*********************************************************
* Copyright (C) 1998 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* circList.h --
*
* macros, prototypes and struct definitions for doubly linked
* circular lists.
*/
#ifndef _CIRCLIST_H_
#define _CIRCLIST_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#include "includeCheck.h"
#include "vmware.h"
typedef struct ListItem {
struct ListItem *prev;
struct ListItem *next;
} ListItem;
/* A list with no elements is a null pointer. */
#define LIST_ITEM_DEF(name) \
ListItem * name = NULL
#define LIST_EMPTY(l) ((l) == NULL)
/* initialize list item */
#define INIT_LIST_ITEM(p) \
do { \
(p)->prev = (p)->next = (p); \
} while (0)
/* check if initialized */
#define IS_LIST_ITEM_INITIALIZED(li) \
(((li) == (li)->prev) && ((li) == (li)->next))
/* return first element in the list */
#define LIST_FIRST(l) (l)
#define LIST_FIRST_CHK(l) (l)
/* return last element in the list */
#define LIST_LAST(l) ((l)->prev)
#define LIST_LAST_CHK(l) (LIST_EMPTY(l) ? NULL : LIST_LAST(l))
/*
* LIST_CONTAINER - get the struct for this entry (like list_entry)
* @ptr: the &struct ListItem pointer.
* @type: the type of the struct this is embedded in.
* @member: the name of the list struct within the struct.
*/
#define LIST_CONTAINER(ptr, type, member) \
VMW_CONTAINER_OF(ptr, type, member)
/*
* delete item from the list
*/
#define LIST_DEL DelListItem
/*
* link two lists together
*/
#define LIST_SPLICE SpliceLists
/*
* Split a list into two lists
*/
#define LIST_SPLIT SplitLists
/*
* Add item to front of stack. List pointer points to new head.
*/
#define LIST_PUSH PushListItem
/*
* Add item at back of queue. List pointer only changes if list was empty.
*/
#define LIST_QUEUE QueueListItem
/*
* Get the list size.
*/
#define LIST_SIZE GetListSize
/*
* LIST_SCAN_FROM scans the list from "from" up until "until".
* The loop variable p should not be destroyed in the process.
* "from" is an element in the list where to start scanning.
* "until" is the element where search should stop.
* member is the field to use for the search - either "next" or "prev".
*/
#define LIST_SCAN_FROM(p, from, until, member) \
for (p = (from); (p) != NULL; \
(p) = (((p)->member == (until)) ? NULL : (p)->member))
/* scan the entire list (non-destructively) */
#define LIST_SCAN(p, l) \
LIST_SCAN_FROM(p, LIST_FIRST(l), LIST_FIRST(l), next)
/* scan a list backward from last element to first (non-destructively) */
#define LIST_SCAN_BACK(p, l) \
LIST_SCAN_FROM(p, LIST_LAST_CHK(l), LIST_LAST(l), prev)
/* scan the entire list where loop element may be destroyed */
#define LIST_SCAN_SAFE(p, pn, l) \
if (!LIST_EMPTY(l)) \
for (p = (l), (pn) = NextListItem(p, l); (p) != NULL; \
(p) = (pn), (pn) = NextListItem(p, l))
/* scan the entire list backwards where loop element may be destroyed */
#define LIST_SCAN_BACK_SAFE(p, pn, l) \
if (!LIST_EMPTY(l)) \
for (p = LIST_LAST(l), (pn) = PrevListItem(p, l); (p) != NULL; \
(p) = (pn), (pn) = PrevListItem(p, l))
/* function definitions */
/*
*----------------------------------------------------------------------
*
* NextListItem --
*
* Returns the next member of a doubly linked list, or NULL if last.
* Assumes: p is member of the list headed by head.
*
* Result:
* If head or p is NULL, return NULL. Otherwise,
* next list member (or null if last).
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static INLINE ListItem *
NextListItem(ListItem *p, // IN
ListItem *head) // IN
{
if (head == NULL || p == NULL) {
return NULL;
}
/* both p and head are non-null */
p = p->next;
return p == head ? NULL : p;
}
/*
*----------------------------------------------------------------------
*
* PrevListItem --
*
* Returns the prev member of a doubly linked list, or NULL if first.
* Assumes: p is member of the list headed by head.
*
* Result:
* If head or p is NULL, return NULL. Otherwise,
* prev list member (or null if first).
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static INLINE ListItem *
PrevListItem(ListItem *p, // IN
ListItem *head) // IN
{
if (head == NULL || p == NULL) {
return NULL;
}
/* both p and head are non-null */
return p == head ? NULL : p->prev;
}
/*
*----------------------------------------------------------------------
*
* DelListItem --
*
* Deletes a member of a doubly linked list, possibly modifies the
* list header itself.
* Assumes neither p nor headp is null and p is a member of *headp.
*
* Result:
* None
*
* Side effects:
* Modifies *headp.
*
*----------------------------------------------------------------------
*/
static INLINE void
DelListItem(ListItem *p, // IN
ListItem **headp) // IN/OUT
{
ListItem *next;
ASSERT(p);
ASSERT(headp);
next = p->next;
if (p == next) {
*headp = NULL;
} else {
next->prev = p->prev;
p->prev->next = next;
if (*headp == p) {
*headp = next;
}
}
}
/*
*----------------------------------------------------------------------
*
* QueueListItem --
*
* Adds a new member to the back of a doubly linked list (queue)
* Assumes neither p nor headp is null and p is not a member of *headp.
*
* Result:
* None
*
* Side effects:
* Modifies *headp.
*
*----------------------------------------------------------------------
*/
static INLINE void
QueueListItem(ListItem *p, // IN
ListItem **headp) // IN/OUT
{
ListItem *head;
head = *headp;
if (LIST_EMPTY(head)) {
INIT_LIST_ITEM(p);
*headp = p;
} else {
p->prev = head->prev;
p->next = head;
p->prev->next = p;
head->prev = p;
}
}
/*
*----------------------------------------------------------------------
*
* PushListItem --
*
* Adds a new member to the front of a doubly linked list (stack)
* Assumes neither p nor headp is null and p is not a member of *headp.
*
* Result:
* None
*
* Side effects:
* Modifies *headp.
*
*----------------------------------------------------------------------
*/
static INLINE void
PushListItem(ListItem *p, // IN
ListItem **headp) // IN/OUT
{
QueueListItem(p, headp);
*headp = p;
}
/*
*----------------------------------------------------------------------
*
* SpliceLists --
*
* Make a single list {l1 l2} from {l1} and {l2} and return it.
* It is okay for one or both lists to be NULL.
* No checking is done. It is assumed that l1 and l2 are two
* distinct lists.
*
* Result:
* A list { l1 l2 }.
*
* Side effects:
* Modifies l1 and l2 list pointers.
*
*----------------------------------------------------------------------
*/
static INLINE ListItem *
SpliceLists(ListItem *l1, // IN
ListItem *l2) // IN
{
ListItem *l1Last, *l2Last;
if (LIST_EMPTY(l1)) {
return l2;
}
if (LIST_EMPTY(l2)) {
return l1;
}
l1Last = l1->prev; /* last elem of l1 */
l2Last = l2->prev; /* last elem of l2 */
/*
* l1 -> ... -> l1Last    l2 -> ... -> l2Last
*/
l1Last->next = l2;
l2->prev = l1Last;
l1->prev = l2Last;
l2Last->next = l1;
return l1;
}
/*
*----------------------------------------------------------------------
*
* SplitLists --
*
* Make a list l = {l1 l2} into two separate lists {l1} and {l2}, where:
* l = { ... x -> p -> ... } split into:
* l1 = { ... -> x }
* l2 = { p -> ... }
* Assumes neither p nor l is null and p is a member of l.
* If p is the first element of l, then l1 will be NULL.
*
* Result:
* None.
*
* Side effects:
* Sets *l1p and *l2p to the resulting two lists.
* Modifies l's pointers.
*
*----------------------------------------------------------------------
*/
static INLINE void
SplitLists(ListItem *p, // IN
ListItem *l, // IN
ListItem **l1p, // OUT
ListItem **l2p) // OUT
{
ListItem *last;
if (p == LIST_FIRST(l)) { /* first element */
*l1p = NULL;
*l2p = l;
return;
}
last = l->prev;
*l1p = l;
p->prev->next = l;
l->prev = p->prev;
*l2p = p;
p->prev = last;
last->next = p;
}
/*
*----------------------------------------------------------------------
*
* GetListSize --
*
* Return the number of items in the list.
*
* Result:
* The number of items in the list.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static INLINE int
GetListSize(ListItem *head) // IN
{
ListItem *li;
int ret = 0;
LIST_SCAN(li, head) {
ret++;
}
return ret;
}
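/*
 * Illustrative usage sketch (compiled out): a queue of integers. Elements
 * embed a ListItem and are recovered with LIST_CONTAINER; the empty list is
 * simply a NULL head pointer. 'IntNode' and 'SumQueue' are invented for the
 * example.
 */
#if 0
typedef struct IntNode {
   int value;
   ListItem links;
} IntNode;

static int
SumQueue(IntNode *nodes, int n)
{
   LIST_ITEM_DEF(head);                /* ListItem *head = NULL */
   ListItem *li;
   int i, sum = 0;

   for (i = 0; i < n; i++) {
      LIST_QUEUE(&nodes[i].links, &head);
   }
   LIST_SCAN(li, head) {
      sum += LIST_CONTAINER(li, IntNode, links)->value;
   }
   while (!LIST_EMPTY(head)) {
      LIST_DEL(LIST_FIRST(head), &head);
   }
   return sum;
}
#endif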
#endif /* _CIRCLIST_H_ */
vmci-only/shared/compat_semaphore.h 0000444 0000000 0000000 00000003142 12522066073 016437 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_SEMAPHORE_H__
# define __COMPAT_SEMAPHORE_H__
/* <= 2.6.25 have asm/semaphore.h only, 2.6.26 has both, and 2.6.27-rc2+ has linux/semaphore.h only. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
# include <asm/semaphore.h>
#else
# include <linux/semaphore.h>
#endif
/*
* The init_MUTEX_LOCKED() API appeared in 2.2.18, and is also in
* 2.2.17-21mdk --hpreg
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18)
#ifndef init_MUTEX_LOCKED
#define init_MUTEX_LOCKED(_sem) *(_sem) = MUTEX_LOCKED
#endif
#ifndef DECLARE_MUTEX
#define DECLARE_MUTEX(name) struct semaphore name = MUTEX
#endif
#ifndef DECLARE_MUTEX_LOCKED
#define DECLARE_MUTEX_LOCKED(name) struct semaphore name = MUTEX_LOCKED
#endif
#endif
#endif /* __COMPAT_SEMAPHORE_H__ */
vmci-only/shared/vmciKernelAPI.h 0000444 0000000 0000000 00000002410 12522066074 015540 0 ustar root root /*********************************************************
* Copyright (C) 2010 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmciKernelAPI.h --
*
* Kernel API (current) exported from the VMCI host and guest drivers.
*/
#ifndef __VMCI_KERNELAPI_H__
#define __VMCI_KERNELAPI_H__
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#include "includeCheck.h"
/* With this file you always get the latest version. */
#include "vmciKernelAPI1.h"
#include "vmciKernelAPI2.h"
#endif /* !__VMCI_KERNELAPI_H__ */
vmci-only/shared/compat_cred.h 0000444 0000000 0000000 00000003013 12522066073 015366 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_CRED_H__
# define __COMPAT_CRED_H__
/*
* Include linux/cred.h via linux/sched.h - it is not nice, but
* since cpp does not have #ifexist, it is the best we can do.
*/
#include <linux/sched.h>
#if !defined(current_fsuid) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
#define current_uid() (current->uid)
#define current_euid() (current->euid)
#define current_fsuid() (current->fsuid)
#define current_gid() (current->gid)
#define current_egid() (current->egid)
#define current_fsgid() (current->fsgid)
#endif
#if !defined(cap_set_full)
/* cap_set_full was removed in kernel version 3.0-rc4. */
#define cap_set_full(_c) do { (_c) = CAP_FULL_SET; } while (0)
#endif
#endif /* __COMPAT_CRED_H__ */
vmci-only/shared/compat_fs.h 0000444 0000000 0000000 00000024277 12522066073 015100 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_FS_H__
# define __COMPAT_FS_H__
#include <linux/fs.h>
/*
* 2.6.5+ kernels define FS_BINARY_MOUNTDATA. Since it didn't exist and
* wasn't used prior, it's safe to define it to zero.
*/
#ifndef FS_BINARY_MOUNTDATA
#define FS_BINARY_MOUNTDATA 0
#endif
/*
* MAX_LFS_FILESIZE wasn't defined until 2.5.4.
*/
#ifndef MAX_LFS_FILESIZE
# include <linux/pagemap.h>
# if BITS_PER_LONG == 32
# define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG - 1)) - 1)
# elif BITS_PER_LONG == 64
# define MAX_LFS_FILESIZE 0x7fffffffffffffffUL
# endif
#endif
/*
* sendfile as a VFS op was born in 2.5.30. Unfortunately, it also changed
* signatures, first in 2.5.47, then again in 2.5.70, then again in 2.6.8.
* Luckily, the 2.6.8+ signature is the same as the 2.5.47 signature. And
* as of 2.6.23-rc1 sendfile is gone, replaced by splice_read...
*
* Let's not support sendfile from 2.5.30 to 2.5.47, because the 2.5.30
* signature is much different and file_send_actor isn't externed.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
#define VMW_SENDFILE_NONE
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 8)
#define VMW_SENDFILE_NEW
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 70)
#define VMW_SENDFILE_OLD
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 47)
#define VMW_SENDFILE_NEW
#else
#define VMW_SENDFILE_NONE
#endif
/*
* splice_read is there since 2.6.17, but let's avoid 2.6.17-rcX kernels...
* After all, nobody used the splice system call until 2.6.23 started using
* it to implement sendfile.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
#define VMW_SPLICE_READ 1
#endif
/*
* Filesystems wishing to use generic page cache read/write routines are
* supposed to implement aio_read and aio_write (calling into
* generic_file_aio_read() and generic_file_aio_write() if necessary).
*
* The VFS exports do_sync_read() and do_sync_write() as the "new"
* generic_file_read() and generic_file_write(), but filesystems need not
* actually implement read and write: the VFS will automatically call
* do_sync_write() and do_sync_read() when applications invoke the standard
* read() and write() system calls.
*
* In 2.6.19, generic_file_read() and generic_file_write() were removed,
* necessitating this change. AIO dates as far back as 2.5.42, but the API has
* changed over time, so for simplicity, we'll only enable it from 2.6.19 on.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
# define VMW_USE_AIO
#endif
/*
* The alloc_inode and destroy_inode VFS ops didn't exist prior to 2.4.21.
* Without these functions, file systems can't embed inodes.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 21)
# define VMW_EMBED_INODE
#endif
/*
* iget() was removed from the VFS as of 2.6.25-rc1. The replacement for iget()
* is iget_locked() which was added in 2.5.17.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 17)
# define VMW_USE_IGET_LOCKED
#endif
/*
* parent_ino was born in 2.5.5. For older kernels, let's use the 2.5.5
* implementation. It uses the dcache lock, which is OK because per-dentry
* locking appeared after 2.5.5.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 5)
#define compat_parent_ino(dentry) parent_ino(dentry)
#else
#define compat_parent_ino(dentry) \
({ \
ino_t res; \
spin_lock(&dcache_lock); \
res = dentry->d_parent->d_inode->i_ino; \
spin_unlock(&dcache_lock); \
res; \
})
#endif
/*
* putname changed to __putname in 2.6.6.
*/
#define compat___getname() __getname()
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 6)
#define compat___putname(name) putname(name)
#else
#define compat___putname(name) __putname(name)
#endif
/*
* inc_nlink, drop_nlink, and clear_nlink were added in 2.6.19.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
#define compat_inc_nlink(inode) ((inode)->i_nlink++)
#define compat_drop_nlink(inode) ((inode)->i_nlink--)
#define compat_clear_nlink(inode) ((inode)->i_nlink = 0)
#else
#define compat_inc_nlink(inode) inc_nlink(inode)
#define compat_drop_nlink(inode) drop_nlink(inode)
#define compat_clear_nlink(inode) clear_nlink(inode)
#endif
/*
* i_size_write and i_size_read were introduced in 2.6.0-test1
* (though we'll look for them as of 2.6.1). They employ slightly different
* locking in order to guarantee atomicity, depending on the length of a long,
* whether the kernel is SMP, or whether the kernel is preemptible. Prior to
* i_size_write and i_size_read, there was no such locking, so that's the
* behavior we'll emulate.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 1)
#define compat_i_size_read(inode) ((inode)->i_size)
#define compat_i_size_write(inode, size) ((inode)->i_size = size)
#else
#define compat_i_size_read(inode) i_size_read(inode)
#define compat_i_size_write(inode, size) i_size_write(inode, size)
#endif
/*
* filemap_fdatawrite was introduced in 2.5.12. Prior to that, modules used
* filemap_fdatasync instead. In 2.4.18, both filemap_fdatawrite and
* filemap_fdatawait began returning status codes. Prior to that, they were
* void functions, so we'll just have them return 0.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 18)
#define compat_filemap_fdatawrite(mapping) \
({ \
int result = 0; \
filemap_fdatasync(mapping); \
result; \
})
#define compat_filemap_fdatawait(mapping) \
({ \
int result = 0; \
filemap_fdatawait(mapping); \
result; \
})
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 12)
#define compat_filemap_fdatawrite(mapping) filemap_fdatasync(mapping)
#define compat_filemap_fdatawait(mapping) filemap_fdatawait(mapping)
#else
#define compat_filemap_fdatawrite(mapping) filemap_fdatawrite(mapping)
#define compat_filemap_fdatawait(mapping) filemap_fdatawait(mapping)
#endif
/*
* filemap_write_and_wait was introduced in 2.6.6 and exported for module use
* in 2.6.16. It's really just a simple wrapper around filemap_fdatawrite and
* filemap_fdatawait, which initiates a flush of all dirty pages, then
* waits for the pages to flush. The implementation here is a simplified form
* of the one found in 2.6.20-rc3.
*
* Unfortunately, it just isn't possible to implement this prior to 2.4.5, when
* neither filemap_fdatawait nor filemap_fdatasync were exported for module
* use. So we'll define it out and hope for the best.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 5)
#define compat_filemap_write_and_wait(mapping)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16)
#define compat_filemap_write_and_wait(mapping) \
({ \
int result = 0; \
if (mapping->nrpages) { \
result = compat_filemap_fdatawrite(mapping); \
if (result != -EIO) { \
int result2 = compat_filemap_fdatawait(mapping); \
if (!result) { \
result = result2; \
} \
} \
} \
result; \
})
#else
#define compat_filemap_write_and_wait(mapping) filemap_write_and_wait(mapping)
#endif
/*
* invalidate_remote_inode was introduced in 2.6.0-test5. Prior to that,
* filesystems wishing to invalidate pages belonging to an inode called
* invalidate_inode_pages.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
#define compat_invalidate_remote_inode(inode) invalidate_inode_pages(inode)
#else
#define compat_invalidate_remote_inode(inode) invalidate_remote_inode(inode)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
#define VMW_FSYNC_OLD
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
typedef umode_t compat_umode_t;
#else
typedef int compat_umode_t;
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)
#define d_make_root(inode) ({ \
struct dentry * ____res = d_alloc_root(inode); \
if (!____res) { \
iput(inode); \
} \
____res; \
})
#endif
#endif /* __COMPAT_FS_H__ */
vmci-only/shared/compat_ethtool.h 0000444 0000000 0000000 00000003662 12522066073 016141 0 ustar root root /*********************************************************
* Copyright (C) 2007 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef _COMPAT_ETHTOOL_H
#define _COMPAT_ETHTOOL_H
/*
* ethtool is a userspace utility for getting and setting ethernet device
* settings. Kernel support for it was first published in 2.4.0-test11, but
* only in 2.4.15 were the ethtool_value struct and the ETHTOOL_GLINK ioctl
* added to ethtool.h (together, because the ETHTOOL_GLINK ioctl expects a
* single value response).
*
* Likewise, ioctls for getting and setting TSO were published in 2.4.22.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0)
# include <linux/ethtool.h>
# ifndef ETHTOOL_GLINK
# define ETHTOOL_GLINK 0x0a
typedef struct {
__u32 cmd;
__u32 data;
} compat_ethtool_value;
# else
typedef struct ethtool_value compat_ethtool_value;
# endif
# ifndef ETHTOOL_GTSO
# define ETHTOOL_GTSO 0x1E
# define ETHTOOL_STSO 0x1F
# endif
#endif
#if COMPAT_LINUX_VERSION_CHECK_LT(3, 3, 0)
# define compat_ethtool_rxfh_indir_default(i, num_queues) (i % num_queues)
#else
# define compat_ethtool_rxfh_indir_default(i, num_queues) ethtool_rxfh_indir_default(i, num_queues)
#endif
#endif /* _COMPAT_ETHTOOL_H */
vmci-only/shared/vmciKernelAPI1.h 0000444 0000000 0000000 00000017514 12522066074 015634 0 ustar root root /*********************************************************
* Copyright (C) 2010 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmciKernelAPI1.h --
*
* Kernel API (v1) exported from the VMCI host and guest drivers.
*/
#ifndef __VMCI_KERNELAPI_1_H__
#define __VMCI_KERNELAPI_1_H__
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#include "includeCheck.h"
#include "vmci_defs.h"
#include "vmci_call_defs.h"
/* VMCI module namespace on vmkernel. */
#define MOD_VMCI_NAMESPACE "com.vmware.vmci"
/* Define version 1. */
#undef VMCI_KERNEL_API_VERSION
#define VMCI_KERNEL_API_VERSION_1 1
#define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_1
/* Macros to operate on the driver version number. */
#define VMCI_MAJOR_VERSION(v) (((v) >> 16) & 0xffff)
#define VMCI_MINOR_VERSION(v) ((v) & 0xffff)
#if defined(_WIN32)
/* Path to callback object in object manager, for Windows only. */
#define VMCI_CALLBACK_OBJECT_PATH L"\\Callback\\VMCIDetachCB"
#endif // _WIN32
/* VMCI Device Usage API. */
#if defined(linux) && !defined(VMKERNEL)
#define vmci_device_get(_a, _b, _c, _d) 1
#define vmci_device_release(_x)
#else // !linux
typedef void (VMCI_DeviceShutdownFn)(void *deviceRegistration,
void *userData);
Bool vmci_device_get(uint32 *apiVersion,
VMCI_DeviceShutdownFn *deviceShutdownCB,
void *userData, void **deviceRegistration);
void vmci_device_release(void *deviceRegistration);
#endif // !linux
#if defined(_WIN32)
/* Called when the client is unloading, for Windows only. */
void vmci_exit(void);
#endif // _WIN32
/* VMCI Datagram API. */
int vmci_datagram_create_handle(uint32 resourceId, uint32 flags,
VMCIDatagramRecvCB recvCB, void *clientData,
VMCIHandle *outHandle);
int vmci_datagram_create_handle_priv(uint32 resourceID, uint32 flags,
VMCIPrivilegeFlags privFlags,
VMCIDatagramRecvCB recvCB,
void *clientData, VMCIHandle *outHandle);
int vmci_datagram_destroy_handle(VMCIHandle handle);
int vmci_datagram_send(VMCIDatagram *msg);
/* VMCI Utility API. */
VMCIId vmci_get_context_id(void);
#if defined(linux) && !defined(VMKERNEL)
/* Returned value is a bool, 0 for false, 1 for true. */
int vmci_is_context_owner(VMCIId contextID, uid_t uid);
#else // !linux || VMKERNEL
/* Returned value is a VMCI error code. */
int vmci_is_context_owner(VMCIId contextID, void *hostUser);
#endif // !linux || VMKERNEL
uint32 vmci_version(void);
int vmci_cid_2_host_vm_id(VMCIId contextID, void *hostVmID,
size_t hostVmIDLen);
/* VMCI Event API. */
typedef void (*VMCI_EventCB)(VMCIId subID, VMCI_EventData *ed,
void *clientData);
int vmci_event_subscribe(VMCI_Event event,
#if !defined(linux) || defined(VMKERNEL)
uint32 flags,
#endif // !linux || VMKERNEL
VMCI_EventCB callback,
void *callbackData, VMCIId *subID);
int vmci_event_unsubscribe(VMCIId subID);
/* VMCI Context API */
VMCIPrivilegeFlags vmci_context_get_priv_flags(VMCIId contextID);
/* VMCI Queue Pair API. */
typedef struct VMCIQPair VMCIQPair;
int vmci_qpair_alloc(VMCIQPair **qpair, VMCIHandle *handle,
uint64 produceQSize, uint64 consumeQSize, VMCIId peer,
uint32 flags, VMCIPrivilegeFlags privFlags);
int vmci_qpair_detach(VMCIQPair **qpair);
int vmci_qpair_get_produce_indexes(const VMCIQPair *qpair,
uint64 *producerTail, uint64 *consumerHead);
int vmci_qpair_get_consume_indexes(const VMCIQPair *qpair,
uint64 *consumerTail, uint64 *producerHead);
int64 vmci_qpair_produce_free_space(const VMCIQPair *qpair);
int64 vmci_qpair_produce_buf_ready(const VMCIQPair *qpair);
int64 vmci_qpair_consume_free_space(const VMCIQPair *qpair);
int64 vmci_qpair_consume_buf_ready(const VMCIQPair *qpair);
ssize_t vmci_qpair_enqueue(VMCIQPair *qpair, const void *buf, size_t bufSize,
int mode);
ssize_t vmci_qpair_dequeue(VMCIQPair *qpair, void *buf, size_t bufSize,
int mode);
ssize_t vmci_qpair_peek(VMCIQPair *qpair, void *buf, size_t bufSize, int mode);
#if defined (SOLARIS) || (defined(__APPLE__) && !defined (VMX86_TOOLS)) || \
(defined(__linux__) && defined(__KERNEL__)) || \
(defined(_WIN32) && defined(WINNT_DDK))
/*
* Environments that support struct iovec
*/
ssize_t vmci_qpair_enquev(VMCIQPair *qpair, void *iov, size_t iovSize,
int mode);
ssize_t vmci_qpair_dequev(VMCIQPair *qpair, void *iov, size_t iovSize,
int mode);
ssize_t vmci_qpair_peekv(VMCIQPair *qpair, void *iov, size_t iovSize,
int mode);
#endif /* Systems that support struct iovec */
/* Typedefs for all of the above, used by the IOCTLs and the kernel library. */
typedef void (VMCI_DeviceReleaseFct)(void *);
typedef int (VMCIDatagram_CreateHndFct)(VMCIId, uint32, VMCIDatagramRecvCB,
void *, VMCIHandle *);
typedef int (VMCIDatagram_CreateHndPrivFct)(VMCIId, uint32, VMCIPrivilegeFlags,
VMCIDatagramRecvCB, void *,
VMCIHandle *);
typedef int (VMCIDatagram_DestroyHndFct)(VMCIHandle);
typedef int (VMCIDatagram_SendFct)(VMCIDatagram *);
typedef VMCIId (VMCI_GetContextIDFct)(void);
typedef uint32 (VMCI_VersionFct)(void);
typedef int (VMCI_ContextID2HostVmIDFct)(VMCIId, void *, size_t);
typedef int (VMCI_IsContextOwnerFct)(VMCIId, void *);
typedef int (VMCIEvent_SubscribeFct)(VMCI_Event, uint32, VMCI_EventCB, void *,
VMCIId *);
typedef int (VMCIEvent_UnsubscribeFct)(VMCIId);
typedef VMCIPrivilegeFlags (VMCIContext_GetPrivFlagsFct)(VMCIId);
typedef int (VMCIQPair_AllocFct)(VMCIQPair **, VMCIHandle *, uint64, uint64,
VMCIId, uint32, VMCIPrivilegeFlags);
typedef int (VMCIQPair_DetachFct)(VMCIQPair **);
typedef int (VMCIQPair_GetProduceIndexesFct)(const VMCIQPair *, uint64 *,
uint64 *);
typedef int (VMCIQPair_GetConsumeIndexesFct)(const VMCIQPair *, uint64 *,
uint64 *);
typedef int64 (VMCIQPair_ProduceFreeSpaceFct)(const VMCIQPair *);
typedef int64 (VMCIQPair_ProduceBufReadyFct)(const VMCIQPair *);
typedef int64 (VMCIQPair_ConsumeFreeSpaceFct)(const VMCIQPair *);
typedef int64 (VMCIQPair_ConsumeBufReadyFct)(const VMCIQPair *);
typedef ssize_t (VMCIQPair_EnqueueFct)(VMCIQPair *, const void *, size_t, int);
typedef ssize_t (VMCIQPair_DequeueFct)(VMCIQPair *, void *, size_t, int);
typedef ssize_t (VMCIQPair_PeekFct)(VMCIQPair *, void *, size_t, int);
typedef ssize_t (VMCIQPair_EnqueueVFct)(VMCIQPair *qpair, void *, size_t, int);
typedef ssize_t (VMCIQPair_DequeueVFct)(VMCIQPair *qpair, void *, size_t, int);
typedef ssize_t (VMCIQPair_PeekVFct)(VMCIQPair *qpair, void *, size_t, int);
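/*
 * Illustrative usage sketch (compiled out): attach a queue pair to a peer
 * context and push one buffer through it. The peer id, queue sizes, flags
 * and mode are made up for the example; VMCI_SUCCESS and
 * VMCI_NO_PRIVILEGE_FLAGS are assumed to come from vmci_defs.h.
 */
#if 0
static int
QPairExample(VMCIId peer)
{
   VMCIQPair *qpair = NULL;
   VMCIHandle handle;
   char msg[] = "ping";
   ssize_t n;
   int rv;

   rv = vmci_qpair_alloc(&qpair, &handle, 4096 /* produce */,
                         4096 /* consume */, peer, 0 /* flags */,
                         VMCI_NO_PRIVILEGE_FLAGS);
   if (rv != VMCI_SUCCESS) {
      return rv;
   }
   n = vmci_qpair_enqueue(qpair, msg, sizeof msg, 0 /* mode */);
   if (n < 0) {
      rv = (int)n;   /* negative values are VMCI error codes */
   }
   (void)vmci_qpair_detach(&qpair);
   return rv;
}
#endif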
#endif /* !__VMCI_KERNELAPI_1_H__ */
vmci-only/shared/compat_page-flags.h 0000444 0000000 0000000 00000005037 12522066073 016467 0 ustar root root /*********************************************************
* Copyright (C) 2007 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_PAGE_FLAGS_H__
# define __COMPAT_PAGE_FLAGS_H__
/* No page-flags.h prior to 2.5.12. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12)
# include <linux/page-flags.h>
#endif
/*
* The pgoff_t type was introduced in 2.5.20, but we'll look for it by
* definition since it's more convenient. Note that we want to avoid a
* situation where, in the future, a #define is changed to a typedef,
* so if pgoff_t is not defined in some future kernel, we won't define it.
*/
#if !defined(pgoff_t) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
#define pgoff_t unsigned long
#endif
/*
* set_page_writeback() was introduced in 2.6.6. Prior to that, callers were
* using the SetPageWriteback() macro directly, so that's what we'll use.
* Prior to 2.5.12, the writeback bit didn't exist, so we don't need to do
* anything.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 12)
#define compat_set_page_writeback(page)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 6)
#define compat_set_page_writeback(page) SetPageWriteback(page)
#else
#define compat_set_page_writeback(page) set_page_writeback(page)
#endif
/*
* end_page_writeback() was introduced in 2.5.12. Prior to that, it looks like
* there was no page writeback bit, and everything the function accomplished
* was done by unlock_page(), so we'll define it out.
*
* Note that we could just #define end_page_writeback to nothing and avoid
* needing the compat_ prefix, but this is more complete with respect to
* compat_set_page_writeback.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 12)
#define compat_end_page_writeback(page)
#else
#define compat_end_page_writeback(page) end_page_writeback(page)
#endif
#endif /* __COMPAT_PAGE_FLAGS_H__ */
vmci-only/shared/pgtbl.h 0000444 0000000 0000000 00000022326 12522066073 014226 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __PGTBL_H__
# define __PGTBL_H__
#include <linux/highmem.h>
#include "compat_pgtable.h"
#include "compat_spinlock.h"
#include "compat_page.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 11)
# define compat_active_mm mm
#else
# define compat_active_mm active_mm
#endif
/*
*-----------------------------------------------------------------------------
*
* PgtblPte2MPN --
*
* Returns the machine page number (MPN) associated with a Page Table Entry.
*
* This function is not allowed to schedule() because it can be called while
* holding a spinlock --hpreg
*
* Results:
* INVALID_MPN on failure
* mpn on success
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE MPN
PgtblPte2MPN(pte_t *pte) // IN
{
if (pte_present(*pte) == 0) {
return INVALID_MPN;
}
return pte_pfn(*pte);
}
/*
*-----------------------------------------------------------------------------
*
* PgtblPte2Page --
*
* Returns the page structure associated with a Page Table Entry.
*
* This function is not allowed to schedule() because it can be called while
* holding a spinlock --hpreg
*
* Results:
* The page structure if the page table entry points to a physical page
* NULL if the page table entry does not point to a physical page
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE struct page *
PgtblPte2Page(pte_t *pte) // IN
{
if (pte_present(*pte) == 0) {
return NULL;
}
return compat_pte_page(*pte);
}
/*
*-----------------------------------------------------------------------------
*
* PgtblPGD2PTELocked --
*
* Walks through the hardware page tables to try to find the pte
* associated with a virtual address.
*
* Results:
* pte. Caller must call pte_unmap if valid pte returned.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE pte_t *
PgtblPGD2PTELocked(compat_pgd_t *pgd, // IN: PGD to start with
VA addr) // IN: Address in the virtual address
// space of that process
{
compat_pud_t *pud;
pmd_t *pmd;
pte_t *pte;
if (compat_pgd_present(*pgd) == 0) {
return NULL;
}
pud = compat_pud_offset(pgd, addr);
if (compat_pud_present(*pud) == 0) {
return NULL;
}
pmd = pmd_offset_map(pud, addr);
if (pmd_present(*pmd) == 0) {
pmd_unmap(pmd);
return NULL;
}
pte = pte_offset_map(pmd, addr);
pmd_unmap(pmd);
return pte;
}
/*
*-----------------------------------------------------------------------------
*
* PgtblVa2PTELocked --
*
* Walks through the hardware page tables to try to find the pte
* associated with a virtual address.
*
* Results:
* pte. Caller must call pte_unmap if valid pte returned.
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE pte_t *
PgtblVa2PTELocked(struct mm_struct *mm, // IN: Mm structure of a process
VA addr) // IN: Address in the virtual address
// space of that process
{
return PgtblPGD2PTELocked(compat_pgd_offset(mm, addr), addr);
}
/*
*-----------------------------------------------------------------------------
*
* PgtblVa2MPNLocked --
*
* Retrieve MPN for a given va.
*
* Caller must call pte_unmap if valid pte returned. The mm->page_table_lock
* must be held, so this function is not allowed to schedule() --hpreg
*
* Results:
* INVALID_MPN on failure
* mpn on success
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE MPN
PgtblVa2MPNLocked(struct mm_struct *mm, // IN: Mm structure of a process
VA addr) // IN: Address in the virtual address
{
pte_t *pte;
pte = PgtblVa2PTELocked(mm, addr);
if (pte != NULL) {
MPN mpn = PgtblPte2MPN(pte);
pte_unmap(pte);
return mpn;
}
return INVALID_MPN;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
/*
*-----------------------------------------------------------------------------
*
* PgtblKVa2MPNLocked --
*
* Retrieve MPN for a given kernel va.
*
* Caller must call pte_unmap if valid pte returned. The mm->page_table_lock
* must be held, so this function is not allowed to schedule() --hpreg
*
* Results:
* INVALID_MPN on failure
* mpn on success
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE MPN
PgtblKVa2MPNLocked(struct mm_struct *mm, // IN: Mm structure of a caller
VA addr) // IN: Address in the virtual address
{
pte_t *pte;
pte = PgtblPGD2PTELocked(compat_pgd_offset_k(mm, addr), addr);
if (pte != NULL) {
MPN mpn = PgtblPte2MPN(pte);
pte_unmap(pte);
return mpn;
}
return INVALID_MPN;
}
#endif
/*
*-----------------------------------------------------------------------------
*
* PgtblVa2PageLocked --
*
* Return the "page" struct for a given va.
*
* Results:
* struct page or NULL. The mm->page_table_lock must be held, so this
* function is not allowed to schedule() --hpreg
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE struct page *
PgtblVa2PageLocked(struct mm_struct *mm, // IN: Mm structure of a process
VA addr) // IN: Address in the virtual address
{
pte_t *pte;
pte = PgtblVa2PTELocked(mm, addr);
if (pte != NULL) {
struct page *page = PgtblPte2Page(pte);
pte_unmap(pte);
return page;
} else {
return NULL;
}
}
/*
*-----------------------------------------------------------------------------
*
* PgtblVa2MPN --
*
* Walks through the hardware page tables of the current process to try to
* find the machine page number (MPN) associated with a virtual address.
*
* Results:
* Same as PgtblVa2MPNLocked()
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE MPN
PgtblVa2MPN(VA addr) // IN
{
struct mm_struct *mm;
MPN mpn;
/* current->mm is NULL for kernel threads, so use active_mm. */
mm = current->compat_active_mm;
if (compat_get_page_table_lock(mm)) {
spin_lock(compat_get_page_table_lock(mm));
}
mpn = PgtblVa2MPNLocked(mm, addr);
if (compat_get_page_table_lock(mm)) {
spin_unlock(compat_get_page_table_lock(mm));
}
return mpn;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
/*
*-----------------------------------------------------------------------------
*
* PgtblKVa2MPN --
*
* Walks through the kernel page tables to try to find the machine page
* number (MPN) associated with a kernel virtual address.
*
* Results:
* Same as PgtblVa2MPNLocked()
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE MPN
PgtblKVa2MPN(VA addr) // IN
{
struct mm_struct *mm;
MPN mpn;
mm = current->compat_active_mm;
if (compat_get_page_table_lock(mm)) {
spin_lock(compat_get_page_table_lock(mm));
}
mpn = PgtblKVa2MPNLocked(mm, addr);
if (compat_get_page_table_lock(mm)) {
spin_unlock(compat_get_page_table_lock(mm));
}
return mpn;
}
#endif
/*
*-----------------------------------------------------------------------------
*
* PgtblVa2Page --
*
* Walks through the hardware page tables of the current process to try to
* find the page structure associated with a virtual address.
*
* Results:
* Same as PgtblVa2PageLocked()
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/
static INLINE struct page *
PgtblVa2Page(VA addr) // IN
{
struct mm_struct *mm;
struct page *page;
mm = current->compat_active_mm;
if (compat_get_page_table_lock(mm)) {
spin_lock(compat_get_page_table_lock(mm));
}
page = PgtblVa2PageLocked(mm, addr);
if (compat_get_page_table_lock(mm)) {
spin_unlock(compat_get_page_table_lock(mm));
}
return page;
}
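/*
 * Illustrative usage sketch (compiled out): translate a virtual address of
 * the current task into a machine page number. 'LookupMPN' is invented for
 * the example.
 */
#if 0
static MPN
LookupMPN(void *buf)
{
   /* Returns INVALID_MPN if 'buf' is not currently mapped. */
   return PgtblVa2MPN((VA)(uintptr_t)buf);
}
#endif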
#endif /* __PGTBL_H__ */
vmci-only/shared/compat_string.h 0000444 0000000 0000000 00000003563 12522066073 015771 0 ustar root root /*********************************************************
* Copyright (C) 2007 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_STRING_H__
# define __COMPAT_STRING_H__
#include <linux/string.h>
/*
* kstrdup was born in 2.6.13. This implementation is almost identical to the
* one found there.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13)
#define compat_kstrdup(s, gfp) kstrdup(s, gfp)
#else
#define compat_kstrdup(s, gfp) \
({ \
size_t len; \
char *buf; \
len = strlen(s) + 1; \
buf = kmalloc(len, gfp); \
if (buf) { /* avoid dereferencing a failed allocation, as kstrdup does */ \
memcpy(buf, s, len); \
} \
buf; \
})
#endif
#endif /* __COMPAT_STRING_H__ */
vmci-only/shared/compat_module.h 0000444 0000000 0000000 00000004372 12522066073 015747 0 ustar root root /*********************************************************
* Copyright (C) 2007 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* compat_module.h --
*/
#ifndef __COMPAT_MODULE_H__
# define __COMPAT_MODULE_H__
#include <linux/module.h>
/*
* Modules wishing to use the GPL license are required to include a
* MODULE_LICENSE definition in their module source as of 2.4.10.
*/
#ifndef MODULE_LICENSE
#define MODULE_LICENSE(license)
#endif
/*
* To make use of our own home-brewed MODULE_INFO, we need macros to
* concatenate two expressions to "__mod_", and to convert an
* expression into a string. I'm sure we've got these in our codebase,
* but I'd rather not introduce such a dependency in a compat header.
*/
#ifndef __module_cat
#define __module_cat_1(a, b) __mod_ ## a ## b
#define __module_cat(a, b) __module_cat_1(a, b)
#endif
#ifndef __stringify
#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)
#endif
/*
* MODULE_INFO was born in 2.5.69.
*/
#ifndef MODULE_INFO
#define MODULE_INFO(tag, info) \
static const char __module_cat(tag, __LINE__)[] \
__attribute__((section(".modinfo"), unused)) = __stringify(tag) "=" info
#endif
/*
* MODULE_VERSION was born in 2.6.4. The earlier form appends a long "\0xxx"
* string to the module's version, but that was removed in 2.6.10, so we'll
* ignore it in our wrapper.
*/
#ifndef MODULE_VERSION
#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
#endif
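/*
 * Illustrative usage: these expand to real .modinfo entries on kernels that
 * have them and to home-brewed (or empty) equivalents on older ones, so
 * modules can use them unconditionally:
 *
 *   MODULE_LICENSE("GPL");
 *   MODULE_INFO(supported, "external");
 *   MODULE_VERSION("1.0.0");
 */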
#endif /* __COMPAT_MODULE_H__ */
vmci-only/shared/compat_dcache.h 0000444 0000000 0000000 00000004010 12522066073 015656 0 ustar root root /*********************************************************
* Copyright (C) 2006-2013 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_DCACHE_H__
# define __COMPAT_DCACHE_H__
#include <linux/dcache.h>
/*
* per-dentry locking was born in 2.5.62.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 62)
#define compat_lock_dentry(dentry) spin_lock(&dentry->d_lock)
#define compat_unlock_dentry(dentry) spin_unlock(&dentry->d_lock)
#else
#define compat_lock_dentry(dentry) do {} while (0)
#define compat_unlock_dentry(dentry) do {} while (0)
#endif
/*
* d_alloc_name was born in 2.6.10.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
#define compat_d_alloc_name(parent, s) d_alloc_name(parent, s)
#else
#define compat_d_alloc_name(parent, s) \
({ \
struct qstr q; \
q.name = s; \
q.len = strlen(s); \
q.hash = full_name_hash(q.name, q.len); \
d_alloc(parent, &q); \
})
#endif
#endif /* __COMPAT_DCACHE_H__ */
vmci-only/shared/vm_basic_math.h 0000444 0000000 0000000 00000004176 12522066074 015716 0 ustar root root /*********************************************************
* Copyright (C) 2008 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vm_basic_math.h --
*
* Standard mathematical macros for VMware source code.
*/
#ifndef _VM_BASIC_MATH_H_
#define _VM_BASIC_MATH_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_VMKDRIVERS
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMIROM
#include "includeCheck.h"
#include "vm_basic_types.h" // For INLINE.
#include "vm_basic_asm.h" // For Div64...
static INLINE uint32
RatioOf(uint32 numer1, uint32 numer2, uint32 denom)
{
uint64 numer = (uint64)numer1 * numer2;
/* Calculate "(numer1 * numer2) / denom" avoiding round-off errors. */
#if defined(VMM) || !(defined(__i386__) || defined(__x86_64__))
return numer / denom;
#else
uint32 ratio;
uint32 unused;
Div643232(numer, denom, &ratio, &unused);
return ratio;
#endif
}
static INLINE uint32
ExponentialAvg(uint32 avg, uint32 value, uint32 gainNumer, uint32 gainDenom)
{
uint32 term1 = gainNumer * avg;
uint32 term2 = (gainDenom - gainNumer) * value;
return (term1 + term2) / gainDenom;
}
static INLINE Bool
IsPowerOfTwo(uint32 x)
{
/* Does not check for zero. Callers depend on this. */
return !(x & (x - 1));
}
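/*
 * Worked examples (illustrative only):
 *
 *   RatioOf(3, 100, 4)           == 75  (3 * 100 / 4 without 32-bit overflow)
 *   ExponentialAvg(10, 20, 3, 4) == 12  ((3 * 10 + (4 - 3) * 20) / 4)
 *   IsPowerOfTwo(64)             == TRUE
 *   IsPowerOfTwo(0)              == TRUE (zero is deliberately not checked)
 */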
#endif // ifndef _VM_BASIC_MATH_H_
vmci-only/shared/vmware_pack_begin.h 0000444 0000000 0000000 00000002444 12522066074 016561 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmware_pack_begin.h --
*
* Begin of structure packing. See vmware_pack_init.h for details.
*
* Note that we do not use the following construct in this include file,
* because we want to emit the code every time the file is included --hpreg
*
* #ifndef foo
* # define foo
* ...
* #endif
*
*/
#include "vmware_pack_init.h"
#ifdef _MSC_VER
# pragma pack(push, 1)
#elif __GNUC__
#else
# error Compiler packing...
#endif
vmci-only/shared/vm_basic_defs.h 0000444 0000000 0000000 00000040372 12522066074 015704 0 ustar root root /*********************************************************
* Copyright (C) 2003-2010 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vm_basic_defs.h --
*
* Standard macros for VMware source code.
*/
#ifndef _VM_BASIC_DEFS_H_
#define _VM_BASIC_DEFS_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_VMKDRIVERS
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMIROM
#include "includeCheck.h"
#include "vm_basic_types.h" // For INLINE.
/* Checks for FreeBSD, filtering out VMKERNEL. */
#define __IS_FREEBSD__ (!defined(VMKERNEL) && defined(__FreeBSD__))
#define __IS_FREEBSD_VER__(ver) (__IS_FREEBSD__ && __FreeBSD_version >= (ver))
#if defined _WIN32 && defined USERLEVEL
#include <stddef.h> /*
* We redefine offsetof macro from stddef; make
* sure that it's already defined before we do that.
*/
#include <windows.h> // for Sleep() and LOWORD() etc.
#undef GetFreeSpace // Unpollute preprocessor namespace.
#endif
/*
* Simple macros
*/
#if (defined __APPLE__ || defined __FreeBSD__) && \
(!defined KERNEL && !defined _KERNEL && !defined VMKERNEL && !defined __KERNEL__)
# include <stddef.h>
#else
// XXX the _WIN32 one matches that of VC++, to prevent redefinition warning
// XXX the other one matches that of gcc3.3.3/glibc2.2.4 to prevent redefinition warnings
#ifndef offsetof
#ifdef _WIN32
#define offsetof(s,m) (size_t)&(((s *)0)->m)
#else
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
#endif
#endif // __APPLE__
#define VMW_CONTAINER_OF(ptr, type, member) \
((type *)((char *)(ptr) - offsetof(type, member)))
#ifndef ARRAYSIZE
#define ARRAYSIZE(a) (sizeof (a) / sizeof *(a))
#endif
#ifndef MIN
#define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b))
#endif
/* The Solaris 9 cross-compiler complains about these not being used */
#ifndef sun
static INLINE int
Min(int a, int b)
{
return a < b ? a : b;
}
#endif
#ifndef MAX
#define MAX(_a, _b) (((_a) > (_b)) ? (_a) : (_b))
#endif
#ifndef sun
static INLINE int
Max(int a, int b)
{
return a > b ? a : b;
}
#endif
#define VMW_CLAMP(x, min, max) \
((x) < (min) ? (min) : ((x) > (max) ? (max) : (x)))
#define ROUNDUP(x,y) (((x) + (y) - 1) / (y) * (y))
#define ROUNDDOWN(x,y) ((x) / (y) * (y))
#define ROUNDUPBITS(x, bits) (((uintptr_t) (x) + MASK(bits)) & ~MASK(bits))
#define ROUNDDOWNBITS(x, bits) ((uintptr_t) (x) & ~MASK(bits))
#define CEILING(x, y) (((x) + (y) - 1) / (y))
#if defined __APPLE__
#include <machine/param.h>
#undef MASK
#endif
/*
* The MASK macro behaves badly when given negative numbers or numbers larger
* than the highest order bit number (e.g. 32 on a 32-bit machine) as an
* argument. The range 0..31 is safe.
*/
#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */
#define MASK64(n) ((CONST64U(1) << (n)) - 1) /* make an n-bit mask */
/*
* MASKRANGE64 makes a bit vector starting at bit lo and ending at bit hi. No
* checking for lo < hi is done.
*/
#define MASKRANGE64(hi, lo) (MASK64((hi) - (lo) + 1) << (lo))
/* SIGNEXT64 sign extends a n-bit value to 64-bits. */
#define SIGNEXT64(val, n) (((int64)(val) << (64 - (n))) >> (64 - (n)))
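/*
 * Illustrative examples (not part of the original header):
 *   MASK(8)            == 0xff
 *   MASK64(40)         == 0xffffffffff
 *   MASKRANGE64(15, 8) == 0xff00
 *   SIGNEXT64(0x80, 8) == 0xffffffffffffff80 (the 8-bit sign bit is copied up)
 * MASK(32) is undefined on a 32-bit machine because 1 << 32 overflows int,
 * which is why the comment above restricts the argument to 0..31.
 */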
#define DWORD_ALIGN(x) ((((x) + 3) >> 2) << 2)
#define QWORD_ALIGN(x) ((((x) + 7) >> 3) << 3)
#define IMPLIES(a,b) (!(a) || (b))
/*
* Not everybody (e.g., the monitor) has NULL
*/
#ifndef NULL
#ifdef __cplusplus
#define NULL 0
#else
#define NULL ((void *)0)
#endif
#endif
/*
* Token concatenation
*
* The C preprocessor doesn't prescan arguments when they are
* concatenated or stringified. So we need extra levels of
* indirection to convince the preprocessor to expand its
* arguments.
*/
#define CONC(x, y) x##y
#define XCONC(x, y) CONC(x, y)
#define XXCONC(x, y) XCONC(x, y)
#define MAKESTR(x) #x
#define XSTR(x) MAKESTR(x)
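/*
 * Illustrative example (not part of the original header): given
 *   #define BAR 42
 * CONC(foo, BAR) produces the token fooBAR, because arguments of ## are not
 * prescanned, while XCONC(foo, BAR) expands BAR first and produces foo42.
 * Likewise MAKESTR(BAR) yields "BAR" but XSTR(BAR) yields "42".
 */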
/*
* Wide versions of string constants.
*/
#ifndef WSTR
#define WSTR_(X) L ## X
#define WSTR(X) WSTR_(X)
#endif
/*
* Page operations
*
* It has been suggested that these definitions belong elsewhere
* (like x86types.h). However, I deem them common enough
* (since even regular user-level programs may want to do
* page-based memory manipulation) to be here.
* -- edward
*/
#ifndef PAGE_SHIFT // {
#if defined VM_I386
#define PAGE_SHIFT 12
#elif defined __APPLE__
#define PAGE_SHIFT 12
#elif defined __arm__
#define PAGE_SHIFT 12
#else
#error
#endif
#endif // }
#ifndef PAGE_SIZE
#define PAGE_SIZE (1 << PAGE_SHIFT)
#endif
#ifndef PAGE_MASK
#define PAGE_MASK (PAGE_SIZE - 1)
#endif
#ifndef PAGE_OFFSET
#define PAGE_OFFSET(_addr) ((uintptr_t)(_addr) & (PAGE_SIZE - 1))
#endif
#ifndef PAGE_NUMBER
#define PAGE_NUMBER(_addr) ((uintptr_t)(_addr) / PAGE_SIZE)
#endif
#ifndef VM_PAGE_BASE
#define VM_PAGE_BASE(_addr) ((_addr) & ~(PAGE_SIZE - 1))
#endif
#ifndef VM_PAGES_SPANNED
#define VM_PAGES_SPANNED(_addr, _size) \
((((_addr) & (PAGE_SIZE - 1)) + (_size) + (PAGE_SIZE - 1)) >> PAGE_SHIFT)
#endif
#ifndef BYTES_2_PAGES
#define BYTES_2_PAGES(_nbytes) ((_nbytes) >> PAGE_SHIFT)
#endif
#ifndef PAGES_2_BYTES
#define PAGES_2_BYTES(_npages) (((uint64)(_npages)) << PAGE_SHIFT)
#endif
#ifndef MBYTES_2_PAGES
#define MBYTES_2_PAGES(_nbytes) ((_nbytes) << (20 - PAGE_SHIFT))
#endif
#ifndef PAGES_2_MBYTES
#define PAGES_2_MBYTES(_npages) ((_npages) >> (20 - PAGE_SHIFT))
#endif
#ifndef GBYTES_2_PAGES
#define GBYTES_2_PAGES(_nbytes) ((_nbytes) << (30 - PAGE_SHIFT))
#endif
#ifndef PAGES_2_GBYTES
#define PAGES_2_GBYTES(_npages) ((_npages) >> (30 - PAGE_SHIFT))
#endif
#ifndef BYTES_2_MBYTES
#define BYTES_2_MBYTES(_nbytes) ((_nbytes) >> 20)
#endif
#ifndef MBYTES_2_BYTES
#define MBYTES_2_BYTES(_nbytes) ((uint64)(_nbytes) << 20)
#endif
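/*
 * Illustrative examples, assuming PAGE_SHIFT == 12, i.e. 4 KB pages (not
 * part of the original header):
 *   BYTES_2_PAGES(8192) == 2
 *   PAGES_2_BYTES(2)    == 8192
 *   MBYTES_2_PAGES(1)   == 256
 *   PAGES_2_MBYTES(512) == 2
 */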
#ifndef VM_PAE_LARGE_PAGE_SHIFT
#define VM_PAE_LARGE_PAGE_SHIFT 21
#endif
#ifndef VM_PAE_LARGE_PAGE_SIZE
#define VM_PAE_LARGE_PAGE_SIZE (1 << VM_PAE_LARGE_PAGE_SHIFT)
#endif
#ifndef VM_PAE_LARGE_PAGE_MASK
#define VM_PAE_LARGE_PAGE_MASK (VM_PAE_LARGE_PAGE_SIZE - 1)
#endif
#ifndef VM_PAE_LARGE_2_SMALL_PAGES
#define VM_PAE_LARGE_2_SMALL_PAGES (BYTES_2_PAGES(VM_PAE_LARGE_PAGE_SIZE))
#endif
#ifndef NR_MPNS_PER_PAGE
#define NR_MPNS_PER_PAGE (PAGE_SIZE / sizeof(MPN))
#endif
/*
* Word operations
*/
#ifndef LOWORD
#define LOWORD(_dw) ((_dw) & 0xffff)
#endif
#ifndef HIWORD
#define HIWORD(_dw) (((_dw) >> 16) & 0xffff)
#endif
#ifndef LOBYTE
#define LOBYTE(_w) ((_w) & 0xff)
#endif
#ifndef HIBYTE
#define HIBYTE(_w) (((_w) >> 8) & 0xff)
#endif
#define HIDWORD(_qw) ((uint32)((_qw) >> 32))
#define LODWORD(_qw) ((uint32)(_qw))
#define QWORD(_hi, _lo) ((((uint64)(_hi)) << 32) | ((uint32)(_lo)))
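/*
 * Illustrative examples (not part of the original header):
 *   LOWORD(0x12345678) == 0x5678, HIWORD(0x12345678) == 0x1234
 *   LOBYTE(0x5678)     == 0x78,   HIBYTE(0x5678)     == 0x56
 *   QWORD(0x11223344, 0x55667788) == CONST64U(0x1122334455667788)
 */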
/*
* Deposit a field _src at _pos bits from the right,
* with a length of _len, into the integer _target.
*/
#define DEPOSIT_BITS(_src,_pos,_len,_target) { \
unsigned mask = ((1 << _len) - 1); \
unsigned shiftedmask = ((1 << _len) - 1) << _pos; \
_target = (_target & ~shiftedmask) | ((_src & mask) << _pos); \
}
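/*
 * Usage sketch (not part of the original header): deposit the 4-bit value
 * 0xA into bits 8..11 of reg:
 *
 *   unsigned reg = 0x12345678;
 *   DEPOSIT_BITS(0xA, 8, 4, reg);   // reg is now 0x12345A78
 */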
/*
* Get return address.
*/
#ifdef _MSC_VER
#ifdef __cplusplus
extern "C"
#endif
void *_ReturnAddress(void);
#pragma intrinsic(_ReturnAddress)
#define GetReturnAddress() _ReturnAddress()
#elif __GNUC__
#define GetReturnAddress() __builtin_return_address(0)
#endif
#ifdef __GNUC__
#ifndef sun
static INLINE_SINGLE_CALLER uintptr_t
GetFrameAddr(void)
{
uintptr_t bp;
#if !(__GNUC__ == 4 && (__GNUC_MINOR__ == 0 || __GNUC_MINOR__ == 1))
bp = (uintptr_t)__builtin_frame_address(0);
#else
/*
* We use this assembly hack due to a bug discovered in gcc 4.1.1.
* The bug was fixed in 4.2.0; assume it originated with 4.0.
* PR147638, PR554369.
*/
__asm__ __volatile__(
# if defined(VM_X86_64)
"movq %%rbp, %0\n"
# else
"movl %%ebp, %0\n"
# endif
: "=g" (bp));
#endif
return bp;
}
/*
* Returns the frame pointer of the calling function.
* Equivalent to __builtin_frame_address(1).
*/
static INLINE_SINGLE_CALLER uintptr_t
GetCallerFrameAddr(void)
{
return *(uintptr_t*)GetFrameAddr();
}
#endif // sun
#endif // __GNUC__
/*
* Data prefetch was added in gcc 3.1.1
* http://www.gnu.org/software/gcc/gcc-3.1/changes.html
*/
#ifdef __GNUC__
# if ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ > 1) || \
(__GNUC__ == 3 && __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 1))
# define PREFETCH_R(var) __builtin_prefetch((var), 0 /* read */, \
3 /* high temporal locality */)
# define PREFETCH_W(var) __builtin_prefetch((var), 1 /* write */, \
3 /* high temporal locality */)
# else
# define PREFETCH_R(var) ((void)(var))
# define PREFETCH_W(var) ((void)(var))
# endif
#endif /* __GNUC__ */
#ifdef USERLEVEL // {
/*
* Note this might be a problem on NT b/c while sched_yield guarantees it
* moves you to the end of your priority list, Sleep(0) offers no such
* guarantee. Bummer. --Jeremy.
*/
#if defined(N_PLAT_NLM)
/* We do not have YIELD() as we do not need it yet... */
#elif defined(_WIN32)
# define YIELD() Sleep(0)
#elif defined(VMKERNEL)
/* We don't have a YIELD macro in the vmkernel */
#else
# include <sched.h> // For sched_yield. Don't ask. --Jeremy.
# define YIELD() sched_yield()
#endif
/*
* Standardize some Posix names on Windows.
*/
#ifdef _WIN32 // {
#define snprintf _snprintf
#define strtok_r strtok_s
#if (_MSC_VER < 1500)
#define vsnprintf _vsnprintf
#endif
typedef int uid_t;
typedef int gid_t;
static INLINE void
sleep(unsigned int sec)
{
Sleep(sec * 1000);
}
static INLINE int
usleep(unsigned long usec)
{
Sleep(CEILING(usec, 1000));
return 0;
}
typedef int pid_t;
#define F_OK 0
#define X_OK 1
#define W_OK 2
#define R_OK 4
#endif // }
/*
* Macro for username comparison.
*/
#ifdef _WIN32 // {
#define USERCMP(x,y) Str_Strcasecmp(x,y)
#else
#define USERCMP(x,y) strcmp(x,y)
#endif // }
#endif // }
#ifndef va_copy
#ifdef _WIN32
/*
* Windows needs va_copy. This works for both 32 and 64-bit Windows
* based on inspection of how varags.h from the Visual C CRTL is
* implemented. (Future versions of the RTL may break this).
*/
#define va_copy(dest, src) ((dest) = (src))
#elif defined(__APPLE__) && defined(KERNEL)
#include "availabilityMacOS.h"
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
// The Mac OS 10.5 kernel SDK defines va_copy in stdarg.h.
#include <stdarg.h>
#else
/*
* The Mac OS 10.4 kernel SDK needs va_copy. Based on inspection of
* stdarg.h from the MacOSX10.4u.sdk kernel framework, this should
* work.
*/
#define va_copy(dest, src) ((dest) = (src))
#endif // MAC_OS_X_VERSION_MIN_REQUIRED
#elif defined(__GNUC__) && (__GNUC__ < 3)
/*
* Old versions of gcc recognize __va_copy, but not va_copy.
*/
#define va_copy(dest, src) __va_copy(dest, src)
#endif // _WIN32
#endif // va_copy
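/*
 * Usage sketch (not part of the original header): va_copy() lets a va_list
 * be consumed twice, e.g. to try a small buffer before formatting into a
 * larger one. DriverLogPrint() in driverLog.c uses this exact pattern.
 *
 *   va_list args2;
 *   va_copy(args2, args);
 *   n = vsnprintf(smallBuf, sizeof smallBuf, fmt, args2);
 *   va_end(args2);
 *   // args is still valid here for a second vsnprintf() if n was too big.
 */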
/*
* This one is outside USERLEVEL because it's used by
* files compiled into the Windows hgfs driver or the display
* driver.
*/
#ifdef _WIN32
#define PATH_MAX 256
#ifndef strcasecmp
#define strcasecmp(_s1,_s2) _stricmp((_s1),(_s2))
#endif
#ifndef strncasecmp
#define strncasecmp(_s1,_s2,_n) _strnicmp((_s1),(_s2),(_n))
#endif
#endif
#if defined __linux__ && !defined __KERNEL__ && !defined MODULE && \
!defined VMM && !defined FROBOS && !defined __ANDROID__
#include <features.h>
#if __GLIBC_PREREQ(2, 1) && !defined GLIBC_VERSION_21
#define GLIBC_VERSION_21
#endif
#if __GLIBC_PREREQ(2, 2) && !defined GLIBC_VERSION_22
#define GLIBC_VERSION_22
#endif
#if __GLIBC_PREREQ(2, 3) && !defined GLIBC_VERSION_23
#define GLIBC_VERSION_23
#endif
#if __GLIBC_PREREQ(2, 4) && !defined GLIBC_VERSION_24
#define GLIBC_VERSION_24
#endif
#if __GLIBC_PREREQ(2, 5) && !defined GLIBC_VERSION_25
#define GLIBC_VERSION_25
#endif
#if __GLIBC_PREREQ(2, 12) && !defined GLIBC_VERSION_212
#define GLIBC_VERSION_212
#endif
#endif
/*
* Convenience macros and definitions. Can often be used instead of #ifdef.
*/
#undef DEBUG_ONLY
#ifdef VMX86_DEBUG
#define vmx86_debug 1
#define DEBUG_ONLY(x) x
#else
#define vmx86_debug 0
#define DEBUG_ONLY(x)
#endif
#ifdef VMX86_STATS
#define vmx86_stats 1
#define STATS_ONLY(x) x
#else
#define vmx86_stats 0
#define STATS_ONLY(x)
#endif
#ifdef VMX86_DEVEL
#define vmx86_devel 1
#define DEVEL_ONLY(x) x
#else
#define vmx86_devel 0
#define DEVEL_ONLY(x)
#endif
#ifdef VMX86_LOG
#define vmx86_log 1
#define LOG_ONLY(x) x
#else
#define vmx86_log 0
#define LOG_ONLY(x)
#endif
#ifdef VMX86_BETA
#define vmx86_beta 1
#define BETA_ONLY(x) x
#else
#define vmx86_beta 0
#define BETA_ONLY(x)
#endif
#ifdef VMX86_RELEASE
#define vmx86_release 1
#define RELEASE_ONLY(x) x
#else
#define vmx86_release 0
#define RELEASE_ONLY(x)
#endif
#ifdef VMX86_SERVER
#define vmx86_server 1
#define SERVER_ONLY(x) x
#define HOSTED_ONLY(x)
#else
#define vmx86_server 0
#define SERVER_ONLY(x)
#define HOSTED_ONLY(x) x
#endif
#ifdef VMKERNEL
#define vmkernel 1
#define VMKERNEL_ONLY(x) x
#else
#define vmkernel 0
#define VMKERNEL_ONLY(x)
#endif
#ifdef _WIN32
#define WIN32_ONLY(x) x
#define POSIX_ONLY(x)
#define vmx86_win32 1
#else
#define WIN32_ONLY(x)
#define POSIX_ONLY(x) x
#define vmx86_win32 0
#endif
#ifdef __linux__
#define vmx86_linux 1
#define LINUX_ONLY(x) x
#else
#define vmx86_linux 0
#define LINUX_ONLY(x)
#endif
#ifdef __APPLE__
#define vmx86_apple 1
#define APPLE_ONLY(x) x
#else
#define vmx86_apple 0
#define APPLE_ONLY(x)
#endif
#ifdef VMM
#define VMM_ONLY(x) x
#define USER_ONLY(x)
#else
#define VMM_ONLY(x)
#define USER_ONLY(x) x
#endif
/* VMVISOR ifdef only allowed in the vmkernel */
#ifdef VMKERNEL
#ifdef VMVISOR
#define vmvisor 1
#define VMVISOR_ONLY(x) x
#else
#define vmvisor 0
#define VMVISOR_ONLY(x)
#endif
#endif
#ifdef _WIN32
#define VMW_INVALID_HANDLE INVALID_HANDLE_VALUE
#else
#define VMW_INVALID_HANDLE (-1LL)
#endif
#ifdef _WIN32
#define fsync(fd) _commit(fd)
#define fileno(f) _fileno(f)
#else
#endif
/*
* Debug output macros for Windows drivers (the Eng variant is for
* display/printer drivers only).
*/
#ifdef _WIN32
#ifndef USES_OLD_WINDDK
#if defined(VMX86_LOG)
#ifdef _WIN64
#define WinDrvPrint(arg, ...) DbgPrintEx(DPFLTR_IHVDRIVER_ID, (ULONG)~0, arg, __VA_ARGS__)
#else
#define WinDrvPrint(arg, ...) DbgPrint(arg, __VA_ARGS__)
#endif
#define WinDrvEngPrint(arg, ...) EngDbgPrint(arg, __VA_ARGS__)
#else
#define WinDrvPrint(arg, ...)
#define WinDrvEngPrint(arg, ...)
#endif
#endif
#endif // _WIN32
#ifdef HOSTED_LG_PG
#define hosted_lg_pg 1
#else
#define hosted_lg_pg 0
#endif
/*
* Used to initialize cbSize for this structure to preserve pre-Vista
* compatibility.
*/
#define NONCLIENTMETRICSINFO_V1_SIZE CCSIZEOF_STRUCT(NONCLIENTMETRICS, \
lfMessageFont)
/* This is not intended to be thread-safe. */
#define DO_ONCE(code) \
do { \
static Bool _doOnceDone = FALSE; \
if (UNLIKELY(!_doOnceDone)) { \
_doOnceDone = TRUE; \
code; \
} \
} while (0)
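/*
 * Usage sketch (not part of the original header):
 *
 *   DO_ONCE(Log("first call only\n"));
 *
 * Two threads racing through the macro may both execute the code before
 * either sets _doOnceDone, hence the thread-safety caveat above.
 */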
/*
* Bug 827422 and 838523.
*/
#if defined __GNUC__ && __GNUC__ >= 4
#define VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
#else
#define VISIBILITY_HIDDEN /* nothing */
#endif
#endif // ifndef _VM_BASIC_DEFS_H_
vmci-only/shared/compat_log2.h 0000444 0000000 0000000 00000003672 12522066073 015327 0 ustar root root /*********************************************************
* Copyright (C) 2011 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_LOG2_H__
# define __COMPAT_LOG2_H__
#ifndef LINUX_VERSION_CODE
# error "Include compat_version.h before compat_log2.h"
#endif
/* linux/log2.h was introduced in 2.6.20. */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 19)
# include <linux/log2.h>
#endif
/*
* is_power_of_2 was introduced in 2.6.21. This implementation is almost
* identical to the one found there.
*/
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 20)
#define compat_is_power_of_2(n) is_power_of_2(n)
#else
static inline __attribute__((const))
int compat_is_power_of_2(unsigned long n)
{
return (n != 0 && ((n & (n - 1)) == 0));
}
#endif
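/*
 * Illustrative (not part of the original header): n & (n - 1) clears the
 * lowest set bit, so it is zero iff at most one bit is set. Hence
 * compat_is_power_of_2(64) == 1, compat_is_power_of_2(24) == 0 and
 * compat_is_power_of_2(0) == 0 (zero is excluded explicitly).
 */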
/*
* rounddown_power_of_two was introduced in 2.6.24. This implementation is
* similar to the one in log2.h but with input of int instead of long to
* avoid more version related checks for fls_long().
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
#define compat_rounddown_pow_of_two(n) rounddown_pow_of_two(n)
#else
static inline __attribute__((const))
unsigned int compat_rounddown_pow_of_two(unsigned int n)
{
return 1U << (fls(n) - 1);
}
#endif
#endif /* __COMPAT_LOG2_H__ */
vmci-only/shared/compat_sock.h 0000444 0000000 0000000 00000006002 12522066073 015411 0 ustar root root /*********************************************************
* Copyright (C) 2003 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_SOCK_H__
# define __COMPAT_SOCK_H__
#include <linux/stddef.h> /* for NULL */
#include <net/sock.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
return sk->sk_sleep;
}
#endif
/*
* Prior to 2.6.24, there was no sock network namespace member. In 2.6.26, it
* was hidden behind accessor functions so that its behavior could vary
* depending on the value of CONFIG_NET_NS.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
# define compat_sock_net(sk) sock_net(sk)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
# define compat_sock_net(sk) sk->sk_net
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16)
#ifndef CONFIG_FILTER
# define sk_filter(sk, skb, needlock) 0
#endif
/* Taken from 2.6.16's sock.h and modified for macro. */
# define compat_sk_receive_skb(sk, skb, nested) \
({ \
int rc = NET_RX_SUCCESS; \
\
if (sk_filter(sk, skb, 0)) { \
kfree_skb(skb); \
} else { \
skb->dev = NULL; \
bh_lock_sock(sk); \
if (!sock_owned_by_user(sk)) { \
rc = (sk)->sk_backlog_rcv(sk, skb); \
} else { \
sk_add_backlog(sk, skb); \
} \
bh_unlock_sock(sk); \
} \
\
sock_put(sk); \
rc; \
})
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
# define compat_sk_receive_skb(sk, skb, nested) sk_receive_skb(sk, skb)
#else
# define compat_sk_receive_skb(sk, skb, nested) sk_receive_skb(sk, skb, nested)
#endif
#endif /* __COMPAT_SOCK_H__ */
vmci-only/shared/driverLog.c 0000444 0000000 0000000 00000011112 12522066073 015035 0 ustar root root /*********************************************************
* Copyright (C) 2007 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* driverLog.c --
*
* Common logging functions for Linux kernel modules.
*/
#include "driver-config.h"
#include "compat_kernel.h"
#include "compat_sched.h"
#include <asm/current.h>
#include "driverLog.h"
#define LINUXLOG_BUFFER_SIZE 1024
static const char *driverLogPrefix = "";
/*
* vsnprintf was born in 2.4.10. Fall back on vsprintf if we're
* an older kernel.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 10)
# define vsnprintf(str, size, fmt, args) vsprintf(str, fmt, args)
#endif
/*
*----------------------------------------------------------------------------
*
* DriverLog_Init --
*
* Initializes the Linux logging.
*
* Results:
* None.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------------
*/
void
DriverLog_Init(const char *prefix) // IN
{
driverLogPrefix = prefix ? prefix : "";
}
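/*
 * Usage sketch (illustrative, not part of the original file):
 *
 *   DriverLog_Init("vmci");
 *   Log("module loaded\n");       // printk's "vmci[<pid>]: module loaded"
 *   Warning("low on buffers\n");  // same prefix, KERN_WARNING level
 */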
/*
*----------------------------------------------------------------------
*
* DriverLogPrint --
*
* Log error message from a Linux module.
*
* Results:
* None.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static void
DriverLogPrint(const char *level, // IN: KERN_* constant
const char *fmt, // IN: error format string
va_list args) // IN: arguments for format string
{
static char staticBuf[LINUXLOG_BUFFER_SIZE];
char stackBuf[128];
va_list args2;
const char *buf;
/*
* By default, use a small buffer on the stack (thread safe). If it is too
* small, fall back to a larger static buffer (not thread safe).
*/
va_copy(args2, args);
if (vsnprintf(stackBuf, sizeof stackBuf, fmt, args2) < sizeof stackBuf) {
buf = stackBuf;
} else {
vsnprintf(staticBuf, sizeof staticBuf, fmt, args);
buf = staticBuf;
}
va_end(args2);
printk("%s%s[%d]: %s", level, driverLogPrefix, current->pid, buf);
}
/*
*----------------------------------------------------------------------
*
* Warning --
*
* Warning messages from kernel module: logged into kernel log
* as warnings.
*
* Results:
* None.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
void
Warning(const char *fmt, ...) // IN: warning format string
{
va_list args;
va_start(args, fmt);
DriverLogPrint(KERN_WARNING, fmt, args);
va_end(args);
}
/*
*----------------------------------------------------------------------
*
* Log --
*
* Log messages from kernel module: logged into kernel log
* as debug information.
*
* Results:
* None.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
void
Log(const char *fmt, ...) // IN: log format string
{
va_list args;
/*
* Use the kernel log with at least a KERN_DEBUG level
* so it doesn't garbage the screen at (re)boot time on RedHat 6.0.
*/
va_start(args, fmt);
DriverLogPrint(KERN_DEBUG, fmt, args);
va_end(args);
}
/*
*----------------------------------------------------------------------
*
* Panic --
*
* ASSERTION failures and Panics from kernel module get here.
* Message is logged to the kernel log and on console.
*
* Results:
* None.
*
* Side effects:
* Never returns
*
*----------------------------------------------------------------------
*/
void
Panic(const char *fmt, ...) // IN: panic format string
{
va_list args;
va_start(args, fmt);
DriverLogPrint(KERN_EMERG, fmt, args);
va_end(args);
#ifdef BUG
BUG();
#else
/* Should die with %cs unwritable, or at least with page fault. */
asm volatile("movb $0, %cs:(0)");
#endif
while (1);
}
vmci-only/shared/vmware.h 0000444 0000000 0000000 00000003502 12522066074 014413 0 ustar root root /*********************************************************
* Copyright (C) 2003 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmware.h --
*
* Standard include file for VMware source code.
*/
#ifndef _VMWARE_H_
#define _VMWARE_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_DISTRIBUTE
#include "includeCheck.h"
#include "vm_basic_types.h"
#include "vm_basic_defs.h"
#include "vm_assert.h"
/*
* Global error codes. Currently used internally, but may be exported
* to customers one day, like VM_E_XXX in vmcontrol_constants.h
*/
typedef enum VMwareStatus {
VMWARE_STATUS_SUCCESS, /* success */
VMWARE_STATUS_ERROR, /* generic error */
VMWARE_STATUS_NOMEM, /* generic memory allocation error */
VMWARE_STATUS_INSUFFICIENT_RESOURCES, /* internal or system resource limit exceeded */
VMWARE_STATUS_INVALID_ARGS /* invalid arguments */
} VMwareStatus;
#define VMWARE_SUCCESS(s) ((s) == VMWARE_STATUS_SUCCESS)
#endif // ifndef _VMWARE_H_
vmci-only/shared/compat_pci.h 0000444 0000000 0000000 00000005152 12522066073 015232 0 ustar root root /*********************************************************
* Copyright (C) 1999 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* compat_pci.h: PCI compatibility wrappers.
*/
#ifndef __COMPAT_PCI_H__
#define __COMPAT_PCI_H__
#include "compat_ioport.h"
#include <linux/pci.h>
#ifndef DMA_BIT_MASK
# define DMA_BIT_MASK(n) DMA_##n##BIT_MASK
#endif
/*
* Power Management related compat wrappers.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10)
# define compat_pci_save_state(pdev) pci_save_state((pdev), NULL)
# define compat_pci_restore_state(pdev) pci_restore_state((pdev), NULL)
#else
# define compat_pci_save_state(pdev) pci_save_state((pdev))
# define compat_pci_restore_state(pdev) pci_restore_state((pdev))
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
# define pm_message_t u32
# define compat_pci_choose_state(pdev, state) (state)
# define PCI_D0 0
# define PCI_D3hot 3
#else
# define compat_pci_choose_state(pdev, state) pci_choose_state((pdev), (state))
#endif
/* 2.6.14 changed the PCI shutdown callback */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
# define COMPAT_PCI_SHUTDOWN(func) .driver = { .shutdown = (func), }
# define COMPAT_PCI_DECLARE_SHUTDOWN(func, var) (func)(struct device *(var))
# define COMPAT_PCI_TO_DEV(dev) (to_pci_dev(dev))
#else
# define COMPAT_PCI_SHUTDOWN(func) .shutdown = (func)
# define COMPAT_PCI_DECLARE_SHUTDOWN(func, var) (func)(struct pci_dev *(var))
# define COMPAT_PCI_TO_DEV(dev) (dev)
#endif
/* 2.6.26 introduced the device_set_wakeup_enable() function */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
# define compat_device_set_wakeup_enable(dev, val) do {} while(0)
#else
# define compat_device_set_wakeup_enable(dev, val) \
device_set_wakeup_enable(dev, val)
#endif
#endif /* __COMPAT_PCI_H__ */
vmci-only/shared/compat_namei.h 0000444 0000000 0000000 00000003416 12522066073 015551 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_NAMEI_H__
# define __COMPAT_NAMEI_H__
#include <linux/namei.h>
/*
* In 2.6.25-rc2, dentry and mount objects were removed from the nameidata
* struct. They were both replaced with a struct path.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#define compat_vmw_nd_to_dentry(nd) (nd).path.dentry
#else
#define compat_vmw_nd_to_dentry(nd) (nd).dentry
#endif
/* In 2.6.25-rc2, path_release(&nd) was replaced with path_put(&nd.path). */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#define compat_path_release(nd) path_put(&(nd)->path)
#else
#define compat_path_release(nd) path_release(nd)
#endif
/* path_lookup was removed in 2.6.39 merge window VFS merge */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
#define compat_path_lookup(name, flags, nd) kern_path(name, flags, &((nd)->path))
#else
#define compat_path_lookup(name, flags, nd) path_lookup(name, flags, nd)
#endif
#endif /* __COMPAT_NAMEI_H__ */
vmci-only/shared/compat_uaccess.h 0000444 0000000 0000000 00000006062 12522066073 016106 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_UACCESS_H__
# define __COMPAT_UACCESS_H__
/* User space access functions moved in 2.1.7 to asm/uaccess.h --hpreg */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 7)
# include <asm/uaccess.h>
#else
# include <asm/segment.h>
#endif
/* get_user() API modified in 2.1.4 to take 2 arguments --hpreg */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 4)
# define compat_get_user get_user
#else
/*
* We assign 0 to the variable in case of failure to prevent "`_var' might be
* used uninitialized in this function" compiler warnings. I think it is OK,
* because the hardware-based version in newer kernels probably has the same
* semantics and does not guarantee that the value of _var will not be
* modified, should the access fail --hpreg
*/
# define compat_get_user(_var, _uvAddr) ({ \
int _status; \
\
_status = verify_area(VERIFY_READ, _uvAddr, sizeof(*(_uvAddr))); \
if (_status == 0) { \
(_var) = get_user(_uvAddr); \
} else { \
(_var) = 0; \
} \
_status; \
})
#endif
/*
* The copy_from_user() API appeared in 2.1.4
*
* The emulation is not perfect here, but it is conservative: on failure, we
* always return the total size, instead of the potentially smaller faulty
* size --hpreg
*
* Since 2.5.55 copy_from_user() is no longer macro.
*/
#if !defined(copy_from_user) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 0)
# define copy_from_user(_to, _from, _size) ( \
verify_area(VERIFY_READ, _from, _size) \
? (_size) \
: (memcpy_fromfs(_to, _from, _size), 0) \
)
# define copy_to_user(_to, _from, _size) ( \
verify_area(VERIFY_WRITE, _to, _size) \
? (_size) \
: (memcpy_tofs(_to, _from, _size), 0) \
)
#endif
#endif /* __COMPAT_UACCESS_H__ */
vmci-only/shared/compat_sched.h 0000444 0000000 0000000 00000024236 12522066073 015551 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_SCHED_H__
# define __COMPAT_SCHED_H__
#include <linux/sched.h>
/* CLONE_KERNEL available in 2.5.35 and higher. */
#ifndef CLONE_KERNEL
#define CLONE_KERNEL CLONE_FILES | CLONE_FS | CLONE_SIGHAND
#endif
/* TASK_COMM_LEN become available in 2.6.11. */
#ifndef TASK_COMM_LEN
#define TASK_COMM_LEN 16
#endif
/* The capable() API appeared in 2.1.92 --hpreg */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 1, 92)
# define capable(_capability) suser()
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 0)
# define need_resched() need_resched
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 3)
# define need_resched() (current->need_resched)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 3)
# define cond_resched() (need_resched() ? schedule() : (void) 0)
#endif
/* Oh well. We need yield... Happy us! */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 20)
# ifdef __x86_64__
# define compat_yield() there_is_nothing_like_yield()
# else
# include <linux/unistd.h>
# include <linux/kernel.h>
/*
* Used by _syscallX macros. Note that this is global variable, so
* do not rely on its contents too much. As exit() is only function
* we use, and we never check return value from exit(), we have
* no problem...
*/
extern int errno;
/*
* compat_exit() provides an access to the exit() function. It must
* be named compat_exit(), as exit() (with different signature) is
* provided by x86-64, arm and other (but not by i386).
*/
# define __NR_compat_yield __NR_sched_yield
static inline _syscall0(int, compat_yield);
# endif
#else
# define compat_yield() yield()
#endif
/*
* Since 2.5.34 there are two methods to enumerate tasks:
* for_each_process(p) { ... } which enumerates only tasks and
* do_each_thread(g,t) { ... } while_each_thread(g,t) which enumerates
* also threads even if they share same pid.
*/
#ifndef for_each_process
# define for_each_process(p) for_each_task(p)
#endif
#ifndef do_each_thread
# define do_each_thread(g, t) for_each_task(g) { t = g; do
# define while_each_thread(g, t) while (0) }
#endif
/*
* Lock for signal mask is moving target...
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 40) && defined(CLONE_PID)
/* 2.4.x without NPTL patches or early 2.5.x */
#define compat_sigmask_lock sigmask_lock
#define compat_dequeue_signal_current(siginfo_ptr) \
dequeue_signal(&current->blocked, (siginfo_ptr))
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 60) && !defined(INIT_SIGHAND)
/* RedHat's 2.4.x with first version of NPTL support, or 2.5.40 to 2.5.59 */
#define compat_sigmask_lock sig->siglock
#define compat_dequeue_signal_current(siginfo_ptr) \
dequeue_signal(&current->blocked, (siginfo_ptr))
#else
/* RedHat's 2.4.x with second version of NPTL support, or 2.5.60+. */
#define compat_sigmask_lock sighand->siglock
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
#define compat_dequeue_signal_current(siginfo_ptr) \
dequeue_signal(&current->blocked, (siginfo_ptr))
#else
#define compat_dequeue_signal_current(siginfo_ptr) \
dequeue_signal(current, &current->blocked, (siginfo_ptr))
#endif
#endif
/*
* recalc_sigpending() had task argument in the past
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 29) && defined(CLONE_PID)
/* 2.4.x without NPTL patches or early 2.5.x */
#define compat_recalc_sigpending() recalc_sigpending(current)
#else
/* RedHat's 2.4.x with NPTL support, or 2.5.29+ */
#define compat_recalc_sigpending() recalc_sigpending()
#endif
/*
* reparent_to_init() was introduced in 2.4.8. In 2.5.38 (or possibly
* earlier, but later than 2.5.31) a call to it was added into
* daemonize(), so compat_daemonize no longer needs to call it.
*
* In 2.4.x kernels reparent_to_init() forgets to do correct refcounting
* on current->user. It is better to count one too many than one too few...
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 8) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 38)
#define compat_reparent_to_init() do { \
reparent_to_init(); \
atomic_inc(&current->user->__count); \
} while (0)
#else
#define compat_reparent_to_init() do {} while (0)
#endif
/*
* daemonize appeared in 2.2.18. Except 2.2.17-4-RH7.0, which has it too.
* Fortunately 2.2.17-4-RH7.0 uses versioned symbols, so we can check
* its existence with defined().
*/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18)) && !defined(daemonize)
static inline void daemonize(void) {
struct fs_struct *fs;
exit_mm(current);
current->session = 1;
current->pgrp = 1;
exit_fs(current);
fs = init_task.fs;
current->fs = fs;
atomic_inc(&fs->count);
}
#endif
/*
* flush_signals acquires sighand->siglock since 2.5.61... Verify RH's kernels!
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 61)
#define compat_flush_signals(task) do { \
spin_lock_irq(&task->compat_sigmask_lock); \
flush_signals(task); \
spin_unlock_irq(&task->compat_sigmask_lock); \
} while (0)
#else
#define compat_flush_signals(task) flush_signals(task)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 61)
#define compat_allow_signal(signr) do { \
spin_lock_irq(&current->compat_sigmask_lock); \
sigdelset(&current->blocked, signr); \
compat_recalc_sigpending(); \
spin_unlock_irq(&current->compat_sigmask_lock); \
} while (0)
#else
#define compat_allow_signal(signr) allow_signal(signr)
#endif
/*
* daemonize can set process name since 2.5.61. Prior to 2.5.61, daemonize
* didn't block signals on our behalf.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 61)
#define compat_daemonize(x...) \
({ \
/* Beware! No snprintf here, so verify arguments! */ \
sprintf(current->comm, x); \
\
/* Block all signals. */ \
spin_lock_irq(&current->compat_sigmask_lock); \
sigfillset(&current->blocked); \
compat_recalc_sigpending(); \
spin_unlock_irq(&current->compat_sigmask_lock); \
compat_flush_signals(current); \
\
daemonize(); \
compat_reparent_to_init(); \
})
#else
#define compat_daemonize(x...) daemonize(x)
#endif
/*
* try to freeze a process. For kernels 2.6.11 or newer, we know how to choose
* the interface. The problem is that the oldest interface, introduced in
* 2.5.18, was backported to 2.4.x kernels. So if we're older than 2.6.11,
* we'll decide what to do based on whether or not swsusp was configured
* for the kernel. For kernels 2.6.20 and newer, we'll also need to include
* freezer.h since the try_to_freeze definition was pulled out of sched.h.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
#include <linux/freezer.h>
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) || defined(VMW_TL10S64_WORKAROUND)
#define compat_try_to_freeze() try_to_freeze()
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
#define compat_try_to_freeze() try_to_freeze(PF_FREEZE)
#elif defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_SOFTWARE_SUSPEND2)
#include "compat_mm.h"
#include <linux/errno.h>
#include <linux/suspend.h>
static inline int compat_try_to_freeze(void) {
if (current->flags & PF_FREEZE) {
refrigerator(PF_FREEZE);
return 1;
} else {
return 0;
}
}
#else
static inline int compat_try_to_freeze(void) { return 0; }
#endif
/*
* As of 2.6.23-rc1, kernel threads are no longer freezable by
* default. Instead, kernel threads that need to be frozen must opt-in
* by calling set_freezable() as soon as the thread is created.
*/
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22)
#define compat_set_freezable() do { set_freezable(); } while (0)
#else
#define compat_set_freezable() do {} while (0)
#endif
/*
* Around 2.6.27 kernel stopped sending signals to kernel
* threads being frozen, instead threads have to check
* freezing() or use wait_event_freezable(). Unfortunately
* wait_event_freezable() completely hides the fact that
* thread was frozen from calling code and sometimes we do
* want to know that.
*/
#ifdef PF_FREEZER_NOSIG
#define compat_wait_check_freezing() freezing(current)
#else
#define compat_wait_check_freezing() (0)
#endif
/*
* Since 2.6.27-rc2 kill_proc() is gone... Replacement (GPL-only!)
* API is available since 2.6.19. Use them from 2.6.27-rc1 up.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
typedef int compat_pid;
#define compat_find_get_pid(pid) (pid)
#define compat_put_pid(pid) do { } while (0)
#define compat_kill_pid(pid, sig, flag) kill_proc(pid, sig, flag)
#else
typedef struct pid * compat_pid;
#define compat_find_get_pid(pid) find_get_pid(pid)
#define compat_put_pid(pid) put_pid(pid)
#define compat_kill_pid(pid, sig, flag) kill_pid(pid, sig, flag)
#endif
#endif /* __COMPAT_SCHED_H__ */
vmci-only/shared/backdoor_def.h 0000444 0000000 0000000 00000023444 12522066074 015523 0 ustar root root /*********************************************************
* Copyright (C) 1998 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* backdoor_def.h --
*
* This contains backdoor defines that can be included from
* an assembly language file.
*/
#ifndef _BACKDOOR_DEF_H_
#define _BACKDOOR_DEF_H_
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMKERNEL
#include "includeCheck.h"
/*
* If you want to add a new low-level backdoor call for a guest userland
* application, please consider using the GuestRpc mechanism instead. --hpreg
*/
#define BDOOR_MAGIC 0x564D5868
/* Low-bandwidth backdoor port. --hpreg */
#define BDOOR_PORT 0x5658
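/*
 * Illustrative sketch (not part of the original header) of how a guest
 * issues a low-bandwidth backdoor call on x86: load BDOOR_MAGIC into EAX,
 * the command into ECX and BDOOR_PORT into DX, then execute IN. The
 * hypervisor intercepts the port access and returns results in the GPRs.
 *
 *   uint32 eax, ebx, ecx, edx;
 *   __asm__ __volatile__("inl %%dx, %%eax"
 *                        : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
 *                        : "a" (BDOOR_MAGIC), "c" (BDOOR_CMD_GETVERSION),
 *                          "d" (BDOOR_PORT)
 *                        : "memory");
 */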
#define BDOOR_CMD_GETMHZ 1
/*
* BDOOR_CMD_APMFUNCTION is used by:
*
* o The FrobOS code, which instead should either program the virtual chipset
* (like the new BIOS code does, matthias offered to implement that), or not
* use any VM-specific code (which requires that we correctly implement
* "power off on CLI HLT" for SMP VMs, boris offered to implement that)
*
* o The old BIOS code, which will soon be jettisoned
*
* --hpreg
*/
#define BDOOR_CMD_APMFUNCTION 2 /* CPL0 only. */
#define BDOOR_CMD_GETDISKGEO 3
#define BDOOR_CMD_GETPTRLOCATION 4
#define BDOOR_CMD_SETPTRLOCATION 5
#define BDOOR_CMD_GETSELLENGTH 6
#define BDOOR_CMD_GETNEXTPIECE 7
#define BDOOR_CMD_SETSELLENGTH 8
#define BDOOR_CMD_SETNEXTPIECE 9
#define BDOOR_CMD_GETVERSION 10
#define BDOOR_CMD_GETDEVICELISTELEMENT 11
#define BDOOR_CMD_TOGGLEDEVICE 12
#define BDOOR_CMD_GETGUIOPTIONS 13
#define BDOOR_CMD_SETGUIOPTIONS 14
#define BDOOR_CMD_GETSCREENSIZE 15
#define BDOOR_CMD_MONITOR_CONTROL 16 /* Disabled by default. */
#define BDOOR_CMD_GETHWVERSION 17
#define BDOOR_CMD_OSNOTFOUND 18 /* CPL0 only. */
#define BDOOR_CMD_GETUUID 19
#define BDOOR_CMD_GETMEMSIZE 20
#define BDOOR_CMD_HOSTCOPY 21 /* Devel only. */
//#define BDOOR_CMD_SERVICE_VM 22 /* Not in use. Never shipped. */
#define BDOOR_CMD_GETTIME 23 /* Deprecated -> GETTIMEFULL. */
#define BDOOR_CMD_STOPCATCHUP 24
#define BDOOR_CMD_PUTCHR 25 /* Disabled by default. */
#define BDOOR_CMD_ENABLE_MSG 26 /* Devel only. */
#define BDOOR_CMD_GOTO_TCL 27 /* Devel only. */
#define BDOOR_CMD_INITPCIOPROM 28 /* CPL 0 only. */
//#define BDOOR_CMD_INT13 29 /* Not in use. */
#define BDOOR_CMD_MESSAGE 30
#define BDOOR_CMD_SIDT 31
#define BDOOR_CMD_SGDT 32
#define BDOOR_CMD_SLDT_STR 33
#define BDOOR_CMD_ISACPIDISABLED 34
//#define BDOOR_CMD_TOE 35 /* Not in use. */
#define BDOOR_CMD_ISMOUSEABSOLUTE 36
#define BDOOR_CMD_PATCH_SMBIOS_STRUCTS 37 /* CPL 0 only. */
#define BDOOR_CMD_MAPMEM 38 /* Devel only */
#define BDOOR_CMD_ABSPOINTER_DATA 39
#define BDOOR_CMD_ABSPOINTER_STATUS 40
#define BDOOR_CMD_ABSPOINTER_COMMAND 41
//#define BDOOR_CMD_TIMER_SPONGE 42 /* Not in use. */
#define BDOOR_CMD_PATCH_ACPI_TABLES 43 /* CPL 0 only. */
//#define BDOOR_CMD_DEVEL_FAKEHARDWARE 44 /* Not in use. */
#define BDOOR_CMD_GETHZ 45
#define BDOOR_CMD_GETTIMEFULL 46
#define BDOOR_CMD_STATELOGGER 47 /* Disabled by default. */
#define BDOOR_CMD_CHECKFORCEBIOSSETUP 48 /* CPL 0 only. */
#define BDOOR_CMD_LAZYTIMEREMULATION 49 /* CPL 0 only. */
#define BDOOR_CMD_BIOSBBS 50 /* CPL 0 only. */
//#define BDOOR_CMD_VASSERT 51 /* Not in use. */
#define BDOOR_CMD_ISGOSDARWIN 52
#define BDOOR_CMD_DEBUGEVENT 53
#define BDOOR_CMD_OSNOTMACOSXSERVER 54 /* CPL 0 only. */
#define BDOOR_CMD_GETTIMEFULL_WITH_LAG 55
#define BDOOR_CMD_ACPI_HOTPLUG_DEVICE 56 /* Devel only. */
#define BDOOR_CMD_ACPI_HOTPLUG_MEMORY 57 /* Devel only. */
#define BDOOR_CMD_ACPI_HOTPLUG_CBRET 58 /* Devel only. */
//#define BDOOR_CMD_GET_HOST_VIDEO_MODES 59 /* Not in use. */
#define BDOOR_CMD_ACPI_HOTPLUG_CPU 60 /* Devel only. */
//#define BDOOR_CMD_USB_HOTPLUG_MOUSE 61 /* Not in use. Never shipped. */
#define BDOOR_CMD_XPMODE 62 /* CPL 0 only. */
#define BDOOR_CMD_NESTING_CONTROL 63
#define BDOOR_CMD_FIRMWARE_INIT 64 /* CPL 0 only. */
#define BDOOR_CMD_FIRMWARE_ACPI_SERVICES 65 /* CPL 0 only. */
# define BDOOR_CMD_FAS_GET_TABLE_SIZE 0
# define BDOOR_CMD_FAS_GET_TABLE_DATA 1
# define BDOOR_CMD_FAS_GET_PLATFORM_NAME 2
# define BDOOR_CMD_FAS_GET_PCIE_OSC_MASK 3
# define BDOOR_CMD_FAS_GET_APIC_ROUTING 4
# define BDOOR_CMD_FAS_GET_TABLE_SKIP 5
# define BDOOR_CMD_FAS_GET_SLEEP_ENABLES 6
#define BDOOR_CMD_SENDPSHAREHINTS 66
#define BDOOR_CMD_ENABLE_USB_MOUSE 67
#define BDOOR_CMD_GET_VCPU_INFO 68
# define BDOOR_CMD_VCPU_SLC64 0
# define BDOOR_CMD_VCPU_SYNC_VTSCS 1
# define BDOOR_CMD_VCPU_HV_REPLAY_OK 2
# define BDOOR_CMD_VCPU_RESERVED 31
#define BDOOR_CMD_EFI_SERIALCON_CONFIG 69 /* CPL 0 only. */
#define BDOOR_CMD_BUG328986 70 /* CPL 0 only. */
#define BDOOR_CMD_FIRMWARE_ERROR 71 /* CPL 0 only. */
# define BDOOR_CMD_FE_INSUFFICIENT_MEM 0
# define BDOOR_CMD_FE_EXCEPTION 1
#define BDOOR_CMD_VMK_INFO 72
#define BDOOR_CMD_EFI_BOOT_CONFIG 73 /* CPL 0 only. */
# define BDOOR_CMD_EBC_LEGACYBOOT_ENABLED 0
# define BDOOR_CMD_EBC_GET_ORDER 1
# define BDOOR_CMD_EBC_SHELL_ACTIVE 2
#define BDOOR_CMD_GET_HW_MODEL 74 /* CPL 0 only. */
#define BDOOR_CMD_GET_SVGA_CAPABILITIES 75 /* CPL 0 only. */
#define BDOOR_CMD_GET_FORCE_X2APIC 76 /* CPL 0 only */
#define BDOOR_CMD_SET_PCI_HOLE 77 /* CPL 0 only */
#define BDOOR_CMD_GET_PCI_HOLE 78 /* CPL 0 only */
#define BDOOR_CMD_GET_PCI_BAR 79 /* CPL 0 only */
#define BDOOR_CMD_SHOULD_GENERATE_SYSTEMID 80 /* CPL 0 only */
#define BDOOR_CMD_MAX 81
/*
* IMPORTANT NOTE: When modifying the behavior of an existing backdoor command,
* you must adhere to the semantics expected by the oldest Tools who use that
* command. Specifically, do not alter the way in which the command modifies
* the registers. Otherwise backwards compatibility will suffer.
*/
/* Processing mode for guest pshare hints (SENDPSHAREHINTS cmd) */
#define BDOOR_PSHARE_HINTS_ASYNC 0
#define BDOOR_PSHARE_HINTS_SYNC 1
#define BDOOR_PSHARE_HINTS_TYPE(ecx) (((ecx) >> 16) & 0x1)
/* Version of backdoor pshare hints protocol */
#define BDOOR_PSHARE_HINTS_VERSION 1
#define BDOOR_PSHARE_HINTS_VER(ecx) (((ecx) >> 17) & 0x7f)
/* Task applied to backdoor pshare hints */
#define BDOOR_PSHARE_HINTS_CMD_SHARE 0
#define BDOOR_PSHARE_HINTS_CMD_DROP 1
#define BDOOR_PSHARE_HINTS_CMD_MAX 2
#define BDOOR_PSHARE_HINTS_CMD(ecx) (((ecx) >> 24) & 0xff)
/* Nesting control operations */
#define NESTING_CONTROL_RESTRICT_BACKDOOR 0
#define NESTING_CONTROL_OPEN_BACKDOOR 1
#define NESTING_CONTROL_QUERY 2
#define NESTING_CONTROL_MAX 2
/* EFI Boot Order options, nibble-sized. */
#define EFI_BOOT_ORDER_TYPE_EFI 0x0
#define EFI_BOOT_ORDER_TYPE_LEGACY 0x1
#define EFI_BOOT_ORDER_TYPE_NONE 0xf
/* High-bandwidth backdoor port. --hpreg */
#define BDOORHB_PORT 0x5659
#define BDOORHB_CMD_MESSAGE 0
#define BDOORHB_CMD_VASSERT 1
#define BDOORHB_CMD_MAX 2
/*
* There is another backdoor which allows access to certain TSC-related
* values using otherwise illegal PMC indices when the pseudo_perfctr
* control flag is set.
*/
#define BDOOR_PMC_HW_TSC 0x10000
#define BDOOR_PMC_REAL_NS 0x10001
#define BDOOR_PMC_APPARENT_NS 0x10002
#define BDOOR_PMC_PSEUDO_TSC 0x10003
#define IS_BDOOR_PMC(index) (((index) | 3) == 0x10003)
#define BDOOR_CMD(ecx) ((ecx) & 0xffff)
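/*
 * Illustrative (not part of the original header): ECX multiplexes the
 * command in its low 16 bits with command-specific fields above, e.g. for
 * BDOOR_CMD_SENDPSHAREHINTS:
 *
 *   ecx = BDOOR_CMD_SENDPSHAREHINTS |
 *         (BDOOR_PSHARE_HINTS_SYNC << 16) |
 *         (BDOOR_PSHARE_HINTS_VERSION << 17) |
 *         (BDOOR_PSHARE_HINTS_CMD_SHARE << 24);
 *
 * BDOOR_CMD(ecx) then recovers 66, and the BDOOR_PSHARE_HINTS_* accessor
 * macros above recover the type, version and command fields.
 */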
/* Sub commands for BDOOR_CMD_VMK_INFO */
#define BDOOR_CMD_VMK_INFO_ENTRY 1
#ifdef VMM
/*
*----------------------------------------------------------------------
*
* Backdoor_CmdRequiresFullyValidVCPU --
*
* A few backdoor commands require the full VCPU to be valid
* (including GDTR, IDTR, TR and LDTR). The rest get read/write
* access to GPRs and read access to Segment registers (selectors).
*
* Result:
* True iff VECX contains a command that require the full VCPU to
* be valid.
*
*----------------------------------------------------------------------
*/
static INLINE Bool
Backdoor_CmdRequiresFullyValidVCPU(unsigned cmd)
{
return cmd == BDOOR_CMD_SIDT ||
cmd == BDOOR_CMD_SGDT ||
cmd == BDOOR_CMD_SLDT_STR;
}
#endif
#endif
vmci-only/shared/vmci_page_channel.h 0000444 0000000 0000000 00000056304 12522066074 016544 0 ustar root root /*********************************************************
* Copyright (C) 2011-2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmci_page_channel.h
*
* vPageChannel structure and functions.
*/
#ifndef _VMCI_PAGE_CHANNEL_H_
#define _VMCI_PAGE_CHANNEL_H_
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#include "includeCheck.h"
#include "vmci_defs.h"
#include "vmci_call_defs.h"
/** \cond PRIVATE */
#define VPAGECHANNEL_MAX_TX_BUF_SIZE (1 << 14)
#define VPAGECHANNEL_MAX_PAGES_PER_TX_BUFFER \
(VPAGECHANNEL_MAX_TX_BUF_SIZE / PAGE_SIZE + 1)
/** \endcond */
/**
* \brief Get a pointer to the elements in a packet.
*
* Returns a pointer to the elements at the end of a page channel packet.
*
* \see VPageChannelElem
* \see VPageChannelPacket
*/
#define VPAGECHANNEL_PACKET_ELEMS(packet) \
(VPageChannelElem *)((char *)(packet) + \
sizeof(VPageChannelPacket) + \
packet->msgLen)
/**
* \brief Get a pointer to the message in a packet.
*
* Returns a pointer to the message embedded in a page channel packet.
*
* \see VPageChannelPacket
*/
#define VPAGECHANNEL_PACKET_MESSAGE(packet) \
(char *)((char *)(packet) + sizeof(VPageChannelPacket))
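/*
 * Illustrative layout (not part of the original header): a packet with
 * msgLen == 16 and numElems == 2 occupies
 *
 *   sizeof(VPageChannelPacket) + 16 + 2 * sizeof(VPageChannelElem)
 *
 * bytes: VPAGECHANNEL_PACKET_MESSAGE() points just past the fixed header,
 * and VPAGECHANNEL_PACKET_ELEMS() just past the embedded message.
 */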
/**
* \brief Notify client directly, and do not read packets.
*
* This flag indicates that the channel should invoke the client's receive
* callback directly when any packets are available. If not specified, then
* when a notification is received, packets are read from the channel and the
* callback invoked for each one separately.
*
* \note Not applicable to VMKernel.
*
* \see VPageChannel_CreateInVM()
*/
#define VPAGECHANNEL_FLAGS_NOTIFY_ONLY 0x1
/**
* \brief Invoke client's receive callback in delayed context.
*
* This flag indicates that all callbacks run in a delayed context, and the
* caller and callback are allowed to block. If not specified, then callbacks
* run in interrupt context and the channel will not block, and the caller
* is not allowed to block.
*
* \note Not applicable to VMKernel.
*
* \see VPageChannel_CreateInVM()
*/
#define VPAGECHANNEL_FLAGS_RECV_DELAYED 0x2
/**
* \brief Send from an atomic context.
*
* This flag indicates that the client wishes to call Send() from an atomic
* context and that the channel should not block. If the channel is not
* allowed to block, then the channel's pages are permanently mapped and
* pinned. Note that this will limit the total size of the channel to
* VMCI_MAX_PINNED_QP_MEMORY.
*
* \note Not applicable to VMKernel.
*
* \see VPageChannel_CreateInVM()
*/
#define VPAGECHANNEL_FLAGS_SEND_WHILE_ATOMIC 0x4
/**
* \brief An element describing a data range.
*
* Describes a data range, starting at a base address and for a given
* length, i.e., an element of a scatter-gather list. Indicates physical
* address for the guest and machine address for hypervisor. Can be passed
* via packets or buffers.
*
* \note Structure is packed.
*
* \see VPageChannelPacket
* \see VPageChannelBuffer
*/
typedef
#include "vmware_pack_begin.h"
struct VPageChannelElem {
union {
/** \brief Physical address for guest. */
uint64 pa;
/** \brief Machine address for hypervisor. */
uint64 ma;
};
/** \brief Length of range. */
uint32 le;
}
#include "vmware_pack_end.h"
VPageChannelElem;
/**
* \brief Page channel page types.
*
* The various types of page channel packets.
*
* \see VPageChannelPacket
*/
typedef enum {
/** \brief Data packet. */
VPCPacket_Data = 1,
/** \brief Completion notification, from hypervisor to guest. */
VPCPacket_Completion_Notify,
/** \cond PRIVATE */
/** \brief Connect to hypervisor, internal. */
VPCPacket_GuestConnect,
/** \brief Complete connection handshake, internal. */
VPCPacket_HyperConnect,
/** \brief Request buffers, internal. */
VPCPacket_RequestBuffer,
/** \brief Set buffers, internal. */
VPCPacket_SetRecvBuffer,
/** \brief Hypervisor channel disconnect, internal. */
VPCPacket_HyperDisconnect,
/** \brief Guest channel ACK hypervisor disconnect, internal. */
VPCPacket_GuestDisconnect,
/** \endcond */
} VPageChannelPacketType;
/**
* \brief Page channel packet structure.
*
* A packet structure for passing control/data between guest and hypervisor.
* Can optionally contain a message and also a number of elements.
*
* \note Structure is packed.
*
* \see VPageChannelPacketType
*/
typedef
#include "vmware_pack_begin.h"
struct VPageChannelPacket {
/** \brief Type of packet. */
VPageChannelPacketType type;
/** \brief Length of optional message. */
uint32 msgLen;
/** \brief Number of optional elements in packet. */
uint32 numElems;
/** \brief Followed by msgLen of message and numElems VPageChannelElem. */
}
#include "vmware_pack_end.h"
VPageChannelPacket;
/**
* \brief Page channel buffer structure.
*
* A buffer of elements (a scatter-gather list).
*
* \note Structure is packed.
*
* \see VPageChannelElem
*/
typedef
#include "vmware_pack_begin.h"
struct VPageChannelBuffer {
/** \brief Number of elements. */
uint32 numElems;
/** \brief First element. */
VPageChannelElem elems[1];
/** \brief Followed by numElems - 1 of VPageChannelElem. */
}
#include "vmware_pack_end.h"
VPageChannelBuffer;
/** \cond PRIVATE */
typedef
#include "vmware_pack_begin.h"
struct VPageChannelGuestConnectMessage {
/** \brief Guest channel's datagram handle for control channel. */
VMCIHandle dgHandle;
/** \brief Guest channel's queuepair handle. */
VMCIHandle qpHandle;
/** \brief Size of producer queue in queuepair in bytes. */
uint64 produceQSize;
/** \brief Size of consumer queue in queuepair in bytes. */
uint64 consumeQSize;
/** \brief Guest channel's doorbell handle. */
VMCIHandle doorbellHandle;
}
#include "vmware_pack_end.h"
VPageChannelGuestConnectMessage;
typedef
#include "vmware_pack_begin.h"
struct VPageChannelHyperConnectMessage {
/** \brief Hypervisor's doorbell handle. */
VMCIHandle doorbellHandle;
}
#include "vmware_pack_end.h"
VPageChannelHyperConnectMessage;
/** \endcond PRIVATE */
/** \cond PRIVATE */
typedef enum VPageChannelState {
VPCState_Free = 0,
VPCState_Unconnected,
VPCState_Connecting,
VPCState_Connected,
VPCState_Disconnecting,
VPCState_Disconnected,
} VPageChannelState;
/** \endcond PRIVATE */
/**
* \brief Opaque page channel type.
*/
struct VPageChannel;
typedef struct VPageChannel VPageChannel;
/**
* \brief Client receive callback type.
*
* Type of receive callback, invoked when there are data packets in the
* channel. The client provides a callback with this type to
* VPageChannel_CreateInVM(). If VPAGECHANNEL_FLAGS_NOTIFY_ONLY is specified
* in the channel creation flags, then \c packet is \c NULL; otherwise,
* \c packet points to a channel packet.
*
* \see VPageChannel_CreateInVM()
* \see VPageChannelPacket
*/
typedef void (*VPageChannelRecvCB)(void *clientData,
VPageChannelPacket *packet);
#if !defined(VMKERNEL)
/**
* \brief Client element allocation callback type.
*
* Type of element allocation callback, invoked when the channel needs
* elements. The client provides a callback of this type to
* VPageChannel_CreateInVM().
*
* \see VPageChannel_CreateInVM()
* \see VPageChannelElem
* \see VPageChannelFreeElemFn
*/
typedef int (*VPageChannelAllocElemFn)(void *clientData,
VPageChannelElem *elems,
int numElems);
/**
* \brief Client element release callback type.
*
* Type of element release callback, invoked when the channel releases
* elements. The client provides a callback of this type to
* VPageChannel_CreateInVM().
*
* \see VPageChannel_CreateInVM()
* \see VPageChannelElem
* \see VPageChannelAllocElemFn
*/
typedef void (*VPageChannelFreeElemFn)(void *clientData,
VPageChannelElem *elems,
int numElems);
/*
************************************************************************
* VPageChannel_CreateInVM */ /**
*
* \brief Create guest page channel.
*
* Creates a page channel in the guest. The channel should be released
* with VPageChannel_Destroy().
*
* \note Only applicable in the guest.
*
* \see VPageChannel_CreateInVMK()
* \see VPageChannel_Destroy()
*
* \param[out] channel Pointer to a newly constructed page
* channel if successful.
* \param[in] resourceId Resource ID on which the channel should
* register its control channel.
* \param[in] peerResourceId Resource ID of peer's control channel.
* \param[in] produceQSize Size of producer queue in queuepair in
* bytes.
* \param[in] consumeQSize Size of consumer queue in queuepair in
* bytes.
* \param[in] flags Channel flags.
* \param[in] recvCB Client's receive callback.
* \param[in] clientRecvData Client data for client's receive
* callback.
* \param[in] elemAlloc Element allocation callback for
* allocating page ranges (scatter-gather
* elements).
* \param[in] allocClientData Client data for element allocation
* callback.
* \param[in] elemFree Element release callback for elements.
* \param[in] freeClientData Client data for element release
* callback.
* \param[in] defRecvBufs Default number of elements sent to
* hypervisor channel.
* \param[in] maxRecvBufs Maximum number of elements that can be
* sent to the hypervisor channel.
*
* \retval VMCI_SUCCESS Creation succeeded, \c *channel contains
* a pointer to a valid channel.
* \retval other Failure.
*
************************************************************************
*/
int VPageChannel_CreateInVM(VPageChannel **channel,
VMCIId resourceId,
VMCIId peerResourceId,
uint64 produceQSize,
uint64 consumeQSize,
uint32 flags,
VPageChannelRecvCB recvCB,
void *clientRecvData,
VPageChannelAllocElemFn elemAlloc,
void *allocClientData,
VPageChannelFreeElemFn elemFree,
void *freeClientData,
int defRecvBufs,
int maxRecvBufs);
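/*
 * Example: a minimal guest-side creation sketch. This is illustrative
 * only; the resource IDs, queue sizes, buffer counts and callback bodies
 * below are assumptions for the example, not values required by the API.
 *
 *    static void
 *    ExampleRecvCB(void *clientData, VPageChannelPacket *packet)
 *    {
 *       (void)clientData;
 *       (void)packet; // Consume the packet's message and elements here.
 *    }
 *
 *    static int
 *    ExampleAllocElems(void *clientData, VPageChannelElem *elems, int numElems)
 *    {
 *       (void)clientData;
 *       (void)elems;  // Fill elems[0..numElems) with page ranges here.
 *       return numElems;
 *    }
 *
 *    static void
 *    ExampleFreeElems(void *clientData, VPageChannelElem *elems, int numElems)
 *    {
 *       (void)clientData;
 *       (void)elems;  // Release the page ranges handed out above.
 *       (void)numElems;
 *    }
 *
 *    VPageChannel *chan;
 *    int rc = VPageChannel_CreateInVM(&chan, 1024, 1024,
 *                                     256 * 1024, 256 * 1024, 0,
 *                                     ExampleRecvCB, NULL,
 *                                     ExampleAllocElems, NULL,
 *                                     ExampleFreeElems, NULL,
 *                                     32, 256);
 *    if (rc == VMCI_SUCCESS) {
 *       VPageChannel_Destroy(chan);
 *    }
 */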
#else // VMKERNEL
/**
* \brief VM memory access off callback type.
*
* This callback is invoked when the memory of the VM containing the peer
* endpoint becomes inaccessible, for example due to a crash. When this
* occurs, the client should unmap any guest pages and clean up any state.
* This callback runs in a blockable context. The client is not allowed to
* call VPageChannel_Destroy() from inside the callback, or it will deadlock,
* since that function will wait for the callback to complete.
*
* \note Only applicable on VMKernel.
*
* \see VPageChannel_CreateInVMK()
*/
typedef void (*VPageChannelMemAccessOffCB)(void *clientData);
/*
************************************************************************
* VPageChannel_CreateInVMK */ /**
*
* \brief Create a page channel in VMKernel.
*
* Creates a page channel. The channel should be released with
* VPageChannel_Destroy().
*
* \note Only applicable on VMKernel.
*
* \see VPageChannel_CreateInVM()
* \see VPageChannel_Destroy()
*
* \param[out] channel Pointer to a newly constructed page
* channel if successful.
* \param[in] resourceId Resource ID on which to register
* control channel.
* \param[in] recvCB Client's receive callback.
* \param[in] clientRecvData Client data for receive callback.
* \param[in] memAccessOffCB Client's mem access off callback.
* \param[in] memAccessOffData Client data for mem access off.
*
* \retval VMCI_SUCCESS Creation succeeded, \c *channel
* contains a pointer to a valid channel.
* \retval other Failure.
*
***********************************************************************
*/
int VPageChannel_CreateInVMK(VPageChannel **channel,
VMCIId resourceId,
VPageChannelRecvCB recvCB,
void *clientRecvData,
VPageChannelMemAccessOffCB memAccessOffCB,
void *memAccessOffData);
/*
************************************************************************
* VPageChannel_ReserveBuffers */ /**
*
* \brief Reserve guest elements.
*
* Reserve sufficient guest elements to cover the given length. The
* buffers can then be posted to the guest. This allocates both the
* buffer and the elements within the buffer.
*
* \note Only applicable on VMKernel.
*
* \see VPageChannel_ReleaseBuffers()
*
* \param[in] channel Page channel.
* \param[in] dataLen Length to reserve in bytes.
* \param[out] buffer Pointer to a buffer containing elements to cover
* the given length if successful.
*
* \retval VMCI_SUCCESS Reservation succeeded, \c *buffer contains
* a pointer to a valid buffer.
* \retval other Failure.
*
************************************************************************
*/
int VPageChannel_ReserveBuffers(VPageChannel *channel,
size_t dataLen,
VPageChannelBuffer **buffer);
/*
************************************************************************
* VPageChannel_ReleaseBuffers */ /**
*
* \brief Release guest elements.
*
* \note Only applicable on VMKernel.
*
* \see VPageChannel_ReserveBuffers()
*
* Release guest elements previously reserved with
* VPageChannel_ReserveBuffers(). If the buffers were sent to the guest,
* then only the buffer itself should be released, i.e.,
* \c returnToFreePool should be \c FALSE; the guest will release the
* buffers on completion. Otherwise, if for some reason they are not
* sent after reserving them, then \c returnToFreePool should be set to
* \c TRUE.
*
* \param[in] channel Page channel.
* \param[in] buffer Buffer to be released.
* \param[in] returnToFreePool If \c TRUE, then release the elements
* of the buffer along with the buffer
* itself. If \c FALSE, then release only
* the buffer pointer itself.
*
************************************************************************
*/
void VPageChannel_ReleaseBuffers(VPageChannel *channel,
VPageChannelBuffer *buffer,
Bool returnToFreePool);
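/*
 * Example: the intended reserve/send/release flow on the VMKernel side,
 * as an illustrative sketch only. VPCPacket_Data is assumed to be the data
 * packet type defined earlier in this header; the 4096-byte length is
 * arbitrary.
 *
 *    VPageChannelBuffer *buf;
 *    if (VPageChannel_ReserveBuffers(channel, 4096, &buf) == VMCI_SUCCESS) {
 *       if (VPageChannel_Send(channel, VPCPacket_Data,
 *                             NULL, 0, buf) == VMCI_SUCCESS) {
 *          // Sent: the guest now owns the elements, so release only the
 *          // buffer wrapper.
 *          VPageChannel_ReleaseBuffers(channel, buf, FALSE);
 *       } else {
 *          // Never sent: return the elements to the free pool as well.
 *          VPageChannel_ReleaseBuffers(channel, buf, TRUE);
 *       }
 *    }
 */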
/*
************************************************************************
* VPageChannel_CompletionNotify */ /**
*
* \brief Notify channel of completion.
*
* This function is called when the client is finished using the elements
* (scatter-gather list) of a packet. It generates a notification to the
* guest to pass ownership of the buffers back to the guest. It can
* also be used to read back the data from the hypervisor and send
* it to the guest.
*
* \note Only applicable on VMKernel.
*
* \see VPageChannel_ReserveBuffers
*
* \param[in] channel Channel on which I/O is complete.
* \param[in] message Optional message to send to guest.
* \param[in] len Length of optional message.
* \param[in] buffer Buffer used for I/O.
*
************************************************************************
*/
int VPageChannel_CompletionNotify(VPageChannel *channel,
char *message,
int len,
VPageChannelBuffer *buffer);
/*
************************************************************************
* VPageChannel_MapToMa */ /**
*
* \brief Map guest PA in an element to a list of MAs.
*
* Map a guest physical address to a list of hypervisor machine
* addresses.
*
* \note Only applicable on VMKernel.
*
* \param[in] channel Channel on which to map.
* \param[in] paElem Guest's physical address.
* \param[out] maElems Hypervisor machine addresses.
* \param[in] numElems Max number of hypervisor elements.
*
* \retval elems Number of mapped elements.
*
************************************************************************
*/
int VPageChannel_MapToMa(VPageChannel *channel,
VPageChannelElem paElem,
VPageChannelElem *maElems,
uint32 numElems);
/*
************************************************************************
* VPageChannel_UnmapMa */ /**
*
* \brief Unmap MA for a buffer.
*
* Unmap hypervisor machine addresses referring to guest physical
* addresses.
*
* \note Only applicable on VMKernel.
*
* \see VPageChannel_MapToMa
*
* \param[in] channel Channel for which to unmap.
* \param[in] buffer Buffer containing elements to unmap.
* \param[in] numElems Number of elements to unmap.
*
* \retval 0 Unmap successful.
* \retval -1 World not found for channel.
*
************************************************************************
*/
int VPageChannel_UnmapMa(VPageChannel *channel,
VPageChannelBuffer *buffer,
int numElems);
#endif // VMKERNEL
/*
************************************************************************
* VPageChannel_Destroy */ /**
*
* \brief Destroy the given channel.
*
* Destroy the given channel. This will disconnect from the peer
* channel (if connected) and release all resources.
*
* \see VPageChannel_CreateInVMK
* \see VPageChannel_CreateInVM
*
* \param[in] channel The channel to be destroyed.
*
************************************************************************
*/
void VPageChannel_Destroy(VPageChannel *channel);
/*
************************************************************************
* VPageChannel_Send */ /**
*
* \brief Send a packet to the channel's peer.
*
* Send a packet to the channel's peer. A message and a number of
* elements may optionally be sent. If the send is successful, the
* elements are owned by the peer and only the buffer itself should
* be released, but not the elements within. If the send fails, the
* client should release the buffer and the elements.
*
* \see VPageChannel_SendPacket
*
* \param[in] channel Channel on which to send.
* \param[in] type Type of packet to send.
* \param[in] message Optional message to send.
* \param[in] len Length of optional message.
* \param[in] buffer Buffer (of elements) to send.
*
* \retval VMCI_SUCCESS Packet successfully sent, buffer elements
* owned by peer.
* \retval other Failure to send, client should release
* elements.
*
************************************************************************
*/
int VPageChannel_Send(VPageChannel *channel,
VPageChannelPacketType type,
char *message,
int len,
VPageChannelBuffer *buffer);
/*
************************************************************************
* VPageChannel_SendPacket */ /**
*
* \brief Send the given packet to the channel's peer.
*
* Send a client-constructed packet to the channel's peer. If the
* send is successful, any elements in the packet are owned by the
* peer. Otherwise, the client retains ownership.
*
* \see VPageChannel_Send
*
* \param[in] channel Channel on which to send.
* \param[in] packet Packet to be sent.
*
* \retval VMCI_SUCCESS Packet successfully sent, buffer elements
* owned by peer.
* \retval other Failure to send, client should release
* elements.
*
************************************************************************
*/
int VPageChannel_SendPacket(VPageChannel *channel,
VPageChannelPacket *packet);
/*
************************************************************************
* VPageChannel_PollRecvQ */ /**
*
* \brief Poll the channel's receive queue for packets.
*
* Poll the channel's receive queue for packets from the peer. If any
* packets are available, the channel's receive callback will be invoked.
*
* \param[in] channel Channel to poll.
*
************************************************************************
*/
void VPageChannel_PollRecvQ(VPageChannel *channel);
/*
************************************************************************
* VPageChannelPacket_BufferLen */ /**
*
* \brief Determine the length of a packet.
*
* Determine the length of the given packet in bytes.
*
* \param[in] packet Packet for which length is to be determined.
*
* \retval bytes Size of the packet in bytes.
*
************************************************************************
*/
static INLINE size_t
VPageChannelPacket_BufferLen(VPageChannelPacket *packet) // IN
{
size_t len, i;
VPageChannelElem *elems;
ASSERT(packet);
len = 0;
elems = VPAGECHANNEL_PACKET_ELEMS(packet);
for (i = 0; i < packet->numElems; i++) {
len += elems[i].le;
}
return len;
}
/** \cond PRIVATE */
#if defined(linux) && !defined(VMKERNEL)
#include "compat_pci.h"
#define vmci_pci_map_page(_pg, _off, _sz, _dir) \
pci_map_page(NULL, (_pg), (_off), (_sz), (_dir))
#define vmci_pci_unmap_page(_dma, _sz, _dir) \
pci_unmap_page(NULL, (_dma), (_sz), (_dir))
#endif // linux && !VMKERNEL
/** \endcond PRIVATE */
#endif // _VMCI_PACKET_H_
vmci-only/shared/compat_slab.h 0000444 0000000 0000000 00000006653 12522066073 015407 0 ustar root root /*********************************************************
* Copyright (C) 2005 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_SLAB_H__
# define __COMPAT_SLAB_H__
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 2, 0)
# include <linux/slab.h>
#else
# include <linux/malloc.h>
#endif
/*
* Before 2.6.20, kmem_cache_t was the accepted way to refer to a kmem_cache
* structure. Prior to 2.6.15, this structure was called kmem_cache_s, and
* afterwards it was renamed to kmem_cache. Here we keep things simple and use
* the accepted typedef up to the point at which it was deprecated, then switch
* over to the kmem_cache name.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
# define compat_kmem_cache struct kmem_cache
#else
# define compat_kmem_cache kmem_cache_t
#endif
/*
* Up to 2.6.22, kmem_cache_create has six arguments: name, size, alignment,
* flags, constructor, and destructor. For some time after that, the kernel
* asserted that the destructor is NULL, and since 2.6.23-pre1 kmem_cache_create
* takes only five arguments - the destructor is gone.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) || defined(VMW_KMEMCR_HAS_DTOR)
#define compat_kmem_cache_create(name, size, align, flags, ctor) \
kmem_cache_create(name, size, align, flags, ctor, NULL)
#else
#define compat_kmem_cache_create(name, size, align, flags, ctor) \
kmem_cache_create(name, size, align, flags, ctor)
#endif
/*
* Up to 2.6.23 the kmem_cache constructor has three arguments: a pointer to the
* block to prepare (aka "this"), the cache it came from, and some unused flags.
* After 2.6.23 the flags were removed and the order of the "this" and cache
* parameters was swapped. Since 2.6.27-rc2 everything is different again: the
* ctor has only one argument.
*
* HAS_3_ARGS has precedence over HAS_2_ARGS if both are defined.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23) && !defined(VMW_KMEMCR_CTOR_HAS_3_ARGS)
# define VMW_KMEMCR_CTOR_HAS_3_ARGS
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26) && !defined(VMW_KMEMCR_CTOR_HAS_2_ARGS)
# define VMW_KMEMCR_CTOR_HAS_2_ARGS
#endif
#if defined(VMW_KMEMCR_CTOR_HAS_3_ARGS)
typedef void compat_kmem_cache_ctor(void *, compat_kmem_cache *, unsigned long);
#define COMPAT_KMEM_CACHE_CTOR_ARGS(arg) void *arg, \
compat_kmem_cache *cache, \
unsigned long flags
#elif defined(VMW_KMEMCR_CTOR_HAS_2_ARGS)
typedef void compat_kmem_cache_ctor(compat_kmem_cache *, void *);
#define COMPAT_KMEM_CACHE_CTOR_ARGS(arg) compat_kmem_cache *cache, \
void *arg
#else
typedef void compat_kmem_cache_ctor(void *);
#define COMPAT_KMEM_CACHE_CTOR_ARGS(arg) void *arg
#endif
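/*
 * Example: a constructor written once against COMPAT_KMEM_CACHE_CTOR_ARGS
 * so it builds on all the kernel variants above, plus a cache created with
 * it. Illustrative sketch only; "struct MyObj" and the cache name are made
 * up for the example.
 *
 *    static void
 *    MyCtor(COMPAT_KMEM_CACHE_CTOR_ARGS(obj))
 *    {
 *       memset(obj, 0, sizeof(struct MyObj));
 *    }
 *
 *    compat_kmem_cache *cache =
 *       compat_kmem_cache_create("my_cache", sizeof(struct MyObj), 0,
 *                                SLAB_HWCACHE_ALIGN, MyCtor);
 */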
#endif /* __COMPAT_SLAB_H__ */
vmci-only/shared/compat_highmem.h 0000444 0000000 0000000 00000002423 12522066073 016073 0 ustar root root /*********************************************************
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_HIGHMEM_H__
# define __COMPAT_HIGHMEM_H__
#include <linux/highmem.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
# define compat_kmap_atomic(_page) kmap_atomic(_page)
# define compat_kunmap_atomic(_page) kunmap_atomic(_page)
#else
# define compat_kmap_atomic(_page) kmap_atomic((_page), KM_USER0)
# define compat_kunmap_atomic(_page) kunmap_atomic((_page), KM_USER0)
#endif
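/*
 * Example: a short-lived atomic mapping that builds on either side of the
 * 3.5 API change. Illustrative sketch; "page" and "buf" come from elsewhere.
 *
 *    void *va = compat_kmap_atomic(page);
 *    memcpy(buf, va, PAGE_SIZE);
 *    compat_kunmap_atomic(va);
 */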
#endif /* __COMPAT_HIGHMEM_H__ */
vmci-only/shared/compat_timer.h 0000444 0000000 0000000 00000006551 12522066073 015603 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_TIMER_H__
# define __COMPAT_TIMER_H__
/*
* The del_timer_sync() API appeared in 2.3.43
* It became reliable in 2.4.0-test3
*
* --hpreg
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0)
# define compat_del_timer_sync(timer) del_timer_sync(timer)
#else
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43)
/* 2.3.43 removed asm/softirq.h's reference to bh_base. */
# include <linux/interrupt.h>
# endif
# include <asm/softirq.h>
static inline int
compat_del_timer_sync(struct timer_list *timer) // IN
{
int wasPending;
start_bh_atomic();
wasPending = del_timer(timer);
end_bh_atomic();
return wasPending;
}
#endif
/*
* The msleep_interruptible() API appeared in 2.6.9.
* It is based on the msleep() API, which appeared in 2.4.29.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 9)
# include <linux/delay.h>
# define compat_msleep_interruptible(msecs) msleep_interruptible(msecs)
# define compat_msleep(msecs) msleep(msecs)
#else
# include <linux/sched.h>
/*
* msecs_to_jiffies appeared in 2.6.7. For earlier kernels,
* fall back to slow-case code (we don't use this operation
* enough to need the performance).
*/
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 7)
# define msecs_to_jiffies(msecs) (((msecs) * HZ + 999) / 1000)
# endif
/*
* set_current_state appeared in 2.2.18.
*/
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18)
# define set_current_state(a) do { current->state = (a); } while(0)
# endif
static inline void
compat_msleep_interruptible(unsigned long msecs) // IN
{
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(msecs_to_jiffies(msecs) + 1);
}
static inline void
compat_msleep(unsigned long msecs) // IN
{
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(msecs_to_jiffies(msecs) + 1);
}
#endif
/*
* There is init_timer_deferrable() since 2.6.22.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
# define compat_init_timer_deferrable(timer) init_timer_deferrable(timer)
#else
# define compat_init_timer_deferrable(timer) init_timer(timer)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
static inline void compat_setup_timer(struct timer_list * timer,
void (*function)(unsigned long),
unsigned long data)
{
timer->function = function;
timer->data = data;
init_timer(timer);
}
#else
# define compat_setup_timer(timer, function, data) \
setup_timer(timer, function, data)
#endif
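/*
 * Example: arming and tearing down a timer through the compat helpers.
 * Illustrative sketch only; the callback and the 100 ms timeout are
 * assumptions for the example.
 *
 *    static struct timer_list exampleTimer;
 *
 *    static void
 *    ExampleTimerFn(unsigned long data)
 *    {
 *       (void)data; // Timer fired.
 *    }
 *
 *    compat_setup_timer(&exampleTimer, ExampleTimerFn, 0);
 *    exampleTimer.expires = jiffies + msecs_to_jiffies(100);
 *    add_timer(&exampleTimer);
 *    compat_del_timer_sync(&exampleTimer);
 */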
#endif /* __COMPAT_TIMER_H__ */
vmci-only/shared/compat_ioport.h 0000444 0000000 0000000 00000004041 12522066073 015767 0 ustar root root /*********************************************************
* Copyright (C) 2003 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_IOPORT_H__
# define __COMPAT_IOPORT_H__
#include <linux/ioport.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
static inline void *
compat_request_region(unsigned long start, unsigned long len, const char *name)
{
if (check_region(start, len)) {
return NULL;
}
request_region(start, len, name);
return (void*)1;
}
#else
#define compat_request_region(start, len, name) request_region(start, len, name)
#endif
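/*
 * Example: claiming and releasing an I/O port range portably. Illustrative
 * sketch; the base address, length and name are made up.
 *
 *    if (compat_request_region(0x5600, 8, "example") == NULL) {
 *       return -EBUSY; // Range already owned by someone else.
 *    }
 *    release_region(0x5600, 8);
 */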
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 7)
/* mmap io support starts from 2.3.7; fail the call for kernels prior to that. */
static inline void *
compat_request_mem_region(unsigned long start, unsigned long len, const char *name)
{
return NULL;
}
static inline void
compat_release_mem_region(unsigned long start, unsigned long len)
{
return;
}
#else
#define compat_request_mem_region(start, len, name) request_mem_region(start, len, name)
#define compat_release_mem_region(start, len) release_mem_region(start, len)
#endif
/* these two macro defs are needed by compat_pci_request_region */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 15)
# define IORESOURCE_IO 0x00000100
# define IORESOURCE_MEM 0x00000200
#endif
#endif /* __COMPAT_IOPORT_H__ */
vmci-only/shared/vmci_kernel_if.h 0000444 0000000 0000000 00000043034 12522066074 016072 0 ustar root root /*********************************************************
* Copyright (C) 2006-2011 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmci_kernel_if.h --
*
* This file defines helper functions for VMCI host _and_ guest
* kernel code. It must work for Windows, Mac OS, vmkernel, Linux and
* Solaris kernels, i.e. using defines where necessary.
*/
#ifndef _VMCI_KERNEL_IF_H_
#define _VMCI_KERNEL_IF_H_
#if !defined(linux) && !defined(_WIN32) && !defined(__APPLE__) && \
!defined(VMKERNEL) && !defined(SOLARIS)
# error "Platform not supported."
#endif
#if defined(_WIN32)
# include <ntddk.h>
#endif
#if defined(linux) && !defined(VMKERNEL)
# include "driver-config.h"
# include "compat_cred.h"
# include "compat_module.h"
# include "compat_semaphore.h"
# include "compat_spinlock.h"
# include "compat_version.h"
# include <linux/wait.h>
#endif // linux
#ifdef __APPLE__
# include <IOKit/IOLib.h>
# include <mach/task.h>
# include <mach/semaphore.h>
# include <sys/kauth.h>
#endif
#ifdef VMKERNEL
# include "splock.h"
# include "semaphore_ext.h"
# include "vmkapi.h"
# include "world_dist.h"
#endif
#ifdef SOLARIS
# include <sys/ddi.h>
# include <sys/kmem.h>
# include <sys/mutex.h>
# include <sys/poll.h>
# include <sys/semaphore.h>
# include <sys/sunddi.h>
# include <sys/types.h>
#endif
#include "vm_basic_types.h"
#include "vmci_defs.h"
#if defined(VMKERNEL)
# include "list.h"
#else
# include "dbllnklst.h"
#endif
/* Flags for specifying memory type. */
#define VMCI_MEMORY_NORMAL 0x0
#define VMCI_MEMORY_ATOMIC 0x1
#define VMCI_MEMORY_NONPAGED 0x2
/* Platform specific type definitions. */
#if defined(VMKERNEL)
# define VMCI_EXPORT_SYMBOL(_SYMBOL) VMK_MODULE_EXPORT_SYMBOL(_SYMBOL);
#elif defined(linux)
# define VMCI_EXPORT_SYMBOL(_symbol) EXPORT_SYMBOL(_symbol);
#elif defined(__APPLE__)
# define VMCI_EXPORT_SYMBOL(_symbol) __attribute__((visibility("default")))
#else
# define VMCI_EXPORT_SYMBOL(_symbol)
#endif
#if defined(VMKERNEL)
typedef SP_SpinLock VMCILock;
typedef SP_IRQL VMCILockFlags;
typedef Semaphore VMCIEvent;
typedef Semaphore VMCIMutex;
typedef World_ID VMCIHostVmID;
typedef uint32 VMCIHostUser;
typedef PPN *VMCIQPGuestMem;
#elif defined(linux)
typedef spinlock_t VMCILock;
typedef unsigned long VMCILockFlags;
typedef wait_queue_head_t VMCIEvent;
typedef struct semaphore VMCIMutex;
typedef PPN *VMCIPpnList; /* List of PPNs in produce/consume queue. */
typedef uid_t VMCIHostUser;
typedef VA64 VMCIQPGuestMem;
#elif defined(__APPLE__)
typedef IOLock *VMCILock;
typedef unsigned long VMCILockFlags;
typedef struct {
IOLock *lock;
DblLnkLst_Links waiters;
int buffered;
} VMCIEvent;
typedef IOLock *VMCIMutex;
typedef void *VMCIPpnList; /* Actually a pointer to the C++ object IOMemoryDescriptor. */
typedef uid_t VMCIHostUser;
typedef VA64 *VMCIQPGuestMem;
#elif defined(_WIN32)
typedef KSPIN_LOCK VMCILock;
typedef KIRQL VMCILockFlags;
typedef KEVENT VMCIEvent;
typedef FAST_MUTEX VMCIMutex;
typedef PMDL VMCIPpnList; /* MDL to map the produce/consume queue. */
typedef PSID VMCIHostUser;
typedef VA64 *VMCIQPGuestMem;
#elif defined(SOLARIS)
typedef kmutex_t VMCILock;
typedef unsigned long VMCILockFlags;
typedef ksema_t VMCIEvent;
typedef kmutex_t VMCIMutex;
typedef PPN *VMCIPpnList; /* List of PPNs in produce/consume queue. */
typedef uid_t VMCIHostUser;
typedef VA64 VMCIQPGuestMem;
#endif // VMKERNEL
/* Callback needed for correctly waiting on events. */
typedef int (*VMCIEventReleaseCB)(void *clientData);
/*
* Internal locking dependencies within VMCI:
* * CONTEXTFIRE < CONTEXT, CONTEXTLIST, EVENT, HASHTABLE
* * DOORBELL < HASHTABLE
* * QPHIBERNATE < EVENT
*/
#ifdef VMKERNEL
typedef Lock_Rank VMCILockRank;
typedef SemaRank VMCISemaRank;
#define VMCI_SEMA_RANK_QPHEADER (SEMA_RANK_FS - 1)
#define VMCI_LOCK_RANK_MAX (MIN(SP_RANK_WAIT, \
SP_RANK_HEAPLOCK_DYNAMIC) - 1)
#else
typedef unsigned long VMCILockRank;
typedef unsigned long VMCISemaRank;
#define VMCI_LOCK_RANK_MAX 0x0fff
#define VMCI_SEMA_RANK_QPHEADER 0x0fff
#endif // VMKERNEL
#define VMCI_LOCK_RANK_CONTEXT VMCI_LOCK_RANK_MAX
#define VMCI_LOCK_RANK_CONTEXTLIST VMCI_LOCK_RANK_MAX
#define VMCI_LOCK_RANK_DATAGRAMVMK VMCI_LOCK_RANK_MAX
#define VMCI_LOCK_RANK_EVENT VMCI_LOCK_RANK_MAX
#define VMCI_LOCK_RANK_HASHTABLE VMCI_LOCK_RANK_MAX
#define VMCI_LOCK_RANK_RESOURCE VMCI_LOCK_RANK_MAX
#define VMCI_LOCK_RANK_QPHEADER VMCI_LOCK_RANK_MAX
#define VMCI_LOCK_RANK_DOORBELL (VMCI_LOCK_RANK_HASHTABLE - 1)
#define VMCI_LOCK_RANK_CONTEXTFIRE (MIN(VMCI_LOCK_RANK_CONTEXT, \
MIN(VMCI_LOCK_RANK_CONTEXTLIST, \
MIN(VMCI_LOCK_RANK_EVENT, \
VMCI_LOCK_RANK_HASHTABLE))) - 1)
#define VMCI_LOCK_RANK_QPHIBERNATE (VMCI_LOCK_RANK_EVENT - 1)
#define VMCI_LOCK_RANK_PACKET_QP (VMCI_LOCK_RANK_QPHEADER - 1)
//#define VMCI_LOCK_RANK_PACKET_QP 0xffd /* For vVol */
#define VMCI_SEMA_RANK_QUEUEPAIRLIST (VMCI_SEMA_RANK_QPHEADER - 1)
#define VMCI_SEMA_RANK_GUESTMEM (VMCI_SEMA_RANK_QUEUEPAIRLIST - 1)
/*
* Host specific struct used for signalling.
*/
typedef struct VMCIHost {
#if defined(VMKERNEL)
World_ID vmmWorldID[2]; /*
* The first is the active world; the second
* is the shadow world during FSR.
*/
#elif defined(linux)
wait_queue_head_t waitQueue;
#elif defined(__APPLE__)
struct Socket *socket; /* vmci Socket object on Mac OS. */
#elif defined(_WIN32)
KEVENT *callEvent; /* Ptr to userlevel event used when signalling
* new pending guestcalls in kernel.
*/
#elif defined(SOLARIS)
struct pollhead pollhead; /* Per datagram handle pollhead structure to
* be treated as a black-box. None of its
* fields should be referenced.
*/
#endif
} VMCIHost;
/*
* Guest device port I/O.
*/
#if defined(linux)
typedef unsigned short int VMCIIoPort;
typedef int VMCIIoHandle;
#elif defined(_WIN32)
typedef PUCHAR VMCIIoPort;
typedef int VMCIIoHandle;
#elif defined(SOLARIS)
typedef uint8_t * VMCIIoPort;
typedef ddi_acc_handle_t VMCIIoHandle;
#elif defined(__APPLE__)
typedef unsigned short int VMCIIoPort;
typedef void *VMCIIoHandle;
#endif // __APPLE__
void VMCI_ReadPortBytes(VMCIIoHandle handle, VMCIIoPort port, uint8 *buffer,
size_t bufferLength);
int VMCI_InitLock(VMCILock *lock, char *name, VMCILockRank rank);
void VMCI_CleanupLock(VMCILock *lock);
void VMCI_GrabLock(VMCILock *lock, VMCILockFlags *flags);
void VMCI_ReleaseLock(VMCILock *lock, VMCILockFlags flags);
void VMCI_GrabLock_BH(VMCILock *lock, VMCILockFlags *flags);
void VMCI_ReleaseLock_BH(VMCILock *lock, VMCILockFlags flags);
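/*
 * Example: initializing a ranked lock and using the bottom-half-safe
 * grab/release pair. Illustrative sketch; the lock name, the rank choice
 * and the VMCI_SUCCESS check are assumptions for the example.
 *
 *    VMCILock lock;
 *    VMCILockFlags flags;
 *    if (VMCI_InitLock(&lock, "exampleLock",
 *                      VMCI_LOCK_RANK_MAX) == VMCI_SUCCESS) {
 *       VMCI_GrabLock_BH(&lock, &flags);
 *       VMCI_ReleaseLock_BH(&lock, flags);
 *       VMCI_CleanupLock(&lock);
 *    }
 */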
void VMCIHost_InitContext(VMCIHost *hostContext, uintptr_t eventHnd);
void VMCIHost_ReleaseContext(VMCIHost *hostContext);
void VMCIHost_SignalCall(VMCIHost *hostContext);
void VMCIHost_ClearCall(VMCIHost *hostContext);
Bool VMCIHost_WaitForCallLocked(VMCIHost *hostContext,
VMCILock *lock,
VMCILockFlags *flags,
Bool useBH);
#ifdef VMKERNEL
int VMCIHost_ContextToHostVmID(VMCIHost *hostContext, VMCIHostVmID *hostVmID);
int VMCIHost_ContextHasUuid(VMCIHost *hostContext, const char *uuid);
void VMCIHost_SetActiveHnd(VMCIHost *hostContext, uintptr_t eventHnd);
Bool VMCIHost_RemoveHnd(VMCIHost *hostContext, uintptr_t eventHnd);
Bool VMCIHost_IsActiveHnd(VMCIHost *hostContext, uintptr_t eventHnd);
void VMCIHost_SetInactiveHnd(VMCIHost *hostContext, uintptr_t eventHnd);
uint32 VMCIHost_NumHnds(VMCIHost *hostContext);
uintptr_t VMCIHost_GetActiveHnd(VMCIHost *hostContext);
void VMCIHost_SignalBitmap(VMCIHost *hostContext);
void VMCIHost_SignalBitmapAlways(VMCIHost *hostContext);
void VMCIHost_SignalCallAlways(VMCIHost *hostContext);
#endif
#if defined(_WIN32)
/*
* On Windows, Driver Verifier will panic() if we leak memory when we are
* unloaded. It dumps the leaked blocks for us along with callsites, which
* it handily tracks, but if we embed ExAllocate() inside a function, then
* the callsite is useless. So make this a macro on this platform only.
*/
# define VMCI_AllocKernelMem(_sz, _f) \
ExAllocatePoolWithTag((((_f) & VMCI_MEMORY_NONPAGED) ? \
NonPagedPool : PagedPool), \
(_sz), 'MMTC')
#else // _WIN32
void *VMCI_AllocKernelMem(size_t size, int flags);
#endif // _WIN32
void VMCI_FreeKernelMem(void *ptr, size_t size);
int VMCI_CopyToUser(VA64 dst, const void *src, size_t len);
Bool VMCIWellKnownID_AllowMap(VMCIId wellKnownID,
VMCIPrivilegeFlags privFlags);
int VMCIHost_CompareUser(VMCIHostUser *user1, VMCIHostUser *user2);
void VMCI_CreateEvent(VMCIEvent *event);
void VMCI_DestroyEvent(VMCIEvent *event);
void VMCI_SignalEvent(VMCIEvent *event);
void VMCI_WaitOnEvent(VMCIEvent *event, VMCIEventReleaseCB releaseCB,
void *clientData);
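/*
 * Example: dropping a mutex via the release callback so the event can be
 * waited on without holding it. Illustrative sketch only; whether the
 * callback's return value is inspected is not specified here, so returning
 * 0 is an assumption.
 *
 *    static int
 *    ExampleReleaseCB(void *clientData)
 *    {
 *       VMCIMutex_Release((VMCIMutex *)clientData);
 *       return 0;
 *    }
 *
 *    VMCIMutex_Acquire(&mutex);
 *    VMCI_WaitOnEvent(&event, ExampleReleaseCB, &mutex);
 *    VMCIMutex_Acquire(&mutex);
 */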
#if (defined(__APPLE__) || defined(__linux__) || defined(_WIN32)) && !defined(VMKERNEL)
Bool VMCI_WaitOnEventInterruptible(VMCIEvent *event,
VMCIEventReleaseCB releaseCB,
void *clientData);
#endif
#if !defined(VMKERNEL) && (defined(__linux__) || defined(_WIN32) || \
defined(__APPLE__) || defined(SOLARIS))
int VMCI_CopyFromUser(void *dst, VA64 src, size_t len);
#endif
typedef void (VMCIWorkFn)(void *data);
Bool VMCI_CanScheduleDelayedWork(void);
int VMCI_ScheduleDelayedWork(VMCIWorkFn *workFn, void *data);
int VMCIMutex_Init(VMCIMutex *mutex, char *name, VMCILockRank rank);
void VMCIMutex_Destroy(VMCIMutex *mutex);
void VMCIMutex_Acquire(VMCIMutex *mutex);
void VMCIMutex_Release(VMCIMutex *mutex);
#if defined(SOLARIS) || defined(_WIN32) || defined(__APPLE__)
int VMCIKernelIf_Init(void);
void VMCIKernelIf_Exit(void);
#if defined(_WIN32)
void VMCIKernelIf_DrainDelayedWork(void);
#endif // _WIN32
#endif // SOLARIS || _WIN32 || __APPLE__
#if !defined(VMKERNEL) && (defined(__linux__) || defined(_WIN32) || \
defined(SOLARIS) || defined(__APPLE__))
void *VMCI_AllocQueue(uint64 size, uint32 flags);
void VMCI_FreeQueue(void *q, uint64 size);
typedef struct PPNSet {
uint64 numProducePages;
uint64 numConsumePages;
VMCIPpnList producePPNs;
VMCIPpnList consumePPNs;
Bool initialized;
} PPNSet;
int VMCI_AllocPPNSet(void *produceQ, uint64 numProducePages, void *consumeQ,
uint64 numConsumePages, PPNSet *ppnSet);
void VMCI_FreePPNSet(PPNSet *ppnSet);
int VMCI_PopulatePPNList(uint8 *callBuf, const PPNSet *ppnSet);
#endif
struct VMCIQueue;
struct PageStoreAttachInfo;
struct VMCIQueue *VMCIHost_AllocQueue(uint64 queueSize);
void VMCIHost_FreeQueue(struct VMCIQueue *queue, uint64 queueSize);
#if defined(VMKERNEL)
typedef World_Handle *VMCIGuestMemID;
#define INVALID_VMCI_GUEST_MEM_ID NULL
#else
typedef uint32 VMCIGuestMemID;
#define INVALID_VMCI_GUEST_MEM_ID 0
#endif
#if defined(VMKERNEL) || defined(__linux__) || defined(_WIN32) || \
defined(__APPLE__)
struct QueuePairPageStore;
int VMCIHost_RegisterUserMemory(unsigned int index,
struct QueuePairPageStore *pageStore,
struct VMCIQueue *produceQ,
struct VMCIQueue *consumeQ);
void VMCIHost_UnregisterUserMemory(unsigned int index,
struct VMCIQueue *produceQ,
struct VMCIQueue *consumeQ);
int VMCIHost_MapQueues(unsigned int index,
struct VMCIQueue *produceQ,
struct VMCIQueue *consumeQ,
uint32 flags);
int VMCIHost_UnmapQueues(unsigned int index,
VMCIGuestMemID gid,
struct VMCIQueue *produceQ,
struct VMCIQueue *consumeQ);
void VMCI_InitQueueMutex(struct VMCIQueue *produceQ,
struct VMCIQueue *consumeQ);
void VMCI_CleanupQueueMutex(struct VMCIQueue *produceQ,
struct VMCIQueue *consumeQ);
int VMCI_AcquireQueueMutex(struct VMCIQueue *queue, Bool canBlock);
void VMCI_ReleaseQueueMutex(struct VMCIQueue *queue);
#else // Below are the guest OSes without host side support.
# define VMCI_InitQueueMutex(_pq, _cq)
# define VMCI_CleanupQueueMutex(_pq, _cq) do { } while (0)
# define VMCI_AcquireQueueMutex(_q, _cb) VMCI_SUCCESS
# define VMCI_ReleaseQueueMutex(_q) do { } while (0)
# define VMCIHost_RegisterUserMemory(_idx, _ps, _pq, _cq) VMCI_ERROR_UNAVAILABLE
# define VMCIHost_UnregisterUserMemory(_idx, _pq, _cq) do { } while (0)
# define VMCIHost_MapQueues(_idx, _pq, _cq, _f) VMCI_SUCCESS
# define VMCIHost_UnmapQueues(_idx, _gid, _pq, _cq) VMCI_SUCCESS
#endif
#if defined(VMKERNEL)
void VMCIHost_MarkQueuesAvailable(unsigned int index,
struct VMCIQueue *produceQ,
struct VMCIQueue *consumeQ);
void VMCIHost_MarkQueuesUnavailable(unsigned int index,
struct VMCIQueue *produceQ,
struct VMCIQueue *consumeQ);
#else
# define VMCIHost_MarkQueuesAvailable(_idx, _q, _p) do { } while (0)
# define VMCIHost_MarkQueuesUnavailable(_idx, _q, _p) do { } while (0)
#endif
#if defined(VMKERNEL) || defined(__linux__)
void VMCI_LockQueueHeader(struct VMCIQueue *queue);
void VMCI_UnlockQueueHeader(struct VMCIQueue *queue);
#else
# define VMCI_LockQueueHeader(_q) ASSERT_NOT_IMPLEMENTED(FALSE)
# define VMCI_UnlockQueueHeader(_q) ASSERT_NOT_IMPLEMENTED(FALSE)
#endif
#if (!defined(VMKERNEL) && defined(__linux__)) || defined(_WIN32) || \
defined(__APPLE__) || defined(SOLARIS)
int VMCIHost_GetUserMemory(unsigned int index,
VA64 produceUVA, VA64 consumeUVA,
struct VMCIQueue *produceQ,
struct VMCIQueue *consumeQ);
void VMCIHost_ReleaseUserMemory(unsigned int index,
struct VMCIQueue *produceQ,
struct VMCIQueue *consumeQ);
#else
# define VMCIHost_GetUserMemory(_idx, _puva, _cuva, _pq, _cq) VMCI_ERROR_UNAVAILABLE
# define VMCIHost_ReleaseUserMemory(_idx, _pq, _cq) ASSERT_NOT_IMPLEMENTED(FALSE)
#endif
#if defined(_WIN32)
Bool VMCI_EnqueueToDevNull(struct VMCIQueue *queue);
int VMCI_ConvertToLocalQueue(struct VMCIQueue *queueInfo,
struct VMCIQueue *otherQueueInfo,
uint64 size, Bool keepContent,
void **oldQueue);
void VMCI_RevertToNonLocalQueue(struct VMCIQueue *queueInfo,
void *nonLocalQueue, uint64 size);
void VMCI_FreeQueueBuffer(void *queue, uint64 size);
Bool VMCI_CanCreate(void);
#else // _WIN32
# define VMCI_EnqueueToDevNull(_q) FALSE
# define VMCI_ConvertToLocalQueue(_pq, _cq, _s, _oq, _kc) VMCI_ERROR_UNAVAILABLE
# define VMCI_RevertToNonLocalQueue(_q, _nlq, _s)
# define VMCI_FreeQueueBuffer(_q, _s)
# define VMCI_CanCreate() TRUE
#endif // !_WIN32
Bool VMCI_GuestPersonalityActive(void);
Bool VMCI_HostPersonalityActive(void);
#if defined(VMKERNEL)
typedef List_Links VMCIListItem;
typedef List_Links VMCIList;
# define VMCIList_Init(_l) List_Init(_l)
# define VMCIList_InitEntry(_e) List_InitElement(_e)
# define VMCIList_Empty(_l) List_IsEmpty(_l)
# define VMCIList_Insert(_e, _l) List_Insert(_e, LIST_ATREAR(_l))
# define VMCIList_Remove(_e) List_Remove(_e)
# define VMCIList_Scan(_cur, _l) LIST_FORALL(_l, _cur)
# define VMCIList_ScanSafe(_cur, _next, _l) LIST_FORALL_SAFE(_l, _cur, _next)
# define VMCIList_Entry(_elem, _type, _field) List_Entry(_elem, _type, _field)
# define VMCIList_First(_l) (VMCIList_Empty(_l)?NULL:List_First(_l))
#else
typedef DblLnkLst_Links VMCIListItem;
typedef DblLnkLst_Links VMCIList;
# define VMCIList_Init(_l) DblLnkLst_Init(_l)
# define VMCIList_InitEntry(_e) DblLnkLst_Init(_e)
# define VMCIList_Empty(_l) (!DblLnkLst_IsLinked(_l))
# define VMCIList_Insert(_e, _l) DblLnkLst_LinkLast(_l, _e)
# define VMCIList_Remove(_e) DblLnkLst_Unlink1(_e)
# define VMCIList_Scan(_cur, _l) DblLnkLst_ForEach(_cur, _l)
# define VMCIList_ScanSafe(_cur, _next, _l) DblLnkLst_ForEachSafe(_cur, _next, _l)
# define VMCIList_Entry(_elem, _type, _field) DblLnkLst_Container(_elem, _type, _field)
# define VMCIList_First(_l) (VMCIList_Empty(_l)?NULL:(_l)->next)
#endif
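/*
 * Example: declaring a node type and scanning a list with the wrappers
 * above, which expand to the right primitives on both VMKernel and the
 * other platforms. Illustrative sketch only.
 *
 *    typedef struct ExampleNode {
 *       VMCIListItem link;
 *       int value;
 *    } ExampleNode;
 *
 *    VMCIList list;
 *    VMCIListItem *iter;
 *    VMCIList_Init(&list);
 *    VMCIList_Scan(iter, &list) {
 *       ExampleNode *node = VMCIList_Entry(iter, ExampleNode, link);
 *       (void)node;
 *    }
 */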
#endif // _VMCI_KERNEL_IF_H_
vmci-only/shared/vmci_handle_array.h 0000444 0000000 0000000 00000021667 12522066074 016575 0 ustar root root /*********************************************************
* Copyright (C) 2006 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vmci_handle_array.h --
*
* Simple dynamic array.
*/
#ifndef _VMCI_HANDLE_ARRAY_H_
#define _VMCI_HANDLE_ARRAY_H_
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMKERNEL
#include "includeCheck.h"
#include "vmci_kernel_if.h"
#include "vmware.h"
#include "vmci_defs.h"
#include "vm_assert.h"
#ifdef VMKERNEL
#include "vm_libc.h"
#endif // VMKERNEL
#ifdef SOLARIS
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/systm.h>
#include <sys/types.h>
#endif
#define VMCI_HANDLE_ARRAY_DEFAULT_SIZE 4
typedef struct VMCIHandleArray {
uint32 capacity;
uint32 size;
VMCIHandle entries[1];
} VMCIHandleArray;
/*
*-----------------------------------------------------------------------------------
*
* VMCIHandleArray_Create --
*
* Results:
* Array if successful, NULL if not.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------------
*/
static INLINE VMCIHandleArray *
VMCIHandleArray_Create(uint32 capacity)
{
VMCIHandleArray *array;
if (capacity == 0) {
capacity = VMCI_HANDLE_ARRAY_DEFAULT_SIZE;
}
array = (VMCIHandleArray *)VMCI_AllocKernelMem(sizeof array->capacity +
sizeof array->size +
capacity * sizeof(VMCIHandle),
VMCI_MEMORY_NONPAGED |
VMCI_MEMORY_ATOMIC);
if (array == NULL) {
return NULL;
}
array->capacity = capacity;
array->size = 0;
return array;
}
/*
*-----------------------------------------------------------------------------------
*
* VMCIHandleArray_Destroy --
*
* Results:
* None.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------------
*/
static INLINE void
VMCIHandleArray_Destroy(VMCIHandleArray *array)
{
VMCI_FreeKernelMem(array,
sizeof array->capacity + sizeof array->size +
array->capacity * sizeof(VMCIHandle));
}
/*
*-----------------------------------------------------------------------------------
*
* VMCIHandleArray_AppendEntry --
*
* Results:
* None.
*
* Side effects:
* Array may be reallocated.
*
*-----------------------------------------------------------------------------------
*/
static INLINE void
VMCIHandleArray_AppendEntry(VMCIHandleArray **arrayPtr,
VMCIHandle handle)
{
VMCIHandleArray *array;
ASSERT(arrayPtr && *arrayPtr);
array = *arrayPtr;
if (UNLIKELY(array->size >= array->capacity)) {
/* Reallocate, doubling the capacity. */
uint32 arraySize = sizeof array->capacity + sizeof array->size +
array->capacity * sizeof(VMCIHandle);
VMCIHandleArray *newArray = (VMCIHandleArray *)
VMCI_AllocKernelMem(arraySize + array->capacity * sizeof(VMCIHandle),
VMCI_MEMORY_NONPAGED | VMCI_MEMORY_ATOMIC);
if (newArray == NULL) {
/* Allocation failed: the new entry is silently dropped. */
return;
}
memcpy(newArray, array, arraySize);
newArray->capacity *= 2;
VMCI_FreeKernelMem(array, arraySize);
*arrayPtr = newArray;
array = newArray;
}
array->entries[array->size] = handle;
array->size++;
}
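/*
 * Example: the usual create/append/destroy cycle. Note that AppendEntry may
 * reallocate the array, which is why the array pointer itself is passed by
 * reference. Illustrative sketch; "handle" comes from elsewhere.
 *
 *    VMCIHandleArray *arr = VMCIHandleArray_Create(0);
 *    if (arr != NULL) {
 *       VMCIHandleArray_AppendEntry(&arr, handle);
 *       VMCIHandleArray_Destroy(arr);
 *    }
 */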
/*
*-----------------------------------------------------------------------------------
*
* VMCIHandleArray_RemoveEntry --
*
* Results:
* Handle that was removed, VMCI_INVALID_HANDLE if entry not found.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------------
*/
static INLINE VMCIHandle
VMCIHandleArray_RemoveEntry(VMCIHandleArray *array,
VMCIHandle entryHandle)
{
uint32 i;
VMCIHandle handle = VMCI_INVALID_HANDLE;
ASSERT(array);
for (i = 0; i < array->size; i++) {
if (VMCI_HANDLE_EQUAL(array->entries[i], entryHandle)) {
handle = array->entries[i];
array->entries[i] = array->entries[array->size-1];
array->entries[array->size-1] = VMCI_INVALID_HANDLE;
array->size--;
break;
}
}
return handle;
}
/*
*-----------------------------------------------------------------------------------
*
* VMCIHandleArray_RemoveTail --
*
* Results:
* Handle that was removed, VMCI_INVALID_HANDLE if array was empty.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------------
*/
static INLINE VMCIHandle
VMCIHandleArray_RemoveTail(VMCIHandleArray *array)
{
VMCIHandle handle;
if (array->size == 0) {
return VMCI_INVALID_HANDLE;
}
handle = array->entries[array->size-1];
array->entries[array->size-1] = VMCI_INVALID_HANDLE;
array->size--;
return handle;
}
/*
*-----------------------------------------------------------------------------------
*
* VMCIHandleArray_GetEntry --
*
* Results:
* Handle at given index, VMCI_INVALID_HANDLE if invalid index.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------------
*/
static INLINE VMCIHandle
VMCIHandleArray_GetEntry(const VMCIHandleArray *array,
uint32 index)
{
ASSERT(array);
if (UNLIKELY(index >= array->size)) {
return VMCI_INVALID_HANDLE;
}
return array->entries[index];
}
/*
*-----------------------------------------------------------------------------------
*
* VMCIHandleArray_GetSize --
*
* Results:
* Number of entries in array.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------------
*/
static INLINE uint32
VMCIHandleArray_GetSize(const VMCIHandleArray *array)
{
ASSERT(array);
return array->size;
}
/*
*-----------------------------------------------------------------------------------
*
* VMCIHandleArray_HasEntry --
*
* Results:
* TRUE if entry exists in array, FALSE if not.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------------
*/
static INLINE Bool
VMCIHandleArray_HasEntry(const VMCIHandleArray *array,
VMCIHandle entryHandle)
{
uint32 i;
ASSERT(array);
for (i = 0; i < array->size; i++) {
if (VMCI_HANDLE_EQUAL(array->entries[i], entryHandle)) {
return TRUE;
}
}
return FALSE;
}
/*
*-----------------------------------------------------------------------------------
*
* VMCIHandleArray_GetCopy --
*
* Results:
* Returns a pointer to a copy of the array on success, or NULL if memory
* allocation fails.
*
* Side effects:
* Allocates nonpaged memory.
*
*-----------------------------------------------------------------------------------
*/
static INLINE VMCIHandleArray *
VMCIHandleArray_GetCopy(const VMCIHandleArray *array)
{
VMCIHandleArray *arrayCopy;
ASSERT(array);
arrayCopy = (VMCIHandleArray *)VMCI_AllocKernelMem(sizeof array->capacity +
sizeof array->size +
array->size * sizeof(VMCIHandle),
VMCI_MEMORY_NONPAGED |
VMCI_MEMORY_ATOMIC);
if (arrayCopy != NULL) {
memcpy(&arrayCopy->size, &array->size,
sizeof array->size + array->size * sizeof(VMCIHandle));
arrayCopy->capacity = array->size;
}
return arrayCopy;
}
/*
*-----------------------------------------------------------------------------------
*
* VMCIHandleArray_GetHandles --
*
* Results:
* NULL if the array is empty. Otherwise, a pointer to the array
* of VMCI handles in the handle array.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------------
*/
static INLINE VMCIHandle *
VMCIHandleArray_GetHandles(VMCIHandleArray *array) // IN
{
ASSERT(array);
if (array->size) {
return array->entries;
} else {
return NULL;
}
}
#endif // _VMCI_HANDLE_ARRAY_H_
vmci-only/shared/compat_pgtable.h 0000444 0000000 0000000 00000011044 12522066073 016072 0 ustar root root /*********************************************************
* Copyright (C) 2002 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_PGTABLE_H__
# define __COMPAT_PGTABLE_H__
#if defined(CONFIG_PARAVIRT) && defined(CONFIG_HIGHPTE)
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 21)
# include <asm/paravirt.h>
# undef paravirt_map_pt_hook
# define paravirt_map_pt_hook(type, va, pfn) do {} while (0)
# endif
#endif
#include <asm/pgtable.h>
/* pte_page() API modified in 2.3.23 to return a struct page * --hpreg */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 23)
# define compat_pte_page pte_page
#else
# include "compat_page.h"
# define compat_pte_page(_pte) virt_to_page(pte_page(_pte))
#endif
/* Appeared in 2.5.5 --hpreg */
#ifndef pte_offset_map
/* Appeared in SuSE 8.0's 2.4.18 --hpreg */
# ifdef pte_offset_atomic
# define pte_offset_map pte_offset_atomic
# define pte_unmap pte_kunmap
# else
# define pte_offset_map pte_offset
# define pte_unmap(_pte)
# endif
#endif
/* Appeared in 2.5.74-mmX --petr */
#ifndef pmd_offset_map
# define pmd_offset_map(pgd, address) pmd_offset(pgd, address)
# define pmd_unmap(pmd)
#endif
/*
* Appeared in 2.6.10-rc2-mm1. Older kernels did L4 page tables as
* part of pgd_offset, or they did not have L4 page tables at all.
* In 2.6.11 pml4 -> pgd -> pmd -> pte hierarchy was replaced by
* pgd -> pud -> pmd -> pte hierarchy.
*/
#ifdef PUD_MASK
# define compat_pgd_offset(mm, address) pgd_offset(mm, address)
# define compat_pgd_present(pgd) pgd_present(pgd)
# define compat_pud_offset(pgd, address) pud_offset(pgd, address)
# define compat_pud_present(pud) pud_present(pud)
typedef pgd_t compat_pgd_t;
typedef pud_t compat_pud_t;
#elif defined(pml4_offset)
# define compat_pgd_offset(mm, address) pml4_offset(mm, address)
# define compat_pgd_present(pml4) pml4_present(pml4)
# define compat_pud_offset(pml4, address) pml4_pgd_offset(pml4, address)
# define compat_pud_present(pgd) pgd_present(pgd)
typedef pml4_t compat_pgd_t;
typedef pgd_t compat_pud_t;
#else
# define compat_pgd_offset(mm, address) pgd_offset(mm, address)
# define compat_pgd_present(pgd) pgd_present(pgd)
# define compat_pud_offset(pgd, address) (pgd)
# define compat_pud_present(pud) (1)
typedef pgd_t compat_pgd_t;
typedef pgd_t compat_pud_t;
#endif
#define compat_pgd_offset_k(mm, address) pgd_offset_k(address)
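/*
 * Example: walking down to a PMD with the compat layer, which collapses to
 * the right shape on kernels with and without a PUD level. Illustrative
 * sketch only; "mm" and "addr" come from elsewhere.
 *
 *    compat_pgd_t *pgd = compat_pgd_offset(mm, addr);
 *    if (compat_pgd_present(*pgd)) {
 *       compat_pud_t *pud = compat_pud_offset(pgd, addr);
 *       if (compat_pud_present(*pud)) {
 *          pmd_t *pmd = pmd_offset_map(pud, addr);
 *          pmd_unmap(pmd);
 *       }
 *    }
 */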
/* Introduced somewhere in 2.6.0, + backported to some 2.4 RedHat kernels */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && !defined(pte_pfn)
# define pte_pfn(pte) page_to_pfn(compat_pte_page(pte))
#endif
/* A page_table_lock field is added to struct mm_struct in 2.3.10 --hpreg */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 10)
# define compat_get_page_table_lock(_mm) (&(_mm)->page_table_lock)
#else
# define compat_get_page_table_lock(_mm) NULL
#endif
/*
* Define VM_PAGE_KERNEL_EXEC for vmapping executable pages.
*
* On ia32 PAGE_KERNEL_EXEC was introduced in 2.6.8.1. Unfortunately it accesses
* __PAGE_KERNEL_EXEC which is not exported for modules. So we use
* __PAGE_KERNEL and just cut _PAGE_NX bit from it.
*
* For ia32 kernels before 2.6.8.1 we use PAGE_KERNEL directly, these kernels
* do not have noexec support.
*
* On x86-64 the situation is a bit better: noexec was always supported, but
* before 2.6.8.1 the flag was named PAGE_KERNEL_EXECUTABLE, and it was renamed
* to PAGE_KERNEL_EXEC when ia32 got noexec too (see above).
*/
#ifdef CONFIG_X86
#ifdef _PAGE_NX
#define VM_PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL & ~_PAGE_NX)
#else
#define VM_PAGE_KERNEL_EXEC PAGE_KERNEL
#endif
#else
#ifdef PAGE_KERNEL_EXECUTABLE
#define VM_PAGE_KERNEL_EXEC PAGE_KERNEL_EXECUTABLE
#else
#define VM_PAGE_KERNEL_EXEC PAGE_KERNEL_EXEC
#endif
#endif
#endif /* __COMPAT_PGTABLE_H__ */
vmci-only/shared/compat_version.h 0000444 0000000 0000000 00000007363 12522066073 016152 0 ustar root root /*********************************************************
* Copyright (C) 1998 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
#ifndef __COMPAT_VERSION_H__
# define __COMPAT_VERSION_H__
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMKDRIVERS
#include "includeCheck.h"
#ifndef __linux__
# error "linux-version.h"
#endif
#include <linux/version.h>
#ifndef KERNEL_VERSION
# error KERNEL_VERSION macro is not defined, environment is busted
#endif
/*
* Distinguish relevant classes of Linux kernels.
*
* The convention is that version X defines all
* the KERNEL_Y symbols where Y <= X.
*
* XXX Do not add more definitions here. This way of doing things does not
* scale, and we are going to phase it out soon --hpreg
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 0)
# define KERNEL_2_1
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 2, 0)
# define KERNEL_2_2
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 1)
# define KERNEL_2_3_1
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 15)
/* new networking */
# define KERNEL_2_3_15
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 25)
/* new procfs */
# define KERNEL_2_3_25
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 29)
/* even newer procfs */
# define KERNEL_2_3_29
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 43)
/* softnet changes */
# define KERNEL_2_3_43
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 47)
/* more softnet changes */
# define KERNEL_2_3_47
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 99)
/* name in netdevice struct is array and not pointer */
# define KERNEL_2_3_99
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0)
/* New 'owner' member at the beginning of struct file_operations */
# define KERNEL_2_4_0
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 8)
/* New netif_rx_ni() --hpreg */
# define KERNEL_2_4_8
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 2)
/* New kdev_t, major()/minor() API --hpreg */
# define KERNEL_2_5_2
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 5)
/* New sk_alloc(), pte_offset_map()/pte_unmap() --hpreg */
# define KERNEL_2_5_5
#endif
/* Linux kernel 3.0 can be called 2.6.40, and 3.1 can be 2.6.41...
* Use COMPAT_LINUX_VERSION_CHECK_LT iff you need to compare the running kernel
* to versions 3.0 and above.
*
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
/* Straightforward comparison if the kernel version is 3.0.0 and beyond. */
# define COMPAT_LINUX_VERSION_CHECK_LT(a, b, c) LINUX_VERSION_CODE < KERNEL_VERSION (a, b, c)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 40)
/* Use the b component of the check to compute the corresponding
 * 2.6.x kernel version to compare against. */
# define COMPAT_LINUX_VERSION_CHECK_LT(a, b, c) LINUX_VERSION_CODE < KERNEL_VERSION (2, 6, (b + 40))
#else
/* Any such kernel is in any case older than any 3.x version. */
# define COMPAT_LINUX_VERSION_CHECK_LT(a, b, c) 1
#endif
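/*
 * Example: guarding code that must only build for kernels older than 3.5
 * (or their 2.6.4x aliases). Illustrative sketch.
 *
 *    #if COMPAT_LINUX_VERSION_CHECK_LT(3, 5, 0)
 *    // Pre-3.5 fallback goes here.
 *    #endif
 */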
#endif /* __COMPAT_VERSION_H__ */
vmci-only/shared/vm_basic_asm.h 0000444 0000000 0000000 00000102214 12522066074 015535 0 ustar root root /*********************************************************
* Copyright (C) 2003-2011 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*********************************************************/
/*
* vm_basic_asm.h
*
* Basic asm macros
*
* ARM not implemented.
*/
#ifndef _VM_BASIC_ASM_H_
#define _VM_BASIC_ASM_H_
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMK_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#define INCLUDE_ALLOW_DISTRIBUTE
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_VMIROM
#include "includeCheck.h"
#include "vm_basic_types.h"
#if defined VM_X86_64
#include "vm_basic_asm_x86_64.h"
#elif defined __i386__
#include "vm_basic_asm_x86.h"
#endif
/*
* x86-64 windows doesn't support inline asm so we have to use these
* intrinsic functions defined in the compiler. Not all of these are well
* documented. There is an array in the compiler dll (c1.dll) which holds
* the names of all the intrinsics minus the leading underscore. Searching
* around in the ntddk.h file can also be helpful.
*
* The declarations for the intrinsic functions were taken from the DDK.
* Our declarations must match the ddk's otherwise the 64-bit c++ compiler
* will complain about second linkage of the intrinsic functions.
* We define the intrinsic using the basic types corresponding to the
* Windows typedefs. This avoids having to include windows header files
* to get to the windows types.
*/
#ifdef _MSC_VER
#ifdef __cplusplus
extern "C" {
#endif
/*
* It seems x86 & x86-64 Windows still implement these intrinsic
* functions. The documentation for x86-64 suggests the
* __inbyte/__outbyte intrinsics, even though _in/_out work fine and
* __inbyte/__outbyte aren't supported on x86.
*/
int _inp(unsigned short);
unsigned short _inpw(unsigned short);
unsigned long _inpd(unsigned short);
int _outp(unsigned short, int);
unsigned short _outpw(unsigned short, unsigned short);
unsigned long _outpd(unsigned short, unsigned long);
#pragma intrinsic(_inp, _inpw, _inpd, _outp, _outpw, _outpd)
/*
* Prevents compiler from re-ordering reads, writes and reads&writes.
* These functions do not add any instructions thus only affect
* the compiler ordering.
*
* See:
* `Lockless Programming Considerations for Xbox 360 and Microsoft Windows'
* http://msdn.microsoft.com/en-us/library/bb310595(VS.85).aspx
*/
void _ReadBarrier(void);
void _WriteBarrier(void);
void _ReadWriteBarrier(void);
#pragma intrinsic(_ReadBarrier, _WriteBarrier, _ReadWriteBarrier)
void _mm_mfence(void);
void _mm_lfence(void);
#pragma intrinsic(_mm_mfence, _mm_lfence)
unsigned int __getcallerseflags(void);
#pragma intrinsic(__getcallerseflags)
#ifdef VM_X86_64
/*
* intrinsic functions only supported by x86-64 windows as of 2k3sp1
*/
unsigned __int64 __rdtsc(void);
void __stosw(unsigned short *, unsigned short, size_t);
void __stosd(unsigned long *, unsigned long, size_t);
void _mm_pause(void);
#pragma intrinsic(__rdtsc, __stosw, __stosd, _mm_pause)
unsigned char _BitScanForward64(unsigned long *, unsigned __int64);
unsigned char _BitScanReverse64(unsigned long *, unsigned __int64);
#pragma intrinsic(_BitScanForward64, _BitScanReverse64)
#endif /* VM_X86_64 */
unsigned char _BitScanForward(unsigned long *, unsigned long);
unsigned char _BitScanReverse(unsigned long *, unsigned long);
#pragma intrinsic(_BitScanForward, _BitScanReverse)
unsigned char _bittest(const long *, long);
unsigned char _bittestandset(long *, long);
unsigned char _bittestandreset(long *, long);
unsigned char _bittestandcomplement(long *, long);
#pragma intrinsic(_bittest, _bittestandset, _bittestandreset, _bittestandcomplement)
#ifdef VM_X86_64
unsigned char _bittestandset64(__int64 *, __int64);
unsigned char _bittestandreset64(__int64 *, __int64);
#pragma intrinsic(_bittestandset64, _bittestandreset64)
#endif /* VM_X86_64 */
#ifdef __cplusplus
}
#endif
#endif /* _MSC_VER */
#ifdef __GNUC__ // {
#if defined(__i386__) || defined(__x86_64__) // Only on x86*
/*
* Checked against the Intel manual and GCC --hpreg
*
* volatile because reading from a port can modify the state of the underlying
* hardware.
*
* Note: The undocumented %z construct doesn't work (internal compiler error)
* with gcc-2.95.1
*/
#define __GCC_IN(s, type, name) \
static INLINE type \
name(uint16 port) \
{ \
type val; \
\
__asm__ __volatile__( \
"in" #s " %w1, %0" \
: "=a" (val) \
: "Nd" (port) \
); \
\
return val; \
}
__GCC_IN(b, uint8, INB)
__GCC_IN(w, uint16, INW)
__GCC_IN(l, uint32, IN32)
/*
* Checked against the Intel manual and GCC --hpreg
*
* Note: The undocumented %z construct doesn't work (internal compiler error)
* with gcc-2.95.1
*/
#define __GCC_OUT(s, s2, port, val) do { \
__asm__( \
"out" #s " %" #s2 "1, %w0" \
: \
: "Nd" (port), "a" (val) \
); \
} while (0)
#define OUTB(port, val) __GCC_OUT(b, b, port, val)
#define OUTW(port, val) __GCC_OUT(w, w, port, val)
#define OUT32(port, val) __GCC_OUT(l, , port, val)
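/*
 * Example: a byte-wide port read/write round trip using the wrappers above.
 * Illustrative sketch; port 0x3F8 is just a placeholder.
 *
 *    uint8 v = INB(0x3F8);
 *    OUTB(0x3F8, v);
 */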
#define GET_CURRENT_EIP(_eip) \
__asm__ __volatile__("call 0f\n0:\tpopl %0" : "=r" (_eip));
static INLINE unsigned int
GetCallerEFlags(void)
{
unsigned long flags;
asm volatile("pushf; pop %0" : "=r"(flags));
return flags;
}
#endif // x86*
#elif defined(_MSC_VER) // } {
static INLINE uint8
INB(uint16 port)
{
return (uint8)_inp(port);
}
static INLINE void
OUTB(uint16 port, uint8 value)
{
_outp(port, value);
}
static INLINE uint16
INW(uint16 port)
{
return _inpw(port);
}
static INLINE void
OUTW(uint16 port, uint16 value)
{
_outpw(port, value);
}
static INLINE uint32
IN32(uint16 port)
{
return _inpd(port);
}
static INLINE void
OUT32(uint16 port, uint32 value)
{
_outpd(port, value);
}
#ifndef VM_X86_64
#ifdef NEAR
#undef NEAR
#endif
#define GET_CURRENT_EIP(_eip) do { \
__asm call NEAR PTR $+5 \
__asm pop eax \
__asm mov _eip, eax \
} while (0)
#endif // VM_X86_64
static INLINE unsigned int
GetCallerEFlags(void)
{
return __getcallerseflags();
}
#else // } {
#error
#endif // }
/* Sequence recommended by Intel for the Pentium 4. */
#define INTEL_MICROCODE_VERSION() ( \
__SET_MSR(MSR_BIOS_SIGN_ID, 0), \
__GET_EAX_FROM_CPUID(1), \
__GET_MSR(MSR_BIOS_SIGN_ID))
/*
* Locate most and least significant bit set functions. Use our own name
* space to avoid namespace collisions. The new names follow a pattern,
*