commit 9041b9bff49caa7ce21ea0f590782f64adf330b3
Author: Daniel Larimer
Date:   Fri Sep 7 22:50:37 2012 -0400

    Initial checkin of FC code.

diff --git a/CMakeCache.txt b/CMakeCache.txt
new file mode 100644
index 0000000..71a0fef
--- /dev/null
+++ b/CMakeCache.txt
@@ -0,0 +1,504 @@
+# This is the CMakeCache file.
+# For build in directory: /Users/dlarimer/Downloads/fc
+# It was generated by CMake: /opt/local/bin/cmake
+# You can edit this file to change values found and used by cmake.
+# If you do not want to change any of the values, simply exit the editor.
+# If you do want to change a value, simply edit, save, and exit the editor.
+# The syntax for the file is as follows:
+# KEY:TYPE=VALUE
+# KEY is the name of a variable in the cache.
+# TYPE is a hint to GUI's for the type of VALUE, DO NOT EDIT TYPE!.
+# VALUE is the current value for the KEY.
+
+########################
+# EXTERNAL cache entries
+########################
+
+//The Boost CHRONO library
+Boost_CHRONO_LIBRARY:FILEPATH=/usr/local/lib/libboost_chrono.a
+
+//Boost chrono library (debug)
+Boost_CHRONO_LIBRARY_DEBUG:FILEPATH=/usr/local/lib/libboost_chrono.a
+
+//Boost chrono library (release)
+Boost_CHRONO_LIBRARY_RELEASE:FILEPATH=/usr/local/lib/libboost_chrono.a
+
+//The Boost CONTEXT library
+Boost_CONTEXT_LIBRARY:FILEPATH=/usr/local/lib/libboost_context.a
+
+//Boost context library (debug)
+Boost_CONTEXT_LIBRARY_DEBUG:FILEPATH=/usr/local/lib/libboost_context.a
+
+//Boost context library (release)
+Boost_CONTEXT_LIBRARY_RELEASE:FILEPATH=/usr/local/lib/libboost_context.a
+
+//The Boost DATE_TIME library
+Boost_DATE_TIME_LIBRARY:FILEPATH=/usr/local/lib/libboost_date_time.a
+
+//Boost date_time library (debug)
+Boost_DATE_TIME_LIBRARY_DEBUG:FILEPATH=/usr/local/lib/libboost_date_time.a
+
+//Boost date_time library (release)
+Boost_DATE_TIME_LIBRARY_RELEASE:FILEPATH=/usr/local/lib/libboost_date_time.a
+
+//The directory containing a CMake configuration file for Boost.
+Boost_DIR:PATH=Boost_DIR-NOTFOUND
+
+//The Boost FILESYSTEM library
+Boost_FILESYSTEM_LIBRARY:FILEPATH=/usr/local/lib/libboost_filesystem.a
+
+//Boost filesystem library (debug)
+Boost_FILESYSTEM_LIBRARY_DEBUG:FILEPATH=/usr/local/lib/libboost_filesystem.a
+
+//Boost filesystem library (release)
+Boost_FILESYSTEM_LIBRARY_RELEASE:FILEPATH=/usr/local/lib/libboost_filesystem.a
+
+//Path to a file.
+Boost_INCLUDE_DIR:PATH=/usr/local/include + +//Boost library directory +Boost_LIBRARY_DIRS:FILEPATH=/usr/local/lib + +//The Boost PROGRAM_OPTIONS library +Boost_PROGRAM_OPTIONS_LIBRARY:FILEPATH=/usr/local/lib/libboost_program_options.a + +//Boost program_options library (debug) +Boost_PROGRAM_OPTIONS_LIBRARY_DEBUG:FILEPATH=/usr/local/lib/libboost_program_options.a + +//Boost program_options library (release) +Boost_PROGRAM_OPTIONS_LIBRARY_RELEASE:FILEPATH=/usr/local/lib/libboost_program_options.a + +//The Boost SERIALIZATION library +Boost_SERIALIZATION_LIBRARY:FILEPATH=/usr/local/lib/libboost_serialization.a + +//Boost serialization library (debug) +Boost_SERIALIZATION_LIBRARY_DEBUG:FILEPATH=/usr/local/lib/libboost_serialization.a + +//Boost serialization library (release) +Boost_SERIALIZATION_LIBRARY_RELEASE:FILEPATH=/usr/local/lib/libboost_serialization.a + +//The Boost SIGNALS library +Boost_SIGNALS_LIBRARY:FILEPATH=/usr/local/lib/libboost_signals.a + +//Boost signals library (debug) +Boost_SIGNALS_LIBRARY_DEBUG:FILEPATH=/usr/local/lib/libboost_signals.a + +//Boost signals library (release) +Boost_SIGNALS_LIBRARY_RELEASE:FILEPATH=/usr/local/lib/libboost_signals.a + +//The Boost SYSTEM library +Boost_SYSTEM_LIBRARY:FILEPATH=/usr/local/lib/libboost_system.a + +//Boost system library (debug) +Boost_SYSTEM_LIBRARY_DEBUG:FILEPATH=/usr/local/lib/libboost_system.a + +//Boost system library (release) +Boost_SYSTEM_LIBRARY_RELEASE:FILEPATH=/usr/local/lib/libboost_system.a + +//The Boost THREAD library +Boost_THREAD_LIBRARY:FILEPATH=/usr/local/lib/libboost_thread.a + +//Boost thread library (debug) +Boost_THREAD_LIBRARY_DEBUG:FILEPATH=/usr/local/lib/libboost_thread.a + +//Boost thread library (release) +Boost_THREAD_LIBRARY_RELEASE:FILEPATH=/usr/local/lib/libboost_thread.a + +//The Boost UNIT_TEST_FRAMEWORK library +Boost_UNIT_TEST_FRAMEWORK_LIBRARY:FILEPATH=/usr/local/lib/libboost_unit_test_framework.a + +//Boost unit_test_framework library (debug) +Boost_UNIT_TEST_FRAMEWORK_LIBRARY_DEBUG:FILEPATH=/usr/local/lib/libboost_unit_test_framework.a + +//Boost unit_test_framework library (release) +Boost_UNIT_TEST_FRAMEWORK_LIBRARY_RELEASE:FILEPATH=/usr/local/lib/libboost_unit_test_framework.a + +//Path to a program. +CMAKE_AR:FILEPATH=/opt/local/bin/ar + +//Choose the type of build, options are: None(CMAKE_CXX_FLAGS or +// CMAKE_C_FLAGS used) Debug Release RelWithDebInfo MinSizeRel. +CMAKE_BUILD_TYPE:STRING= + +//Enable/Disable color output during build. +CMAKE_COLOR_MAKEFILE:BOOL=ON + +//CXX compiler. +CMAKE_CXX_COMPILER:FILEPATH=/opt/local/bin/c++ + +//Flags used by the compiler during all build types. +CMAKE_CXX_FLAGS:STRING= + +//Flags used by the compiler during debug builds. +CMAKE_CXX_FLAGS_DEBUG:STRING=-g + +//Flags used by the compiler during release minsize builds. +CMAKE_CXX_FLAGS_MINSIZEREL:STRING=-Os -DNDEBUG + +//Flags used by the compiler during release builds (/MD /Ob1 /Oi +// /Ot /Oy /Gs will produce slightly less optimized but smaller +// files). +CMAKE_CXX_FLAGS_RELEASE:STRING=-O3 -DNDEBUG + +//Flags used by the compiler during Release with Debug Info builds. +CMAKE_CXX_FLAGS_RELWITHDEBINFO:STRING=-O2 -g + +//C compiler. +CMAKE_C_COMPILER:FILEPATH=/opt/local/bin/gcc + +//Flags used by the compiler during all build types. +CMAKE_C_FLAGS:STRING= + +//Flags used by the compiler during debug builds. +CMAKE_C_FLAGS_DEBUG:STRING=-g + +//Flags used by the compiler during release minsize builds. 
+CMAKE_C_FLAGS_MINSIZEREL:STRING=-Os -DNDEBUG + +//Flags used by the compiler during release builds (/MD /Ob1 /Oi +// /Ot /Oy /Gs will produce slightly less optimized but smaller +// files). +CMAKE_C_FLAGS_RELEASE:STRING=-O3 -DNDEBUG + +//Flags used by the compiler during Release with Debug Info builds. +CMAKE_C_FLAGS_RELWITHDEBINFO:STRING=-O2 -g + +//Flags used by the linker. +CMAKE_EXE_LINKER_FLAGS:STRING=' ' + +//Flags used by the linker during debug builds. +CMAKE_EXE_LINKER_FLAGS_DEBUG:STRING= + +//Flags used by the linker during release minsize builds. +CMAKE_EXE_LINKER_FLAGS_MINSIZEREL:STRING= + +//Flags used by the linker during release builds. +CMAKE_EXE_LINKER_FLAGS_RELEASE:STRING= + +//Flags used by the linker during Release with Debug Info builds. +CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO:STRING= + +//Enable/Disable output of compile commands during generation. +CMAKE_EXPORT_COMPILE_COMMANDS:BOOL=OFF + +//Path to a program. +CMAKE_INSTALL_NAME_TOOL:FILEPATH=/opt/local/bin/install_name_tool + +//Install path prefix, prepended onto install directories. +CMAKE_INSTALL_PREFIX:PATH=/usr/local + +//Path to a program. +CMAKE_LINKER:FILEPATH=/opt/local/bin/ld + +//Path to a program. +CMAKE_MAKE_PROGRAM:FILEPATH=/usr/bin/make + +//Flags used by the linker during the creation of modules. +CMAKE_MODULE_LINKER_FLAGS:STRING=' ' + +//Flags used by the linker during debug builds. +CMAKE_MODULE_LINKER_FLAGS_DEBUG:STRING= + +//Flags used by the linker during release minsize builds. +CMAKE_MODULE_LINKER_FLAGS_MINSIZEREL:STRING= + +//Flags used by the linker during release builds. +CMAKE_MODULE_LINKER_FLAGS_RELEASE:STRING= + +//Flags used by the linker during Release with Debug Info builds. +CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO:STRING= + +//Path to a program. +CMAKE_NM:FILEPATH=/opt/local/bin/nm + +//Path to a program. +CMAKE_OBJCOPY:FILEPATH=CMAKE_OBJCOPY-NOTFOUND + +//Path to a program. +CMAKE_OBJDUMP:FILEPATH=CMAKE_OBJDUMP-NOTFOUND + +//Build architectures for OSX +CMAKE_OSX_ARCHITECTURES:STRING= + +//Minimum OS X version to target for deployment (at runtime); newer +// APIs weak linked. Set to empty string for default value. +CMAKE_OSX_DEPLOYMENT_TARGET:STRING= + +//The product will be built against the headers and libraries located +// inside the indicated SDK. +CMAKE_OSX_SYSROOT:PATH=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.7.sdk + +//Value Computed by CMake +CMAKE_PROJECT_NAME:STATIC=fc + +//Path to a program. +CMAKE_RANLIB:FILEPATH=/opt/local/bin/ranlib + +//Flags used by the linker during the creation of dll's. +CMAKE_SHARED_LINKER_FLAGS:STRING=' ' + +//Flags used by the linker during debug builds. +CMAKE_SHARED_LINKER_FLAGS_DEBUG:STRING= + +//Flags used by the linker during release minsize builds. +CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL:STRING= + +//Flags used by the linker during release builds. +CMAKE_SHARED_LINKER_FLAGS_RELEASE:STRING= + +//Flags used by the linker during Release with Debug Info builds. +CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO:STRING= + +//If set, runtime paths are not added when installing shared libraries, +// but are added when building. +CMAKE_SKIP_INSTALL_RPATH:BOOL=NO + +//If set, runtime paths are not added when using shared libraries. +CMAKE_SKIP_RPATH:BOOL=NO + +//Path to a program. +CMAKE_STRIP:FILEPATH=/opt/local/bin/strip + +//If true, cmake will use relative paths in makefiles and projects. 
+CMAKE_USE_RELATIVE_PATHS:BOOL=OFF + +//If this value is on, makefiles will be generated without the +// .SILENT directive, and all commands will be echoed to the console +// during the make. This is useful for debugging only. With Visual +// Studio IDE projects all commands are done without /nologo. +CMAKE_VERBOSE_MAKEFILE:BOOL=FALSE + +//Path to a program. +CMAKE_XCODE_SELECT:FILEPATH=/usr/bin/xcode-select + +//OFF +UNITY_BUILD:BOOL=OFF + +//Value Computed by CMake +fc_BINARY_DIR:STATIC=/Users/dlarimer/Downloads/fc + +//Dependencies for target +fc_LIB_DEPENDS:STATIC= + +//Value Computed by CMake +fc_SOURCE_DIR:STATIC=/Users/dlarimer/Downloads/fc + + +######################## +# INTERNAL cache entries +######################## + +//Whether the Boost CHRONO library found +Boost_CHRONO_FOUND:INTERNAL=ON +//ADVANCED property for variable: Boost_CHRONO_LIBRARY +Boost_CHRONO_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_CHRONO_LIBRARY_DEBUG +Boost_CHRONO_LIBRARY_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_CHRONO_LIBRARY_RELEASE +Boost_CHRONO_LIBRARY_RELEASE-ADVANCED:INTERNAL=1 +//Whether the Boost CONTEXT library found +Boost_CONTEXT_FOUND:INTERNAL=ON +//ADVANCED property for variable: Boost_CONTEXT_LIBRARY +Boost_CONTEXT_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_CONTEXT_LIBRARY_DEBUG +Boost_CONTEXT_LIBRARY_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_CONTEXT_LIBRARY_RELEASE +Boost_CONTEXT_LIBRARY_RELEASE-ADVANCED:INTERNAL=1 +//Whether the Boost DATE_TIME library found +Boost_DATE_TIME_FOUND:INTERNAL=ON +//ADVANCED property for variable: Boost_DATE_TIME_LIBRARY +Boost_DATE_TIME_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_DATE_TIME_LIBRARY_DEBUG +Boost_DATE_TIME_LIBRARY_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_DATE_TIME_LIBRARY_RELEASE +Boost_DATE_TIME_LIBRARY_RELEASE-ADVANCED:INTERNAL=1 +//Whether the Boost FILESYSTEM library found +Boost_FILESYSTEM_FOUND:INTERNAL=ON +//ADVANCED property for variable: Boost_FILESYSTEM_LIBRARY +Boost_FILESYSTEM_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_FILESYSTEM_LIBRARY_DEBUG +Boost_FILESYSTEM_LIBRARY_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_FILESYSTEM_LIBRARY_RELEASE +Boost_FILESYSTEM_LIBRARY_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_INCLUDE_DIR +Boost_INCLUDE_DIR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_LIBRARY_DIRS +Boost_LIBRARY_DIRS-ADVANCED:INTERNAL=1 +//The library version string for boost libraries +Boost_LIB_VERSION:INTERNAL=1_51 +//Whether the Boost PROGRAM_OPTIONS library found +Boost_PROGRAM_OPTIONS_FOUND:INTERNAL=ON +//ADVANCED property for variable: Boost_PROGRAM_OPTIONS_LIBRARY +Boost_PROGRAM_OPTIONS_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_PROGRAM_OPTIONS_LIBRARY_DEBUG +Boost_PROGRAM_OPTIONS_LIBRARY_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_PROGRAM_OPTIONS_LIBRARY_RELEASE +Boost_PROGRAM_OPTIONS_LIBRARY_RELEASE-ADVANCED:INTERNAL=1 +//Whether the Boost SERIALIZATION library found +Boost_SERIALIZATION_FOUND:INTERNAL=ON +//ADVANCED property for variable: Boost_SERIALIZATION_LIBRARY +Boost_SERIALIZATION_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_SERIALIZATION_LIBRARY_DEBUG +Boost_SERIALIZATION_LIBRARY_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_SERIALIZATION_LIBRARY_RELEASE 
+Boost_SERIALIZATION_LIBRARY_RELEASE-ADVANCED:INTERNAL=1 +//Whether the Boost SIGNALS library found +Boost_SIGNALS_FOUND:INTERNAL=ON +//ADVANCED property for variable: Boost_SIGNALS_LIBRARY +Boost_SIGNALS_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_SIGNALS_LIBRARY_DEBUG +Boost_SIGNALS_LIBRARY_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_SIGNALS_LIBRARY_RELEASE +Boost_SIGNALS_LIBRARY_RELEASE-ADVANCED:INTERNAL=1 +//Whether the Boost SYSTEM library found +Boost_SYSTEM_FOUND:INTERNAL=ON +//ADVANCED property for variable: Boost_SYSTEM_LIBRARY +Boost_SYSTEM_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_SYSTEM_LIBRARY_DEBUG +Boost_SYSTEM_LIBRARY_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_SYSTEM_LIBRARY_RELEASE +Boost_SYSTEM_LIBRARY_RELEASE-ADVANCED:INTERNAL=1 +//Whether the Boost THREAD library found +Boost_THREAD_FOUND:INTERNAL=ON +//ADVANCED property for variable: Boost_THREAD_LIBRARY +Boost_THREAD_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_THREAD_LIBRARY_DEBUG +Boost_THREAD_LIBRARY_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_THREAD_LIBRARY_RELEASE +Boost_THREAD_LIBRARY_RELEASE-ADVANCED:INTERNAL=1 +//Whether the Boost UNIT_TEST_FRAMEWORK library found +Boost_UNIT_TEST_FRAMEWORK_FOUND:INTERNAL=ON +//ADVANCED property for variable: Boost_UNIT_TEST_FRAMEWORK_LIBRARY +Boost_UNIT_TEST_FRAMEWORK_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_UNIT_TEST_FRAMEWORK_LIBRARY_DEBUG +Boost_UNIT_TEST_FRAMEWORK_LIBRARY_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_UNIT_TEST_FRAMEWORK_LIBRARY_RELEASE +Boost_UNIT_TEST_FRAMEWORK_LIBRARY_RELEASE-ADVANCED:INTERNAL=1 +//The version number for boost libraries +Boost_VERSION:INTERNAL=105100 +//ADVANCED property for variable: CMAKE_AR +CMAKE_AR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_BUILD_TOOL +CMAKE_BUILD_TOOL-ADVANCED:INTERNAL=1 +//What is the target build tool cmake is generating for. +CMAKE_BUILD_TOOL:INTERNAL=/usr/bin/make +//This is the directory where this CMakeCache.txt was created +CMAKE_CACHEFILE_DIR:INTERNAL=/Users/dlarimer/Downloads/fc +//Major version of cmake used to create the current loaded cache +CMAKE_CACHE_MAJOR_VERSION:INTERNAL=2 +//Minor version of cmake used to create the current loaded cache +CMAKE_CACHE_MINOR_VERSION:INTERNAL=8 +//Patch version of cmake used to create the current loaded cache +CMAKE_CACHE_PATCH_VERSION:INTERNAL=8 +//ADVANCED property for variable: CMAKE_COLOR_MAKEFILE +CMAKE_COLOR_MAKEFILE-ADVANCED:INTERNAL=1 +//Path to CMake executable. +CMAKE_COMMAND:INTERNAL=/opt/local/bin/cmake +//Path to cpack program executable. +CMAKE_CPACK_COMMAND:INTERNAL=/opt/local/bin/cpack +//Path to ctest program executable. 
+CMAKE_CTEST_COMMAND:INTERNAL=/opt/local/bin/ctest +//ADVANCED property for variable: CMAKE_CXX_COMPILER +CMAKE_CXX_COMPILER-ADVANCED:INTERNAL=1 +CMAKE_CXX_COMPILER_WORKS:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_FLAGS +CMAKE_CXX_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_FLAGS_DEBUG +CMAKE_CXX_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_FLAGS_MINSIZEREL +CMAKE_CXX_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_FLAGS_RELEASE +CMAKE_CXX_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_FLAGS_RELWITHDEBINFO +CMAKE_CXX_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_COMPILER +CMAKE_C_COMPILER-ADVANCED:INTERNAL=1 +CMAKE_C_COMPILER_WORKS:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_FLAGS +CMAKE_C_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_FLAGS_DEBUG +CMAKE_C_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_FLAGS_MINSIZEREL +CMAKE_C_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_FLAGS_RELEASE +CMAKE_C_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_FLAGS_RELWITHDEBINFO +CMAKE_C_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//Result of TRY_COMPILE +CMAKE_DETERMINE_CXX_ABI_COMPILED:INTERNAL=TRUE +//Result of TRY_COMPILE +CMAKE_DETERMINE_C_ABI_COMPILED:INTERNAL=TRUE +//Path to cache edit program executable. +CMAKE_EDIT_COMMAND:INTERNAL=/opt/local/bin/ccmake +//Executable file format +CMAKE_EXECUTABLE_FORMAT:INTERNAL=Unknown +//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS +CMAKE_EXE_LINKER_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_DEBUG +CMAKE_EXE_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_MINSIZEREL +CMAKE_EXE_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_RELEASE +CMAKE_EXE_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO +CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_EXPORT_COMPILE_COMMANDS +CMAKE_EXPORT_COMPILE_COMMANDS-ADVANCED:INTERNAL=1 +//Name of generator. 
+CMAKE_GENERATOR:INTERNAL=Unix Makefiles +//Start directory with the top level CMakeLists.txt file for this +// project +CMAKE_HOME_DIRECTORY:INTERNAL=/Users/dlarimer/Downloads/fc +//ADVANCED property for variable: CMAKE_INSTALL_NAME_TOOL +CMAKE_INSTALL_NAME_TOOL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_LINKER +CMAKE_LINKER-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MAKE_PROGRAM +CMAKE_MAKE_PROGRAM-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS +CMAKE_MODULE_LINKER_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_DEBUG +CMAKE_MODULE_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_MINSIZEREL +CMAKE_MODULE_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_RELEASE +CMAKE_MODULE_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO +CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_NM +CMAKE_NM-ADVANCED:INTERNAL=1 +//number of local generators +CMAKE_NUMBER_OF_LOCAL_GENERATORS:INTERNAL=1 +//ADVANCED property for variable: CMAKE_OBJCOPY +CMAKE_OBJCOPY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_OBJDUMP +CMAKE_OBJDUMP-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_RANLIB +CMAKE_RANLIB-ADVANCED:INTERNAL=1 +//Path to CMake installation. +CMAKE_ROOT:INTERNAL=/opt/local/share/cmake-2.8 +//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS +CMAKE_SHARED_LINKER_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_DEBUG +CMAKE_SHARED_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL +CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_RELEASE +CMAKE_SHARED_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO +CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SKIP_INSTALL_RPATH +CMAKE_SKIP_INSTALL_RPATH-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SKIP_RPATH +CMAKE_SKIP_RPATH-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_STRIP +CMAKE_STRIP-ADVANCED:INTERNAL=1 +//uname command +CMAKE_UNAME:INTERNAL=/usr/bin/uname +//ADVANCED property for variable: CMAKE_USE_RELATIVE_PATHS +CMAKE_USE_RELATIVE_PATHS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_VERBOSE_MAKEFILE +CMAKE_VERBOSE_MAKEFILE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_XCODE_SELECT +CMAKE_XCODE_SELECT-ADVANCED:INTERNAL=1 + diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..f3f58da --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,72 @@ +PROJECT( fc ) + +CMAKE_MINIMUM_REQUIRED( VERSION 2.8.0 ) + +SET( CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/CMakeModules;${CMAKE_MODULE_PATH}" ) + +INCLUDE( VersionMacros ) +INCLUDE( SetupTargetMacros ) + +SET( DEFAULT_HEADER_INSTALL_DIR include/\${target} ) +SET( DEFAULT_LIBRARY_INSTALL_DIR lib/ ) +SET( DEFAULT_EXECUTABLE_INSTALL_DIR bin/ ) +SET( CMAKE_DEBUG_POSTFIX _debug ) +#SET( BUILD_SHARED_LIBS NO ) + +SET(Boost_USE_STATIC_LIBS ON) +FIND_PACKAGE(Boost 1.50 COMPONENTS thread date_time system filesystem program_options signals serialization chrono unit_test_framework context ) + + + 
+INCLUDE_DIRECTORIES( ${CMAKE_SOURCE_DIR} )
+LINK_DIRECTORIES( ${Boost_LIBRARY_DIRS} )
+
+IF( WIN32 )
+    ADD_DEFINITIONS( -DBOOST_CONTEXT_NO_LIB )
+    ADD_DEFINITIONS( -D_SCL_SECURE_NO_WARNINGS )
+    ADD_DEFINITIONS( -D_WIN32_WINNT=0x0501 )
+    ADD_DEFINITIONS( -D_CRT_SECURE_NO_WARNINGS )
+ELSE(WIN32)
+    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x -Wall")
+ENDIF(WIN32)
+
+if( UNIX )
+  if( NOT APPLE )
+    set(rt_library rt )
+    set(pthread_library pthread)
+  endif()
+endif()
+
+option( UNITY_BUILD "Build fc from a single unity compilation unit" OFF )
+
+include_directories( ~/projects/mace/libs/atomic/include )
+include_directories( ~/projects/mace/libs/context/include )
+include_directories( ${Boost_INCLUDE_DIR} )
+include_directories( include )
+
+set( sources
+    src/json_rpc_connection.cpp
+    src/spin_lock.cpp
+    src/spin_yield_lock.cpp
+    src/task.cpp
+    src/future.cpp
+    src/shared_ptr.cpp
+    src/string.cpp
+    src/json.cpp
+    src/log.cpp
+    src/time.cpp
+    src/stream.cpp
+    src/exception.cpp
+    src/thread.cpp
+    src/value.cpp
+    src/value_cast.cpp
+)
+setup_library( fc SOURCES ${sources} )
+
+add_executable( test_vec tests/vector_test.cpp )
+target_link_libraries( test_vec fc ${Boost_SYSTEM_LIBRARY} ${Boost_CHRONO_LIBRARY} ${Boost_THREAD_LIBRARY} ${Boost_CONTEXT_LIBRARY} )
+
+
+add_executable( unit_tests tests/unit.cpp )
+target_link_libraries( unit_tests fc ${Boost_CHRONO_LIBRARY} ${Boost_THREAD_LIBRARY} ${Boost_CONTEXT_LIBRARY} ${Boost_SYSTEM_LIBRARY} ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY} )
+
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..cae31d1
--- /dev/null
+++ b/README.md
@@ -0,0 +1,25 @@
+# Fast Compiling C++ Library
+-----------------------------------------
+
+In my prior attempts at developing MACE, I discovered that compile times would
+explode to unreasonable levels, hindering the rate of development more than the
+reduced typing could ever save. So I began a quest to get C++ to compile as
+quickly as Java or C#, and the result is this library.
+
+One of the major drawbacks of templates is that they place everything in headers,
+must be recompiled in every compilation unit that uses them, and generate a lot of
+object code. With Link Time Optimization, the benefit of inline methods mostly
+disappears, leaving only the distinction between static and dynamic polymorphism.
+
+For the vast majority of applications, a virtual method call is not the bottleneck,
+and the increased compile time costs more than is otherwise justified; therefore, the
+Fast Compiling C++ library opts for virtual interfaces to handle reflection instead
+of template interfaces. One could argue that both types of reflection are useful.
+
+Another source of slowness is the standard template library itself. Most standard
+template library classes cannot be forward-declared and import thousands of lines
+of code into every compilation unit.
+
+Another source of slowness is the need to include headers simply because the 'size'
+of an object must be known. A new utility class allows you to 'forward declare' the
+size required for certain types, which lets you remove their inclusion from the
+header file; a minimal sketch of this idea follows below.
+
+
diff --git a/include/boost/atomic.hpp b/include/boost/atomic.hpp
new file mode 100644
index 0000000..a8b9c55
--- /dev/null
+++ b/include/boost/atomic.hpp
@@ -0,0 +1,217 @@
+#ifndef BOOST_ATOMIC_HPP
+#define BOOST_ATOMIC_HPP
+
+// Copyright (c) 2009 Helge Bahmann
+//
+// Distributed under the Boost Software License, Version 1.0.
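The utility class the README mentions for forward-declaring a type's size is not part
of this commit. As illustration only, here is a minimal sketch of the idea; the class
name `fwd`, its interface, and the out-of-line definition pattern are all assumptions,
not taken from this diff:

    // Hypothetical sketch of a fixed-size forward-declaration wrapper; the
    // name `fwd` and every detail below are assumed, not part of this commit.
    #include <cstddef>
    #include <new>

    template<typename T, std::size_t Size>
    class fwd {
    public:
        fwd();            // defined in a .cpp where T is complete, so this
        ~fwd();           // header never needs T's full definition
        T*       operator->()       { return reinterpret_cast<T*>(_store); }
        const T* operator->() const { return reinterpret_cast<const T*>(_store); }
    private:
        // the size is promised up front instead of computed from sizeof(T)
        unsigned char _store[Size];
    };

    // --- in the .cpp, where T's definition is visible ---
    // template<typename T, std::size_t Size>
    // fwd<T, Size>::fwd() {
    //     static_assert(sizeof(T) <= Size, "declared size too small");
    //     new (_store) T();
    // }
    // template<typename T, std::size_t Size>
    // fwd<T, Size>::~fwd() { reinterpret_cast<T*>(_store)->~T(); }

A real implementation would also have to handle alignment (e.g. by aligning the
storage buffer) and copy semantics; this sketch only shows the size-promise trick.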
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+//#include
+
+namespace boost {
+
+template<typename T>
+class atomic : public detail::atomic::internal_atomic<T> {
+public:
+	typedef detail::atomic::internal_atomic<T> super;
+
+	atomic() {}
+	explicit atomic(T v) : super(v) {}
+private:
+	atomic(const atomic &);
+	void operator=(const atomic &);
+};
+
+
+template<>
+class atomic<bool> : private detail::atomic::internal_atomic<bool> {
+public:
+	typedef detail::atomic::internal_atomic<bool> super;
+
+	atomic() {}
+	explicit atomic(bool v) : super(v) {}
+
+	using super::load;
+	using super::store;
+	using super::compare_exchange_strong;
+	using super::compare_exchange_weak;
+	using super::exchange;
+	using super::is_lock_free;
+
+	operator bool(void) const volatile {return load();}
+	bool operator=(bool v) volatile {store(v); return v;}
+private:
+	atomic(const atomic &);
+	void operator=(const atomic &);
+};
+
+template<>
+class atomic<void *> : private detail::atomic::internal_atomic<void *> {
+public:
+	typedef detail::atomic::internal_atomic<void *> super;
+
+	atomic() {}
+	explicit atomic(void * p) : super(p) {}
+	using super::load;
+	using super::store;
+	using super::compare_exchange_strong;
+	using super::compare_exchange_weak;
+	using super::exchange;
+	using super::is_lock_free;
+
+	operator void *(void) const volatile {return load();}
+	void * operator=(void * v) volatile {store(v); return v;}
+
+private:
+	atomic(const atomic &);
+	void * operator=(const atomic &);
+};
+
+/* FIXME: pointer arithmetic still missing */
+
+template<typename T>
+class atomic<T *> : private detail::atomic::internal_atomic<intptr_t> {
+public:
+	typedef detail::atomic::internal_atomic<intptr_t> super;
+
+	atomic() {}
+	explicit atomic(T * p) : super((intptr_t)p) {}
+
+	T *load(memory_order order=memory_order_seq_cst) const volatile
+	{
+		return (T*)super::load(order);
+	}
+	void store(T *v, memory_order order=memory_order_seq_cst) volatile
+	{
+		super::store((intptr_t)v, order);
+	}
+	bool compare_exchange_strong(
+		T * &expected,
+		T * desired,
+		memory_order order=memory_order_seq_cst) volatile
+	{
+		return compare_exchange_strong(expected, desired, order, detail::atomic::calculate_failure_order(order));
+	}
+	bool compare_exchange_weak(
+		T * &expected,
+		T *desired,
+		memory_order order=memory_order_seq_cst) volatile
+	{
+		return compare_exchange_weak(expected, desired, order, detail::atomic::calculate_failure_order(order));
+	}
+	bool compare_exchange_weak(
+		T * &expected,
+		T *desired,
+		memory_order success_order,
+		memory_order failure_order) volatile
+	{
+		intptr_t expected_=(intptr_t)expected;
+		intptr_t desired_=(intptr_t)desired;
+		bool success=super::compare_exchange_weak(expected_, desired_, success_order, failure_order);
+		expected=(T*)expected_;
+		return success;
+	}
+	bool compare_exchange_strong(
+		T * &expected,
+		T *desired,
+		memory_order success_order,
+		memory_order failure_order) volatile
+	{
+		intptr_t expected_=(intptr_t)expected, desired_=(intptr_t)desired;
+		bool success=super::compare_exchange_strong(expected_, desired_, success_order, failure_order);
+		expected=(T*)expected_;
+		return success;
+	}
+	T *exchange(T * replacement, memory_order order=memory_order_seq_cst) volatile
+	{
+		return (T*)super::exchange((intptr_t)replacement, order);
+	}
+	using super::is_lock_free;
+
+	operator T *(void) const volatile {return load();}
+	T * operator=(T * v) volatile {store(v); return v;}
+
+	T * fetch_add(ptrdiff_t diff, memory_order order=memory_order_seq_cst) volatile
+	{
+		return (T*)super::fetch_add(diff*sizeof(T), order);
+	}
+	T * fetch_sub(ptrdiff_t diff, memory_order order=memory_order_seq_cst) volatile
+	{
+		return (T*)super::fetch_sub(diff*sizeof(T), order);
+	}
+
+	T *operator++(void) volatile {return fetch_add(1)+1;}
+	T *operator++(int) volatile {return fetch_add(1);}
+	T *operator--(void) volatile {return fetch_sub(1)-1;}
+	T *operator--(int) volatile {return fetch_sub(1);}
+private:
+	atomic(const atomic &);
+	T * operator=(const atomic &);
+};
+
+class atomic_flag : private atomic<int> {
+public:
+	typedef atomic<int> super;
+	using super::is_lock_free;
+
+	atomic_flag(bool initial_state) : super(initial_state?1:0) {}
+	atomic_flag() {}
+
+	bool test_and_set(memory_order order=memory_order_seq_cst)
+	{
+		return super::exchange(1, order) ? true : false;
+	}
+	void clear(memory_order order=memory_order_seq_cst)
+	{
+		super::store(0, order);
+	}
+};
+
+typedef atomic<char> atomic_char;
+typedef atomic<unsigned char> atomic_uchar;
+typedef atomic<signed char> atomic_schar;
+typedef atomic<uint8_t> atomic_uint8_t;
+typedef atomic<int8_t> atomic_int8_t;
+typedef atomic<unsigned short> atomic_ushort;
+typedef atomic<short> atomic_short;
+typedef atomic<uint16_t> atomic_uint16_t;
+typedef atomic<int16_t> atomic_int16_t;
+typedef atomic<unsigned int> atomic_uint;
+typedef atomic<int> atomic_int;
+typedef atomic<uint32_t> atomic_uint32_t;
+typedef atomic<int32_t> atomic_int32_t;
+typedef atomic<unsigned long> atomic_ulong;
+typedef atomic<long> atomic_long;
+typedef atomic<uint64_t> atomic_uint64_t;
+typedef atomic<int64_t> atomic_int64_t;
+#ifdef BOOST_HAS_LONG_LONG
+typedef atomic<unsigned long long> atomic_ullong;
+typedef atomic<long long> atomic_llong;
+#endif
+#ifdef BOOST_ATOMIC_HAVE_GNU_128BIT_INTEGERS
+typedef atomic<__uint128_t> atomic_uint128_t;
+typedef atomic<__int128_t> atomic_int128_t;
+#endif
+#if BOOST_MSVC >= 1500 && (defined(_M_IA64) || defined(_M_AMD64)) && defined(BOOST_ATOMIC_HAVE_SSE2)
+typedef atomic<__m128i> atomic_uint128_t;
+typedef atomic<__m128i> atomic_int128_t;
+#endif
+typedef atomic<void *> atomic_address;
+typedef atomic<bool> atomic_bool;
+
+static inline void atomic_thread_fence(memory_order order)
+{
+	detail::atomic::platform_atomic_thread_fence(order);
+}
+
+}
+
+#endif
diff --git a/include/boost/atomic/detail/base.hpp b/include/boost/atomic/detail/base.hpp
new file mode 100644
index 0000000..daa0d94
--- /dev/null
+++ b/include/boost/atomic/detail/base.hpp
@@ -0,0 +1,186 @@
+#ifndef BOOST_DETAIL_ATOMIC_BASE_HPP
+#define BOOST_DETAIL_ATOMIC_BASE_HPP
+
+// Copyright (c) 2009 Helge Bahmann
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +#include +#include +#include + +namespace boost { +namespace detail { +namespace atomic { + +static inline memory_order calculate_failure_order(memory_order order) +{ + switch(order) { + case memory_order_acq_rel: return memory_order_acquire; + case memory_order_release: return memory_order_relaxed; + default: return order; + } +} + +template +class platform_atomic : public fallback_atomic { +public: + typedef fallback_atomic super; + + explicit platform_atomic(T v) : super(v) {} + platform_atomic() {} +protected: + typedef typename super::integral_type integral_type; +}; + +template +class platform_atomic_integral : public build_atomic_from_exchange > { +public: + typedef build_atomic_from_exchange > super; + + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral() {} +protected: + typedef typename super::integral_type integral_type; +}; + +template +static inline void platform_atomic_thread_fence(T order) +{ + /* FIXME: this does not provide + sequential consistency, need one global + variable for that... */ + platform_atomic a; + a.exchange(0, order); +} + +template::test> +class internal_atomic; + +template +class internal_atomic : private detail::atomic::platform_atomic { +public: + typedef detail::atomic::platform_atomic super; + + internal_atomic() {} + explicit internal_atomic(T v) : super(v) {} + + operator T(void) const volatile {return load();} + T operator=(T v) volatile {store(v); return v;} + + using super::is_lock_free; + using super::load; + using super::store; + using super::exchange; + + bool compare_exchange_strong( + T &expected, + T desired, + memory_order order=memory_order_seq_cst) volatile + { + return super::compare_exchange_strong(expected, desired, order, calculate_failure_order(order)); + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order order=memory_order_seq_cst) volatile + { + return super::compare_exchange_strong(expected, desired, order, calculate_failure_order(order)); + } + bool compare_exchange_strong( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + return super::compare_exchange_strong(expected, desired, success_order, failure_order); + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + return super::compare_exchange_strong(expected, desired, success_order, failure_order); + } +private: + internal_atomic(const internal_atomic &); + void operator=(const internal_atomic &); +}; + +template +class internal_atomic : private detail::atomic::platform_atomic_integral { +public: + typedef detail::atomic::platform_atomic_integral super; + typedef typename super::integral_type integral_type; + + internal_atomic() {} + explicit internal_atomic(T v) : super(v) {} + + using super::is_lock_free; + using super::load; + using super::store; + using super::exchange; + using super::fetch_add; + using super::fetch_sub; + using super::fetch_and; + using super::fetch_or; + using super::fetch_xor; + + operator integral_type(void) const volatile {return load();} + integral_type operator=(integral_type v) volatile {store(v); return v;} + + integral_type operator&=(integral_type c) volatile {return fetch_and(c)&c;} + integral_type operator|=(integral_type c) volatile {return fetch_or(c)|c;} + integral_type operator^=(integral_type c) volatile {return fetch_xor(c)^c;} + + integral_type 
operator+=(integral_type c) volatile {return fetch_add(c)+c;} + integral_type operator-=(integral_type c) volatile {return fetch_sub(c)-c;} + + integral_type operator++(void) volatile {return fetch_add(1)+1;} + integral_type operator++(int) volatile {return fetch_add(1);} + integral_type operator--(void) volatile {return fetch_sub(1)-1;} + integral_type operator--(int) volatile {return fetch_sub(1);} + + bool compare_exchange_strong( + integral_type &expected, + integral_type desired, + memory_order order=memory_order_seq_cst) volatile + { + return super::compare_exchange_strong(expected, desired, order, calculate_failure_order(order)); + } + bool compare_exchange_weak( + integral_type &expected, + integral_type desired, + memory_order order=memory_order_seq_cst) volatile + { + return super::compare_exchange_strong(expected, desired, order, calculate_failure_order(order)); + } + bool compare_exchange_strong( + integral_type &expected, + integral_type desired, + memory_order success_order, + memory_order failure_order) volatile + { + return super::compare_exchange_strong(expected, desired, success_order, failure_order); + } + bool compare_exchange_weak( + integral_type &expected, + integral_type desired, + memory_order success_order, + memory_order failure_order) volatile + { + return super::compare_exchange_strong(expected, desired, success_order, failure_order); + } +private: + internal_atomic(const internal_atomic &); + void operator=(const internal_atomic &); +}; + +} +} +} + +#endif diff --git a/include/boost/atomic/detail/builder.hpp b/include/boost/atomic/detail/builder.hpp new file mode 100644 index 0000000..19f8b39 --- /dev/null +++ b/include/boost/atomic/detail/builder.hpp @@ -0,0 +1,412 @@ +#ifndef BOOST_DETAIL_ATOMIC_BUILDER_HPP +#define BOOST_DETAIL_ATOMIC_BUILDER_HPP + +// Copyright (c) 2009 Helge Bahmann +// +// Distributed under the Boost Software License, Version 1.0. 
+// See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +#include +#include + +namespace boost { +namespace detail { +namespace atomic { + +/* +given a Base that implements: + +- load(memory_order order) +- compare_exchange_weak(integral_type &expected, integral_type desired, memory_order order) + +generates exchange and compare_exchange_strong +*/ +template +class build_exchange : public Base { +public: + typedef typename Base::integral_type integral_type; + + using Base::load; + using Base::compare_exchange_weak; + + bool compare_exchange_strong( + integral_type &expected, + integral_type desired, + memory_order success_order, + memory_order failure_order) volatile + { + integral_type expected_save=expected; + while(true) { + if (compare_exchange_weak(expected, desired, success_order, failure_order)) return true; + if (expected_save!=expected) return false; + expected=expected_save; + } + } + + integral_type exchange(integral_type replacement, memory_order order=memory_order_seq_cst) volatile + { + integral_type o=load(memory_order_relaxed); + do {} while(!compare_exchange_weak(o, replacement, order, memory_order_relaxed)); + return o; + } + + build_exchange() {} + explicit build_exchange(integral_type i) : Base(i) {} +}; + +/* +given a Base that implements: + +- fetch_add_var(integral_type c, memory_order order) +- fetch_inc(memory_order order) +- fetch_dec(memory_order order) + +creates a fetch_add method that delegates to fetch_inc/fetch_dec if operand +is constant +1/-1, and uses fetch_add_var otherwise + +the intention is to allow optimizing the incredibly common case of +1/-1 +*/ +template +class build_const_fetch_add : public Base { +public: + typedef typename Base::integral_type integral_type; + + integral_type fetch_add( + integral_type c, + memory_order order=memory_order_seq_cst) volatile + { + if (__builtin_constant_p(c)) { + switch(c) { + case -1: return fetch_dec(order); + case 1: return fetch_inc(order); + } + } + return fetch_add_var(c, order); + } + + build_const_fetch_add() {} + explicit build_const_fetch_add(integral_type i) : Base(i) {} +protected: + using Base::fetch_add_var; + using Base::fetch_inc; + using Base::fetch_dec; +}; + +/* +given a Base that implements: + +- load(memory_order order) +- compare_exchange_weak(integral_type &expected, integral_type desired, memory_order order) + +generates a -- not very efficient, but correct -- fetch_add operation +*/ +template +class build_fetch_add : public Base { +public: + typedef typename Base::integral_type integral_type; + + using Base::compare_exchange_weak; + + integral_type fetch_add( + integral_type c, memory_order order=memory_order_seq_cst) volatile + { + integral_type o=Base::load(memory_order_relaxed), n; + do {n=o+c;} while(!compare_exchange_weak(o, n, order, memory_order_relaxed)); + return o; + } + + build_fetch_add() {} + explicit build_fetch_add(integral_type i) : Base(i) {} +}; + +/* +given a Base that implements: + +- fetch_add(integral_type c, memory_order order) + +generates fetch_sub and post/pre- increment/decrement operators +*/ +template +class build_arithmeticops : public Base { +public: + typedef typename Base::integral_type integral_type; + + using Base::fetch_add; + + integral_type fetch_sub( + integral_type c, + memory_order order=memory_order_seq_cst) volatile + { +#if defined(BOOST_MSVC) +#pragma warning(push) +#pragma warning(disable: 4146) // unary minus operator applied to unsigned type, result still unsigned +#endif + return fetch_add(-c, 
order); +#if defined(BOOST_MSVC) +#pragma warning(pop) +#endif + } + + build_arithmeticops() {} + explicit build_arithmeticops(integral_type i) : Base(i) {} +}; + +/* +given a Base that implements: + +- load(memory_order order) +- compare_exchange_weak(integral_type &expected, integral_type desired, memory_order order) + +generates -- not very efficient, but correct -- fetch_and, fetch_or and fetch_xor operators +*/ +template +class build_logicops : public Base { +public: + typedef typename Base::integral_type integral_type; + + using Base::compare_exchange_weak; + using Base::load; + + integral_type fetch_and(integral_type c, memory_order order=memory_order_seq_cst) volatile + { + integral_type o=load(memory_order_relaxed), n; + do {n=o&c;} while(!compare_exchange_weak(o, n, order, memory_order_relaxed)); + return o; + } + integral_type fetch_or(integral_type c, memory_order order=memory_order_seq_cst) volatile + { + integral_type o=load(memory_order_relaxed), n; + do {n=o|c;} while(!compare_exchange_weak(o, n, order, memory_order_relaxed)); + return o; + } + integral_type fetch_xor(integral_type c, memory_order order=memory_order_seq_cst) volatile + { + integral_type o=load(memory_order_relaxed), n; + do {n=o^c;} while(!compare_exchange_weak(o, n, order, memory_order_relaxed)); + return o; + } + + build_logicops() {} + build_logicops(integral_type i) : Base(i) {} +}; + +/* +given a Base that implements: + +- load(memory_order order) +- store(integral_type i, memory_order order) +- compare_exchange_weak(integral_type &expected, integral_type desired, memory_order order) + +generates the full set of atomic operations for integral types +*/ +template +class build_atomic_from_minimal : public build_logicops< build_arithmeticops< build_fetch_add< build_exchange > > > { +public: + typedef build_logicops< build_arithmeticops< build_fetch_add< build_exchange > > > super; + typedef typename super::integral_type integral_type; + + build_atomic_from_minimal(void) {} + build_atomic_from_minimal(typename super::integral_type i) : super(i) {} +}; + +/* +given a Base that implements: + +- load(memory_order order) +- store(integral_type i, memory_order order) +- compare_exchange_weak(integral_type &expected, integral_type desired, memory_order order) +- compare_exchange_strong(integral_type &expected, integral_type desired, memory_order order) +- exchange(integral_type replacement, memory_order order) +- fetch_add_var(integral_type c, memory_order order) +- fetch_inc(memory_order order) +- fetch_dec(memory_order order) + +generates the full set of atomic operations for integral types +*/ +template +class build_atomic_from_typical : public build_logicops< build_arithmeticops< build_const_fetch_add > > { +public: + typedef build_logicops< build_arithmeticops< build_const_fetch_add > > super; + typedef typename super::integral_type integral_type; + + build_atomic_from_typical(void) {} + build_atomic_from_typical(typename super::integral_type i) : super(i) {} +}; + +/* +given a Base that implements: + +- load(memory_order order) +- store(integral_type i, memory_order order) +- compare_exchange_weak(integral_type &expected, integral_type desired, memory_order order) +- compare_exchange_strong(integral_type &expected, integral_type desired, memory_order order) +- exchange(integral_type replacement, memory_order order) +- fetch_add(integral_type c, memory_order order) + +generates the full set of atomic operations for integral types +*/ +template +class build_atomic_from_add : public build_logicops< 
build_arithmeticops > { +public: + typedef build_logicops< build_arithmeticops > super; + typedef typename super::integral_type integral_type; + + build_atomic_from_add(void) {} + build_atomic_from_add(typename super::integral_type i) : super(i) {} +}; + +/* +given a Base that implements: + +- load(memory_order order) +- store(integral_type i, memory_order order) +- compare_exchange_weak(integral_type &expected, integral_type desired, memory_order order) +- compare_exchange_strong(integral_type &expected, integral_type desired, memory_order order) +- exchange(integral_type replacement, memory_order order) + +generates the full set of atomic operations for integral types +*/ +template +class build_atomic_from_exchange : public build_logicops< build_arithmeticops< build_fetch_add > > { +public: + typedef build_logicops< build_arithmeticops< build_fetch_add > > super; + typedef typename super::integral_type integral_type; + + build_atomic_from_exchange(void) {} + build_atomic_from_exchange(typename super::integral_type i) : super(i) {} +}; + + +/* +given a Base that implements: + +- compare_exchange_weak() + +generates load, store and compare_exchange_weak for a smaller +data type (e.g. an atomic "byte" embedded into a temporary +and properly aligned atomic "int"). +*/ +template +class build_base_from_larger_type { +public: + typedef Type integral_type; + + build_base_from_larger_type() {} + build_base_from_larger_type(integral_type t) {store(t, memory_order_relaxed);} + + integral_type load(memory_order order=memory_order_seq_cst) const volatile + { + larger_integral_type v=get_base().load(order); + return extract(v); + } + bool compare_exchange_weak(integral_type &expected, + integral_type desired, + memory_order success_order, + memory_order failure_order) volatile + { + larger_integral_type expected_; + larger_integral_type desired_; + + expected_=get_base().load(memory_order_relaxed); + expected_=insert(expected_, expected); + desired_=insert(expected_, desired); + bool success=get_base().compare_exchange_weak(expected_, desired_, success_order, failure_order); + expected=extract(expected_); + return success; + } + void store(integral_type v, + memory_order order=memory_order_seq_cst) volatile + { + larger_integral_type expected, desired; + expected=get_base().load(memory_order_relaxed); + do { + desired=insert(expected, v); + } while(!get_base().compare_exchange_weak(expected, desired, order, memory_order_relaxed)); + } + + bool is_lock_free(void) + { + return get_base().is_lock_free(); + } +private: + typedef typename Base::integral_type larger_integral_type; + + const Base &get_base(void) const volatile + { + intptr_t address=(intptr_t)this; + address&=~(sizeof(larger_integral_type)-1); + return *reinterpret_cast(address); + } + Base &get_base(void) volatile + { + intptr_t address=(intptr_t)this; + address&=~(sizeof(larger_integral_type)-1); + return *reinterpret_cast(address); + } + unsigned int get_offset(void) const volatile + { + intptr_t address=(intptr_t)this; + address&=(sizeof(larger_integral_type)-1); + return address; + } + + unsigned int get_shift(void) const volatile + { +#if defined(BOOST_LITTLE_ENDIAN) + return get_offset()*8; +#elif defined(BOOST_BIG_ENDIAN) + return (sizeof(larger_integral_type)-sizeof(integral_type)-get_offset())*8; +#else + #error "Unknown endian" +#endif + } + + integral_type extract(larger_integral_type v) const volatile + { + return v>>get_shift(); + } + + larger_integral_type insert(larger_integral_type target, integral_type source) const 
volatile + { + larger_integral_type tmp=source; + larger_integral_type mask=(larger_integral_type)-1; + + mask=~(mask<<(8*sizeof(integral_type))); + + mask=mask< +class build_atomic_from_larger_type : public build_atomic_from_minimal< build_base_from_larger_type > { +public: + typedef build_atomic_from_minimal< build_base_from_larger_type > super; + //typedef typename super::integral_type integral_type; + typedef Type integral_type; + + build_atomic_from_larger_type() {} + build_atomic_from_larger_type(integral_type v) : super(v) {} +}; + +} +} +} + +#endif diff --git a/include/boost/atomic/detail/fallback.hpp b/include/boost/atomic/detail/fallback.hpp new file mode 100644 index 0000000..e539099 --- /dev/null +++ b/include/boost/atomic/detail/fallback.hpp @@ -0,0 +1,76 @@ +#ifndef BOOST_DETAIL_ATOMIC_FALLBACK_HPP +#define BOOST_DETAIL_ATOMIC_FALLBACK_HPP + +// Copyright (c) 2009 Helge Bahmann +// +// Distributed under the Boost Software License, Version 1.0. +// See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +#include +#include + +namespace boost { +namespace detail { +namespace atomic { + +template +class fallback_atomic { +public: + fallback_atomic(void) {} + explicit fallback_atomic(const T &t) {memcpy(&i, &t, sizeof(T));} + + void store(const T &t, memory_order order=memory_order_seq_cst) volatile + { + detail::spinlock_pool<0>::scoped_lock guard(const_cast(&i)); + memcpy((void*)&i, &t, sizeof(T)); + } + T load(memory_order /*order*/=memory_order_seq_cst) volatile const + { + detail::spinlock_pool<0>::scoped_lock guard(const_cast(&i)); + T tmp; + memcpy(&tmp, (T*)&i, sizeof(T)); + return tmp; + } + bool compare_exchange_strong( + T &expected, + T desired, + memory_order /*success_order*/, + memory_order /*failure_order*/) volatile + { + detail::spinlock_pool<0>::scoped_lock guard(const_cast(&i)); + if (memcmp((void*)&i, &expected, sizeof(T))==0) { + memcpy((void*)&i, &desired, sizeof(T)); + return true; + } else { + memcpy(&expected, (void*)&i, sizeof(T)); + return false; + } + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + return compare_exchange_strong(expected, desired, success_order, failure_order); + } + T exchange(T replacement, memory_order /*order*/=memory_order_seq_cst) volatile + { + detail::spinlock_pool<0>::scoped_lock guard(const_cast(&i)); + T tmp; + memcpy(&tmp, (void*)&i, sizeof(T)); + memcpy((void*)&i, &replacement, sizeof(T)); + return tmp; + } + bool is_lock_free(void) const volatile {return false;} +protected: + T i; + typedef T integral_type; +}; + +} +} +} + +#endif diff --git a/include/boost/atomic/detail/gcc-alpha.hpp b/include/boost/atomic/detail/gcc-alpha.hpp new file mode 100644 index 0000000..8e55f44 --- /dev/null +++ b/include/boost/atomic/detail/gcc-alpha.hpp @@ -0,0 +1,354 @@ +#ifndef BOOST_DETAIL_ATOMIC_GCC_ALPHA_HPP +#define BOOST_DETAIL_ATOMIC_GCC_ALPHA_HPP + +// Copyright (c) 2009 Helge Bahmann +// +// Distributed under the Boost Software License, Version 1.0. +// See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +#include +#include + +/* + Refer to http://h71000.www7.hp.com/doc/82final/5601/5601pro_004.html + (HP OpenVMS systems documentation) and the alpha reference manual. 
+ */ + +/* + NB: The most natural thing would be to write the increment/decrement + operators along the following lines: + + __asm__ __volatile__( + "1: ldl_l %0,%1 \n" + "addl %0,1,%0 \n" + "stl_c %0,%1 \n" + "beq %0,1b\n" + : "=&b" (tmp) + : "m" (value) + : "cc" + ); + + However according to the comments on the HP website and matching + comments in the Linux kernel sources this defies branch prediction, + as the cpu assumes that backward branches are always taken; so + instead copy the trick from the Linux kernel, introduce a forward + branch and back again. + + I have, however, had a hard time measuring the difference between + the two versions in microbenchmarks -- I am leaving it in nevertheless + as it apparently does not hurt either. +*/ + +namespace boost { +namespace detail { +namespace atomic { + +static inline void fence_before(memory_order order) +{ + switch(order) { + case memory_order_consume: + case memory_order_release: + case memory_order_acq_rel: + case memory_order_seq_cst: + __asm__ __volatile__ ("mb" ::: "memory"); + default:; + } +} + +static inline void fence_after(memory_order order) +{ + switch(order) { + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + __asm__ __volatile__ ("mb" ::: "memory"); + default:; + } +} + +template<> +inline void platform_atomic_thread_fence(memory_order order) +{ + switch(order) { + case memory_order_acquire: + case memory_order_consume: + case memory_order_release: + case memory_order_acq_rel: + case memory_order_seq_cst: + __asm__ __volatile__ ("mb" ::: "memory"); + default:; + } +} + +template +class atomic_alpha_32 { +public: + typedef T integral_type; + explicit atomic_alpha_32(T v) : i(v) {} + atomic_alpha_32() {} + T load(memory_order order=memory_order_seq_cst) const volatile + { + T v=*reinterpret_cast(&i); + fence_after(order); + return v; + } + void store(T v, memory_order order=memory_order_seq_cst) volatile + { + fence_before(order); + *reinterpret_cast(&i)=(int)v; + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + fence_before(success_order); + int current, success; + __asm__ __volatile__( + "1: ldl_l %2, %4\n" + "cmpeq %2, %0, %3\n" + "mov %2, %0\n" + "beq %3, 3f\n" + "stl_c %1, %4\n" + "2:\n" + + ".subsection 2\n" + "3: mov %3, %1\n" + "br 2b\n" + ".previous\n" + + : "+&r" (expected), "+&r" (desired), "=&r"(current), "=&r"(success) + : "m" (i) + : + ); + if (desired) fence_after(success_order); + else fence_after(failure_order); + return desired; + } + + bool is_lock_free(void) const volatile {return true;} +protected: + inline T fetch_add_var(T c, memory_order order) volatile + { + fence_before(order); + T original, modified; + __asm__ __volatile__( + "1: ldl_l %0, %2\n" + "addl %0, %3, %1\n" + "stl_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), "=&r" (modified) + : "m" (i), "r" (c) + : + ); + fence_after(order); + return original; + } + inline T fetch_inc(memory_order order) volatile + { + fence_before(order); + int original, modified; + __asm__ __volatile__( + "1: ldl_l %0, %2\n" + "addl %0, 1, %1\n" + "stl_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), "=&r" (modified) + : "m" (i) + : + ); + fence_after(order); + return original; + } + inline T fetch_dec(memory_order order) volatile + { + fence_before(order); + int original, modified; + __asm__ __volatile__( + "1: ldl_l %0, %2\n" 
+ "subl %0, 1, %1\n" + "stl_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), "=&r" (modified) + : "m" (i) + : + ); + fence_after(order); + return original; + } +private: + T i; +}; + +template +class atomic_alpha_64 { +public: + typedef T integral_type; + explicit atomic_alpha_64(T v) : i(v) {} + atomic_alpha_64() {} + T load(memory_order order=memory_order_seq_cst) const volatile + { + T v=*reinterpret_cast(&i); + fence_after(order); + return v; + } + void store(T v, memory_order order=memory_order_seq_cst) volatile + { + fence_before(order); + *reinterpret_cast(&i)=v; + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + fence_before(success_order); + int current, success; + __asm__ __volatile__( + "1: ldq_l %2, %4\n" + "cmpeq %2, %0, %3\n" + "mov %2, %0\n" + "beq %3, 3f\n" + "stq_c %1, %4\n" + "2:\n" + + ".subsection 2\n" + "3: mov %3, %1\n" + "br 2b\n" + ".previous\n" + + : "+&r" (expected), "+&r" (desired), "=&r"(current), "=&r"(success) + : "m" (i) + : + ); + if (desired) fence_after(success_order); + else fence_after(failure_order); + return desired; + } + + bool is_lock_free(void) const volatile {return true;} +protected: + inline T fetch_add_var(T c, memory_order order) volatile + { + fence_before(order); + T original, modified; + __asm__ __volatile__( + "1: ldq_l %0, %2\n" + "addq %0, %3, %1\n" + "stq_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), "=&r" (modified) + : "m" (i), "r" (c) + : + ); + fence_after(order); + return original; + } + inline T fetch_inc(memory_order order) volatile + { + fence_before(order); + T original, modified; + __asm__ __volatile__( + "1: ldq_l %0, %2\n" + "addq %0, 1, %1\n" + "stq_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), "=&r" (modified) + : "m" (i) + : + ); + fence_after(order); + return original; + } + inline T fetch_dec(memory_order order) volatile + { + fence_before(order); + T original, modified; + __asm__ __volatile__( + "1: ldq_l %0, %2\n" + "subq %0, 1, %1\n" + "stq_c %1, %2\n" + "beq %1, 2f\n" + + ".subsection 2\n" + "2: br 1b\n" + ".previous\n" + + : "=&r" (original), "=&r" (modified) + : "m" (i) + : + ); + fence_after(order); + return original; + } +private: + T i; +}; + +template +class platform_atomic_integral : public build_atomic_from_typical > > { +public: + typedef build_atomic_from_typical > > super; + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; + +template +class platform_atomic_integral : public build_atomic_from_typical > > { +public: + typedef build_atomic_from_typical > > super; + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; + +template +class platform_atomic_integral: public build_atomic_from_larger_type, T> { +public: + typedef build_atomic_from_larger_type, T> super; + + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; + +template +class platform_atomic_integral: public build_atomic_from_larger_type, T> { +public: + typedef build_atomic_from_larger_type, T> super; + + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; + +} +} +} + +#endif diff --git a/include/boost/atomic/detail/gcc-armv6+.hpp b/include/boost/atomic/detail/gcc-armv6+.hpp new file mode 100644 index 0000000..04a5bda --- 
/dev/null
+++ b/include/boost/atomic/detail/gcc-armv6+.hpp
@@ -0,0 +1,299 @@
+#ifndef BOOST_DETAIL_ATOMIC_GCC_ARMV6P_HPP
+#define BOOST_DETAIL_ATOMIC_GCC_ARMV6P_HPP
+
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// Copyright (c) 2009 Helge Bahmann
+// Copyright (c) 2009 Phil Endecott
+// ARM Code by Phil Endecott, based on other architectures.
+
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/base.hpp>
+#include <boost/atomic/detail/builder.hpp>
+
+// From the ARM Architecture Reference Manual for architecture v6:
+//
+// LDREX{<cond>} <Rd>, [<Rn>]
+// <Rd> Specifies the destination register for the memory word addressed by <Rn>
+// <Rn> Specifies the register containing the address.
+//
+// STREX{<cond>} <Rd>, <Rm>, [<Rn>]
+// <Rd> Specifies the destination register for the returned status value.
+//      0 if the operation updates memory
+//      1 if the operation fails to update memory
+// <Rm> Specifies the register containing the word to be stored to memory.
+// <Rn> Specifies the register containing the address.
+// Rd must not be the same register as Rm or Rn.
+//
+// ARM v7 is like ARM v6 plus:
+// There are half-word and byte versions of the LDREX and STREX instructions,
+// LDREXH, LDREXB, STREXH and STREXB.
+// There are also double-word versions, LDREXD and STREXD.
+// (Actually it looks like these are available from version 6k onwards.)
+// FIXME these are not yet used; should be mostly a matter of copy-and-paste.
+// I think you can supply an immediate offset to the address.
+//
+// A memory barrier is effected using a "co-processor 15" instruction,
+// though a separate assembler mnemonic is available for it in v7.
+
+namespace boost {
+namespace detail {
+namespace atomic {
+
+
+// "Thumb 1" is a subset of the ARM instruction set that uses a 16-bit encoding. It
+// doesn't include all instructions and in particular it doesn't include the co-processor
+// instruction used for the memory barrier or the load-locked/store-conditional
+// instructions. So, if we're compiling in "Thumb 1" mode, we need to wrap all of our
+// asm blocks with code to temporarily change to ARM mode.
+//
+// You can only change between ARM and Thumb modes when branching using the bx instruction.
+// bx takes an address specified in a register. The least significant bit of the address
+// indicates the mode, so 1 is added to indicate that the destination code is Thumb.
+// A temporary register is needed for the address and is passed as an argument to these
+// macros. It must be one of the "low" registers accessible to Thumb code, specified
+// using the "l" attribute in the asm statement.
+//
+// Architecture v7 introduces "Thumb 2", which does include (almost?) all of the ARM
+// instruction set. So in v7 we don't need to change to ARM mode; we can write "universal
+// assembler" which will assemble to Thumb 2 or ARM code as appropriate. The only thing
+// we need to do to make this "universal" assembler mode work is to insert "IT" instructions
+// to annotate the conditional instructions. These are ignored in other modes (e.g. v6),
+// so they can always be present.
+
+#if defined(__thumb__) && !defined(__ARM_ARCH_7A__)
+// FIXME also other v7 variants.
+#define BOOST_ATOMIC_ARM_ASM_START(TMPREG) "adr " #TMPREG ", 1f\n" "bx " #TMPREG "\n" ".arm\n" ".align 4\n" "1: "
+#define BOOST_ATOMIC_ARM_ASM_END(TMPREG)   "adr " #TMPREG ", 1f + 1\n" "bx " #TMPREG "\n" ".thumb\n" ".align 2\n" "1: "
+
+#else
+// The tmpreg is wasted in this case, which is non-optimal.
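// For reference, in Thumb-1 mode the non-empty START/END definitions above
// expand roughly to the following (assuming the compiler picked r3 for the
// "l"-constrained temporary):
//
//     adr r3, 1f      @ take the address of the label below
//     bx  r3          @ bit 0 clear: switch to ARM mode
//     .arm
//     .align 4
//     1:              @ ...ARM-encoded asm body goes here...
//     adr r3, 1f + 1  @ "+ 1" sets bit 0: switch back to Thumb
//     bx  r3
//     .thumb
//     .align 2
//     1: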
+#define BOOST_ATOMIC_ARM_ASM_START(TMPREG) +#define BOOST_ATOMIC_ARM_ASM_END(TMPREG) +#endif + + +#if defined(__ARM_ARCH_7A__) +// FIXME ditto. +#define BOOST_ATOMIC_ARM_DMB "dmb\n" +#else +#define BOOST_ATOMIC_ARM_DMB "mcr\tp15, 0, r0, c7, c10, 5\n" +#endif + +// There is also a "Data Synchronisation Barrier" DSB; this exists in v6 as another co-processor +// instruction like the above. + + +static inline void fence_before(memory_order order) +{ + // FIXME I don't understand enough about barriers to know what this should do. + switch(order) { + case memory_order_release: + case memory_order_acq_rel: + case memory_order_seq_cst: + int brtmp; + __asm__ __volatile__ ( + BOOST_ATOMIC_ARM_ASM_START(%0) + BOOST_ATOMIC_ARM_DMB + BOOST_ATOMIC_ARM_ASM_END(%0) + : "=&l" (brtmp) :: "memory" + ); + default:; + } +} + +static inline void fence_after(memory_order order) +{ + // FIXME I don't understand enough about barriers to know what this should do. + switch(order) { + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + int brtmp; + __asm__ __volatile__ ( + BOOST_ATOMIC_ARM_ASM_START(%0) + BOOST_ATOMIC_ARM_DMB + BOOST_ATOMIC_ARM_ASM_END(%0) + : "=&l" (brtmp) :: "memory" + ); + case memory_order_consume: + __asm__ __volatile__ ("" ::: "memory"); + default:; + } +} + +#undef BOOST_ATOMIC_ARM_DMB + + +template +class atomic_arm_4 { +public: + typedef T integral_type; + explicit atomic_arm_4(T v) : i(v) {} + atomic_arm_4() {} + T load(memory_order order=memory_order_seq_cst) const volatile + { + T v=const_cast(i); + fence_after(order); + return v; + } + void store(T v, memory_order order=memory_order_seq_cst) volatile + { + fence_before(order); + const_cast(i)=v; + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + fence_before(success_order); + int success; + int tmp; + __asm__ __volatile__( + BOOST_ATOMIC_ARM_ASM_START(%2) + "mov %1, #0\n" // success = 0 + "ldrex %0, [%3]\n" // expected' = *(&i) + "teq %0, %4\n" // flags = expected'==expected + "ittt eq\n" + "strexeq %2, %5, [%3]\n" // if (flags.equal) *(&i) = desired, tmp = !OK + "teqeq %2, #0\n" // if (flags.equal) flags = tmp==0 + "moveq %1, #1\n" // if (flags.equal) success = 1 + BOOST_ATOMIC_ARM_ASM_END(%2) + : "=&r" (expected), // %0 + "=&r" (success), // %1 + "=&l" (tmp) // %2 + : "r" (&i), // %3 + "r" (expected), // %4 + "r" ((int)desired) // %5 + : "cc" + ); + if (success) fence_after(success_order); + else fence_after(failure_order); + return success; + } + + bool is_lock_free(void) const volatile {return true;} +protected: + inline T fetch_add_var(T c, memory_order order) volatile + { + fence_before(order); + T original, tmp; + int tmp2; + __asm__ __volatile__( + BOOST_ATOMIC_ARM_ASM_START(%2) + "1: ldrex %0, [%3]\n" // original = *(&i) + "add %1, %0, %4\n" // tmp = original + c + "strex %2, %1, [%3]\n" // *(&i) = tmp; tmp2 = !OK + "teq %2, #0\n" // flags = tmp2==0 + "it ne\n" + "bne 1b\n" // if (!flags.equal) goto 1 + BOOST_ATOMIC_ARM_ASM_END(%2) + : "=&r" (original), // %0 + "=&r" (tmp), // %1 + "=&l" (tmp2) // %2 + : "r" (&i), // %3 + "r" (c) // %4 + : "cc" + ); + fence_after(order); + return original; + } + inline T fetch_inc(memory_order order) volatile + { + fence_before(order); + T original, tmp; + int tmp2; + __asm__ __volatile__( + BOOST_ATOMIC_ARM_ASM_START(%2) + "1: ldrex %0, [%3]\n" // original = *(&i) + "add %1, %0, #1\n" // tmp = original + 1 + "strex %2, %1, [%3]\n" // *(&i) = tmp; tmp2 = !OK + "teq %2, 
#0\n" // flags = tmp2==0 + "it ne\n" + "bne 1b\n" // if (!flags.equal) goto 1 + BOOST_ATOMIC_ARM_ASM_END(%2) + : "=&r" (original), // %0 + "=&r" (tmp), // %1 + "=&l" (tmp2) // %2 + : "r" (&i) // %3 + : "cc" + ); + fence_after(order); + return original; + } + inline T fetch_dec(memory_order order) volatile + { + fence_before(order); + T original, tmp; + int tmp2; + __asm__ __volatile__( + BOOST_ATOMIC_ARM_ASM_START(%2) + "1: ldrex %0, [%3]\n" // original = *(&i) + "sub %1, %0, #1\n" // tmp = original - 1 + "strex %2, %1, [%3]\n" // *(&i) = tmp; tmp2 = !OK + "teq %2, #0\n" // flags = tmp2==0 + "it ne\n" + "bne 1b\n" // if (!flags.equal) goto 1 + BOOST_ATOMIC_ARM_ASM_END(%2) + : "=&r" (original), // %0 + "=&r" (tmp), // %1 + "=&l" (tmp2) // %2 + : "r" (&i) // %3 + : "cc" + ); + fence_after(order); + return original; + } +private: + T i; +}; + + +// #ifdef _ARM_ARCH_7 +// FIXME TODO can add native byte and halfword version here + + +template +class platform_atomic_integral : public build_atomic_from_typical > > { +public: + typedef build_atomic_from_typical > > super; + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; + +template +class platform_atomic_integral: public build_atomic_from_larger_type, T> { +public: + typedef build_atomic_from_larger_type, T> super; + + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; + +template +class platform_atomic_integral: public build_atomic_from_larger_type, T> { +public: + typedef build_atomic_from_larger_type, T> super; + + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; + + + +typedef build_exchange > platform_atomic_address; + +} +} +} + +#undef BOOST_ATOMIC_ARM_ASM_START +#undef BOOST_ATOMIC_ARM_ASM_END + + +#endif diff --git a/include/boost/atomic/detail/gcc-ppc.hpp b/include/boost/atomic/detail/gcc-ppc.hpp new file mode 100644 index 0000000..1041e73 --- /dev/null +++ b/include/boost/atomic/detail/gcc-ppc.hpp @@ -0,0 +1,351 @@ +#ifndef BOOST_DETAIL_ATOMIC_GCC_PPC_HPP +#define BOOST_DETAIL_ATOMIC_GCC_PPC_HPP + +// Copyright (c) 2009 Helge Bahmann +// +// Distributed under the Boost Software License, Version 1.0. +// See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +#include +#include + +/* + Refer to: Motorola: "Programming Environments Manual for 32-Bit + Implementations of the PowerPC Architecture", Appendix E: + "Synchronization Programming Examples" for an explanation of what is + going on here (can be found on the web at various places by the + name "MPCFPE32B.pdf", Google is your friend...) + */ + +namespace boost { +namespace detail { +namespace atomic { + +static inline void fence_before(memory_order order) +{ + switch(order) { + case memory_order_release: + case memory_order_acq_rel: +#if defined(__powerpc64__) + __asm__ __volatile__ ("lwsync" ::: "memory"); + break; +#endif + case memory_order_seq_cst: + __asm__ __volatile__ ("sync" ::: "memory"); + default:; + } +} + +/* Note on the barrier instructions used by fence_after and +atomic_thread_fence: the "isync" instruction normally does +not wait for memory-accessing operations to complete, the +"trick" is to introduce a conditional branch that formally +depends on the memory-accessing instruction -- isync waits +until the branch can be resolved and thus implicitly until +the memory access completes. 
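Concretely, the dependent-branch pattern is (a sketch, with the freshly
loaded value in %0):

    lwz  %0, 0(%1)  # the load being ordered
    cmpw %0, %0     # compare the loaded value with itself
    bne- 1f         # can never be taken, but depends on the load
 1: isync           # waits until the branch above is resolved

The comparison can never fail, so the branch is dead code, yet it gives
isync the data dependency it needs on the load's result.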
+ +This means that the load(memory_order_relaxed) instruction +includes this branch, even though no barrier would be required +here, but as a consequence atomic_thread_fence(memory_order_acquire) +would have to be implemented using "sync" instead of "isync". +The following simple cost-analysis provides the rationale +for this decision: + +- isync: about ~12 cycles +- sync: about ~50 cycles +- "spurious" branch after load: 1-2 cycles +- making the right decision: priceless + +*/ + +static inline void fence_after(memory_order order) +{ + switch(order) { + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + __asm__ __volatile__ ("isync"); + case memory_order_consume: + __asm__ __volatile__ ("" ::: "memory"); + default:; + } +} + +template<> +inline void platform_atomic_thread_fence(memory_order order) +{ + switch(order) { + case memory_order_acquire: + __asm__ __volatile__ ("isync" ::: "memory"); + break; + case memory_order_release: + case memory_order_acq_rel: +#if defined(__powerpc64__) + __asm__ __volatile__ ("lwsync" ::: "memory"); + break; +#endif + case memory_order_seq_cst: + __asm__ __volatile__ ("sync" ::: "memory"); + default:; + } +} + + +/* note: the __asm__ constraint "b" instructs gcc to use any register +except r0; this is required because r0 is not allowed in +some places. Since I am sometimes unsure if it is allowed +or not just play it safe and avoid r0 entirely -- ppc isn't +exactly register-starved, so this really should not matter :) */ + +template +class atomic_ppc_32 { +public: + typedef T integral_type; + explicit atomic_ppc_32(T v) : i(v) {} + atomic_ppc_32() {} + T load(memory_order order=memory_order_seq_cst) const volatile + { + T v=*reinterpret_cast(&i); + __asm__ __volatile__ ( + "cmpw %0, %0\n" + "bne- 1f\n" + "1f:\n" + : "+b"(v)); + fence_after(order); + return v; + } + void store(T v, memory_order order=memory_order_seq_cst) volatile + { + fence_before(order); + *reinterpret_cast(&i)=v; + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + fence_before(success_order); + int success; + __asm__ __volatile__( + "lwarx %0,0,%2\n" + "cmpw %0, %3\n" + "bne- 2f\n" + "stwcx. %4,0,%2\n" + "bne- 2f\n" + "addi %1,0,1\n" + "1:" + + ".subsection 2\n" + "2: addi %1,0,0\n" + "b 1b\n" + ".previous\n" + : "=&b" (expected), "=&b" (success) + : "b" (&i), "b" (expected), "b" ((int)desired) + ); + if (success) fence_after(success_order); + else fence_after(failure_order); + return success; + } + + bool is_lock_free(void) const volatile {return true;} +protected: + inline T fetch_add_var(T c, memory_order order) volatile + { + fence_before(order); + T original, tmp; + __asm__ __volatile__( + "1: lwarx %0,0,%2\n" + "add %1,%0,%3\n" + "stwcx. %1,0,%2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp) + : "b" (&i), "b" (c) + : "cc"); + fence_after(order); + return original; + } + inline T fetch_inc(memory_order order) volatile + { + fence_before(order); + T original, tmp; + __asm__ __volatile__( + "1: lwarx %0,0,%2\n" + "addi %1,%0,1\n" + "stwcx. %1,0,%2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp) + : "b" (&i) + : "cc"); + fence_after(order); + return original; + } + inline T fetch_dec(memory_order order) volatile + { + fence_before(order); + T original, tmp; + __asm__ __volatile__( + "1: lwarx %0,0,%2\n" + "addi %1,%0,-1\n" + "stwcx. 
%1,0,%2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp) + : "b" (&i) + : "cc"); + fence_after(order); + return original; + } +private: + T i; +}; + +#if defined(__powerpc64__) + +#warning Untested code -- please inform me if it works + +template +class atomic_ppc_64 { +public: + typedef T integral_type; + explicit atomic_ppc_64(T v) : i(v) {} + atomic_ppc_64() {} + T load(memory_order order=memory_order_seq_cst) const volatile + { + T v=*reinterpret_cast(&i); + __asm__ __volatile__ ( + "cmpw %0, %0\n" + "bne- 1f\n" + "1f:\n" + : "+b"(v)); + fence_after(order); + return v; + } + void store(T v, memory_order order=memory_order_seq_cst) volatile + { + fence_before(order); + *reinterpret_cast(&i)=v; + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + fence_before(success_order); + int success; + __asm__ __volatile__( + "ldarx %0,0,%2\n" + "cmpw %0, %3\n" + "bne- 2f\n" + "stdcx. %4,0,%2\n" + "bne- 2f\n" + "addi %1,0,1\n" + "1:" + + ".subsection 2\n" + "2: addi %1,0,0\n" + "b 1b\n" + ".previous\n" + : "=&b" (expected), "=&b" (success) + : "b" (&i), "b" (expected), "b" ((int)desired) + ); + if (success) fence_after(success_order); + else fence_after(failure_order); + fence_after(order); + return success; + } + + bool is_lock_free(void) const volatile {return true;} +protected: + inline T fetch_add_var(T c, memory_order order) volatile + { + fence_before(order); + T original, tmp; + __asm__ __volatile__( + "1: ldarx %0,0,%2\n" + "add %1,%0,%3\n" + "stdcx. %1,0,%2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp) + : "b" (&i), "b" (c) + : "cc"); + fence_after(order); + return original; + } + inline T fetch_inc(memory_order order) volatile + { + fence_before(order); + T original, tmp; + __asm__ __volatile__( + "1: ldarx %0,0,%2\n" + "addi %1,%0,1\n" + "stdcx. %1,0,%2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp) + : "b" (&i) + : "cc"); + fence_after(order); + return original; + } + inline T fetch_dec(memory_order order) volatile + { + fence_before(order); + T original, tmp; + __asm__ __volatile__( + "1: ldarx %0,0,%2\n" + "addi %1,%0,-1\n" + "stdcx. 
%1,0,%2\n" + "bne- 1b\n" + : "=&b" (original), "=&b" (tmp) + : "b" (&i) + : "cc"); + fence_after(order); + return original; + } +private: + T i; +}; +#endif + +template +class platform_atomic_integral : public build_atomic_from_typical > > { +public: + typedef build_atomic_from_typical > > super; + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; + +template +class platform_atomic_integral: public build_atomic_from_larger_type, T> { +public: + typedef build_atomic_from_larger_type, T> super; + + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; + +template +class platform_atomic_integral: public build_atomic_from_larger_type, T> { +public: + typedef build_atomic_from_larger_type, T> super; + + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; + +#if defined(__powerpc64__) +template +class platform_atomic_integral : public build_atomic_from_typical > > { +public: + typedef build_atomic_from_typical > > super; + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; +#endif + +} +} +} + +#endif diff --git a/include/boost/atomic/detail/gcc-x86.hpp b/include/boost/atomic/detail/gcc-x86.hpp new file mode 100644 index 0000000..6f1e83a --- /dev/null +++ b/include/boost/atomic/detail/gcc-x86.hpp @@ -0,0 +1,546 @@ +#ifndef BOOST_DETAIL_ATOMIC_GCC_X86_HPP +#define BOOST_DETAIL_ATOMIC_GCC_X86_HPP + +// Copyright (c) 2009 Helge Bahmann +// +// Distributed under the Boost Software License, Version 1.0. +// See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +#include +#include + +namespace boost { +namespace detail { +namespace atomic { + +static inline void fence_before(memory_order order) +{ + switch(order) { + case memory_order_consume: + case memory_order_release: + case memory_order_acq_rel: + case memory_order_seq_cst: + __asm__ __volatile__ ("" ::: "memory"); + default:; + } +} + +static inline void fence_after(memory_order order) +{ + switch(order) { + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + __asm__ __volatile__ ("" ::: "memory"); + default:; + } +} + +static inline void full_fence(void) +{ +#if (defined(__amd64__) || defined(__x86_64__)) + __asm__ __volatile__("mfence" ::: "memory"); +#else + /* could use mfence iff i686, but it does not appear to matter much */ + __asm__ __volatile__("lock; addl $0, (%%esp)" ::: "memory"); +#endif +} + +static inline void fence_after_load(memory_order order) +{ + switch(order) { + case memory_order_seq_cst: + full_fence(); + case memory_order_acquire: + case memory_order_acq_rel: + __asm__ __volatile__ ("" ::: "memory"); + default:; + } +} + +template<> +inline void platform_atomic_thread_fence(memory_order order) +{ + switch(order) { + case memory_order_seq_cst: + full_fence(); + case memory_order_acquire: + case memory_order_consume: + case memory_order_acq_rel: + case memory_order_release: + __asm__ __volatile__ ("" ::: "memory"); + default:; + } +} + +template +class atomic_x86_8 { +public: + explicit atomic_x86_8(T v) : i(v) {} + atomic_x86_8() {} + T load(memory_order order=memory_order_seq_cst) const volatile + { + T v=*reinterpret_cast(&i); + fence_after_load(order); + return v; + } + void store(T v, memory_order order=memory_order_seq_cst) volatile + { + if (order!=memory_order_seq_cst) { + fence_before(order); + *reinterpret_cast(&i)=v; + } else { + exchange(v); + } + } + bool 
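/* lock; cmpxchg compares the accumulator (preloaded with `expected`) with
   the memory operand; on a match it stores `desired` and sets ZF,
   otherwise it loads the observed value into the accumulator.  Either way
   the accumulator afterwards holds the value that was actually in memory,
   which is exactly what compare_exchange must report back through
   `expected`. */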
compare_exchange_strong( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + fence_before(success_order); + T prev=expected; + __asm__ __volatile__("lock; cmpxchgb %1, %2\n" : "=a" (prev) : "q" (desired), "m" (i), "a" (expected) : "memory"); + bool success=(prev==expected); + if (success) fence_after(success_order); + else fence_after(failure_order); + expected=prev; + return success; + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + return compare_exchange_strong(expected, desired, success_order, failure_order); + } + T exchange(T r, memory_order order=memory_order_seq_cst) volatile + { + __asm__ __volatile__("xchgb %0, %1\n" : "=q" (r) : "m"(i), "0" (r) : "memory"); + return r; + } + T fetch_add(T c, memory_order order=memory_order_seq_cst) volatile + { + __asm__ __volatile__("lock; xaddb %0, %1" : "+q" (c), "+m" (i) :: "memory"); + return c; + } + + bool is_lock_free(void) const volatile {return true;} +protected: + typedef T integral_type; +private: + T i; +}; + +template +class platform_atomic_integral : public build_atomic_from_add > { +public: + typedef build_atomic_from_add > super; + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; + +template +class atomic_x86_16 { +public: + explicit atomic_x86_16(T v) : i(v) {} + atomic_x86_16() {} + T load(memory_order order=memory_order_seq_cst) const volatile + { + T v=*reinterpret_cast(&i); + fence_after_load(order); + return v; + } + void store(T v, memory_order order=memory_order_seq_cst) volatile + { + if (order!=memory_order_seq_cst) { + fence_before(order); + *reinterpret_cast(&i)=v; + } else { + exchange(v); + } + } + bool compare_exchange_strong( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + fence_before(success_order); + T prev=expected; + __asm__ __volatile__("lock; cmpxchgw %1, %2\n" : "=a" (prev) : "q" (desired), "m" (i), "a" (expected) : "memory"); + bool success=(prev==expected); + if (success) fence_after(success_order); + else fence_after(failure_order); + expected=prev; + return success; + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + return compare_exchange_strong(expected, desired, success_order, failure_order); + } + T exchange(T r, memory_order order=memory_order_seq_cst) volatile + { + __asm__ __volatile__("xchgw %0, %1\n" : "=r" (r) : "m"(i), "0" (r) : "memory"); + return r; + } + T fetch_add(T c, memory_order order=memory_order_seq_cst) volatile + { + __asm__ __volatile__("lock; xaddw %0, %1" : "+r" (c), "+m" (i) :: "memory"); + return c; + } + + bool is_lock_free(void) const volatile {return true;} +protected: + typedef T integral_type; +private: + T i; +}; + +template +class platform_atomic_integral : public build_atomic_from_add > { +public: + typedef build_atomic_from_add > super; + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; + +template +class atomic_x86_32 { +public: + explicit atomic_x86_32(T v) : i(v) {} + atomic_x86_32() {} + T load(memory_order order=memory_order_seq_cst) const volatile + { + T v=*reinterpret_cast(&i); + fence_after_load(order); + return v; + } + void store(T v, memory_order order=memory_order_seq_cst) volatile + { + if (order!=memory_order_seq_cst) { + fence_before(order); + *reinterpret_cast(&i)=v; + } else { + 
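/* For seq_cst the store is routed through xchg instead of a plain mov:
   x86 allows a store to be reordered past a later load, and xchg with a
   memory operand carries an implicit lock prefix, so it acts as the full
   barrier that sequential consistency requires. */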
exchange(v); + } + } + bool compare_exchange_strong( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + fence_before(success_order); + T prev=expected; + __asm__ __volatile__("lock; cmpxchgl %1, %2\n" : "=a" (prev) : "q" (desired), "m" (i), "a" (expected) : "memory"); + bool success=(prev==expected); + if (success) fence_after(success_order); + else fence_after(failure_order); + expected=prev; + return success; + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + return compare_exchange_strong(expected, desired, success_order, failure_order); + } + T exchange(T r, memory_order order=memory_order_seq_cst) volatile + { + __asm__ __volatile__("xchgl %0, %1\n" : "=r" (r) : "m"(i), "0" (r) : "memory"); + return r; + } + T fetch_add(T c, memory_order order=memory_order_seq_cst) volatile + { + __asm__ __volatile__("lock; xaddl %0, %1" : "+r" (c), "+m" (i) :: "memory"); + return c; + } + + bool is_lock_free(void) const volatile {return true;} +protected: + typedef T integral_type; +private: + T i; +}; + +template +class platform_atomic_integral : public build_atomic_from_add > { +public: + typedef build_atomic_from_add > super; + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; + +#if (defined(__amd64__) || defined(__x86_64__)) +template +class atomic_x86_64 { +public: + explicit atomic_x86_64(T v) : i(v) {} + atomic_x86_64() {} + T load(memory_order order=memory_order_seq_cst) const volatile + { + T v=*reinterpret_cast(&i); + fence_after_load(order); + return v; + } + void store(T v, memory_order order=memory_order_seq_cst) volatile + { + if (order!=memory_order_seq_cst) { + fence_before(order); + *reinterpret_cast(&i)=v; + } else { + exchange(v); + } + } + bool compare_exchange_strong( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + fence_before(success_order); + T prev=expected; + __asm__ __volatile__("lock; cmpxchgq %1, %2\n" : "=a" (prev) : "q" (desired), "m" (i), "a" (expected) : "memory"); + bool success=(prev==expected); + if (success) fence_after(success_order); + else fence_after(failure_order); + expected=prev; + return success; + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + return compare_exchange_strong(expected, desired, success_order, failure_order); + } + T exchange(T r, memory_order order=memory_order_seq_cst) volatile + { + __asm__ __volatile__("xchgq %0, %1\n" : "=r" (r) : "m"(i), "0" (r) : "memory"); + return r; + } + T fetch_add(T c, memory_order order=memory_order_seq_cst) volatile + { + __asm__ __volatile__("lock; xaddq %0, %1" : "+r" (c), "+m" (i) :: "memory"); + return c; + } + + bool is_lock_free(void) const volatile {return true;} +protected: + typedef T integral_type; +private: + T i; +} __attribute__((aligned(8))); + +#elif defined(__i686__) + +template +class atomic_x86_64 { +private: + typedef atomic_x86_64 this_type; +public: + explicit atomic_x86_64(T v) : i(v) {} + atomic_x86_64() {} + + bool compare_exchange_strong( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + long scratch; + fence_before(success_order); + T prev=expected; + /* Make sure ebx is saved and restored properly in case + this object is compiled as "position independent". 
Since + programmers on x86 tend to forget specifying -DPIC or + similar, always assume PIC. + + To make this work uniformly even in the non-PIC case, + setup register constraints such that ebx can not be + used by accident e.g. as base address for the variable + to be modified. Accessing "scratch" should always be okay, + as it can only be placed on the stack (and therefore + accessed through ebp or esp only). + + In theory, could push/pop ebx onto/off the stack, but movs + to a prepared stack slot turn out to be faster. */ + __asm__ __volatile__( + "movl %%ebx, %1\n" + "movl %2, %%ebx\n" + "lock; cmpxchg8b 0(%4)\n" + "movl %1, %%ebx\n" + : "=A" (prev), "=m" (scratch) + : "D" ((long)desired), "c" ((long)(desired>>32)), "S" (&i), "0" (prev) + : "memory"); + bool success=(prev==expected); + if (success) fence_after(success_order); + else fence_after(failure_order); + expected=prev; + return success; + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + return compare_exchange_strong(expected, desired, success_order, failure_order); + } + T exchange(T r, memory_order order=memory_order_seq_cst) volatile + { + T prev=i; + do {} while(!compare_exchange_strong(prev, r, order, memory_order_relaxed)); + return prev; + } + + T load(memory_order order=memory_order_seq_cst) const volatile + { + /* this is a bit problematic -- there is no other + way to atomically load a 64 bit value, but of course + compare_exchange requires write access to the memory + area */ + T expected=i; + do { } while(!const_cast(this)->compare_exchange_strong(expected, expected, order, memory_order_relaxed)); + return expected; + } + void store(T v, memory_order order=memory_order_seq_cst) volatile + { + exchange(v, order); + } + T fetch_add(T c, memory_order order=memory_order_seq_cst) volatile + { + T expected=i, desired;; + do { + desired=expected+c; + } while(!compare_exchange_strong(expected, desired, order, memory_order_relaxed)); + return expected; + } + + bool is_lock_free(void) const volatile {return true;} +protected: + typedef T integral_type; +private: + T i; +} __attribute__((aligned(8))) ; + +#endif + +#if (defined(__amd64__) || defined(__x86_64__)) || defined(__i686__) +template +class platform_atomic_integral : public build_atomic_from_add >{ +public: + typedef build_atomic_from_add > super; + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; +#endif + +// TODO: only use the sync intrinsics as a fallback, prefer inline asm as it +// allows us to do relaxed memory ordering. 
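// A sketch of what that inline-asm fallback could look like (hypothetical,
// not part of this patch): cmpxchg16b compares RDX:RAX against a 16-byte
// aligned memory operand and, on match, stores RCX:RBX; ZF reports the
// outcome.  The __sync_* builtins used below always imply a full barrier,
// while open-coding the instruction would let the caller pick the fences,
// as the TODO above suggests.  __uint128_t is a GCC extension and
// cas128_sketch is an illustrative name only.
#if 0
static inline bool cas128_sketch(volatile __uint128_t *target, /* must be 16-byte aligned */
                                 __uint128_t &expected, __uint128_t desired)
{
	boost::uint64_t expected_lo = (boost::uint64_t)expected;
	boost::uint64_t expected_hi = (boost::uint64_t)(expected >> 64);
	bool success;
	__asm__ __volatile__(
		"lock; cmpxchg16b %1\n"
		"sete %0\n"
		: "=q" (success), "+m" (*target),
		  "+a" (expected_lo), "+d" (expected_hi)
		: "b" ((boost::uint64_t)desired),
		  "c" ((boost::uint64_t)(desired >> 64))
		: "cc", "memory");
	// On failure cmpxchg16b leaves the observed value in RDX:RAX,
	// which is handed back to the caller as usual.
	expected = ((__uint128_t)expected_hi << 64) | expected_lo;
	return success;
}
#endif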
+#if (defined(__amd64__) || defined(__x86_64__)) && \ + defined(BOOST_ATOMIC_HAVE_SSE2) && \ + defined(BOOST_ATOMIC_HAVE_GNU_SYNC_16) && \ + defined(BOOST_ATOMIC_HAVE_GNU_ALIGNED_16) +template +class atomic_x86_128 { +public: + explicit atomic_x86_128(T v) : i(v) {} + atomic_x86_128() {} + T load(memory_order order=memory_order_seq_cst) const volatile + { + T v; + __asm__ __volatile__ ( + "movdqa %1, %%xmm0 ;\n" + "movdqa %%xmm0, %0 ;\n" + : "=m" (v) + : "m" (i) + : "xmm0", "memory" + ); + fence_after_load(order); + return v; + } + void store(T v, memory_order order=memory_order_seq_cst) volatile + { + fence_before(order); + // Atomically stores 128bit value by SSE instruction movdqa + __asm__ __volatile__ ( + "movdqa %1, %%xmm0 ;\n" + "movdqa %%xmm0, %0 ;\n" + : "=m" (i) + : "m" (v) + : "xmm0", "memory" + ); + } + bool compare_exchange_strong( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + T prev = __sync_val_compare_and_swap_16 + (reinterpret_cast(&i), expected, desired); + bool success=(prev==expected); + if (success) fence_after(success_order); + else fence_after(failure_order); + expected=prev; + return success; + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + return compare_exchange_strong(expected, desired, success_order, failure_order); + } + T exchange(T r, memory_order order=memory_order_seq_cst) volatile + { + while (!__sync_bool_compare_and_swap_16 + (reinterpret_cast(&i), i, r)) + {}; + + return r; + } + T fetch_add(T c, memory_order order=memory_order_seq_cst) volatile + { + T expected=i, desired; + do { + desired=expected+c; + } while(!compare_exchange_strong(expected, desired, order, memory_order_relaxed)); + return expected; + } + + bool is_lock_free(void) const volatile {return true;} +protected: + typedef T integral_type; +private: + T i; +} __attribute__((aligned(16))); + +template +class platform_atomic_integral : public build_atomic_from_add >{ +public: + typedef build_atomic_from_add > super; + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; + +#endif + +} +} +} + +#endif diff --git a/include/boost/atomic/detail/generic-cas.hpp b/include/boost/atomic/detail/generic-cas.hpp new file mode 100644 index 0000000..dc14a8b --- /dev/null +++ b/include/boost/atomic/detail/generic-cas.hpp @@ -0,0 +1,192 @@ +#ifndef BOOST_DETAIL_ATOMIC_GENERIC_CAS_HPP +#define BOOST_DETAIL_ATOMIC_GENERIC_CAS_HPP + +// Copyright (c) 2009 Helge Bahmann +// +// Distributed under the Boost Software License, Version 1.0. 
+// See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +#include + +#include +#include +#include + +/* fallback implementation for various compilation targets; +this is *not* efficient, particularly because all operations +are fully fenced (full memory barriers before and after +each operation) */ + +#if defined(__GNUC__) + namespace boost { namespace detail { namespace atomic { + static inline int32_t + fenced_compare_exchange_strong_32(volatile int32_t *ptr, int32_t expected, int32_t desired) + { + return __sync_val_compare_and_swap_4(ptr, expected, desired); + } + #define BOOST_ATOMIC_HAVE_CAS32 1 + + #if (defined(__amd64__) || defined(__x86_64__)) || defined(__i686__) + static inline int64_t + fenced_compare_exchange_strong_64(int64_t *ptr, int64_t expected, int64_t desired) + { + return __sync_val_compare_and_swap_8(ptr, expected, desired); + } + #define BOOST_ATOMIC_HAVE_CAS64 1 + #endif + }}} + +#elif defined(__ICL) || defined(_MSC_VER) + + #if defined(_MSC_VER) + #include + #include + #endif + + namespace boost { namespace detail { namespace atomic { + static inline int32_t + fenced_compare_exchange_strong(int32_t *ptr, int32_t expected, int32_t desired) + { + return _InterlockedCompareExchange(reinterpret_cast(ptr), desired, expected); + } + #define BOOST_ATOMIC_HAVE_CAS32 1 + #if defined(_WIN64) + static inline int64_t + fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired) + { + return _InterlockedCompareExchange64(ptr, desired, expected); + } + #define BOOST_ATOMIC_HAVE_CAS64 1 + #endif + }}} + +#elif (defined(__ICC) || defined(__ECC)) + namespace boost { namespace detail { namespace atomic { + static inline int32_t + fenced_compare_exchange_strong_32(int32_t *ptr, int32_t expected, int32_t desired) + { + return _InterlockedCompareExchange((void*)ptr, desired, expected); + } + #define BOOST_ATOMIC_HAVE_CAS32 1 + #if defined(__x86_64) + static inline int64_t + fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired) + { + return cas64(ptr, expected, desired); + } + #define BOOST_ATOMIC_HAVE_CAS64 1 + #elif defined(__ECC) //IA-64 version + static inline int64_t + fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired) + { + return _InterlockedCompareExchange64((void*)ptr, desired, expected); + } + #define BOOST_ATOMIC_HAVE_CAS64 1 + #endif + }}} + +#elif (defined(__SUNPRO_CC) && defined(__sparc)) + #include + namespace boost { namespace detail { namespace atomic { + static inline int32_t + fenced_compare_exchange_strong_32(int32_t *ptr, int32_t expected, int32_t desired) + { + return atomic_cas_32((volatile unsigned int*)ptr, expected, desired); + } + #define BOOST_ATOMIC_HAVE_CAS32 1 + + /* FIXME: check for 64 bit mode */ + static inline int64_t + fenced_compare_exchange_strong_64(int64_t *ptr, int64_t expected, int64_t desired) + { + return atomic_cas_64((volatile unsigned long long*)ptr, expected, desired); + } + #define BOOST_ATOMIC_HAVE_CAS64 1 + }}} +#endif + + +namespace boost { namespace detail { namespace atomic { + +#ifdef BOOST_ATOMIC_HAVE_CAS32 +template +class atomic_generic_cas32 { +private: + typedef atomic_generic_cas32 this_type; +public: + explicit atomic_generic_cas32(T v) : i((int32_t)v) {} + atomic_generic_cas32() {} + T load(memory_order order=memory_order_seq_cst) const volatile + { + T expected=(T)i; + do { } while(!const_cast(this)->compare_exchange_weak(expected, expected, order, memory_order_relaxed)); + return expected; + } + void store(T 
v, memory_order order=memory_order_seq_cst) volatile + { + exchange(v); + } + bool compare_exchange_strong( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + T found; + found=(T)fenced_compare_exchange_strong_32(&i, (int32_t)expected, (int32_t)desired); + bool success=(found==expected); + expected=found; + return success; + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + return compare_exchange_strong(expected, desired, success_order, failure_order); + } + T exchange(T r, memory_order order=memory_order_seq_cst) volatile + { + T expected=(T)i; + do { } while(!compare_exchange_weak(expected, r, order, memory_order_relaxed)); + return expected; + } + + bool is_lock_free(void) const volatile {return true;} + typedef T integral_type; +private: + mutable int32_t i; +}; + +template +class platform_atomic_integral : public build_atomic_from_exchange > { +public: + typedef build_atomic_from_exchange > super; + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; + +template +class platform_atomic_integral: public build_atomic_from_larger_type, T> { +public: + typedef build_atomic_from_larger_type, T> super; + + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; + +template +class platform_atomic_integral: public build_atomic_from_larger_type, T> { +public: + typedef build_atomic_from_larger_type, T> super; + + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; +#endif + +} } } + +#endif diff --git a/include/boost/atomic/detail/integral-casts.hpp b/include/boost/atomic/detail/integral-casts.hpp new file mode 100644 index 0000000..2b045d2 --- /dev/null +++ b/include/boost/atomic/detail/integral-casts.hpp @@ -0,0 +1,465 @@ +#ifndef BOOST_DETAIL_ATOMIC_INTEGRAL_CASTS_HPP +#define BOOST_DETAIL_ATOMIC_INTEGRAL_CASTS_HPP + +// Copyright (c) 2009 Helge Bahmann +// +// Distributed under the Boost Software License, Version 1.0. 
+// See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +#include +#include + +namespace boost { namespace detail { namespace atomic { + +template +class platform_atomic : private platform_atomic_integral { +public: + typedef platform_atomic_integral super; +#if defined(BOOST_ATOMIC_ENFORCE_PODNESS) + typedef union { T e; boost::uint8_t i;} conv; +#endif + + platform_atomic() {} + explicit platform_atomic(T t) : super(to_integral(t)) + { + } + + void store(T t, memory_order order=memory_order_seq_cst) volatile + { + super::store(to_integral(t), order); + } + T load(memory_order order=memory_order_seq_cst) volatile const + { + return from_integral(super::load(order)); + } + bool compare_exchange_strong( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + boost::uint8_t _expected, _desired; + _expected=to_integral(expected); + _desired=to_integral(desired); + bool success=super::compare_exchange_strong(_expected, _desired, success_order, failure_order); + expected=from_integral(_expected); + return success; + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + boost::uint8_t _expected, _desired; + _expected=to_integral(expected); + _desired=to_integral(desired); + bool success=super::compare_exchange_weak(_expected, _desired, success_order, failure_order); + expected=from_integral(_expected); + return success; + } + + T exchange(T replacement, memory_order order=memory_order_seq_cst) volatile + { + return from_integral(super::exchange(to_integral(replacement), order)); + } + + operator T(void) const volatile {return load();} + T operator=(T v) volatile {store(v); return v;} + + using super::is_lock_free; +protected: + static inline boost::uint8_t to_integral(T &t) + { + boost::uint8_t tmp; + memcpy(&tmp, &t, sizeof(t)); + return tmp; + } + static inline T from_integral(boost::uint8_t t) + { + T tmp; + memcpy(&tmp, &t, sizeof(t)); + return tmp; + } +}; + +template +class platform_atomic : private platform_atomic_integral { +public: + typedef platform_atomic_integral super; +#if defined(BOOST_ATOMIC_ENFORCE_PODNESS) + typedef union { T e; boost::uint16_t i;} conv; +#endif + + platform_atomic() {} + explicit platform_atomic(T t) : super(to_integral(t)) + { + } + + void store(T t, memory_order order=memory_order_seq_cst) volatile + { + super::store(to_integral(t), order); + } + T load(memory_order order=memory_order_seq_cst) volatile const + { + return from_integral(super::load(order)); + } + bool compare_exchange_strong( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + boost::uint16_t _expected, _desired; + _expected=to_integral(expected); + _desired=to_integral(desired); + bool success=super::compare_exchange_strong(_expected, _desired, success_order, failure_order); + expected=from_integral(_expected); + return success; + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + boost::uint16_t _expected, _desired; + _expected=to_integral(expected); + _desired=to_integral(desired); + bool success=super::compare_exchange_weak(_expected, _desired, success_order, failure_order); + expected=from_integral(_expected); + return success; + } + + T exchange(T replacement, memory_order order=memory_order_seq_cst) volatile + { + return from_integral(super::exchange(to_integral(replacement), order)); + } 
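/* to_integral/from_integral do their type punning through memcpy rather
   than a cast: this specialization is only selected when sizeof(T) matches
   the integer width exactly, and memcpy sidesteps the alignment and
   strict-aliasing problems a reinterpret_cast of the object representation
   could hit.  Compilers generally optimize the fixed-size memcpy away. */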
+ + operator T(void) const volatile {return load();} + T operator=(T v) volatile {store(v); return v;} + + using super::is_lock_free; +protected: + static inline boost::uint16_t to_integral(T &t) + { + boost::uint16_t tmp; + memcpy(&tmp, &t, sizeof(t)); + return tmp; + } + static inline T from_integral(boost::uint16_t t) + { + T tmp; + memcpy(&tmp, &t, sizeof(t)); + return tmp; + } +}; + +template +class platform_atomic : private platform_atomic_integral { +public: + typedef platform_atomic_integral super; +#if defined(BOOST_ATOMIC_ENFORCE_PODNESS) + typedef union { T e; boost::uint32_t i;} conv; +#endif + + platform_atomic() {} + explicit platform_atomic(T t) : super(to_integral(t)) + { + } + + void store(T t, memory_order order=memory_order_seq_cst) volatile + { + super::store(to_integral(t), order); + } + T load(memory_order order=memory_order_seq_cst) volatile const + { + return from_integral(super::load(order)); + } + bool compare_exchange_strong( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + boost::uint32_t _expected, _desired; + _expected=to_integral(expected); + _desired=to_integral(desired); + bool success=super::compare_exchange_strong(_expected, _desired, success_order, failure_order); + expected=from_integral(_expected); + return success; + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + boost::uint32_t _expected, _desired; + _expected=to_integral(expected); + _desired=to_integral(desired); + bool success=super::compare_exchange_weak(_expected, _desired, success_order, failure_order); + expected=from_integral(_expected); + return success; + } + + T exchange(T replacement, memory_order order=memory_order_seq_cst) volatile + { + return from_integral(super::exchange(to_integral(replacement), order)); + } + + operator T(void) const volatile {return load();} + T operator=(T v) volatile {store(v); return v;} + + using super::is_lock_free; +protected: + static inline boost::uint32_t to_integral(T &t) + { + boost::uint32_t tmp; + memcpy(&tmp, &t, sizeof(t)); + return tmp; + } + static inline T from_integral(boost::uint32_t t) + { + T tmp; + memcpy(&tmp, &t, sizeof(t)); + return tmp; + } +}; + +template +class platform_atomic : private platform_atomic_integral { +public: + typedef platform_atomic_integral super; +#if defined(BOOST_ATOMIC_ENFORCE_PODNESS) + typedef union { T e; boost::uint64_t i;} conv; +#endif + + platform_atomic() {} + explicit platform_atomic(T t) : super(to_integral(t)) + { + } + + void store(T t, memory_order order=memory_order_seq_cst) volatile + { + super::store(to_integral(t), order); + } + T load(memory_order order=memory_order_seq_cst) volatile const + { + return from_integral(super::load(order)); + } + bool compare_exchange_strong( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + boost::uint64_t _expected, _desired; + _expected=to_integral(expected); + _desired=to_integral(desired); + bool success=super::compare_exchange_strong(_expected, _desired, success_order, failure_order); + expected=from_integral(_expected); + return success; + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + boost::uint64_t _expected, _desired; + _expected=to_integral(expected); + _desired=to_integral(desired); + bool success=super::compare_exchange_weak(_expected, _desired, success_order, failure_order); + 
expected=from_integral(_expected); + return success; + } + + T exchange(T replacement, memory_order order=memory_order_seq_cst) volatile + { + return from_integral(super::exchange(to_integral(replacement), order)); + } + + operator T(void) const volatile {return load();} + T operator=(T v) volatile {store(v); return v;} + + using super::is_lock_free; +protected: + static inline boost::uint64_t to_integral(T &t) + { + boost::uint64_t tmp; + memcpy(&tmp, &t, sizeof(t)); + return tmp; + } + static inline T from_integral(boost::uint64_t t) + { + T tmp; + memcpy(&tmp, &t, sizeof(t)); + return tmp; + } +}; + +#if (defined(__amd64__) || defined(__x86_64__)) && \ + defined(BOOST_ATOMIC_HAVE_SSE2) && \ + defined(BOOST_ATOMIC_HAVE_GNU_SYNC_16) && \ + defined(BOOST_ATOMIC_HAVE_GNU_ALIGNED_16) && \ + defined(BOOST_ATOMIC_HAVE_GNU_128BIT_INTEGERS) + +#define BOOST_ATOMIC_HAVE_128BIT_SUPPORT + +template +class platform_atomic : private platform_atomic_integral<__uint128_t> { +public: + typedef platform_atomic_integral<__uint128_t> super; +#if defined(BOOST_ATOMIC_ENFORCE_PODNESS) + typedef union { T e; __uint128_t i;} conv; +#endif + + platform_atomic() {} + explicit platform_atomic(T t) : super(to_integral(t)) + { + } + + void store(T t, memory_order order=memory_order_seq_cst) volatile + { + super::store(to_integral(t), order); + } + T load(memory_order order=memory_order_seq_cst) volatile const + { + return from_integral(super::load(order)); + } + bool compare_exchange_strong( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + __uint128_t _expected, _desired; + _expected=to_integral(expected); + _desired=to_integral(desired); + bool success=super::compare_exchange_strong(_expected, _desired, success_order, failure_order); + expected=from_integral(_expected); + return success; + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + __uint128_t _expected, _desired; + _expected=to_integral(expected); + _desired=to_integral(desired); + bool success=super::compare_exchange_weak(_expected, _desired, success_order, failure_order); + expected=from_integral(_expected); + return success; + } + + T exchange(T replacement, memory_order order=memory_order_seq_cst) volatile + { + return from_integral(super::exchange(to_integral(replacement), order)); + } + + operator T(void) const volatile {return load();} + T operator=(T v) volatile {store(v); return v;} + + using super::is_lock_free; +protected: + static inline __uint128_t to_integral(T &t) + { + __uint128_t tmp; + memcpy(&tmp, &t, sizeof(t)); + return tmp; + } + static inline T from_integral(__uint128_t t) + { + T tmp; + memcpy(&tmp, &t, sizeof(t)); + return tmp; + } +}; + +#elif BOOST_MSVC >= 1500 && (defined(_M_IA64) || defined(_M_AMD64)) && defined(BOOST_ATOMIC_HAVE_SSE2) + +#define BOOST_ATOMIC_HAVE_128BIT_SUPPORT + +#include + +template +class platform_atomic : private platform_atomic_integral<__m128i> { +public: + typedef platform_atomic_integral<__m128i> super; +#if defined(BOOST_ATOMIC_ENFORCE_PODNESS) + typedef union { T e; __m128i i;} conv; +#endif + + platform_atomic() {} + explicit platform_atomic(T t) : super(to_integral(t)) + { + } + + void store(T t, memory_order order=memory_order_seq_cst) volatile + { + super::store(to_integral(t), order); + } + T load(memory_order order=memory_order_seq_cst) volatile const + { + return from_integral(super::load(order)); + } + bool compare_exchange_strong( + T &expected, + T 
desired, + memory_order success_order, + memory_order failure_order) volatile + { + __m128i _expected, _desired; + _expected=to_integral(expected); + _desired=to_integral(desired); + bool success=super::compare_exchange_strong(_expected, _desired, success_order, failure_order); + expected=from_integral(_expected); + return success; + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + __m128i _expected, _desired; + _expected=to_integral(expected); + _desired=to_integral(desired); + bool success=super::compare_exchange_weak(_expected, _desired, success_order, failure_order); + expected=from_integral(_expected); + return success; + } + + T exchange(T replacement, memory_order order=memory_order_seq_cst) volatile + { + return from_integral(super::exchange(to_integral(replacement), order)); + } + + operator T(void) const volatile {return load();} + T operator=(T v) volatile {store(v); return v;} + + using super::is_lock_free; +protected: + static inline __m128i to_integral(T &t) + { + __m128i tmp; + memcpy(&tmp, &t, sizeof(t)); + return tmp; + } + static inline T from_integral(__m128i t) + { + T tmp; + memcpy(&tmp, &t, sizeof(t)); + return tmp; + } +}; + +#endif + +} } } + +#endif diff --git a/include/boost/atomic/detail/interlocked.hpp b/include/boost/atomic/detail/interlocked.hpp new file mode 100644 index 0000000..dcb8502 --- /dev/null +++ b/include/boost/atomic/detail/interlocked.hpp @@ -0,0 +1,365 @@ +#ifndef BOOST_DETAIL_ATOMIC_INTERLOCKED_HPP +#define BOOST_DETAIL_ATOMIC_INTERLOCKED_HPP + +// Copyright (c) 2009 Helge Bahmann +// +// Distributed under the Boost Software License, Version 1.0. +// See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +#include + +#include +#include + +namespace boost { +namespace detail { +namespace atomic { + +static inline void full_fence(void) +{ + long tmp; + BOOST_INTERLOCKED_EXCHANGE(&tmp, 0); +} + +template<> +inline void platform_atomic_thread_fence(memory_order order) +{ + switch(order) { + case memory_order_seq_cst: + full_fence(); + default:; + } +} + +static inline void fence_after_load(memory_order order) +{ + switch(order) { + case memory_order_seq_cst: + full_fence(); + case memory_order_acquire: + case memory_order_acq_rel: + default:; + } +} + + +template +class atomic_interlocked_32 { +public: + explicit atomic_interlocked_32(T v) : i(v) {} + atomic_interlocked_32() {} + T load(memory_order order=memory_order_seq_cst) const volatile + { + T v=*reinterpret_cast(&i); + fence_after_load(order); + return v; + } + void store(T v, memory_order order=memory_order_seq_cst) volatile + { + if (order!=memory_order_seq_cst) { + *reinterpret_cast(&i)=v; + } else { + exchange(v); + } + } + bool compare_exchange_strong( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + T prev=expected; + expected=(T)BOOST_INTERLOCKED_COMPARE_EXCHANGE((long *)(&i), (long)desired, (long)expected); + bool success=(prev==expected); + return success; + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + return compare_exchange_strong(expected, desired, success_order, failure_order); + } + T exchange(T r, memory_order order=memory_order_seq_cst) volatile + { + return (T)BOOST_INTERLOCKED_EXCHANGE((long *)&i, (long)r); + } + T fetch_add(T c, memory_order order=memory_order_seq_cst) volatile + { + return 
(T)BOOST_INTERLOCKED_EXCHANGE_ADD((long *)&i, c); + } + + bool is_lock_free(void) const volatile {return true;} + + typedef T integral_type; +private: + T i; +}; + +# if defined(_M_IA64) || defined(_M_AMD64) + +#if defined( BOOST_USE_WINDOWS_H ) + +# include + +# define BOOST_INTERLOCKED_EXCHANGE_ADD64 InterlockedExchangeAdd64 +# define BOOST_INTERLOCKED_EXCHANGE64 InterlockedExchange64 +# define BOOST_INTERLOCKED_COMPARE_EXCHANGE64 InterlockedCompareExchange64 + +#else + +extern "C" boost::int64_t __cdecl _InterlockedExchangeAdd64(boost::int64_t volatile *, boost::int64_t); +extern "C" boost::int64_t __cdecl _InterlockedExchange64(boost::int64_t volatile *, boost::int64_t); +extern "C" boost::int64_t __cdecl _InterlockedCompareExchange64(boost::int64_t volatile *, boost::int64_t, boost::int64_t); + +# pragma intrinsic( _InterlockedExchangeAdd64 ) +# pragma intrinsic( _InterlockedExchange64 ) +# pragma intrinsic( _InterlockedCompareExchange64 ) + +# define BOOST_INTERLOCKED_EXCHANGE_ADD64 _InterlockedExchangeAdd64 +# define BOOST_INTERLOCKED_EXCHANGE64 _InterlockedExchange64 +# define BOOST_INTERLOCKED_COMPARE_EXCHANGE64 _InterlockedCompareExchange64 + +#endif + +template +class __declspec(align(64)) atomic_interlocked_64 { +public: + explicit atomic_interlocked_64(T v) : i(v) {} + atomic_interlocked_64() {} + T load(memory_order order=memory_order_seq_cst) const volatile + { + T v=*reinterpret_cast(&i); + fence_after_load(order); + return v; + } + void store(T v, memory_order order=memory_order_seq_cst) volatile + { + if (order!=memory_order_seq_cst) { + *reinterpret_cast(&i)=v; + } else { + exchange(v); + } + } + bool compare_exchange_strong( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + T prev=expected; + expected=(T)BOOST_INTERLOCKED_COMPARE_EXCHANGE64((boost::int64_t *)(&i), (boost::int64_t)desired, (boost::int64_t)expected); + bool success=(prev==expected); + return success; + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + return compare_exchange_strong(expected, desired, success_order, failure_order); + } + T exchange(T r, memory_order order=memory_order_seq_cst) volatile + { + return (T)BOOST_INTERLOCKED_EXCHANGE64((boost::int64_t *)&i, (boost::int64_t)r); + } + T fetch_add(T c, memory_order order=memory_order_seq_cst) volatile + { + return (T)BOOST_INTERLOCKED_EXCHANGE_ADD64((boost::int64_t *)&i, c); + } + + bool is_lock_free(void) const volatile {return true;} + + typedef T integral_type; +private: + T i; +}; + +// _InterlockedCompareExchange128 is available only starting with VS2008 +#if BOOST_MSVC >= 1500 && defined(BOOST_ATOMIC_HAVE_SSE2) + +#if defined( BOOST_USE_WINDOWS_H ) + +# include +# include + +# define BOOST_INTERLOCKED_COMPARE_EXCHANGE128 InterlockedCompareExchange128 + +# pragma intrinsic( _mm_load_si128 ) +# pragma intrinsic( _mm_store_si128 ) + +#else + +# include + +extern "C" unsigned char __cdecl _InterlockedCompareExchange128( + boost::int64_t volatile *Destination, + boost::int64_t ExchangeHigh, boost::int64_t ExchangeLow, + boost::int64_t *Comparand) +extern "C" __m128i _mm_load_si128(__m128i const*_P); +extern "C" void _mm_store_si128(__m128i *_P, __m128i _B); + +# pragma intrinsic( _InterlockedCompareExchange128 ) +# pragma intrinsic( _mm_load_si128 ) +# pragma intrinsic( _mm_store_si128 ) + +# define BOOST_INTERLOCKED_COMPARE_EXCHANGE128 _InterlockedCompareExchange128 + +#endif + +template +class 
__declspec(align(128)) atomic_interlocked_128 { +public: + explicit atomic_interlocked_128(T v) : i(v) {} + atomic_interlocked_128() {} + T load(memory_order order=memory_order_seq_cst) const volatile + { + T v; + if (order!=memory_order_seq_cst) { + v = _mm_load_si128(*(__m128i*)(&i)); + } + else { + v = *reinterpret_cast(&i); + } + fence_after_load(order); + return v; + } + void store(T v, memory_order order=memory_order_seq_cst) volatile + { + if (order!=memory_order_seq_cst) { + *reinterpret_cast(&i)=v; + } + else { + _mm_store_si128(*(__m128i*)(&i), v); + } + } + bool compare_exchange_strong( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + boost::int64_t* desired_raw = &desired; + T prev = i; + bool success = BOOST_INTERLOCKED_COMPARE_EXCHANGE128( + (boost::int64_t volatile *)(&i), + desired_raw[1], desired_raw[0], (boost::int64_t*)&expected); + if (!success) + expected = prev; + return success; + } + bool compare_exchange_weak( + T &expected, + T desired, + memory_order success_order, + memory_order failure_order) volatile + { + return compare_exchange_strong(expected, desired, success_order, failure_order); + } + T exchange(T r, memory_order order=memory_order_seq_cst) volatile + { + boost::int64_t* desired_raw = &r; + T prev = i; + + while (!BOOST_INTERLOCKED_COMPARE_EXCHANGE128( + (boost::int64_t volatile*)&i, desired_raw[1], desired_raw[0], + (boost::int64_t*)&i)) + {} + + return prev; + } + T fetch_add(T c, memory_order order=memory_order_seq_cst) volatile + { + T expected = i; + __m128i desired; + + do { + desired = _mm_add_epi32(*(__m128i*)(&expected), *(__m128i*)(&c)); + } while (!compare_exchange_strong(expected, *(T*)(&desired), order, memory_order_relaxed)); + + return expected; + } + + bool is_lock_free(void) const volatile {return true;} + + typedef T integral_type; +private: + T i; +}; +#endif + +#endif + +template +class platform_atomic_integral : public build_atomic_from_add > { +public: + typedef build_atomic_from_add > super; + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; + +template +class platform_atomic_integral: public build_atomic_from_larger_type, T> { +public: + typedef build_atomic_from_larger_type, T> super; + + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; + +template +class platform_atomic_integral: public build_atomic_from_larger_type, T> { +public: + typedef build_atomic_from_larger_type, T> super; + + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; + +# if defined(_M_IA64) || defined(_M_AMD64) +template +class platform_atomic_integral + : public build_atomic_from_add > +{ +public: + typedef build_atomic_from_add > super; + + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; + +template<> +class platform_atomic_integral + : public build_atomic_from_add > +{ +public: + typedef build_atomic_from_add > super; + + explicit platform_atomic_integral(void* v) : super(v) {} + platform_atomic_integral(void) {} +}; + +#if BOOST_MSVC >= 1500 && defined(BOOST_ATOMIC_HAVE_SSE2) + +template +class platform_atomic_integral + : public build_atomic_from_add > +{ +public: + typedef build_atomic_from_add > super; + explicit platform_atomic_integral(T v) : super(v) {} + platform_atomic_integral(void) {} +}; + +#endif + +#endif + +} +} +} + +#endif diff --git a/include/boost/atomic/detail/linux-arm.hpp 
new file mode 100644
index 0000000..142c20a
--- /dev/null
+++ b/include/boost/atomic/detail/linux-arm.hpp
@@ -0,0 +1,171 @@
+#ifndef BOOST_DETAIL_ATOMIC_LINUX_ARM_HPP
+#define BOOST_DETAIL_ATOMIC_LINUX_ARM_HPP
+
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// Copyright (c) 2009 Helge Bahmann
+// Copyright (c) 2009 Phil Endecott
+// ARM Code by Phil Endecott, based on other architectures.
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/base.hpp>
+#include <boost/atomic/detail/builder.hpp>
+
+namespace boost {
+namespace detail {
+namespace atomic {
+
+
+// Different ARM processors have different atomic instructions. In particular,
+// architecture versions before v6 (which are still in widespread use, e.g. the
+// Intel/Marvell XScale chips like the one in the NSLU2) have only atomic swap.
+// On Linux the kernel provides some support that lets us abstract away from
+// these differences: it provides emulated CAS and barrier functions at special
+// addresses that are guaranteed not to be interrupted by the kernel. Using
+// this facility is slightly slower than inline assembler would be, but much
+// faster than a system call.
+//
+// For documentation, see arch/arm/kernel/entry-armv.S in the kernel source
+// (search for "User Helpers").
+
+
+typedef void (kernel_dmb_t)(void);
+#define BOOST_ATOMIC_KERNEL_DMB (*(kernel_dmb_t *)0xffff0fa0)
+
+static inline void fence_before(memory_order order)
+{
+	switch(order) {
+		// FIXME I really don't know which of these cases should call
+		// kernel_dmb() and which shouldn't...
+		case memory_order_consume:
+		case memory_order_release:
+		case memory_order_acq_rel:
+		case memory_order_seq_cst:
+			BOOST_ATOMIC_KERNEL_DMB();
+		default:;
+	}
+}
+
+static inline void fence_after(memory_order order)
+{
+	switch(order) {
+		// FIXME I really don't know which of these cases should call
+		// kernel_dmb() and which shouldn't...
+		case memory_order_acquire:
+		case memory_order_acq_rel:
+		case memory_order_seq_cst:
+			BOOST_ATOMIC_KERNEL_DMB();
+		default:;
+	}
+}
+
+#undef BOOST_ATOMIC_KERNEL_DMB
+
+
+template<typename T>
+class atomic_linux_arm_4 {
+
+//	typedef int (kernel_cmpxchg_t)(T oldval, T newval, T *ptr);
+	typedef int (kernel_cmpxchg_t)(T oldval, T newval, volatile T *ptr);
+#	define BOOST_ATOMIC_KERNEL_CMPXCHG (*(kernel_cmpxchg_t *)0xffff0fc0)
+	// Returns 0 if *ptr was changed.
+
+public:
+	explicit atomic_linux_arm_4(T v) : i(v) {}
+	atomic_linux_arm_4() {}
+	T load(memory_order order=memory_order_seq_cst) const volatile
+	{
+		T v=const_cast<const volatile T &>(i);
+		fence_after(order);
+		return v;
+	}
+	void store(T v, memory_order order=memory_order_seq_cst) volatile
+	{
+		fence_before(order);
+		const_cast<volatile T &>(i)=v;
+	}
+	bool compare_exchange_strong(
+		T &expected,
+		T desired,
+		memory_order success_order,
+		memory_order failure_order) volatile
+	{
+		// Apparently we can consider kernel_cmpxchg to be strong if it is retried
+		// by the kernel after being interrupted, which I think it is.
+		// Also it seems that when an ll/sc implementation is used the kernel
+		// loops until the store succeeds.
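+		//
+		// Illustration (assumption, not from the original): the helper at
+		// 0xffff0fc0 behaves roughly like this C equivalent, executed
+		// atomically with respect to preemption:
+		//
+		//   int kernel_cmpxchg(T oldval, T newval, volatile T* ptr) {
+		//       if( *ptr != oldval ) return 1;  // nonzero: no change was made
+		//       *ptr = newval;      return 0;   // zero:    the store happened
+		//   }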
+		bool success = BOOST_ATOMIC_KERNEL_CMPXCHG(expected,desired,&i)==0;
+		if (!success) expected = load(memory_order_relaxed);
+		return success;
+	}
+	bool compare_exchange_weak(
+		T &expected,
+		T desired,
+		memory_order success_order,
+		memory_order failure_order) volatile
+	{
+		return compare_exchange_strong(expected, desired, success_order, failure_order);
+	}
+	T exchange(T replacement, memory_order order=memory_order_seq_cst) volatile
+	{
+		// Copied from build_exchange.
+		T o=load(memory_order_relaxed);
+		do {} while(!compare_exchange_weak(o, replacement, order, order));
+		return o;
+		// Note that ARM has an atomic swap instruction that we could use here:
+		//    T oldval;
+		//    asm volatile ("swp\t%0, %1, [%2]" : "=&r"(oldval) : "r" (replacement), "r" (&i) : "memory");
+		//    return oldval;
+		// This instruction is deprecated in architecture >= 6. I'm unsure how
+		// inefficient its implementation is on those newer architectures. I don't
+		// think this would gain much since exchange() is not used often.
+	}
+
+	bool is_lock_free(void) const volatile {return true;}
+	typedef T integral_type;
+private:
+	T i;
+
+#	undef BOOST_ATOMIC_KERNEL_CMPXCHG
+
+};
+
+template<typename T>
+class platform_atomic_integral<T, 4> : public build_atomic_from_exchange<atomic_linux_arm_4<T> > {
+public:
+	typedef build_atomic_from_exchange<atomic_linux_arm_4<T> > super;
+	explicit platform_atomic_integral(T v) : super(v) {}
+	platform_atomic_integral(void) {}
+};
+
+
+template<typename T>
+class platform_atomic_integral<T, 1> : public build_atomic_from_larger_type<atomic_linux_arm_4<uint32_t>, T > {
+public:
+	typedef build_atomic_from_larger_type<atomic_linux_arm_4<uint32_t>, T> super;
+	explicit platform_atomic_integral(T v) : super(v) {}
+	platform_atomic_integral(void) {}
+};
+
+
+template<typename T>
+class platform_atomic_integral<T, 2> : public build_atomic_from_larger_type<atomic_linux_arm_4<uint32_t>, T > {
+public:
+	typedef build_atomic_from_larger_type<atomic_linux_arm_4<uint32_t>, T> super;
+	explicit platform_atomic_integral(T v) : super(v) {}
+	platform_atomic_integral(void) {}
+};
+
+
+typedef atomic_linux_arm_4<void *> platform_atomic_address;
+
+
+}
+}
+}
+
+#endif
diff --git a/include/boost/atomic/detail/valid_integral_types.hpp b/include/boost/atomic/detail/valid_integral_types.hpp
new file mode 100644
index 0000000..b091c9f
--- /dev/null
+++ b/include/boost/atomic/detail/valid_integral_types.hpp
@@ -0,0 +1,45 @@
+#ifndef BOOST_DETAIL_ATOMIC_VALID_INTEGRAL_TYPES_HPP
+#define BOOST_DETAIL_ATOMIC_VALID_INTEGRAL_TYPES_HPP
+
+// Copyright (c) 2009 Helge Bahmann
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include <boost/config.hpp>
+#include <boost/cstdint.hpp>
+
+#if BOOST_MSVC >= 1500 && (defined(_M_IA64) || defined(_M_AMD64)) && defined(BOOST_ATOMIC_HAVE_SSE2)
+#include <emmintrin.h>
+#endif
+
+namespace boost {
+namespace detail {
+namespace atomic {
+
+template<typename T> struct is_integral_type {typedef void test;};
+
+template<> struct is_integral_type<char> {typedef int test;};
+
+template<> struct is_integral_type<unsigned char> {typedef int test;};
+template<> struct is_integral_type<signed char> {typedef int test;};
+template<> struct is_integral_type<unsigned short> {typedef int test;};
+template<> struct is_integral_type<signed short> {typedef int test;};
+template<> struct is_integral_type<unsigned int> {typedef int test;};
+template<> struct is_integral_type<signed int> {typedef int test;};
+template<> struct is_integral_type<unsigned long> {typedef int test;};
+template<> struct is_integral_type<long> {typedef int test;};
+#ifdef BOOST_HAS_LONG_LONG
+template<> struct is_integral_type<unsigned long long> {typedef int test;};
+template<> struct is_integral_type<signed long long> {typedef int test;};
+#endif
+#ifdef BOOST_ATOMIC_HAVE_GNU_128BIT_INTEGERS
+template<> struct is_integral_type<__uint128_t> {typedef int test;};
+template<> struct is_integral_type<__int128_t> {typedef int test;};
+#endif
+#if BOOST_MSVC >= 1500 && (defined(_M_IA64) || defined(_M_AMD64)) && defined(BOOST_ATOMIC_HAVE_SSE2)
+template<> struct is_integral_type<__m128i> {typedef int test;};
+#endif
+}
+}
+}
+
+#endif
diff --git a/include/boost/atomic/platform.hpp b/include/boost/atomic/platform.hpp
new file mode 100644
index 0000000..0cdfb56
--- /dev/null
+++ b/include/boost/atomic/platform.hpp
@@ -0,0 +1,42 @@
+// Copyright (c) 2009 Helge Bahmann
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include <boost/config.hpp>
+
+#if defined(__GNUC__) && (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
+
+	#include <boost/atomic/detail/gcc-x86.hpp>
+
+#elif defined(__GNUC__) && defined(__alpha__)
+
+	#include <boost/atomic/detail/gcc-alpha.hpp>
+
+#elif defined(__GNUC__) && (defined(__POWERPC__) || defined(__PPC__))
+
+	#include <boost/atomic/detail/gcc-ppc.hpp>
+
+// This list of ARM architecture versions comes from Apple's arm/arch.h header.
+// I don't know how complete it is.
+#elif defined(__GNUC__) && (defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
+	|| defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \
+	|| defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_7A__))
+
+	#include <boost/atomic/detail/gcc-armv6+.hpp>
+
+#elif defined(__linux__) && defined(__arm__)
+
+	#include <boost/atomic/detail/linux-arm.hpp>
+
+#elif defined(BOOST_USE_WINDOWS_H) || defined(_WIN32_CE) || defined(BOOST_MSVC) || defined(BOOST_INTEL_WIN) || defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__)
+
+	#include <boost/atomic/detail/interlocked.hpp>
+
+#else
+
+	#warning "Using slow fallback atomic implementation"
+	#include <boost/atomic/detail/fallback.hpp>
+
+#endif
diff --git a/include/fc/abstract_types.hpp b/include/fc/abstract_types.hpp
new file mode 100644
index 0000000..0f54e7e
--- /dev/null
+++ b/include/fc/abstract_types.hpp
@@ -0,0 +1,92 @@
+#ifndef _FC_ABSTRACT_TYPES_HPP_
+#define _FC_ABSTRACT_TYPES_HPP_
+#include <fc/utility.hpp>
+#include <fc/log.hpp>
+
+namespace fc {
+
+  struct abstract_type {
+    virtual ~abstract_type(){}
+    virtual size_t size_of()const = 0;
+    /**
+     *  @brief Inplace destructor (does not free memory)  ((T*)dst)->~T();
+     */
+    virtual void destructor( void* dst )const = 0;
+
+    /** @brief 'delete T' */
+    virtual void destroy( void* dst )const = 0;
+  };
+
+  template<typename T>
+  struct type : virtual abstract_type {
+    virtual size_t size_of()const             { return sizeof(T); }
+    virtual void destructor( void* dst )const { ((T*)dst)->~T(); }
+    virtual void destroy( void* dst )const    { delete ((T*)dst); }
+  };
+
+  struct abstract_moveable_type : virtual abstract_type {
+    virtual void move_construct( void* dst, void* src )const = 0;
+    virtual void move( void* dst, void* src )const = 0;
+  };
+
+  template<typename T>
+  struct moveable_type : virtual type<T>, virtual abstract_moveable_type {
+    static abstract_moveable_type& instance() { static moveable_type inst; return inst; }
+    virtual void destruct( void* dst )const { ((T*)dst)->~T(); }
+    virtual void move_construct( void* dst, void* src )const { slog( "move construct" ); new ((char*)dst) T( fc::move(*((T*)src)) ); }
+    virtual void move( void* dst, void* src )const { *((T*)dst) = fc::move(*((T*)src)); }
+  };
+
+  struct abstract_value_type : virtual abstract_moveable_type {
+    virtual void construct( void* dst )const = 0;
+    virtual void copy_construct( void* dst, const void* src )const = 0;
+    virtual void assign( void* dst, const void* src )const = 0;
+  };
+
+  /**
+   *  Default constructable, moveable, copyable, assignable.
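+   *
+   *  A minimal usage sketch (illustrative, not part of this header):
+   *  @code
+   *    char buf[sizeof(fc::string)];
+   *    fc::abstract_value_type& vt = fc::value_type<fc::string>::instance();
+   *    vt.construct( buf );          // placement-new a fc::string into buf
+   *    vt.destructor( buf );         // in-place destroy; buf itself is not freed
+   *  @endcode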
+   */
+  template<typename T>
+  struct value_type : virtual moveable_type<T>, virtual abstract_value_type {
+    static abstract_value_type& instance() { static value_type inst; return inst; }
+
+    virtual void construct( void* dst )const { new ((char*)dst) T(); }
+    virtual void copy_construct( void* dst, const void* src )const { new ((char*)dst) T( *((const T*)src) ); }
+    virtual void assign( void* dst, const void* src )const { *((T*)dst) = *((const T*)src); }
+  };
+
+  struct abstract_less_than_comparable_type {
+    virtual bool less_than( const void* left, const void* right )const = 0;
+  };
+
+
+  template<typename T>
+  struct less_than_comparable_type : abstract_less_than_comparable_type {
+    virtual bool less_than( const void* left, const void* right )const {
+      return *((const T*)left) < *((const T*)right);
+    }
+  };
+
+  struct abstract_equal_comparable_type {
+    virtual bool equal( const void* left, const void* right )const = 0;
+  };
+
+  template<typename T>
+  struct equal_comparable_type : abstract_equal_comparable_type {
+    virtual bool equal( const void* left, const void* right )const {
+      return *((const T*)left) == *((const T*)right);
+    }
+  };
+
+  struct abstract_callable_type {
+    virtual void call( const void* self )const = 0;
+  };
+
+  template<typename T>
+  struct callable_type : virtual abstract_callable_type, virtual value_type<T> {
+    virtual void call( const void* self )const { (*((const T*)self))(); }
+  };
+
+} // namespace fc
+
+#endif
diff --git a/include/fc/aligned.hpp b/include/fc/aligned.hpp
new file mode 100644
index 0000000..d8b211e
--- /dev/null
+++ b/include/fc/aligned.hpp
@@ -0,0 +1,14 @@
+#ifndef _FC_ALIGNED_HPP_
+#define _FC_ALIGNED_HPP_
+namespace fc {
+
+  template<unsigned int S, typename T=double>
+  struct aligned {
+    union {
+      T    _align;
+      char _data[S];
+    } _store;
+  };
+
+}
+#endif // _FC_ALIGNED_HPP_
diff --git a/include/fc/any.hpp b/include/fc/any.hpp
new file mode 100644
index 0000000..5461cb6
--- /dev/null
+++ b/include/fc/any.hpp
@@ -0,0 +1,13 @@
+#ifndef _FC_ANY_HPP_
+#define _FC_ANY_HPP_
+
+namespace fc { namespace reflect {
+
+  // provides value semantics
+  struct any {
+
+  };
+
+} }
+
+#endif // _FC_ANY_HPP_
diff --git a/include/fc/console_defines.h b/include/fc/console_defines.h
new file mode 100644
index 0000000..545768c
--- /dev/null
+++ b/include/fc/console_defines.h
@@ -0,0 +1,561 @@
+#ifndef _MACE_CMT_CONSOLE_DEFINES_H_
+#define _MACE_CMT_CONSOLE_DEFINES_H_
+
+/// @cond INTERNAL_DEV
+/**
+  @file console_defines.h
+  This header contains definitions for console styles and colors.
+  @ingroup tconsole
+*/
+
+
+/**
+  @defgroup console_styles Console Styles
+  @brief Defines styles that can be used within text printed to the Console.
+
+  Note that styles will not show up when printing to files (in fact, you will
+  get a lot of garbage characters if you do this). Also, not all consoles
+  support styled text.
+  @{
+*/
+#if COLOR_CONSOLE
+
+#ifndef WIN32
+
+/**
+  @def CONSOLE_DEFAULT
+  @brief Sets all styles and colors to the console defaults.
+
+  (const char*)
+*/
+#define CONSOLE_DEFAULT "\033[0m"
+/**
+  @def CONSOLE_BOLD
+  @brief Print bold console text (or brighten foreground color if present).
+
+  @ingroup tconsole
+  (const char*)
+*/
+#define CONSOLE_BOLD "\033[1m"
+
+/**
+  @def CONSOLE_HALF_BRIGHT
+  @brief Print half-bright console text.
+
+  @ingroup tconsole
+  (const char*)
+*/
+#define CONSOLE_HALF_BRIGHT "\033[2m"
+
+/**
+  @def CONSOLE_ITALIC
+  @brief Print italic console text.
+
+  @ingroup tconsole
+  (const char*) Typically not supported.
+ + (const char*) +*/ +#define CONSOLE_ITALIC "\033[3m" + +/** + @def CONSOLE_UNDERLINE + @brief Print underlined console text. + + @ingroup tconsole + (const char*) +*/ +#define CONSOLE_UNDERLINE "\033[4m" + +/** + @def CONSOLE_BLINK + @brief Print blinking console text. + + @ingroup tconsole + (const char*) +*/ +#define CONSOLE_BLINK "\033[5m" + +/** + @def CONSOLE_RAPID_BLINK + @brief Print rapidly blinking console text. Typically not supported. + + (const char*) +*/ +#define CONSOLE_RAPID_BLINK "\033[6m" + +/** + @def CONSOLE_REVERSED + @brief Print console text with foreground and background colors reversed. + + @ingroup tconsole + (const char*) +*/ +#define CONSOLE_REVERSED "\033[7m" + +/** + @def CONSOLE_CONCEALED + @brief Print concealed console text. + + @ingroup tconsole + (const char*) +*/ +#define CONSOLE_CONCEALED "\033[8m" + +/** + @def CONSOLE_STRIKETHROUGH + @brief Print strikethrough console text. Typically not supported. + + @ingroup tconsole + (const char*) +*/ +#define CONSOLE_STRIKETHROUGH "\033[9m" + +/// @} + + +/** + @defgroup console_colors Console Colors + @brief Defines colors that can be used within text printed to the Console. + + @ingroup tconsole + Note that colors will not show up when printing to files (in fact, you will + get a lot of garbage characters if you do this). Also, not all consoles + support colored text. + @{ +*/ + +/** + @def CONSOLE_BLACK + @brief Print text with black foreground. + + @ingroup tconsole + (const char*) +*/ +#define CONSOLE_BLACK "\033[30m" + +/** + @def CONSOLE_RED + @brief Print text with red foreground. + + @ingroup tconsole + (const char*) +*/ +#define CONSOLE_RED "\033[31m" + +/** + @def CONSOLE_GREEN + @brief Print text with green foreground. + + @ingroup tconsole + (const char*) +*/ +#define CONSOLE_GREEN "\033[32m" + +/** + @def CONSOLE_BROWN + @brief Print text with brown foreground. + + @ingroup tconsole + (const char*) +*/ +#define CONSOLE_BROWN "\033[33m" + +/** + @def CONSOLE_BLUE + @brief Print text with blue foreground. + + @ingroup tconsole + (const char*) +*/ +#define CONSOLE_BLUE "\033[34m" + +/** + @def CONSOLE_MAGENTA + @brief Print text with magenta foreground. + + @ingroup tconsole + (const char*) +*/ +#define CONSOLE_MAGENTA "\033[35m" + +/** + @def CONSOLE_CYAN + @brief Print text with cyan foreground. + + @ingroup tconsole + (const char*) +*/ +#define CONSOLE_CYAN "\033[36m" + +/** + @def CONSOLE_WHITE + @brief Print text with white foreground. + + @ingroup tconsole + (const char*) +*/ +#define CONSOLE_WHITE "\033[37m" + +/** + @def CONSOLE_BLACK_BG + @brief Print text with black background. + + @ingroup tconsole + (const char*) +*/ +#define CONSOLE_BLACK_BG "\033[40m" + +/** + @def CONSOLE_RED_BG + @brief Print text with red background. + + @ingroup tconsole + (const char*) +*/ +#define CONSOLE_RED_BG "\033[41m" + +/** + @def CONSOLE_GREEN_BG + @brief Print text with green background. + + @ingroup tconsole + (const char*) +*/ +#define CONSOLE_GREEN_BG "\033[42m" + +/** + @def CONSOLE_BROWN_BG + @brief Print text with brown background. + + @ingroup tconsole + (const char*) +*/ +#define CONSOLE_BROWN_BG "\033[43m" + +/** + @def CONSOLE_BLUE_BG + @brief Print text with blue background. + + @ingroup tconsole + (const char*) +*/ +#define CONSOLE_BLUE_BG "\033[44m" + +/** + @def CONSOLE_MAGENTA_BG + @brief Print text with magenta background. + + @ingroup tconsole + (const char*) +*/ +#define CONSOLE_MAGENTA_BG "\033[45m" + +/** + @def CONSOLE_CYAN_BG + @brief Print text with cyan background. 
+
+  @ingroup tconsole
+  (const char*)
+*/
+#define CONSOLE_CYAN_BG "\033[46m"
+
+/**
+  @def CONSOLE_WHITE_BG
+  @brief Print text with white background.
+
+  @ingroup tconsole
+  (const char*)
+*/
+#define CONSOLE_WHITE_BG "\033[47m"
+
+
+
+#else // WIN32
+#include <windows.h>
+#include <wincon.h>
+//#include <winbase.h>
+#include <stdio.h>
+
+/**
+  @def CONSOLE_DEFAULT
+  @brief Sets all styles and colors to the console defaults.
+*/
+#define CONSOLE_DEFAULT (FOREGROUND_BLUE | FOREGROUND_RED | FOREGROUND_GREEN)
+/**
+  @def CONSOLE_BOLD
+  @brief Print bold console text (or brighten foreground color if present).
+
+  @ingroup tconsole
+*/
+#define CONSOLE_BOLD FOREGROUND_INTENSITY
+
+/**
+  @def CONSOLE_HALF_BRIGHT
+  @brief Print half-bright console text.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_HALF_BRIGHT
+
+/**
+  @def CONSOLE_ITALIC
+  @brief Print italic console text. Typically not supported.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_ITALIC
+
+/**
+  @def CONSOLE_UNDERLINE
+  @brief Print underlined console text.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_UNDERLINE COMMON_LVB_UNDERSCORE
+
+/**
+  @def CONSOLE_BLINK
+  @brief Print blinking console text.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_BLINK
+
+/**
+  @def CONSOLE_RAPID_BLINK
+  @brief Print rapidly blinking console text. Typically not supported.
+*/
+#define CONSOLE_RAPID_BLINK
+
+/**
+  @def CONSOLE_REVERSED
+  @brief Print console text with foreground and background colors reversed.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_REVERSED COMMON_LVB_REVERSE_VIDEO
+
+/**
+  @def CONSOLE_CONCEALED
+  @brief Print concealed console text.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_CONCEALED
+
+/**
+  @def CONSOLE_STRIKETHROUGH
+  @brief Print strikethrough console text. Typically not supported.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_STRIKETHROUGH
+
+/// @}
+
+
+/**
+  @defgroup console_colors Console Colors
+  @brief Defines colors that can be used within text printed to the Console.
+
+  @ingroup tconsole
+  Note that colors will not show up when printing to files (in fact, you will
+  get a lot of garbage characters if you do this). Also, not all consoles
+  support colored text.
+  @{
+*/
+
+/**
+  @def CONSOLE_BLACK
+  @brief Print text with black foreground.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_BLACK 0
+
+/**
+  @def CONSOLE_RED
+  @brief Print text with red foreground.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_RED FOREGROUND_RED
+
+/**
+  @def CONSOLE_GREEN
+  @brief Print text with green foreground.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_GREEN FOREGROUND_GREEN
+
+/**
+  @def CONSOLE_BROWN
+  @brief Print text with brown foreground.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_BROWN (FOREGROUND_RED | FOREGROUND_GREEN)
+
+/**
+  @def CONSOLE_BLUE
+  @brief Print text with blue foreground.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_BLUE FOREGROUND_BLUE
+
+/**
+  @def CONSOLE_MAGENTA
+  @brief Print text with magenta foreground.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_MAGENTA (CONSOLE_RED | CONSOLE_BLUE)
+
+/**
+  @def CONSOLE_CYAN
+  @brief Print text with cyan foreground.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_CYAN (CONSOLE_BLUE | CONSOLE_GREEN)
+
+/**
+  @def CONSOLE_WHITE
+  @brief Print text with white foreground.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_WHITE (CONSOLE_RED | CONSOLE_BLUE | CONSOLE_GREEN)
+
+/**
+  @def CONSOLE_BLACK_BG
+  @brief Print text with black background.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_BLACK_BG 0
+
+/**
+  @def CONSOLE_RED_BG
+  @brief Print text with red background.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_RED_BG (BACKGROUND_RED)
+
+/**
+  @def CONSOLE_GREEN_BG
+  @brief Print text with green background.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_GREEN_BG (BACKGROUND_GREEN)
+
+/**
+  @def CONSOLE_BROWN_BG
+  @brief Print text with brown background.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_BROWN_BG (BACKGROUND_RED | BACKGROUND_GREEN)
+
+/**
+  @def CONSOLE_BLUE_BG
+  @brief Print text with blue background.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_BLUE_BG (BACKGROUND_BLUE)
+
+/**
+  @def CONSOLE_MAGENTA_BG
+  @brief Print text with magenta background.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_MAGENTA_BG (BACKGROUND_RED | BACKGROUND_BLUE)
+
+/**
+  @def CONSOLE_CYAN_BG
+  @brief Print text with cyan background.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_CYAN_BG (BACKGROUND_BLUE | BACKGROUND_GREEN)
+
+/**
+  @def CONSOLE_WHITE_BG
+  @brief Print text with white background.
+
+  @ingroup tconsole
+*/
+#define CONSOLE_WHITE_BG (BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE)
+#endif
+
+
+/// @}
+#else // COLOR_CONSOLE not set: no styled output on any platform
+#define CONSOLE_DEFAULT ""
+#define CONSOLE_BOLD ""
+#define CONSOLE_HALF_BRIGHT ""
+#define CONSOLE_ITALIC ""
+#define CONSOLE_UNDERLINE ""
+#define CONSOLE_BLINK ""
+#define CONSOLE_RAPID_BLINK ""
+#define CONSOLE_REVERSED ""
+#define CONSOLE_CONCEALED ""
+#define CONSOLE_STRIKETHROUGH ""
+#define CONSOLE_BLACK ""
+#define CONSOLE_RED ""
+#define CONSOLE_GREEN ""
+#define CONSOLE_BROWN ""
+#define CONSOLE_BLUE ""
+#define CONSOLE_MAGENTA ""
+#define CONSOLE_CYAN ""
+#define CONSOLE_WHITE ""
+#define CONSOLE_BLACK_BG ""
+#define CONSOLE_RED_BG ""
+#define CONSOLE_GREEN_BG ""
+#define CONSOLE_BROWN_BG ""
+#define CONSOLE_BLUE_BG ""
+#define CONSOLE_MAGENTA_BG ""
+#define CONSOLE_CYAN_BG ""
+#define CONSOLE_WHITE_BG ""
+
+/// @}
+/// @endcond INTERNAL_DEV
+#endif // COLOR_CONSOLE
+#endif // _MACE_CMT_CONSOLE_DEFINES_H_
diff --git a/include/fc/error.hpp b/include/fc/error.hpp
new file mode 100644
index 0000000..cc95cad
--- /dev/null
+++ b/include/fc/error.hpp
@@ -0,0 +1,17 @@
+#ifndef _FC_ERROR_HPP_
+#define _FC_ERROR_HPP_
+#include <exception>
+
+namespace fc {
+  struct future_wait_timeout: public std::exception{};
+  struct task_canceled:       public std::exception{};
+  struct thread_quit:         public std::exception{};
+  struct wait_any_error:      public std::exception{};
+  struct bad_cast: public std::exception{
+    const char* what()const throw(){ return "bad cast"; }
+  };
+  struct range_error: public std::exception{
+    const char* what()const throw(){ return "range error"; }
+  };
+}
+
+#endif // _FC_ERROR_HPP_
diff --git a/include/fc/example.hpp b/include/fc/example.hpp
new file mode 100644
index 0000000..0feaa7e
--- /dev/null
+++ b/include/fc/example.hpp
@@ -0,0 +1,12 @@
+#ifndef _EXAMPLE_HPP_
+#define _EXAMPLE_HPP_
+#include <fc/reflect_fwd.hpp>
+
+  struct example {
+    int a;
+    int b;
+  };
+
+  FC_REFLECTABLE( example )
+
+#endif // _EXAMPLE_HPP_
diff --git a/include/fc/exception.hpp b/include/fc/exception.hpp
new file mode 100644
index 0000000..fe5d40b
--- /dev/null
+++ b/include/fc/exception.hpp
@@ -0,0 +1,54 @@
+#ifndef _FC_EXCEPTION_HPP_
+#define _FC_EXCEPTION_HPP_
+#include <fc/string.hpp>
+#include <fc/utility.hpp>
+
+// provided for easy integration with boost.
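+// A usage sketch for the wrapper declared below (illustrative only):
+//
+//   fc::exception_ptr ep;
+//   try { throw fc::bad_cast(); }
+//   catch ( ... ) { ep = fc::current_exception(); }
+//   if( ep ) fc::rethrow_exception( ep );   // rethrows the captured exception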
+namespace boost { class exception_ptr; }
+
+namespace fc {
+  /**
+   *  Simply including boost/exception_ptr.hpp is enough to significantly
+   *  lengthen compile times. This header defines an 'opaque' exception
+   *  type that provides the most 'general' exception handling needs without
+   *  requiring a significant amount of code to be included.
+   */
+  class exception_ptr {
+    public:
+      exception_ptr();
+      exception_ptr( const boost::exception_ptr& c );
+      exception_ptr( boost::exception_ptr&& c );
+      exception_ptr( const exception_ptr& c );
+      exception_ptr( exception_ptr&& c );
+      ~exception_ptr();
+
+      exception_ptr& operator=(const boost::exception_ptr& c);
+      exception_ptr& operator=(boost::exception_ptr&& c);
+
+      exception_ptr& operator=(const exception_ptr& c);
+      exception_ptr& operator=(exception_ptr&& c);
+
+      fc::string diagnostic_information()const;
+
+      operator bool()const;
+
+      operator const boost::exception_ptr& ()const;
+      operator boost::exception_ptr& ();
+    private:
+      char my[sizeof(void*)*2];
+  };
+
+  exception_ptr current_exception();
+  template<typename T>
+  inline exception_ptr copy_exception( T&& e ) {
+    try { throw e; } catch (...) { return current_exception(); }
+    return exception_ptr();
+  }
+  void rethrow_exception( const exception_ptr& e );
+
+} // namespace fc
+
+#define FC_THROW( X ) throw (X)
+
+
+#endif // _FC_EXCEPTION_HPP_
diff --git a/include/fc/function.hpp b/include/fc/function.hpp
new file mode 100644
index 0000000..da50863
--- /dev/null
+++ b/include/fc/function.hpp
@@ -0,0 +1,50 @@
+#ifndef _FC_FUNCTION_HPP_
+#define _FC_FUNCTION_HPP_
+#include <fc/utility.hpp>
+#include <new>
+#include <stdint.h>
+
+namespace fc {
+
+  namespace detail {
+    template<typename Functor>
+    void call( void* functor ) {
+      (*static_cast<Functor*>(functor))();
+    }
+    template<typename Functor>
+    void copy( const void* src, void* dst ) {
+      new (dst) Functor( *static_cast<const Functor*>(src) );
+    }
+    template<typename Functor>
+    void move( void* src, void* dst ) {
+      new (dst) Functor( fc::move(*static_cast<Functor*>(src)) );
+    }
+  }
+
+  class function {
+    public:
+      template<typename Functor>
+      function( Functor&& f ) {
+        typedef typename fc::remove_reference<Functor>::type functor_type;
+        static_assert( sizeof(f) <= sizeof(store), "functor is too large for the inline store" );
+        new ((void*)&store[0]) functor_type( fc::forward<Functor>(f) );
+        call = &detail::call<functor_type>;
+        copy = &detail::copy<functor_type>;
+        move = &detail::move<functor_type>;
+      }
+
+      function( const function& f )
+      :call(f.call),move(f.move),copy(f.copy){
+        copy( &f.store[0], &store[0] );
+      }
+
+      function( function&& f )
+      :call(f.call),move(f.move),copy(f.copy){
+        move( &f.store[0], &store[0] );
+      }
+
+      function& operator = ( function&& f ) {
+        call = f.call; move = f.move; copy = f.copy;
+        move( &f.store[0], &store[0] );
+        return *this;
+      }
+
+      void operator()()const { call( (void*)&store[0] ); }
+
+    private:
+      uint64_t store[8];
+      void (*call)(void*);
+      void (*move)(void* src, void* dst);
+      void (*copy)(const void*, void* dst);
+      void (*destroy)(void*);
+  };
+
+}
+
+#endif // _FC_FUNCTION_HPP_
diff --git a/include/fc/future.hpp b/include/fc/future.hpp
new file mode 100644
index 0000000..900b702
--- /dev/null
+++ b/include/fc/future.hpp
@@ -0,0 +1,243 @@
+#ifndef _FC_FUTURE_HPP_
+#define _FC_FUTURE_HPP_
+#include <fc/shared_ptr.hpp>
+#include <fc/optional.hpp>
+#include <fc/utility.hpp>
+#include <fc/time.hpp>
+#include <fc/spin_yield_lock.hpp>
+#include <fc/exception.hpp>
+
+namespace fc {
+  class abstract_thread;
+  class void_t;
+  class priority;
+  class exception_ptr;
+  class thread;
+
+  namespace detail {
+    class completion_handler {
+      public:
+        virtual ~completion_handler(){};
+        virtual void on_complete( const void* v, const fc::exception_ptr& e ) = 0;
+    };
+
+    template<typename Functor, typename T>
+    class completion_handler_impl : public completion_handler {
+      public:
+        completion_handler_impl( Functor&& f ):_func(fc::move(f)){}
+        completion_handler_impl( const Functor& f ):_func(f){}
+
+        virtual void on_complete( const void* v, const fc::exception_ptr& e ) {
+          _func( *static_cast<const T*>(v), e);
+        }
+      private:
+        Functor _func;
+    };
+    template<typename Functor>
+    class completion_handler_impl<Functor,void> : public completion_handler {
+      public:
+        completion_handler_impl( Functor&& f ):_func(fc::move(f)){}
+        completion_handler_impl( const Functor& f ):_func(f){}
+        virtual void on_complete( const void* v, const fc::exception_ptr& e ) {
+          _func(e);
+        }
+      private:
+        Functor _func;
+    };
+  }
+
+  class promise_base : virtual public retainable {
+    public:
+      typedef shared_ptr<promise_base> ptr;
+      promise_base(const char* desc="");
+
+      const char* get_desc()const;
+
+      void cancel();
+      bool ready()const;
+      bool error()const;
+
+      void set_exception( const fc::exception_ptr& e );
+
+    protected:
+      void _wait( const microseconds& timeout_us );
+      void _wait_until( const time_point& timeout_us );
+      void _enqueue_thread();
+      void _notify();
+      void _set_timeout();
+      void _set_value(const void* v);
+
+      void _on_complete( detail::completion_handler* c );
+    private:
+      friend class thread;
+      friend struct context;
+      friend class thread_d;
+
+      bool                        _ready;
+      mutable spin_yield_lock     _spin_yield;
+      thread*                     _blocked_thread;
+      time_point                  _timeout;
+      fc::exception_ptr           _except;
+      bool                        _canceled;
+      const char*                 _desc;
+      detail::completion_handler* _compl;
+  };
+
+  template<typename T>
+  class promise : virtual public promise_base {
+    public:
+      typedef shared_ptr< promise<T> > ptr;
+      promise( const char* desc = "" ):promise_base(desc){}
+      promise( const T& val ){ set_value(val); }
+      promise( T&& val ){ set_value(fc::move(val) ); }
+      ~promise(){}
+
+      const T& wait(const microseconds& timeout = microseconds::max() ){
+        this->_wait( timeout );
+        return *result;
+      }
+      const T& wait_until(const time_point& tp ) {
+        this->_wait_until( tp );
+        return *result;
+      }
+
+      void set_value( const T& v ) {
+        result = v;
+        _set_value(&*result);
+      }
+
+      void set_value( T&& v ) {
+        result = fc::move(v);
+        _set_value(&*result);
+      }
+
+      template<typename CompletionHandler>
+      void on_complete( CompletionHandler&& c ) {
+        _on_complete( new detail::completion_handler_impl<CompletionHandler,T>(fc::forward<CompletionHandler>(c)) );
+      }
+    protected:
+      optional<T> result;
+  };
+
+  template<>
+  class promise<void> : public promise_base {
+    public:
+      typedef shared_ptr< promise<void> > ptr;
+      promise( const char* desc = "" ):promise_base(desc){}
+      promise( const void_t& v ){ set_value(); }
+
+      void wait(const microseconds& timeout = microseconds::max() ){
+        this->_wait( timeout );
+      }
+      void wait_until(const time_point& tp ) {
+        this->_wait_until( tp );
+      }
+
+      void set_value(){ this->_set_value(nullptr); }
+      void set_value( const void_t& v ) { this->_set_value(nullptr); }
+
+      template<typename CompletionHandler>
+      void on_complete( CompletionHandler&& c ) {
+        _on_complete( new detail::completion_handler_impl<CompletionHandler,void>(fc::forward<CompletionHandler>(c)) );
+      }
+  };
+
+  /**
+   *  @brief a placeholder for the result of an asynchronous operation.
+   *
+   *  By calling future::wait() you will block the current fiber until
+   *  the asynchronous operation completes.
+   *
+   *  If you would like an asynchronous interface instead of the synchronous
+   *  wait() method, you can specify a CompletionHandler: a callable that takes
+   *  two parameters, a const reference to the value and an exception_ptr. If the
+   *  exception_ptr is set, the value reference is invalid and accessing it is
+   *  undefined.
+   *
+   *  Promises have pointer semantics; futures have reference semantics and
+   *  contain a shared pointer to a promise.
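+   *
+   *  A minimal sketch (illustrative; assumes some other task eventually
+   *  fulfills the promise):
+   *  @code
+   *    fc::promise<int>::ptr p( new fc::promise<int>("example") );
+   *    fc::future<int>       f( p );
+   *    // ... elsewhere: p->set_value( 42 ); ...
+   *    int result = f.wait();   // blocks this fiber until the value is set
+   *  @endcode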
+   */
+  template<typename T>
+  class future {
+    public:
+      future( const shared_ptr<promise<T> >& p ):m_prom(p){}
+      future( shared_ptr<promise<T> >&& p ):m_prom(fc::move(p)){}
+      future(){}
+
+      /// @pre valid()
+      /// @post ready()
+      /// @throws timeout
+      const T& wait( const microseconds& timeout = microseconds::max() ){
+        return m_prom->wait(timeout);
+      }
+
+      /// @pre valid()
+      /// @post ready()
+      /// @throws timeout
+      const T& wait_until( const time_point& tp ) {
+        return m_prom->wait_until(tp);
+      }
+
+      bool valid()const { return !!m_prom; }
+
+      /// @pre valid()
+      bool ready()const { return m_prom->ready(); }
+
+      /// @pre valid()
+      bool error()const { return m_prom->error(); }
+
+      /**
+       *  @pre valid()
+       *
+       *  The given completion handler will be called from some
+       *  arbitrary thread and should not 'block'. Generally
+       *  it should post an event or start a new async operation.
+       */
+      template<typename CompletionHandler>
+      void on_complete( CompletionHandler&& c ) {
+        m_prom->on_complete( fc::forward<CompletionHandler>(c) );
+      }
+    private:
+      shared_ptr<promise<T> > m_prom;
+  };
+
+  template<>
+  class future<void> {
+    public:
+      future( const shared_ptr<promise<void> >& p ):m_prom(p){}
+      future( shared_ptr<promise<void> >&& p ):m_prom(fc::move(p)){}
+      future(){}
+
+      /// @pre valid()
+      /// @post ready()
+      /// @throws timeout
+      void wait( const microseconds& timeout = microseconds::max() ){
+        m_prom->wait(timeout);
+      }
+
+      /// @pre valid()
+      /// @post ready()
+      /// @throws timeout
+      void wait_until( const time_point& tp ) {
+        m_prom->wait_until(tp);
+      }
+
+      bool valid()const { return !!m_prom; }
+
+      /// @pre valid()
+      bool ready()const { return m_prom->ready(); }
+
+      /// @pre valid()
+      bool error()const { return m_prom->error(); }
+
+      template<typename CompletionHandler>
+      void on_complete( CompletionHandler&& c ) {
+        m_prom->on_complete( fc::forward<CompletionHandler>(c) );
+      }
+
+    private:
+      shared_ptr<promise<void> > m_prom;
+  };
+}
+
+#endif // _FC_FUTURE_HPP_
diff --git a/include/fc/fwd.hpp b/include/fc/fwd.hpp
new file mode 100644
index 0000000..c6d2911
--- /dev/null
+++ b/include/fc/fwd.hpp
@@ -0,0 +1,45 @@
+#ifndef FC_FWD_HPP_
+#define FC_FWD_HPP_
+#include <fc/aligned.hpp>
+
+namespace fc {
+
+/**
+ *  @brief Used to forward declare value types.
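+ *
+ *  A pimpl-style sketch (illustrative, not from the original):
+ *  @code
+ *    class impl;                 // defined only in the .cpp file
+ *    class widget {
+ *      public:
+ *        widget();
+ *      private:
+ *        fc::fwd<impl,64> my;    // 64 bytes reserved inline, no heap allocation
+ *    };
+ *  @endcode
+ *  The static_assert in fwd_impl.hpp checks, once impl is complete, that the
+ *  reserved size is actually large enough.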
+ *
+ */
+template<typename T, unsigned int S>
+class fwd {
+  public:
+    template<typename U> fwd( U&& u );
+    fwd();
+
+    fwd( const fwd& f );
+    fwd( fwd&& f );
+
+    operator const T&()const;
+    operator T&();
+
+    T& operator*();
+    const T& operator*()const;
+    const T* operator->()const;
+
+    T* operator->();
+    bool operator !()const;
+
+    template<typename U>
+    T& operator = ( U&& u );
+
+    T& operator = ( fwd&& u );
+    T& operator = ( const fwd& u );
+
+    ~fwd();
+
+  private:
+    aligned<S> _store;
+};
+
+
+} // namespace fc
+
+#endif
diff --git a/include/fc/fwd_impl.hpp b/include/fc/fwd_impl.hpp
new file mode 100644
index 0000000..f45d971
--- /dev/null
+++ b/include/fc/fwd_impl.hpp
@@ -0,0 +1,117 @@
+#ifndef _FC_FWD_IMPL_HPP_
+#define _FC_FWD_IMPL_HPP_
+
+#include <fc/fwd.hpp>
+#include <fc/utility.hpp>
+#include <new>
+
+namespace fc {
+
+  namespace detail {
+    template<typename A, typename U>
+    struct add {
+      typedef decltype( *((A*)0) + *((typename fc::remove_reference<U>::type*)0) ) type;
+    };
+    template<typename A, typename U>
+    struct add_eq {
+      typedef decltype( *((A*)0) += *((typename fc::remove_reference<U>::type*)0) ) type;
+    };
+
+    template<typename A, typename U>
+    struct sub {
+      typedef decltype( *((A*)0) - *((typename fc::remove_reference<U>::type*)0) ) type;
+    };
+
+    template<typename A, typename U>
+    struct sub_eq {
+      typedef decltype( *((A*)0) -= *((typename fc::remove_reference<U>::type*)0) ) type;
+    };
+    template<typename A, typename U>
+    struct insert_op {
+      typedef decltype( *((A*)0) << *((typename fc::remove_reference<U>::type*)0) ) type;
+    };
+    template<typename A, typename U>
+    struct extract_op {
+      A* a;
+      U* u;
+      typedef decltype( *a >> *u ) type;
+    };
+  }
+
+
+  template<typename T, unsigned int S, typename U>
+  auto operator + ( const fwd<T,S>& x, U&& u ) -> typename detail::add<T,U>::type { return *x+fc::forward<U>(u); }
+
+  template<typename T, unsigned int S, typename U>
+  auto operator - ( const fwd<T,S>& x, U&& u ) -> typename detail::sub<T,U>::type { return *x-fc::forward<U>(u); }
+
+  template<typename T, unsigned int S, typename U>
+  auto operator << ( U& u, const fwd<T,S>& f ) -> typename detail::insert_op<U,T>::type { return u << *f; }
+
+  template<typename T, unsigned int S, typename U>
+  auto operator >> ( U& u, fwd<T,S>& f ) -> typename detail::extract_op<U,T>::type { return u >> *f; }
+
+  template<typename T, unsigned int S>
+  bool fwd<T,S>::operator !()const { return !(**this); }
+
+
+  template<typename T, unsigned int S>
+  template<typename U>
+  fwd<T,S>::fwd( U&& u ) {
+    static_assert( sizeof(_store) >= sizeof(T), "Failed to reserve enough space" );
+    new (this) T( fc::forward<U>(u) );
+  }
+  template<typename T, unsigned int S>
+  fwd<T,S>::fwd() {
+    static_assert( sizeof(_store) >= sizeof(T), "Failed to reserve enough space" );
+    new (this) T();
+  }
+  template<typename T, unsigned int S>
+  fwd<T,S>::fwd( const fwd& f ){
+    new (this) T( *f );
+  }
+  template<typename T, unsigned int S>
+  fwd<T,S>::fwd( fwd&& f ){
+    new (this) T( fc::move(*f) );
+  }
+
+
+
+  template<typename T, unsigned int S>
+  fwd<T,S>::operator T&() { return *(( T*)this); }
+  template<typename T, unsigned int S>
+  fwd<T,S>::operator const T&()const { return *((const T*)this); }
+
+  template<typename T, unsigned int S>
+  T& fwd<T,S>::operator*() { return *((T*)this); }
+  template<typename T, unsigned int S>
+  const T& fwd<T,S>::operator*()const { return *((const T*)this); }
+  template<typename T, unsigned int S>
+  const T* fwd<T,S>::operator->()const { return ((const T*)this); }
+
+  template<typename T, unsigned int S>
+  T* fwd<T,S>::operator->(){ return ((T*)this); }
+
+
+  template<typename T, unsigned int S>
+  fwd<T,S>::~fwd() {
+    ((T*)this)->~T();
+  }
+  template<typename T, unsigned int S>
+  template<typename U>
+  T& fwd<T,S>::operator = ( U&& u ) {
+    return **this = fc::forward<U>(u);
+  }
+
+  template<typename T, unsigned int S>
+  T& fwd<T,S>::operator = ( fwd&& u ) {
+    return **this = fc::move(*u);
+  }
+  template<typename T, unsigned int S>
+  T& fwd<T,S>::operator = ( const fwd& u ) {
+    return **this = *u;
+  }
+
+} // namespace fc
+
+#endif //_FC_FWD_IMPL_HPP_
diff --git a/include/fc/fwd_reflect.hpp b/include/fc/fwd_reflect.hpp
new file mode 100644
index 0000000..62b5bfe
--- /dev/null
+++ b/include/fc/fwd_reflect.hpp
@@ -0,0 +1,21 @@
+#ifndef _FC_FWD_REFLECT_HPP_
+#define _FC_FWD_REFLECT_HPP_
+#include <fc/fwd.hpp>
+#include <fc/reflect.hpp>
+
+namespace fc {
+  template<typename T, unsigned int S>
+  class reflector<fwd<T,S> > : public detail::reflector_impl<fwd<T,S>, reflector<fwd<T,S> > >{
+    public:
+      virtual const char*
name()const { return instance().name(); }
+      virtual void visit( void* s, const abstract_visitor& v )const {
+        instance().visit(s,v);
+      }
+      virtual void visit( const void* s, const abstract_const_visitor& v )const {
+        instance().visit(s,v);
+      }
+
+      static reflector<T>& instance() { return reflector<T>::instance(); }
+  };
+} // namespace fc
+#endif //_FC_FWD_REFLECT_HPP_
diff --git a/include/fc/invokeable.hpp b/include/fc/invokeable.hpp
new file mode 100644
index 0000000..ee7d9e4
--- /dev/null
+++ b/include/fc/invokeable.hpp
@@ -0,0 +1,17 @@
+#ifndef _FC_INVOKEABLE_HPP_
+#define _FC_INVOKEABLE_HPP_
+
+namespace fc {
+
+  class invokeable {
+    public:
+      virtual ~invokeable(){};
+
+      virtual void invoke( const promise_base::ptr& prom, const string& name, size_t num_params, reflect::cref* params );
+
+      void invoke( const std::string& name ) { invoke( promise_base::ptr(), name, 0, 0 ); }
+  };
+
+}
+
+#endif // _FC_INVOKEABLE_HPP_
diff --git a/include/fc/json.hpp b/include/fc/json.hpp
new file mode 100644
index 0000000..072dc5b
--- /dev/null
+++ b/include/fc/json.hpp
@@ -0,0 +1,28 @@
+#ifndef _FC_JSON_HPP_
+#define _FC_JSON_HPP_
+#include <fc/string.hpp>
+#include <fc/reflect_fwd.hpp>
+#include <fc/value.hpp>
+
+namespace fc {
+  class istream;
+  class ostream;
+  class cref;
+
+  namespace json {
+    string    to_string( const cref& o );
+    value_fwd from_string( const string& s );
+    value_fwd from_string( const char* s, const char* e );
+    void      from_string( const string&, const ref& o );
+
+    template<typename T>
+    T from_string( const string& s ) {
+      T tmp; from_string( s, tmp );
+      return tmp;
+    }
+    string escape_string( const string& );
+    string unescape_string( const string& );
+    void write( ostream& out, const cref& val );
+} }
+
+#endif
diff --git a/include/fc/json_rpc_connection.hpp b/include/fc/json_rpc_connection.hpp
new file mode 100644
index 0000000..a70f7ae
--- /dev/null
+++ b/include/fc/json_rpc_connection.hpp
@@ -0,0 +1,71 @@
+#ifndef _JSON_RPC_CONNECTION_HPP_
+#define _JSON_RPC_CONNECTION_HPP_
+#include <fc/json.hpp>
+#include <fc/future.hpp>
+#include <fc/reflect_ptr.hpp>
+#include <fc/shared_ptr.hpp>
+
+namespace fc { namespace json {
+  namespace detail {
+    struct pending_result : virtual public promise_base {
+      typedef shared_ptr<pending_result> ptr;
+      virtual void handle_result( const fc::string& ) = 0;
+      void handle_error( const fc::string& );
+      int64_t id;
+      pending_result::ptr next;
+    };
+    template<typename T>
+    struct pending_result_impl : virtual public promise<T>, virtual public pending_result {
+      virtual void handle_result( const fc::string& s ) {
+        this->set_value( fc::json::from_string<T>(s) );
+      }
+    };
+    template<>
+    struct pending_result_impl<void> : virtual public promise<void>, virtual public pending_result {
+      virtual void handle_result( const fc::string& ) {
+        set_value();
+      }
+    };
+  }
+
+  /**
+    This class is designed to be used like this:
+    @code
+    class my_api {
+
+      future<string> function( const string& arg, int arg2 ) {
+        _con->invoke( "function", {&arg,&arg2} );
+      }
+      private:
+        rpc_connection* _con;
+    };
+    @endcode
+
+  */
+  class rpc_connection : virtual public retainable {
+    public:
+      rpc_connection();
+      rpc_connection( istream& i, ostream& o );
+      rpc_connection( rpc_connection&& c );
+      ~rpc_connection();
+
+      rpc_connection& operator=(rpc_connection&& m);
+
+      void init( istream& i, ostream& o );
+
+      template<typename T, size_t N>
+      future<T> invoke( const fc::string& method, const cptr (&params)[N] ) {
+        auto r = new detail::pending_result_impl<T>();
+        invoke( detail::pending_result::ptr(r), method, N, params );
+        return typename promise<T>::ptr( r, true );
+      }
+
+    private:
+      void invoke( detail::pending_result::ptr&& p, const fc::string& m,
+                   uint16_t nparam, const cptr* param );
+      class rpc_connection_d* my;
+  };
+}
} // fc::json + + +#endif // _JSON_RPC_CONNECTION_HPP_ diff --git a/include/fc/log.hpp b/include/fc/log.hpp new file mode 100644 index 0000000..ab3f6c6 --- /dev/null +++ b/include/fc/log.hpp @@ -0,0 +1,33 @@ +#ifndef _FC_LOG_HPP_ +#define _FC_LOG_HPP_ +#include + +namespace boost { class mutex; } + +namespace fc { + /** wrapper on printf */ + void log( const char* color, const char* file_name, size_t line_num, const char* method_name, const char* format, ... ); + + /** used to add extra fields to be printed (thread,fiber,time,etc) */ + void add_log_field( void (*f)() ); + void remove_log_field( void (*f)() ); + + boost::mutex& log_mutex(); +} + +#ifndef __func__ +#define __func__ __FUNCTION__ +#endif + +#ifndef WIN32 +#define COLOR_CONSOLE 1 +#endif +#include + +#define dlog(...) do { fc::log( CONSOLE_DEFAULT, __FILE__, __LINE__, __func__, __VA_ARGS__ ); }while(false) +#define slog(...) do { fc::log( CONSOLE_DEFAULT, __FILE__, __LINE__, __func__, __VA_ARGS__ ); }while(false) +#define wlog(...) do { fc::log( CONSOLE_BROWN, __FILE__, __LINE__, __func__, __VA_ARGS__ ); }while(false) +#define elog(...) do { fc::log( CONSOLE_RED, __FILE__, __LINE__, __func__, __VA_ARGS__ ); }while(false) + + +#endif // _FC_LOG_HPP_ diff --git a/include/fc/map.hpp b/include/fc/map.hpp new file mode 100644 index 0000000..1cdb745 --- /dev/null +++ b/include/fc/map.hpp @@ -0,0 +1,18 @@ +#ifndef _FC_MAP_HPP_ +namespace fc { + + namespace detail { + class map_impl { + public: + + + }; + } + template + class map : public map_impl { + + + }; + + +} diff --git a/include/fc/optional.hpp b/include/fc/optional.hpp new file mode 100644 index 0000000..bc2bc62 --- /dev/null +++ b/include/fc/optional.hpp @@ -0,0 +1,90 @@ +#ifndef _FC_OPTIONAL_HPP_ +#define _FC_OPTIONAL_HPP_ +#include + +namespace fc { + /** + * @brief provides stack-based nullable value similar to boost::optional + * + * Simply including boost::optional adds 35,000 lines to each object file, using + * fc::optional adds less than 400. + */ + template + class optional { + public: + optional():_valid(0){} + ~optional(){ if( _valid ) (**this).~T(); } + + optional( const optional& o ) + :_valid(false) { + if( o._valid ) new (&**this) T( *o ); + _valid = o._valid; + } + + optional( optional&& o ) + :_valid(false) { + if( o._valid ) new (&**this) T( fc::move(*o) ); + _valid = o._valid; + } + + template + optional( U&& u ) + :_valid(false) { + new (&**this) T( fc::forward(u) ); + _valid = true; + } + + template + optional& operator=( U&& u ) { + if( &u == &**this ) return *this; + if( !_valid ) { + new (&**this) T( fc::forward(u) ); + _valid = true; + } else { + **this = u; + } + return *this; + } + + optional& operator=( const optional& o ) { + if( _valid && o.valid ) { **this = *o; } + else if( !_valid && o._valid ) { + *this = **o; + } // else !_valid && !o._valid == same! + return *this; + } + + bool operator!()const { return !_valid; } + + T& operator*() { void* v = &_value[0]; return *static_cast(v); } + const T& operator*()const { const void* v = &_value[0]; return *static_cast(v); } + + T& operator->() { void* v = &_value[0]; return *static_cast(v); } + const T& operator->()const { const void* v = &_value[0]; return *static_cast(v); } + + private: + // force alignment... 
+
+  template<typename T>
+  bool operator == ( const optional<T>& left, const optional<T>& right ) {
+    return (!left && !right) || (!!left && !!right && *left == *right);
+  }
+  template<typename T, typename U>
+  bool operator == ( const optional<T>& left, const U& u ) {
+    return !!left && *left == u;
+  }
+  template<typename T>
+  bool operator != ( const optional<T>& left, const optional<T>& right ) {
+    return (!left != !right) || (!!left && *left != *right);
+  }
+  template<typename T, typename U>
+  bool operator != ( const optional<T>& left, const U& u ) {
+    return !left || *left != u;
+  }
+
+} // namespace fc
+
+#endif
diff --git a/include/fc/priority.hpp b/include/fc/priority.hpp
new file mode 100644
index 0000000..fa5df77
--- /dev/null
+++ b/include/fc/priority.hpp
@@ -0,0 +1,19 @@
+#ifndef _FC_PRIORITY_HPP_
+#define _FC_PRIORITY_HPP_
+
+namespace fc {
+  /**
+   *  An integer value used to sort asynchronous tasks. The higher the
+   *  priority the sooner it will be run.
+   */
+  class priority {
+    public:
+      explicit priority( int v = 0):value(v){}
+      priority( const priority& p ):value(p.value){}
+      bool operator < ( const priority& p )const {
+        return value < p.value;
+      }
+      int value;
+  };
+}
+#endif // _FC_PRIORITY_HPP_
diff --git a/include/fc/reflect.hpp b/include/fc/reflect.hpp
new file mode 100644
index 0000000..e9cdd63
--- /dev/null
+++ b/include/fc/reflect.hpp
@@ -0,0 +1,197 @@
+#ifndef _FC_REFLECT_HPP_
+#define _FC_REFLECT_HPP_
+#include <fc/abstract_types.hpp>
+#include <fc/utility.hpp>
+#include <stdint.h>
+#include <stddef.h>
+
+namespace fc {
+
+  class string;
+
+  class abstract_visitor;
+  class abstract_const_visitor;
+  class abstract_reflector;
+
+  // provides reference semantics
+  class ref {
+    public:
+      template<typename T>
+      ref( T& v );
+
+      ref( const ref& v )
+      :_obj(v._obj),_reflector(v._reflector){}
+
+      ref( void* o, abstract_reflector& r )
+      :_obj(o),_reflector(r){}
+
+      void* _obj;
+      abstract_reflector& _reflector;
+
+    private:
+      ref& operator=(const ref& o);
+  };
+
+  class cref {
+    public:
+      template<typename T>
+      cref( const T& v );
+
+      cref( const cref& v )
+      :_obj(v._obj),_reflector(v._reflector){}
+
+      cref( const ref& v )
+      :_obj(v._obj),_reflector(v._reflector){}
+
+      cref( const void* o, abstract_reflector& r )
+      :_obj(o),_reflector(r){}
+
+      const void* _obj;
+      abstract_reflector& _reflector;
+
+    private:
+      cref& operator=(const cref& o);
+  };
+
+
+  class abstract_reflector : virtual public abstract_value_type {
+    public:
+      virtual ~abstract_reflector(){}
+      virtual const char* name()const = 0;
+      virtual void visit( void* s, const abstract_visitor& v )const = 0;
+      virtual void visit( const void* s, const abstract_const_visitor& v )const = 0;
+      virtual ref  get_member(void*, uint64_t) = 0;
+      virtual cref get_member(const void*, uint64_t) = 0;
+      virtual ref  get_member(void*, const char*) = 0;
+      virtual cref get_member(const void*, const char*) = 0;
+      virtual size_t member_count(const void*) = 0;
+
+  };
+
+  class abstract_visitor {
+    public:
+      virtual ~abstract_visitor(){}
+      virtual void visit()const=0;
+      virtual void visit( char& c )const=0;
+      virtual void visit( uint8_t& c )const=0;
+      virtual void visit( uint16_t& c )const=0;
+      virtual void visit( uint32_t& c )const=0;
+      virtual void visit( uint64_t& c )const=0;
+      virtual void visit( int8_t& c )const=0;
+      virtual void visit( int16_t& c )const=0;
+      virtual void visit( int32_t& c )const=0;
+      virtual void visit( int64_t& c )const=0;
+      virtual void visit( double& c )const=0;
+      virtual void visit( float& c )const=0;
+      virtual void visit( bool& c )const=0;
+      virtual void visit( fc::string& c )const=0;
+      virtual void visit( const char* member, int idx, int size, const ref& v)const=0;
+      virtual void visit( int idx, int size, const ref& v)const=0;
+      virtual void array_size( int size )const=0;
+      virtual void object_size( int size )const=0;
+  };
+
+  class abstract_const_visitor {
+    public:
+      virtual ~abstract_const_visitor(){}
+      virtual void visit()const=0;
+      virtual void visit( const char& c )const=0;
+      virtual void visit( const uint8_t& c )const=0;
+      virtual void visit( const uint16_t& c )const=0;
+      virtual void visit( const uint32_t& c )const=0;
+      virtual void visit( const uint64_t& c )const=0;
+      virtual void visit( const int8_t& c )const=0;
+      virtual void visit( const int16_t& c )const=0;
+      virtual void visit( const int32_t& c )const=0;
+      virtual void visit( const int64_t& c )const=0;
+      virtual void visit( const double& c )const=0;
+      virtual void visit( const float& c )const=0;
+      virtual void visit( const bool& c )const=0;
+      virtual void visit( const fc::string& c )const=0;
+      virtual void visit( const char* member, int idx, int size, const cref& v)const=0;
+      virtual void visit( int idx, int size, const cref& v)const=0;
+      virtual void array_size( int size )const=0;
+      virtual void object_size( int size )const=0;
+  };
+
+  namespace detail {
+    template<typename T, typename Derived>
+    class reflector_impl : virtual public value_type<T>, virtual public abstract_reflector {
+      virtual ref get_member(void*, uint64_t) {
+        int x = 0;
+        return x;
+      }
+      virtual cref get_member(const void*, uint64_t) {
+        int x = 0;
+        return x;
+      }
+      // throw if field is not found
+      virtual ref get_member(void*, const char*) {
+        int x = 0;
+        return x;
+        // init static hash map the first time it is called...
+        // lookup field in hash map, return ref
+        //return ref();
+      }
+      // throw if field is not found
+      virtual cref get_member(const void*, const char*) {
+        int x = 0;
+        return x;
+        // init static hash map the first time it is called...
+        // lookup field in hash map, return ref
+        //return cref();
+      }
+      // throw if field is not found
+      virtual size_t member_count(const void*) {
+        // init static hash map the first time it is called...
+        // lookup field in hash map, return ref
+        return 0;
+      }
+    };
+  }
+
+
+  template<typename T>
+  struct get_typename {};
+  template<> struct get_typename<int32_t>  { static const char* name() { return "int32_t";  } };
+  template<> struct get_typename<int64_t>  { static const char* name() { return "int64_t";  } };
+  template<> struct get_typename<int16_t>  { static const char* name() { return "int16_t";  } };
+  template<> struct get_typename<int8_t>   { static const char* name() { return "int8_t";   } };
+  template<> struct get_typename<uint32_t> { static const char* name() { return "uint32_t"; } };
+  template<> struct get_typename<uint64_t> { static const char* name() { return "uint64_t"; } };
+  template<> struct get_typename<uint16_t> { static const char* name() { return "uint16_t"; } };
+  template<> struct get_typename<uint8_t>  { static const char* name() { return "uint8_t";  } };
+  template<> struct get_typename<double>   { static const char* name() { return "double";   } };
+  template<> struct get_typename<float>    { static const char* name() { return "float";    } };
+  template<> struct get_typename<bool>     { static const char* name() { return "bool";     } };
+  template<> struct get_typename<string>   { static const char* name() { return "string";   } };
+
+  template<typename T>
+  class reflector : public detail::reflector_impl<T, reflector<T> >{
+    public:
+      virtual const char* name()const { return get_typename<T>::name(); }
+      virtual void visit( void* s, const abstract_visitor& v )const {
+        v.visit( *((T*)s) );
+      }
+      virtual void visit( const void* s, const abstract_const_visitor& v )const {
+        v.visit( *((const T*)s) );
+      }
+
+      static reflector& instance() { static reflector inst; return inst; }
+  };
+
+  template<typename T> reflector<T>& reflect( const T& ) { return reflector<T>::instance(); }
+
+  template<typename T>
+  ref::ref( T& v ) :_obj(&v),_reflector(reflector<T>::instance()){}
+
+  template<typename T>
+  cref::cref( const T& v ) :_obj(&v),_reflector(reflector<T>::instance()){}
+
+  template<typename T> class vector;
+  template<typename T>
+  class reflector<fc::vector<T> >;
+
+} // namespace fc
+
+
+#endif // _REFLECT_HPP_
diff --git a/include/fc/reflect_cast.hpp b/include/fc/reflect_cast.hpp
new file mode 100644
index 0000000..56c1e56
--- /dev/null
+++ b/include/fc/reflect_cast.hpp
@@ -0,0 +1,40 @@
+#ifndef _REFLECT_CAST_HPP_
+#define _REFLECT_CAST_HPP_
+#include <fc/reflect.hpp>
+
+namespace fc {
+  /**
+   *  This is specialized for each type to implement a cast
+   *  from a reflected value.
+   *
+   *  By default the cast will only work for 'exact' matches of
+   *  type. Use duck_cast for a more flexible field-by-field
+   *  cast.
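+   *
+   *  Sketch of the intended use (illustrative; assumes a reflect_cast<T>(cref)
+   *  entry point, which this checkin does not yet declare):
+   *  @code
+   *    my_struct a;  a.x = 1;
+   *    fc::cref r( a );                              // type-erased const ref
+   *    my_struct b = reflect_cast<my_struct>( r );   // exact type: succeeds
+   *    other_type c = reflect_cast<other_type>( r ); // mismatch: would throw
+   *  @endcode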
+   */
+  template<typename T>
+  class const_cast_visitor : public abstract_const_visitor {
+    public:
+      const_cast_visitor( T& s ):_s(s){}
+
+      virtual void visit()const=0;
+      virtual void visit( const char& c )const=0;
+      virtual void visit( const uint8_t& c )const=0;
+      virtual void visit( const uint16_t& c )const=0;
+      virtual void visit( const uint32_t& c )const=0;
+      virtual void visit( const uint64_t& c )const=0;
+      virtual void visit( const int8_t& c )const=0;
+      virtual void visit( const int16_t& c )const=0;
+      virtual void visit( const int32_t& c )const=0;
+      virtual void visit( const int64_t& c )const=0;
+      virtual void visit( const double& c )const=0;
+      virtual void visit( const float& c )const=0;
+      virtual void visit( const bool& c )const=0;
+      virtual void visit( const fc::string& c )const=0;
+      virtual void visit( const char* member, int idx, int size, const cref& v)const=0;
+      virtual void visit( int idx, int size, const cref& v)const=0;
+
+    protected:
+      T& _s;
+  };
+
+
+
+} // namespace fc
+#endif // _REFLECT_CAST_HPP_
diff --git a/include/fc/reflect_fwd.hpp b/include/fc/reflect_fwd.hpp
new file mode 100644
index 0000000..b91149d
--- /dev/null
+++ b/include/fc/reflect_fwd.hpp
@@ -0,0 +1,31 @@
+#ifndef _FC_REFLECT_FWD_HPP_
+#define _FC_REFLECT_FWD_HPP_
+/**
+ *  @file reflect_fwd.hpp
+ *  @brief forward declares types defined in reflect.hpp
+ *
+ *  You should include this file in your headers to accelerate your
+ *  compile times over including reflect.hpp
+ */
+
+namespace fc {
+  class abstract_reflector;
+  template<typename T> class reflector;
+  class abstract_visitor;
+  class abstract_const_visitor;
+  class ref;
+  class cref;
+}
+
+#define FC_REFLECTABLE( TYPE ) \
+namespace fc{ \
+  template<> class reflector<TYPE> : virtual public detail::reflector_impl<TYPE, reflector<TYPE> > { \
+    public:\
+      virtual const char* name()const; \
+      virtual void visit( void* s, const abstract_visitor& v )const; \
+      virtual void visit( const void* s, const abstract_const_visitor& v )const; \
+      static reflector& instance(); \
+  };\
+}
+
+#endif// _FC_REFLECT_FWD_HPP_
diff --git a/include/fc/reflect_impl.hpp b/include/fc/reflect_impl.hpp
new file mode 100644
index 0000000..1fd38fe
--- /dev/null
+++ b/include/fc/reflect_impl.hpp
@@ -0,0 +1,32 @@
+#ifndef _FC_REFLECT_IMPL_HPP_
+#define _FC_REFLECT_IMPL_HPP_
+#include <fc/reflect.hpp>
+#include <boost/preprocessor/seq/for_each_i.hpp>
+#include <boost/preprocessor/seq/size.hpp>
+#include <boost/preprocessor/stringize.hpp>
+
+/**
+ *  @file reflect_impl.hpp
+ *  @brief defines the FC_REFLECT() macro.
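+ *
+ *  Typical pairing with FC_REFLECTABLE (sketch based on example.hpp):
+ *  @code
+ *    // header:       FC_REFLECTABLE( example )
+ *    // source file:  FC_REFLECT( example, (a)(b) )
+ *  @endcode
+ *  For that member list the generated visit() expands to calls of the
+ *  form v.visit( "a", 0, 2, e->a ), one per listed member.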
+ *
+ *  This header uses the boost preprocessor library.
+ */
+
+#define FC_REFLECT_FIELD( r, data, i, elem ) \
+  v.visit( BOOST_PP_STRINGIZE(elem), i, data, e-> elem );
+
+#define FC_REFLECT( NAME, MEMBERS ) \
+namespace fc { \
+  const char* reflector<NAME>::name()const { return BOOST_PP_STRINGIZE(NAME); } \
+  void reflector<NAME>::visit( void* s, const abstract_visitor& v )const { \
+    NAME* e = (NAME*)s; \
+    BOOST_PP_SEQ_FOR_EACH_I( FC_REFLECT_FIELD, BOOST_PP_SEQ_SIZE(MEMBERS), MEMBERS ) \
+  } \
+  void reflector<NAME>::visit( const void* s, const abstract_const_visitor& v )const { \
+    const NAME* e = (const NAME*)s; \
+    BOOST_PP_SEQ_FOR_EACH_I( FC_REFLECT_FIELD, BOOST_PP_SEQ_SIZE(MEMBERS), MEMBERS ) \
+  } \
+  reflector<NAME>& reflector<NAME>::instance() { static reflector<NAME> inst; return inst; } \
+}
+
+#endif // _FC_REFLECT_IMPL_HPP_
diff --git a/include/fc/reflect_ptr.hpp b/include/fc/reflect_ptr.hpp
new file mode 100644
index 0000000..31a3d11
--- /dev/null
+++ b/include/fc/reflect_ptr.hpp
@@ -0,0 +1,48 @@
+#ifndef _FC_REFLECT_PTR_HPP_
+#define _FC_REFLECT_PTR_HPP_
+#include <fc/reflect.hpp>
+
+namespace fc {
+
+  struct ptr {
+    ptr():_obj(0),_reflector(0){}
+
+    template<typename T>
+    ptr( T* v )
+    :_obj(v),_reflector(&reflect(*v)){}
+
+    ptr( const ptr& v )
+    :_obj(v._obj),_reflector(v._reflector){}
+
+    ref operator*()const { return ref( _obj, *_reflector); }
+
+    private:
+      friend struct cptr;
+      void* _obj;
+      abstract_reflector* _reflector;
+  };
+
+  // provides pointer semantics
+  struct cptr {
+    cptr():_obj(0),_reflector(0){}
+
+    template<typename T>
+    cptr( const T* v )
+    :_obj(v),_reflector(&reflect(*v)){}
+
+    cptr( const cptr& v )
+    :_obj(v._obj),_reflector(v._reflector){}
+
+    cptr( const ptr& v )
+    :_obj(v._obj),_reflector(v._reflector){}
+
+    cref operator*()const { return cref( _obj, *_reflector); }
+
+    private:
+      const void* _obj;
+      abstract_reflector* _reflector;
+  };
+
+}
+
+#endif // _FC_REFLECT_PTR_HPP
diff --git a/include/fc/reflect_ref.hpp b/include/fc/reflect_ref.hpp
new file mode 100644
index 0000000..e69de29
diff --git a/include/fc/reflect_tmp.hpp b/include/fc/reflect_tmp.hpp
new file mode 100644
index 0000000..87d2c38
--- /dev/null
+++ b/include/fc/reflect_tmp.hpp
@@ -0,0 +1,125 @@
+struct s {
+  int a;
+  int b;
+};
+
+class visitor {
+  void operator()( const char*, void*, abstract_reflector& r );
+};
+
+class abstract_reflector {
+  virtual void visit( void* s, visitor v ) = 0;
+};
+
+
+
+template<typename S, typename Derived>
+class reflector_impl : public abstract_reflector {
+  public:
+    virtual void visit( void* s, visitor v ) {
+      visit( *((S*)s), v );
+    }
+    static Derived& instance() {
+      static Derived i;
+      return i;
+    }
+};
+
+template<>
+class reflector<s> : public reflector_impl< s, reflector<s> > {
+  void visit( s& x, visitor v ) {
+    v( "a", &x.a, reflector<int>::instance() );
+  }
+  const char* name() { return "s"; }
+};
+
+class abstract_visitor {
+  // fundamental types called directly
+  virtual void operator()( double& d );
+  virtual void operator()( float& d );
+  virtual void operator()( int& d );
+
+  // objects call this operator for each named member..
+  virtual void operator()( const char* member, int idx, void* d, abstract_reflector& v);
+
+  // called for each item in a collection
+  virtual void operator()( int idx, void* d, abstract_reflector& v);
+};
+
+class json_visitor : public visitor{
+  virtual void operator()( double& d ) {
+
+  }
+  virtual void operator()( float& d ) {
+
+  }
+  virtual void operator()( int& d ) {
+  }
+  virtual void operator()( void* d, abstract_reflector& v) {
+    to_json( d, v );
+  }
+};
+
+namespace detail {
+  string to_json( const void*, abstract_reflector& r ) {
+    r.visit( v, to_json_visitor( v ) );
+  }
+  void from_json( void*, abstract_reflector& r );
+}
+
+template<typename T>
+string to_json( const T& v) {
+  return detail::to_json( &v, reflect(v) );
+}
+
+struct param {
+  void* arg;
+  reflector* ref;
+};
+
+class invoker_impl {
+};
+
+class my_interface {
+  my_interface( invoker::ptr inv );
+
+  virtual int some_func( Arg a ) {
+    // this can go in cpp...
+    return inv->invoke( "some_func", a );
+  }
+};
+
+/**
+
+*/
+class invoker {
+  /**
+   *  If variadic templates are supported... use them here.
+   */
+  template<typename R>
+  future<R> invoke( string name ) {
+    auto p = new promise<R>(...)
+    invoke( p, name, 0 );
+    return p;
+  }
+
+  template<typename R, typename P1>
+  future<R> invoke( const string& name, P1&& p ) {
+    auto p = new promise<R>(...)
+    pair<void*,reflector*> params[1];
+    params[0].first = &p;
+    params[0].second = reflector<P1>::instance();
+    inv->invoke( p, name, 1, params );
+    return p;
+  }
+  virtual void invoke( promise<void>::ptr p, const string& s, int num_params = 0, param* params = NULL) = 0;
+
+  /// up to max params...
+};
+
+class json_rpc_client : public invoker {
+
+};
+
+
+
diff --git a/include/fc/reflect_value.hpp b/include/fc/reflect_value.hpp
new file mode 100644
index 0000000..41ec0ab
--- /dev/null
+++ b/include/fc/reflect_value.hpp
@@ -0,0 +1,9 @@
+#ifndef _FC_REFLECT_VALUE_HPP_
+#define _FC_REFLECT_VALUE_HPP_
+#include <fc/value.hpp>
+#include <fc/reflect_fwd.hpp>
+
+FC_REFLECTABLE( fc::value )
+FC_REFLECTABLE( fc::value::member )
+
+#endif // _FC_REFLECT_VALUE_HPP_
diff --git a/include/fc/reflect_vector.hpp b/include/fc/reflect_vector.hpp
new file mode 100644
index 0000000..8421b73
--- /dev/null
+++ b/include/fc/reflect_vector.hpp
@@ -0,0 +1,27 @@
+#ifndef _FC_REFLECT_VECTOR_HPP_
+#define _FC_REFLECT_VECTOR_HPP_
+#include <fc/reflect.hpp>
+#include <fc/vector.hpp>
+namespace fc {
+  template<typename T>
+  class reflector<vector<T> > : public detail::reflector_impl<vector<T>, reflector<vector<T> > >{
+    public:
+      virtual const char* name()const {
+        static fc::string s = fc::string("vector<") + reflector<T>::instance().name() + '>';
+        return s.c_str();
+      }
+      virtual void visit( void* s, const abstract_visitor& v )const {
+        vector<T>& vec = *((vector<T>*)s);
+        size_t si = vec.size();
+        for( size_t i = 0; i < si; ++i ) v.visit( i, si, vec.at(i) );
+      }
+      virtual void visit( const void* s, const abstract_const_visitor& v )const {
+        const vector<T>& vec = *((const vector<T>*)s);
+        size_t si = vec.size();
+        v.array_size(si);
+        for( size_t i = 0; i < si; ++i ) v.visit( i, si, vec.at(i) );
+      }
+      static reflector& instance() { static reflector inst; return inst; }
+  };
+} // namespace fc
+#endif // _FC_REFLECT_VECTOR_HPP_
diff --git a/include/fc/server.hpp b/include/fc/server.hpp
new file mode 100644
index 0000000..a2976c1
--- /dev/null
+++ b/include/fc/server.hpp
@@ -0,0 +1,27 @@
+
+
+class istream {
+  public:
+    template<typename T>
+    istream( T& s ) {
+
+    }
+
+    istream& read( char* buf, uint64_t s );
+    int64_t readsome( char* buf, uint64_t s );
+    bool eof()const;
+
+  private:
+    struct vtable {
+      void*   (*read)(void*, char* buf, uint64_t s );
+      int64_t (*readsome)(void*, char* buf, uint64_t s );
+      bool    (*eof)(void*);
+ };
+
+ vtable& _vtable;
+ void* _stream;
+};
+
+
+
+
diff --git a/include/fc/shared_ptr.hpp b/include/fc/shared_ptr.hpp new file mode 100644 index 0000000..f864682 --- /dev/null +++ b/include/fc/shared_ptr.hpp @@ -0,0 +1,75 @@ +#ifndef _FC_SHARED_PTR_HPP_
+#define _FC_SHARED_PTR_HPP_
+#include
+
+namespace fc {
+
+ /**
+ * @brief used to create reference counted types.
+ *
+ * Classes that wish to be reference counted should derive
+ * virtually from retainable; a new instance starts with a
+ * reference count of 1.
+ */
+ class retainable {
+ public:
+ retainable();
+ void retain();
+ void release();
+ int32_t retain_count()const;
+
+ protected:
+ virtual ~retainable(){};
+
+ private:
+ volatile int32_t _ref_count;
+ };
+
+ template<typename T>
+ class shared_ptr {
+ public:
+ shared_ptr( T* t, bool inc = false )
+ :_ptr(t) { if( inc && t ) t->retain(); }
+
+ shared_ptr():_ptr(0){}
+ shared_ptr( const shared_ptr& p ) {
+ _ptr = p._ptr;
+ if( _ptr ) _ptr->retain();
+ }
+ shared_ptr( shared_ptr&& p ) {
+ _ptr = p._ptr;
+ p._ptr = 0;
+ }
+ ~shared_ptr() {
+ if( _ptr ) _ptr->release();
+ }
+ shared_ptr& reset( T* v = 0 ) {
+ if( v == _ptr ) return *this;
+ if( _ptr ) _ptr->release();
+ _ptr = v;
+ if( _ptr ) _ptr->retain();
+ return *this;
+ }
+
+ shared_ptr& operator=(const shared_ptr& p ) {
+ shared_ptr tmp(p);
+ fc::swap(tmp,*this);
+ return *this;
+ }
+ shared_ptr& operator=(shared_ptr&& p ) {
+ fc::swap(*this,p);
+ return *this;
+ }
+ T& operator* ()const { return *_ptr; }
+ T* operator-> ()const { return _ptr; }
+
+ bool operator==( const shared_ptr& p )const { return get() == p.get(); }
+ bool operator<( const shared_ptr& p )const { return get() < p.get(); }
+ T* get() const { return _ptr; }
+
+ bool operator!()const { return _ptr == 0; }
+ operator bool()const { return _ptr != 0; }
+ private:
+ T* _ptr;
+ };
+}
+
+#endif
diff --git a/include/fc/spin_lock.hpp b/include/fc/spin_lock.hpp new file mode 100644 index 0000000..489e9e6 --- /dev/null +++ b/include/fc/spin_lock.hpp @@ -0,0 +1,41 @@ +#ifndef _FC_SPIN_LOCK_HPP_
+#define _FC_SPIN_LOCK_HPP_
+
+namespace boost {
+ template<typename T> class atomic;
+}
+
+namespace fc {
+ class microseconds;
+ class time_point;
+
+ /**
+ * @class spin_lock
+ * @brief modified spin-lock that yields on failure, but becomes a 'spin lock'
+ * if there are no other tasks to yield to.
+ *
+ * This kind of lock is lighter weight than a full mutex, but potentially slower
+ * than a straight spin lock.
+ *
+ * This spin_lock does not block the current thread, but instead attempts to use
+ * an atomic operation to acquire the lock. If unsuccessful, then it yields to
+ * other tasks before trying again. If there are no other tasks then yield is
+ * a no-op and spin_lock becomes a spin-lock.
+ */
+ class spin_lock {
+ public:
+ spin_lock();
+ bool try_lock();
+ bool try_lock_for( const microseconds& rel_time );
+ bool try_lock_until( const time_point& abs_time );
+ void lock();
+ void unlock();
+
+ private:
+ enum lock_store {locked,unlocked};
+ int _lock;
+ };
+
+} // namespace fc
+
+#endif // _FC_SPIN_LOCK_HPP_
diff --git a/include/fc/spin_yield_lock.hpp b/include/fc/spin_yield_lock.hpp new file mode 100644 index 0000000..7d53709 --- /dev/null +++ b/include/fc/spin_yield_lock.hpp @@ -0,0 +1,41 @@ +#ifndef _FC_SPIN_YIELD_LOCK_HPP_
+#define _FC_SPIN_YIELD_LOCK_HPP_
+
+namespace boost {
+ template<typename T> class atomic;
+}
+
+namespace fc {
+ class microseconds;
+ class time_point;
+
+ /**
+ * @class spin_yield_lock
+ * @brief modified spin-lock that yields on failure, but becomes a 'spin lock'
+ * if there are no other tasks to yield to.
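+ *
+ * A usage sketch (illustrative only; assumes the fc::unique_lock guard
+ * from fc/unique_lock.hpp):
+ *
+ *    fc::spin_yield_lock lock;
+ *    { fc::unique_lock<fc::spin_yield_lock> guard(lock);
+ *      // critical section; other tasks may run while we wait
+ *    }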
+ * + * This kind of lock is lighter weight than a full mutex, but potentially slower + * than a staight spin_lock. + * + * This spin_yield_lock does not block the current thread, but instead attempts to use + * an atomic operation to aquire the lock. If unsuccessful, then it yields to + * other tasks before trying again. If there are no other tasks then yield is + * a no-op and spin_yield_lock becomes a spin-lock. + */ + class spin_yield_lock { + public: + spin_yield_lock(); + bool try_lock(); + bool try_lock_for( const microseconds& rel_time ); + bool try_lock_until( const time_point& abs_time ); + void lock(); + void unlock(); + + private: + enum lock_store {locked,unlocked}; + int _lock; + }; + +} // namespace fc + +#endif // _FC_SPIN_YIELD_LOCK_HPP_ diff --git a/include/fc/stream.hpp b/include/fc/stream.hpp new file mode 100644 index 0000000..0b9955b --- /dev/null +++ b/include/fc/stream.hpp @@ -0,0 +1,167 @@ +#ifndef _FC_STREAM_HPP_ +#define _FC_STREAM_HPP_ +#include +#include + +namespace fc { + class string; + namespace detail { + template + struct has_close { + typedef char (&no_tag)[1]; + typedef char (&yes_tag)[2]; + + template struct has_close_helper{}; + + template + static no_tag has_member_helper(...); + + template + static yes_tag has_member_helper( has_close_helper* p); + + enum closed_value { value = sizeof(has_member_helper(0)) == sizeof(yes_tag) }; + }; + + template::value> + struct if_close { static void close( C& c ) { c.close(); } }; + + template + struct if_close { static void close( C& ) { } }; + + + + class abstract_istream { + public: + abstract_istream(); + virtual ~abstract_istream(); + size_t readsome( char* buf, size_t len ); + + virtual size_t readsome_impl( char* buf, size_t len ) = 0; + + //private: + // store a boost::iostreams device that will do + // the actual reading/writing for the stream operators + //void* _store[51]; + char _store[51*sizeof(void*)]; + }; + + template + class istream : public abstract_istream { + public: + istream( IStream& i ):_in(i){} + + virtual size_t readsome_impl( char* buf, size_t len ) { + return _in.readsome(buf,len); + } + + private: + IStream& _in; + }; + + class abstract_ostream { + public: + abstract_ostream(); + virtual ~abstract_ostream(); + size_t write( const char* buf, size_t len ); + void close(); + void flush(); + + virtual void close_impl() = 0; + virtual void flush_impl() = 0; + virtual size_t write_impl( const char* buf, size_t len ) = 0; +// private: + // store a boost::iostreams device that will do + // the actual reading/writing for the stream operators + void* _store[50]; + }; + + template + class ostream : public abstract_ostream { + public: + ostream( OStream& o ):_out(o){} + + virtual size_t write_impl( const char* buf, size_t len ) { + _out.write(buf,len); + return len; + } + virtual void close_impl() { if_close::close(_out); } + virtual void flush_impl() { _out.flush(); } + + private: + OStream& _out; + }; + + } + + class istream { + public: + template + istream( IStream& is ) { + static_assert( sizeof(detail::istream(is)) <= sizeof(_store), "Failed to reserve enough space"); + new ((void*)&_store[0]) detail::istream(is); + } + ~istream(); + + size_t readsome( char* buf, size_t len ); + + friend istream& operator>>( istream&, int64_t& ); + friend istream& operator>>( istream&, uint64_t& ); + friend istream& operator>>( istream&, int32_t& ); + friend istream& operator>>( istream&, uint32_t& ); + friend istream& operator>>( istream&, int16_t& ); + friend istream& operator>>( istream&, uint16_t& ); + 
friend istream& operator>>( istream&, int8_t& ); + friend istream& operator>>( istream&, uint8_t& ); + friend istream& operator>>( istream&, float& ); + friend istream& operator>>( istream&, double& ); + friend istream& operator>>( istream&, bool& ); + friend istream& operator>>( istream&, char& ); + friend istream& operator>>( istream&, fc::string& ); + + private: + istream( const istream& ); + istream& operator=(const istream& ); + void* _store[54]; + }; + + class ostream { + public: + template + ostream( OStream& os ) { + static_assert( sizeof(detail::ostream(os)) <= sizeof(_store), "Failed to reserve enough space"); + new ((void*)&_store[0]) detail::ostream(os); + } + + ~ostream(); + + size_t write( const char* buf, size_t len ); + void close(); + void flush(); + + friend ostream& operator<<( ostream&, int64_t ); + friend ostream& operator<<( ostream&, uint64_t ); + friend ostream& operator<<( ostream&, int32_t ); + friend ostream& operator<<( ostream&, uint32_t ); + friend ostream& operator<<( ostream&, int16_t ); + friend ostream& operator<<( ostream&, uint16_t ); + friend ostream& operator<<( ostream&, int8_t ); + friend ostream& operator<<( ostream&, uint8_t ); + friend ostream& operator<<( ostream&, float ); + friend ostream& operator<<( ostream&, double ); + friend ostream& operator<<( ostream&, bool ); + friend ostream& operator<<( ostream&, char ); + friend ostream& operator<<( ostream&, const char* ); + friend ostream& operator<<( ostream&, const fc::string& ); + + private: + ostream( const ostream& o ); + ostream& operator=(const ostream& o); + char _store[54*sizeof(void*)]; + }; + + extern ostream cout; + extern ostream cerr; + extern istream cin; +} + +#endif // _FC_STREAM_HPP_ diff --git a/include/fc/string.hpp b/include/fc/string.hpp new file mode 100644 index 0000000..e451995 --- /dev/null +++ b/include/fc/string.hpp @@ -0,0 +1,62 @@ +#ifndef _FC_STRING_HPP_ +#define _FC_STRING_HPP_ +#include + +namespace fc { + /** + * Including results in 4000 lines of code + * that must be included to build your header. This + * class hides all of those details while maintaining + * compatability with std::string. Using fc::string + * instead of std::string can accelerate compile times + * 10x. 
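+ *
+ * The mechanism is a plain pimpl: the class below only stores an opaque
+ * 'void* my', and every member is assumed to forward to a hidden
+ * std::string in string.cpp, e.g. (illustrative sketch, not the actual
+ * implementation):
+ *
+ *    string::string( const char* c ):my( new std::string(c) ){}
+ *    uint64_t string::size()const { return ((std::string*)my)->size(); }
+ *    string::~string(){ delete (std::string*)my; }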
+ */ + class string { + public: + typedef char* iterator; + typedef const char* const_iterator; + + string(); + string( const string& c ); + string( string&& c ); + string( const char* c ); + string( const_iterator b, const_iterator e ); + ~string(); + + iterator begin(); + iterator end(); + + const_iterator begin()const; + const_iterator end()const; + + char& operator[](uint64_t idx); + const char& operator[](uint64_t idx)const; + + string& operator =( const string& c ); + string& operator =( string&& c ); + + void reserve( uint64_t ); + uint64_t size()const; + + void resize( uint64_t s ); + void clear(); + + const char* c_str()const; + + bool operator == ( const char* s )const; + bool operator == ( const string& s )const; + bool operator != ( const string& s )const; + + string& operator+=( const string& s ); + string& operator+=( char c ); + + friend string operator + ( const string&, const string& ); + friend string operator + ( const string&, char c ); + + private: + void* my; + }; + +} // namespace FC + +#endif // _FC_STRING_HPP_ diff --git a/include/fc/task.hpp b/include/fc/task.hpp new file mode 100644 index 0000000..146867a --- /dev/null +++ b/include/fc/task.hpp @@ -0,0 +1,86 @@ +#ifndef _FC_TASK_HPP_ +#define _FC_TASK_HPP_ +#include +#include + +namespace fc { + struct context; + + class task_base : virtual public promise_base { + public: + ~task_base(); + void run(); + protected: + uint64_t _posted_num; + priority _prio; + time_point _when; + void _set_active_context(context*); + context* _active_context; + task_base* _next; + + task_base(void* func); + // opaque internal / private data used by + // thread/thread_private + friend class thread; + friend class thread_d; + char _spinlock_store[sizeof(void*)]; + + // avoid rtti info for every possible functor... 
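+ // type erasure without virtual dispatch: the functor lives in the raw
+ // buffer passed to task_base(void*), and is destroyed / invoked through
+ // the plain function pointers below, which the task constructor points
+ // at the detail::functor_destructor / functor_run helpers instantiated
+ // for the concrete functor type.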
+ promise_base* _promise_impl; + void* _functor; + void (*_destroy_functor)(void*); + void (*_run_functor)(void*, void* ); + }; + + namespace detail { + template + struct functor_destructor { + static void destroy( void* v ) { ((T*)v)->~T(); } + }; + template + struct functor_run { + static void run( void* functor, void* prom ) { + ((promise*)prom)->set_value( (*((T*)functor))() ); + } + }; + template + struct void_functor_run { + static void run( void* functor, void* prom ) { + (*((T*)functor))(); + ((promise*)prom)->set_value(); + } + }; + } + + template + class task : virtual public task_base, virtual public promise { + public: + template + task( Functor&& f ):task_base(&_functor[0]) { + static_assert( sizeof(f) <= sizeof(_functor), "sizeof(Functor) is larger than FunctorSize" ); + new ((char*)&_functor[0]) Functor( fc::forward(f) ); + _destroy_functor = &detail::functor_destructor::destroy; + + _promise_impl = static_cast*>(this); + _run_functor = &detail::functor_run::run; + } + char _functor[FunctorSize]; + }; + template + class task : virtual public task_base, virtual public promise { + public: + template + task( Functor&& f ):task_base(&_functor[0]) { + static_assert( sizeof(f) <= sizeof(_functor), "sizeof(Functor) is larger than FunctorSize" ); + new ((char*)&_functor[0]) Functor( fc::forward(f) ); + _destroy_functor = &detail::functor_destructor::destroy; + + _promise_impl = static_cast*>(this); + _run_functor = &detail::void_functor_run::run; + } + char _functor[FunctorSize]; + }; + +} + +#endif // _FC_TASK_HPP_ diff --git a/include/fc/thread.hpp b/include/fc/thread.hpp new file mode 100644 index 0000000..e81e95a --- /dev/null +++ b/include/fc/thread.hpp @@ -0,0 +1,159 @@ +#ifndef _FC_THREAD_HPP_ +#define _FC_THREAD_HPP_ +#include + +namespace fc { + class string; + class time_point; + class microseconds; + + template class vector; // forward declare + + class thread { + public: + thread( const char* name = "" ); + thread( thread&& m ); + thread& operator=(thread&& t ); + + /** + * Returns the current thread. + */ + static thread& current(); + + + /** + * @brief returns the name given by @ref set_name() for this thread + */ + const string& name()const; + + /** + * @brief associates a name with this thread. + */ + void set_name( const string& n ); + + /** + * @brief print debug info about the state of every context / promise. + * + * This method is helpful to figure out where your program is 'hung' by listing + * every async operation (context) and what it is blocked on (future). + * + * @note debug info is more useful if you provide a description for your + * async tasks and promises. + */ + void debug( const fc::string& d ); + + + /** + * Calls function f in this thread and returns a future that can + * be used to wait on the result. + * + * @param f the operation to perform + * @param prio the priority relative to other tasks + */ + template + auto async( Functor&& f, const char* desc ="", priority prio = priority()) -> fc::future { + typedef decltype(f()) Result; + fc::task* tsk = new fc::task( fc::forward(f) ); + async_task(tsk,prio,desc); + return fc::future(fc::shared_ptr< fc::promise >(tsk,true) ); + } + + + /** + * Calls function f in this thread and returns a future that can + * be used to wait on the result. 
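+ *
+ * For example (illustrative; do_work is a placeholder and the future is
+ * assumed to be waited on from within an fc task):
+ *
+ *    fc::thread t("worker");
+ *    auto f = t.schedule( [](){ return do_work(); },
+ *                         fc::time_point::now() + fc::microseconds(1000) );
+ *    auto result = f.wait();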
+ * + * @param f the method to be called + * @param prio the priority of this method relative to others + * @param when determines when this call will happen, as soon as + * possible after when + */ + template + auto schedule( Functor&& f, const fc::time_point& when, + const char* desc = "", priority prio = priority()) -> fc::future { + typedef decltype(f()) Result; + fc::task* tsk = new fc::task( fc::forward(f) ); + async_task(tsk,prio,when,desc); + return fc::future(fc::shared_ptr< fc::promise >(tsk,true) ); + } + + /** + * This method will cancel all pending tasks causing them to throw cmt::error::thread_quit. + * + * If the current thread is not this thread, then the current thread will + * wait for this thread to exit. + * + * This is a blocking wait via boost::thread::join + * and other tasks in the current thread will not run while + * waiting for this thread to quit. + * + * @todo make quit non-blocking of the calling thread by eliminating the call to boost::thread::join + */ + void quit(); + + /** + * @return true unless quit() has been called. + */ + bool is_running()const; + + priority current_priority()const; + ~thread(); + + private: + thread( class thread_d* ); + friend class promise_base; + friend class thread_d; + friend void yield(); + friend void usleep(const microseconds&); + friend void sleep_until(const time_point&); + friend void exec(); + friend int wait_any( fc::vector&& v, const microseconds& ); + friend int wait_any_until( fc::vector&& v, const time_point& tp ); + void wait_until( promise_base::ptr && v, const time_point& tp ); + void notify( const promise_base::ptr& v ); + + void yield(bool reschedule=true); + void sleep_until( const time_point& t ); + void exec(); + int wait_any_until( fc::vector&& v, const time_point& ); + + void async_task( task_base* t, const priority& p, const char* desc ); + void async_task( task_base* t, const priority& p, const time_point& tp, const char* desc ); + class thread_d* my; + }; + + /** + * Yields to other ready tasks before returning. + */ + void yield(); + + /** + * Yields to other ready tasks for u microseconds. + */ + void usleep( const microseconds& u ); + + /** + * Yields until the specified time in the future. + */ + void sleep_until( const time_point& tp ); + + /** + * Enters the main loop processing tasks until quit() is called. + */ + void exec(); + + /** + * Wait until either f1 or f2 is ready. + * + * @return 0 if f1 is ready, 1 if f2 is ready or throw on error. 
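+ *
+ * For example, using the two-future overload from fc/wait_any.hpp
+ * (illustrative; handle() is a placeholder):
+ *
+ *    if( 0 == fc::wait_any( f1, f2, fc::microseconds(500000) ) )
+ *       handle( f1.wait() ); // f1 completed first
+ *    else
+ *       handle( f2.wait() ); // f2 completed first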
+ */ + int wait_any( fc::vector&& v, const microseconds& timeout_us = microseconds::max() ); + int wait_any_until( fc::vector&& v, const time_point& tp ); + + template + auto async( Functor&& f, const char* desc ="", priority prio = priority()) -> fc::future { + return fc::thread::current().async( fc::forward(f), desc, prio ); + } +} + +#endif diff --git a/include/fc/thread_d.hpp b/include/fc/thread_d.hpp new file mode 100644 index 0000000..d033468 --- /dev/null +++ b/include/fc/thread_d.hpp @@ -0,0 +1,391 @@ +#include +#include + +#include +#include "context.hpp" +#include +#include +#include +#include + +namespace fc { + class thread_d { + public: + thread_d(fc::thread& s) + :self(s), boost_thread(0), + task_in_queue(0), + done(false), + current(0), + pt_head(0), + ready_head(0), + ready_tail(0), + blocked(0) + { + static char cnt = 0; + name = fc::string("th_") + char('a'+cnt); + cnt++; + } + fc::thread& self; + boost::thread* boost_thread; + bc::stack_allocator stack_alloc; + boost::mutex task_ready_mutex; + boost::condition_variable task_ready; + + boost::atomic task_in_queue; + std::vector task_pqueue; + std::vector task_sch_queue; + std::vector sleep_pqueue; + std::vector free_list; + + bool done; + std::string name; + cmt::context* current; + + cmt::context* pt_head; + + cmt::context* ready_head; + cmt::context* ready_tail; + + cmt::context* blocked; + + time_point check_for_timeouts(); + + + void debug( const std::string& s ) { + boost::unique_lock lock(detail::log_mutex()); + + std::cerr<<"--------------------- "<cur_task ) std::cerr<<'('<cur_task->get_desc()<<')'; + std::cerr<<" ---------------------------\n"; + std::cerr<<" Ready\n"; + cmt::context* c = ready_head; + while( c ) { + std::cerr<<" "<cur_task ) std::cerr<<'('<cur_task->get_desc()<<')'; + cmt::context* p = c->caller_context; + while( p ) { + std::cerr<<" -> "<caller_context; + } + std::cerr<<"\n"; + c = c->next; + } + std::cerr<<" Blocked\n"; + c = blocked; + while( c ) { + std::cerr<<" ctx: "<< c; + if( c->cur_task ) std::cerr<<'('<cur_task->get_desc()<<')'; + std::cerr << " blocked on prom: "; + for( uint32_t i = 0; i < c->blocking_prom.size(); ++i ) { + std::cerr<blocking_prom[i].prom<<'('<blocking_prom[i].prom->get_desc()<<')'; + if( i + 1 < c->blocking_prom.size() ) { + std::cerr<<","; + } + } + + cmt::context* p = c->caller_context; + while( p ) { + std::cerr<<" -> "<caller_context; + } + std::cerr<<"\n"; + c = c->next_blocked; + } + std::cerr<<"-------------------------------------------------\n"; + } + + // insert at from of blocked linked list + inline void add_to_blocked( cmt::context* c ) { + c->next_blocked = blocked; + blocked = c; + } + + void pt_push_back(cmt::context* c) { + c->next = pt_head; + pt_head = c; + /* + cmt::context* n = pt_head; + int i = 0; + while( n ) { + ++i; + n = n->next; + } + wlog( "idle context...%2% %1%", c, i ); + */ + } + cmt::context::ptr ready_pop_front() { + cmt::context::ptr tmp = 0; + if( ready_head ) { + tmp = ready_head; + ready_head = tmp->next; + if( !ready_head ) + ready_tail = 0; + tmp->next = 0; + } + return tmp; + } + void ready_push_front( const cmt::context::ptr& c ) { + c->next = ready_head; + ready_head = c; + if( !ready_tail ) + ready_tail = c; + } + void ready_push_back( const cmt::context::ptr& c ) { + c->next = 0; + if( ready_tail ) { + ready_tail->next = c; + } else { + ready_head = c; + } + ready_tail = c; + } + struct task_priority_less { + bool operator()( const task::ptr& a, const task::ptr& b ) { + return a->prio.value < b->prio.value ? 
true : (a->prio.value > b->prio.value ? false : a->posted_num > b->posted_num ); + } + }; + struct task_when_less { + bool operator()( const task::ptr& a, const task::ptr& b ) { + return a->when < b->when; + } + }; + + void enqueue( const task::ptr& t ) { + time_point now = system_clock::now(); + task::ptr cur = t; + while( cur ) { + if( cur->when > now ) { + task_sch_queue.push_back(cur); + std::push_heap( task_sch_queue.begin(), + task_sch_queue.end(), task_when_less() ); + } else { + task_pqueue.push_back(cur); + BOOST_ASSERT( this == thread::current().my ); + std::push_heap( task_pqueue.begin(), + task_pqueue.end(), task_priority_less() ); + } + cur = cur->next; + } + } + task* dequeue() { + // get a new task + BOOST_ASSERT( this == thread::current().my ); + + task* pending = 0; + + pending = task_in_queue.exchange(0,boost::memory_order_consume); + if( pending ) { enqueue( pending ); } + + task::ptr p(0); + if( task_sch_queue.size() ) { + if( task_sch_queue.front()->when <= system_clock::now() ) { + p = task_sch_queue.front(); + std::pop_heap(task_sch_queue.begin(), task_sch_queue.end(), task_when_less() ); + task_sch_queue.pop_back(); + return p; + } + } + if( task_pqueue.size() ) { + p = task_pqueue.front(); + std::pop_heap(task_pqueue.begin(), task_pqueue.end(), task_priority_less() ); + task_pqueue.pop_back(); + } + return p; + } + + /** + * This should be before or after a context switch to + * detect quit/cancel operations and throw an exception. + */ + void check_fiber_exceptions() { + if( current && current->canceled ) { + BOOST_THROW_EXCEPTION( error::task_canceled() ); + } else if( done ) { + BOOST_THROW_EXCEPTION( error::thread_quit() ); + } + } + + /** + * Find the next available context and switch to it. + * If none are available then create a new context and + * have it wait for something to do. + */ + bool start_next_fiber( bool reschedule = false ) { + check_for_timeouts(); + if( !current ) current = new cmt::context( &fc::thread::current() ); + + // check to see if any other contexts are ready + if( ready_head ) { + cmt::context* next = ready_pop_front(); + BOOST_ASSERT( next != current ); + if( reschedule ) ready_push_back(current); + + // jump to next context, saving current context + cmt::context* prev = current; + current = next; + bc::jump_fcontext( &prev->my_context, &next->my_context, 0 ); + current = prev; + BOOST_ASSERT( current ); + } else { // all contexts are blocked, create a new context + // that will process posted tasks... + if( reschedule ) ready_push_back(current); + + cmt::context* next; + if( pt_head ) { + next = pt_head; + pt_head = pt_head->next; + next->next = 0; + } else { + next = new cmt::context( &thread_d::start_process_tasks, stack_alloc, + &fc::thread::current() ); + } + cmt::context* prev = current; + current = next; + bc::jump_fcontext( &prev->my_context, &next->my_context, (intptr_t)this ); + current = prev; + BOOST_ASSERT( current ); + } + + if( current->canceled ) + BOOST_THROW_EXCEPTION( cmt::error::task_canceled() ); + + return true; + } + + static void start_process_tasks( intptr_t my ) { + thread_d* self = (thread_d*)my; + try { + self->process_tasks(); + } catch ( ... 
) { + std::cerr<<"fiber exited with uncaught exception:\n "<< + boost::current_exception_diagnostic_information() <free_list.push_back(self->current); + self->start_next_fiber( false ); + } + + bool run_next_task() { + time_point timeout_time = check_for_timeouts(); + task* next = dequeue(); + if( next ) { + next->set_active_context( current ); + current->cur_task = next; + next->run(); + current->cur_task = 0; + next->set_active_context(0); + delete next; + return true; + } + return false; + } + bool has_next_task() { + if( task_pqueue.size() || + (task_sch_queue.size() && task_sch_queue.front()->when <= system_clock::now()) || + task_in_queue.load( boost::memory_order_relaxed ) ) + return true; + return false; + } + void clear_free_list() { + for( uint32_t i = 0; i < free_list.size(); ++i ) { + delete free_list[i]; + } + free_list.clear(); + } + void process_tasks() { + while( !done || blocked ) { + if( run_next_task() ) continue; + + // if I have something else to do other than + // process tasks... do it. + if( ready_head ) { + pt_push_back( current ); + start_next_fiber(false); + continue; + } + + clear_free_list(); + + { // lock scope + boost::unique_lock lock(task_ready_mutex); + if( has_next_task() ) continue; + time_point timeout_time = check_for_timeouts(); + + if( timeout_time == time_point::max() ) { + task_ready.wait( lock ); + } else if( timeout_time != time_point::min() ) { + task_ready.wait_until( lock, timeout_time ); + } + } + } + } + + void yield_until( const time_point& tp, bool reschedule ) { + check_fiber_exceptions(); + + if( tp <= system_clock::now() ) + return; + + if( !current ) { + current = new cmt::context(&cmt::thread::current()); + } + + current->resume_time = tp; + current->clear_blocking_promises(); + + sleep_pqueue.push_back(current); + std::push_heap( sleep_pqueue.begin(), + sleep_pqueue.end(), sleep_priority_less() ); + + start_next_fiber(reschedule); + + // clear current context from sleep queue... + for( uint32_t i = 0; i < sleep_pqueue.size(); ++i ) { + if( sleep_pqueue[i] == current ) { + sleep_pqueue[i] = sleep_pqueue.back(); + sleep_pqueue.pop_back(); + std::make_heap( sleep_pqueue.begin(), + sleep_pqueue.end(), sleep_priority_less() ); + break; + } + } + + current->resume_time = time_point::max(); + check_fiber_exceptions(); + } + + void wait( const promise_base::ptr& p, const time_point& timeout ) { + if( p->ready() ) return; + if( timeout < system_clock::now() ) + BOOST_THROW_EXCEPTION( cmt::error::future_wait_timeout() ); + + if( !current ) { + current = new cmt::context(&cmt::thread::current()); + } + + //slog( " %1% blocking on %2%", current, p.get() ); + current->add_blocking_promise(p.get(),true); + + // if not max timeout, added to sleep pqueue + if( timeout != time_point::max() ) { + current->resume_time = timeout; + sleep_pqueue.push_back(current); + std::push_heap( sleep_pqueue.begin(), + sleep_pqueue.end(), + sleep_priority_less() ); + } + + // elog( "blocking %1%", current ); + add_to_blocked( current ); + // debug("swtiching fibers..." 
); + + + start_next_fiber(); + // slog( "resuming %1%", current ); + + //slog( " %1% unblocking blocking on %2%", current, p.get() ); + current->remove_blocking_promise(p.get()); + + check_fiber_exceptions(); + } + }; +} // namespace fc diff --git a/include/fc/time.hpp b/include/fc/time.hpp new file mode 100644 index 0000000..70690d2 --- /dev/null +++ b/include/fc/time.hpp @@ -0,0 +1,37 @@ +#ifndef _FC_TIME_HPP_ +#define _FC_TIME_HPP_ +#include + +namespace fc { + class microseconds { + public: + explicit microseconds( int64_t c = 0) :_count(c){} + static microseconds max() { return microseconds(0x7fffffffffffffffll); } + friend microseconds operator + (const microseconds& l, const microseconds& r ) { return microseconds(l._count+r._count); } + + bool operator==(const microseconds& c)const { return _count == c._count; } + int64_t count()const { return _count; } + private: + friend class time_point; + int64_t _count; + }; + + class time_point { + public: + explicit time_point( microseconds e = microseconds() ) :elapsed(e){} + static time_point now(); + static time_point max() { return time_point( microseconds::max() ); } + static time_point min() { return time_point(); } + const microseconds& time_since_epoch()const { return elapsed; } + bool operator > ( const time_point& t )const { return elapsed._count > t.elapsed._count; } + bool operator < ( const time_point& t )const { return elapsed._count < t.elapsed._count; } + bool operator <=( const time_point& t )const { return elapsed._count <=t.elapsed._count; } + bool operator ==( const time_point& t )const { return elapsed._count ==t.elapsed._count; } + bool operator !=( const time_point& t )const { return elapsed._count !=t.elapsed._count; } + friend time_point operator + ( const time_point& t, const microseconds& m ) { return time_point(t.elapsed+m); } + friend microseconds operator - ( const time_point& t, const time_point& m ) { return microseconds(t.elapsed.count() - m.elapsed.count()); } + private: + microseconds elapsed; + }; +} +#endif // _FC_TIME_HPP_ diff --git a/include/fc/unique_lock.hpp b/include/fc/unique_lock.hpp new file mode 100644 index 0000000..07e5984 --- /dev/null +++ b/include/fc/unique_lock.hpp @@ -0,0 +1,35 @@ +#ifndef _FC_UNIQUE_LOCK_HPP_ +#define _FC_UNIQUE_LOCK_HPP_ + +namespace fc { + + /** + * Including Boost's unique lock drastically increases compile times + * for something that is this trivial! + */ + template + class unique_lock { + public: + unique_lock( T& l ):_lock(l) { _lock.lock(); } + ~unique_lock() { _lock.unlock(); } + private: + unique_lock( const unique_lock& ); + unique_lock& operator=( const unique_lock& ); + T& _lock; + }; + +} + +/** + * Emulate java with the one quirk that the open bracket must come before + * the synchronized 'keyword'. 
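+ *
+ * The macro below simply declares a scoped fc::unique_lock named __lock,
+ * so the 'synchronized block' is nothing more than the remainder of the
+ * enclosing scope: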
+ *
+ + { synchronized( lock_type )
+ +
+ + }
+ *
+ */
+#define synchronized(X) fc::unique_lock<decltype(X)> __lock(((X)));
+
+#endif
diff --git a/include/fc/utility.hpp b/include/fc/utility.hpp new file mode 100644 index 0000000..d556805 --- /dev/null +++ b/include/fc/utility.hpp @@ -0,0 +1,39 @@ +#ifndef _FC_UTILITY_HPP_
+#define _FC_UTILITY_HPP_
+#include
+#include
+
+typedef decltype(sizeof(int)) size_t;
+namespace std {
+ typedef decltype(sizeof(int)) size_t;
+}
+
+namespace fc {
+ template<typename T> struct remove_reference { typedef T type; };
+ template<typename T> struct remove_reference<T&> { typedef T type; };
+ template<typename T> struct remove_reference<const T&> { typedef const T type; };
+ template<typename T> struct remove_reference<T&&> { typedef T type; };
+
+ template<typename T>
+ typename fc::remove_reference<T>::type&& move( T&& t ) { return static_cast<typename fc::remove_reference<T>::type&&>(t); }
+
+ template<typename T, typename U>
+ inline T&& forward( U&& u ) { return static_cast<T&&>(u); }
+
+ namespace detail {
+ template<typename T> char is_class_helper(void(T::*)());
+ template<typename T> double is_class_helper(...);
+ }
+ template<typename T>
+ struct is_class {
+ enum { value = sizeof(char) == sizeof(detail::is_class_helper<T>(0)) };
+ };
+
+ template<typename T>
+ void swap( T& a, T& b ) {
+ T tmp = fc::move(a);
+ a = fc::move(b);
+ b = fc::move(tmp);
+ }
+}
+#endif // _FC_UTILITY_HPP_
diff --git a/include/fc/value.hpp b/include/fc/value.hpp new file mode 100644 index 0000000..ca777a6 --- /dev/null +++ b/include/fc/value.hpp @@ -0,0 +1,141 @@ +#ifndef _FC_VALUE_HPP_
+#define _FC_VALUE_HPP_
+#include
+#include
+#include
+#include
+#include
+
+namespace fc {
+ class string;
+
+ /**
+ * @brief dynamic type that will store any reflected type.
+ *
+ * A struct can be stored directly or 'exploded' to be stored
+ * as individual elements. Direct storage is more efficient (no
+ * need to allocate/manage keys), but does not support adding / removing
+ * keys.
+ *
+ */
+ class value {
+ public:
+ struct member {
+ member();
+ member(const char* key);
+ member(string&& key );
+
+ const string& key()const;
+ value& val();
+ const value& val()const;
+
+ private:
+ friend class value;
+ friend class reflector;
+ fwd _key;
+ fwd _val;
+ };
+ typedef member* iterator;
+ typedef const member* const_iterator;
+
+ value();
+
+ template<typename T>
+ explicit value( T&& t ):_obj(nullptr),_obj_type(nullptr) {
+ *this = cref(fc::forward<T>(t));
+ }
+
+ value( value&& v );
+ value( const value& v );
+ value( const cref& v );
+ ~value();
+
+ value& operator=( value&& v );
+ value& operator=( const value& v );
+ value& operator=( const cref& v );
+
+ template<typename T>
+ value& operator=( T&& t ) {
+ value temp(fc::forward<T>(t));
+ swap(temp,*this);
+ return *this;
+ }
+
+ template<typename T>
+ value& push_back( T&& v ) { return push_back( value( forward<T>(v) ) ); }
+ value& push_back( value&& v );
+ value& push_back( const value& v );
+
+ /**
+ * These methods will create the key if it
+ * does not exist.
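+ * For example (illustrative):
+ *
+ *    fc::value cfg;
+ *    cfg["server"]["port"] = 8080; // both keys created on first access
+ *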
+ * @{ + */ + /** + * @pre value is null or an object + */ + value& operator[]( const string& key ); + /** + * @pre value is null or an object + */ + value& operator[]( const char* key ); + /** + * @pre value is null or an array or index is 0 + */ + value& operator[]( uint64_t index ); + value& operator[]( int index ); + /** @} */ + + value& operator[]( string&& key ); + + const value& operator[]( const string& key )const; + const value& operator[]( const char* key )const; + const value& operator[]( uint64_t )const; + + bool key_exists( const string& key ); + bool key_exists( const char* key ); + bool is_array()const; + bool is_object()const; + bool is_null()const; + bool is_string()const; + bool is_real()const; + bool is_float()const; + bool is_double()const; + bool is_integer()const; + bool is_int64()const; + bool is_int32()const; + bool is_int16()const; + bool is_int8()const; + bool is_boolean()const; + + template + bool is()const { + return _obj_type == reflector::instance(); + } + + fwd,8> get_keys()const; + + iterator find( const char* key ); + const_iterator find( const char* key )const; + iterator begin(); + const_iterator begin()const; + const_iterator end()const; + + void* ptr(); + const void* ptr()const; + abstract_reflector* type()const; + private: + template friend const T& value_cast( const value& v ); + template friend T& value_cast( value& v ); + template friend T* value_cast( value* v ); + template friend const T* value_cast( const value* v ); + template friend T reinterpret_value_cast( const value& v ); + + void* _obj; + abstract_reflector* _obj_type; + }; + +}; + + +#endif // _MACE_VALUE_HPP_ diff --git a/include/fc/value_cast.hpp b/include/fc/value_cast.hpp new file mode 100644 index 0000000..f348989 --- /dev/null +++ b/include/fc/value_cast.hpp @@ -0,0 +1,100 @@ +#ifndef _FC_VALUE_CAST_HPP_ +#define _FC_VALUE_CAST_HPP_ +#include +#include +#include +#include + +namespace fc { + + template + const T& value_cast( const value& v ) { + if( &reflector::instance() == v._obj_type ) { + if( v._obj_type->size_of() <= 8 ) { + slog( "stack..." ); + return *((const T*)&v._obj); + } + slog( "heap..." ); + return *((const T*)v._obj); + } + FC_THROW( bad_cast() ); + } + template + T& value_cast( value& v ) { + if( &reflector::instance() == v._obj_type ) { + if( v._obj_type->size_of() <= 8 ) { + slog( "stack..." ); + return *((T*)&v._obj); + } + slog( "heap..." 
); + return *((T*)v._obj); + } + FC_THROW( bad_cast() ); + } + + template + T* value_cast( value* v ) { + } + + template + const T* value_cast( const value* v ) { + } + + + template class reinterpret_value_visitor; + + #define CAST_VISITOR_DECL(X) \ + template<> class reinterpret_value_visitor : public abstract_const_visitor { \ + private: X& _s; \ + public: \ + reinterpret_value_visitor( X& s ):_s(s){} \ + virtual void visit()const; \ + virtual void visit( const char& c )const; \ + virtual void visit( const uint8_t& c )const; \ + virtual void visit( const uint16_t& c )const; \ + virtual void visit( const uint32_t& c )const; \ + virtual void visit( const uint64_t& c )const; \ + virtual void visit( const int8_t& c )const; \ + virtual void visit( const int16_t& c )const; \ + virtual void visit( const int32_t& c )const; \ + virtual void visit( const int64_t& c )const; \ + virtual void visit( const double& c )const; \ + virtual void visit( const float& c )const; \ + virtual void visit( const bool& c )const; \ + virtual void visit( const string& c )const; \ + virtual void visit( const char* member, int idx, int size, const cref& v)const;\ + virtual void visit( int idx, int size, const cref& v)const; \ + virtual void array_size( int size )const{} \ + virtual void object_size( int size )const{} \ + } + + CAST_VISITOR_DECL(int64_t); + CAST_VISITOR_DECL(int32_t); + CAST_VISITOR_DECL(int16_t); + CAST_VISITOR_DECL(int8_t); + CAST_VISITOR_DECL(uint64_t); + CAST_VISITOR_DECL(uint32_t); + CAST_VISITOR_DECL(uint16_t); + CAST_VISITOR_DECL(uint8_t); + CAST_VISITOR_DECL(double); + CAST_VISITOR_DECL(float); + CAST_VISITOR_DECL(bool); + CAST_VISITOR_DECL(string); + + + template + T reinterpret_value_cast( const value& v ) { + if( v.is_null() ) FC_THROW( bad_cast() ); + T r; + reinterpret_value_visitor vis(r); + if( v._obj_type->size_of() > sizeof(v._obj) ) + v._obj_type->visit( v._obj, vis ); + else + v._obj_type->visit( &v._obj, vis ); + return r; + } + +} + + +#endif // _FC_VALUE_CAST_HPP_ diff --git a/include/fc/value_fwd.hpp b/include/fc/value_fwd.hpp new file mode 100644 index 0000000..10cc607 --- /dev/null +++ b/include/fc/value_fwd.hpp @@ -0,0 +1,10 @@ +#ifndef _FC_VALUE_FWD_HPP_ +#define _FC_VALUE_FWD_HPP_ +#include + +namespace fc { + class value; + typedef fwd value_fwd; +} + +#endif // _MACE_VALUE_FWD_HPP_ diff --git a/include/fc/vector.hpp b/include/fc/vector.hpp new file mode 100644 index 0000000..03981d9 --- /dev/null +++ b/include/fc/vector.hpp @@ -0,0 +1,348 @@ +#ifndef _FC_VECTOR_HPP_ +#define _FC_VECTOR_HPP_ +#include +#include +#include +#include +#include + +namespace fc { + namespace detail { + template + struct data { + uint64_t size; + uint64_t capacity; + T first; + + static data* allocate( uint64_t cap ) { + data* d = nullptr; + if( cap ){ + d = (data*)malloc(sizeof(data) + sizeof(T)*(cap-1)); + d->capacity = cap; + } else { + d = (data*)malloc(sizeof(data)); + d->capacity = 1; + } + d->size = 0; + return d; + } + static data* reallocate( data* d, uint64_t cap ) { + if( cap ){ + d = (data*)realloc(d,sizeof(data) + sizeof(T)*(cap-1)); + d->capacity = cap; + } else { + d = (data*)realloc(d,sizeof(data)); + d->capacity = 1; + } + if( d->size > d->capacity ) + d->size = d->capacity; + return d; + } + private: + data(){}; + }; + + template + struct vector_impl { + public: + vector_impl():_data(nullptr){} + vector_impl( vector_impl&& c):_data(c._data){c._data =nullptr; } + vector_impl( const vector_impl& c):_data(nullptr) { + if( c.size() ) { + _data = data::allocate( c.size() ); + 
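+ // this vector_impl is the trivially-copyable path: elements are copied
+ // as raw bytes below; the second vector_impl further down copy-constructs
+ // each element in place instead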
memcpy(begin(),c.begin(),c.size() ); + } + } + vector_impl(uint64_t s):_data(nullptr){ + resize(s); + } + ~vector_impl() { + clear(); + } + typedef T* iterator; + typedef const T* const_iterator; + + + uint64_t size()const { return _data ? _data->size : 0; } + uint64_t capacity()const { return _data ? _data->capacity : 0; } + + T& back() { return (&_data->first)[-1+_data->size]; } + const T& back()const { return (&_data->first)[-1+_data->size]; } + T& front() { return (&_data->first)[0]; } + const T& front()const { return (&_data->first)[0]; } + + iterator begin() { return _data ? &front() : 0;} + const_iterator begin()const { return _data ? &front() : 0;} + const_iterator end()const { return _data ? (&back())+1: 0;} + + T& operator[]( uint64_t i ) { return (&_data->first)[i]; } + const T& operator[]( uint64_t i )const { return (&_data->first)[i]; } + + T& at( uint64_t i ) { return (&_data->first)[i]; } + const T& at( uint64_t i )const { return (&_data->first)[i]; } + + void pop_back() { erase( &back() ); } + + void clear() { + if( _data != nullptr ) + free(_data); + _data = nullptr; + } + + void reserve( uint64_t i ) { + _data = data::reallocate( _data, i ); + } + + void resize( uint64_t i ) { + if( capacity() < i ) + _data = data::reallocate( _data, i ); + _data->size = i; + } + + template + void push_back( U&& v ) { + resize( size()+1 ); + back() = fc::forward(v); + } + + template + iterator insert( const_iterator loc, U&& t ) { + uint64_t pos = loc - begin(); + resize( size()+1 ); + char* src = &at(pos); + if( src != &back() ) + memmove( src+1, src, (&back() - src) ); + &back = fc::forward(t); + return &at(pos); + } + + iterator insert( iterator pos, const_iterator first, const_iterator last ) { + if( first >= last ) return pos; + + uint64_t loc = pos - begin(); + uint64_t right_size = size() - loc; + resize( size() + (last-first) ); + char* src = &at(loc); + uint64_t s = last-first; + memmove( src + s, src, right_size ); + memcpy( src, first, s ); + _data->size += (last-first); + return src; + } + + iterator erase( iterator pos ) { + memmove( pos, pos+1, (&back() - pos) ); + _data->size--; + return pos; + } + + iterator erase( iterator first, iterator last ) { + if( first != last ) { + memmove( first, first + (last-first), (&back() - last) ); + _data->size -= last-first; + } + return first; + } + + vector_impl& operator=( vector_impl&& v ) { + fc::swap(_data,v._data); + return *this; + } + vector_impl& operator=( const vector_impl& v ) { + vector_impl tmp(v); + fc::swap(tmp._data,_data); + return *this; + } + protected: + data* _data; + }; + + template + class vector_impl { + public: + vector_impl():_data(nullptr){} + vector_impl( vector_impl&& c):_data(c._data){c._data =nullptr; } + vector_impl( const vector_impl& c):_data(nullptr) { + if( c.size() ) { + _data = data::allocate( c.size() ); + auto i = begin(); + auto ci = c.begin(); + auto ce = c.end(); + while( ci != ce ) { + new (i) T(*ci); + ++i; + ++_data->size; + ++ci; + } + } + } + + vector_impl(uint64_t s):_data(nullptr){ + resize(s); + } + ~vector_impl() { + clear(); + } + typedef T* iterator; + typedef const T* const_iterator; + + + uint64_t size()const { return _data ? _data->size : 0; } + uint64_t capacity()const { return _data ? _data->capacity : 0; } + + T& back() { return (&_data->first)[-1+_data->size]; } + const T& back()const { return (&_data->first)[-1+_data->size]; } + T& front() { return (&_data->first)[0]; } + const T& front()const { return (&_data->first)[0]; } + + iterator begin() { return _data ? 
&front() : 0;} + const_iterator begin()const { return _data ? &front() : 0;} + const_iterator end()const { return _data ? (&back())+1: 0;} + + T& operator[]( uint64_t i ) { return (&_data->first)[i]; } + const T& operator[]( uint64_t i )const { return (&_data->first)[i]; } + + T& at( uint64_t i ) { return (&_data->first)[i]; } + const T& at( uint64_t i )const { return (&_data->first)[i]; } + + void pop_back() { erase( &back() ); } + + + void clear() { + if( this->_data != nullptr ) { + auto c = this->begin(); + auto e = this->end(); + while( c != e ) { + (*c).~T(); + ++c; + } + free(this->_data); + } + this->_data = nullptr; + } + + void reserve( uint64_t i ) { + if( nullptr != this->_data && i <= this->_data->capacity ) + return; + + auto _ndata = data::allocate( i ); + auto nc = &_ndata->first; + auto c = this->begin(); + auto e = this->end(); + while( c != e ) { + new (nc) T(fc::move( *c )); + (*c).~T(); + ++_ndata->size; + ++c; + ++nc; + } + fc::swap( _ndata, this->_data ); + free(_ndata); + } + + void resize( uint64_t i ) { + this->reserve(i); + while( i < this->_data->size ) { + this->back().~T(); + --this->_data->size; + } + while( this->_data->size < i ) { + new (&this->back()+1) T(); + ++this->_data->size; + } + } + + template + void push_back( U&& v ) { + this->reserve( this->size()+1 ); + new (&back()+1) T(fc::forward(v)); + ++this->_data->size; + } + + template + iterator insert( const_iterator loc, U&& t ) { + uint64_t pos = loc - this->begin(); + this->reserve( this->size()+1 ); + loc = this->begin() + pos; + if( this->size() != 0 ) { + new (this->end()) T( fc::move(this->back()) ); + auto cur = this->back(); + ++this->_data->size; + while( cur != loc ) { + *cur = fc::move( *(cur-1) ); + } + *cur = fc::forward(t); + } else { + new (this->end()) T( fc::forward(t) ); + ++this->_data->size; + } + return &this->at(pos); + } + + iterator insert( iterator pos, const_iterator first, const_iterator last ) { + //static_assert( false, "Not Implemented" ); + return 0; + } + + iterator erase( iterator pos ) { + if( pos == this->end() ) { return pos; } + auto next = pos + 1; + while( next != this->end() ) { + *pos = fc::move(*next); + ++pos; ++next; + } + pos->~T(); + this->_data->size--; + return pos; + } + + iterator erase( iterator first, iterator last ) { + iterator c = first; + iterator m = last; + iterator e = this->end(); + while( c != e ) { + if( m != e ) *c = fc::move( *m ); + else c->~T(); + ++c; + ++m; + } + this->_data->size -= last-first; + return last; + } + vector_impl& operator=( vector_impl&& v ) { + fc::swap(_data,v._data); + return *this; + } + vector_impl& operator=( const vector_impl& v ) { + vector_impl tmp(v); + fc::swap(tmp._data,_data); + return *this; + } + private: + data* _data; + }; + } + + template + class vector : public detail::vector_impl::value> { + public: + vector(){} + vector( uint64_t s ):detail::vector_impl::value>(s){} + vector( const vector& v ):detail::vector_impl::value>(v){} + vector( vector&& v ):detail::vector_impl::value>(fc::move(v)){} + + vector& operator=( vector&& v ) { + *((base*)this) = fc::move(v); + return *this; + } + vector& operator=( const vector& v ) { + *((base*)this) = v; + return *this; + } + private: + typedef detail::vector_impl::value> base; + }; + +}; + +#endif // _FC_VECTOR_HPP_ diff --git a/include/fc/vector_fwd.hpp b/include/fc/vector_fwd.hpp new file mode 100644 index 0000000..361d3c9 --- /dev/null +++ b/include/fc/vector_fwd.hpp @@ -0,0 +1,8 @@ +#ifndef _FC_VECTOR_FWD_HPP_ +#define _FC_VECTOR_FWD_HPP_ +namespace fc { 
+ template class vector; + template class reflector; + template class reflector< fc::vector >; +}; +#endif // _FC_VECTOR_FWD_HPP_ diff --git a/include/fc/vector_g.hpp b/include/fc/vector_g.hpp new file mode 100644 index 0000000..0876022 --- /dev/null +++ b/include/fc/vector_g.hpp @@ -0,0 +1,140 @@ +#ifndef _FC_VECTOR_HPP_ +#define _FC_VECTOR_HPP_ +#include + +namespace fc { + class vector_impl { + public: + size_t size()const; + size_t capacity()const; + void pop_back(); + void clear(); + void resize( size_t ); + void reserve( size_t ); + + protected: + vector_impl( abstract_value_type& v, size_t size ); + vector_impl( const vector_impl& ); + vector_impl( vector_impl&& ); + ~vector_impl(); + + vector_impl& operator=( const vector_impl& v ); + vector_impl& operator=( vector_impl&& v ); + + void _push_back( const void* v ); + void _push_back_m( void* v ); + + void* _back(); + const void* _back()const; + + void* _at(size_t); + const void* _at(size_t)const; + + void* _insert( void* pos, const void* t ); + void* _insert( void* pos, void* t ); + void* _erase( void* pos ); + void* _erase( void* first, void* last ); + + struct vector_impl_d* my; + }; + + class vector_pod_impl { + public: + size_t size()const; + size_t capacity()const; + void pop_back(); + void clear(); + void resize( size_t ); + void reserve( size_t ); + + protected: + vector_pod_impl( unsigned int size_of, size_t size ); + vector_pod_impl( const vector_pod_impl& ); + vector_pod_impl( vector_pod_impl&& ); + ~vector_pod_impl(); + + vector_pod_impl& operator=( const vector_pod_impl& v ); + vector_pod_impl& operator=( vector_pod_impl&& v ); + + void _push_back( const void* v ); + void _push_back_m( void* v ); + + void* _back(); + const void* _back()const; + + void* _at(size_t); + const void* _at(size_t)const; + + void* _insert( void* pos, const void* t ); + void* _erase( void* pos ); + void* _erase( void* first, void* last ); + + struct vector_pod_impl_d* my; + }; + + template + class vector_base : public vector_impl { + public: + vector_base( size_t s ):vector_impl( value_type::instance(), s ){}; + vector_base( const vector_base& c ):vector_impl( c ){}; + vector_base( vector_base&& c ):vector_impl( fc::move(c) ){}; + + vector_base& operator=( const vector_base& v ){ vector_impl::operator=(v); return *this; } + vector_base& operator=( vector_base&& v ) { vector_impl::operator=(fc::move(v)); return *this; } + }; + + template + class vector_base : public vector_pod_impl { + public: + vector_base( size_t s ):vector_pod_impl( sizeof(T), s ){}; + vector_base( const vector_base& c ):vector_pod_impl( c ){}; + vector_base( vector_base&& c ):vector_pod_impl( fc::move(c) ){}; + + vector_base& operator=( const vector_base& v ){ vector_pod_impl::operator=(v); return *this; } + vector_base& operator=( vector_base&& v ) { vector_pod_impl::operator=(fc::move(v)); return *this; } + }; + + template + class vector : public vector_base::value > { + public: + vector( size_t size = 0 ):vector_base::value>( size ){} + vector( const vector& v ):vector_base::value>(v){} + vector( vector&& v ):vector_base::value>(fc::move(v)){} + + vector& operator=( const vector& v ){ vector_base::value>::operator=(v); return *this; } + vector& operator=( vector&& v ) { vector_base::value>::operator=(fc::move(v)); return *this; } + + typedef T* iterator; + typedef const T* const_iterator; + + T* begin() { return &front(); } + const T* begin()const { return &front(); } + const T* end()const { return &back() + 1; } + + void push_back( const T& t ) { _push_back(&t); } + void 
push_back( T&& t ) { _push_back_m(&t); } + + T& back() { return *((T*)this->_back()); } + const T& back()const { return *((const T*)this->_back()); } + + T& front() { return *((T*)this->_at(0)); } + const T& front()const { return *((const T*)this->_at(0)); } + + T& operator[]( size_t p ) { return *((T*)this->_at(p)); } + const T& operator[]( size_t p )const { return *((const T*)this->_at(p)); } + + T& at( size_t p ) { return *((T*)this->_at(p)); } + const T& at( size_t p )const { return *((const T*)this->_at(p)); } + + iterator insert( iterator pos, const T& t ) { return (iterator*)this->_insert( pos, &t ); } + iterator insert( iterator pos, T&& t ) { return (iterator*)this->_insert_m(pos, &t); } + iterator erase( iterator pos ) { return (iterator*)this->_erase(pos); } + iterator erase( iterator first, iterator last ) { return (iterator*)this->_erase(first,last); } + }; + namespace reflect { + template class reflector; + template class reflector>; + } +} // namespace fc + +#endif // _FC_VECTOR_HPP_ diff --git a/include/fc/wait_any.hpp b/include/fc/wait_any.hpp new file mode 100644 index 0000000..f31428b --- /dev/null +++ b/include/fc/wait_any.hpp @@ -0,0 +1,15 @@ +#ifndef _FC_WAIT_ANY_HPP_ +#define _FC_WAIT_ANY_HPP_ +#include +#include + +namespace fc { + template + int wait_any( fc::future& f1, fc::future& f2, const microseconds& timeout_us = microseconds::max() ) { + fc::vector p(2); + p[0] = static_pointer_cast(f1.promise()); + p[1] = static_pointer_cast(f2.promise()); + return wait( fc::move(p), timeout_us ); + } +} +#endif // _FC_WAIT_ANY_HPP_ diff --git a/src/bkup_json.cpp b/src/bkup_json.cpp new file mode 100644 index 0000000..89d4384 --- /dev/null +++ b/src/bkup_json.cpp @@ -0,0 +1,174 @@ +#include +#include +#include + +// TODO: replace sstream with light/fast compiling version +#include + +namespace fc { namespace json { + + class const_visitor : public fc::abstract_const_visitor { + public: + fc::ostream& out; + const_visitor( fc::ostream& o ):out(o){} + + virtual void visit()const{} + virtual void visit( const char& c )const{ out << '"' << c << '"'; } + virtual void visit( const uint8_t& c )const{ out << int(c); } + virtual void visit( const uint16_t& c )const{ out << c; } + virtual void visit( const uint32_t& c )const{ out << c; } + virtual void visit( const uint64_t& c )const{ out << c; } + virtual void visit( const int8_t& c )const{ out << int(c); } + virtual void visit( const int16_t& c )const{ out << c;} + virtual void visit( const int32_t& c )const{ out << c;} + virtual void visit( const int64_t& c )const{ out << c;} + virtual void visit( const double& c )const{ out << c;} + virtual void visit( const float& c )const{ out << c;} + virtual void visit( const bool& c )const{ out << (c?"true":"false"); } + virtual void visit( const fc::string& c )const{ out << '"'<= '0' && c <= '9' ) + return c - '0'; + if( c >= 'a' && c <= 'f' ) + return c - 'a' + 10; + if( c >= 'A' && c <= 'F' ) + return c - 'A' + 10; + return c; + } + + string escape_string( const string& s ) { + // calculate escape string size. + uint32_t ecount = 0; + for( auto i = s.begin(); i != s.end(); ++i ) { + if( ' '<= *i && *i <= '~' && *i !='\\' && *i != '"' ) { + ecount+=1; + } else { + switch( *i ) { + case '\t' : + case '\n' : + case '\r' : + case '\\' : + case '"' : + ecount += 2; break; + default: + ecount += 4; + } + } + } + // unless the size changed, just return it. + if( ecount == s.size() ) { return s; } + + // reserve the bytes + string out; out.reserve(ecount); + + // print it out. 
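+ // printable characters pass through verbatim; \t \n \r \\ and " become
+ // two-character escapes; everything else is written as \xNN with two
+ // lowercase hex digits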
+ for( auto i = s.begin(); i != s.end(); ++i ) { + if( ' '<= *i && *i <= '~' && *i !='\\' && *i != '"' ) { + out += *i; + } else { + out += '\\'; + switch( *i ) { + case '\t' : out += 't'; break; + case '\n' : out += 'n'; break; + case '\r' : out += 'r'; break; + case '\\' : out += '\\'; break; + case '"' : out += '"'; break; + default: + out += "x"; + const char* const hexdig = "0123456789abcdef"; + out += hexdig[*i >> 4]; + out += hexdig[*i & 0xF]; + } + } + } + return out; + } + string unescape_string( const string& s ) { + string out; out.reserve(s.size()); + for( auto i = s.begin(); i != s.end(); ++i ) { + if( *i != '\\' ) { + if( *i != '"' ) out += *i; + } + else { + ++i; + if( i == out.end() ) return out; + switch( *i ) { + case 't' : out += '\t'; break; + case 'n' : out += '\n'; break; + case 'r' : out += '\r'; break; + case '\\' : out += '\\'; break; + case '"' : out += '"'; break; + case 'x' : { + ++i; if( i == out.end() ) return out; + char c = from_hex(*i); + ++i; if( i == out.end() ) { out += c; return out; } + c = c<<4 | from_hex(*i); + out += c; + break; + } + default: + out += '\\'; + out += *i; + } + } + } + return out; + } + + + + +} } diff --git a/src/context.hpp b/src/context.hpp new file mode 100644 index 0000000..dc1060c --- /dev/null +++ b/src/context.hpp @@ -0,0 +1,147 @@ +#ifndef _FC_CONTEXT_HPP_ +#define _FC_CONTEXT_HPP_ +#include +#include +#include +#include + +namespace fc { + class thread; + class promise_base; + class task_base; + + namespace bc = boost::ctx; + + /** + * maintains information associated with each context such as + * where it is blocked, what time it should resume, priority, + * etc. + */ + struct context { + typedef fc::context* ptr; + + + context( void (*sf)(intptr_t), bc::stack_allocator& alloc, fc::thread* t ) + : caller_context(0), + stack_alloc(&alloc), + next_blocked(0), + next(0), + ctx_thread(t), + canceled(false), + complete(false), + cur_task(0) + { + my_context.fc_stack.base = alloc.allocate( bc::minimum_stacksize() ); + // slog( "new stack %1% bytes at %2%", bc::minimum_stacksize(), my_context.fc_stack.base ); + my_context.fc_stack.limit = + static_cast( my_context.fc_stack.base) - bc::minimum_stacksize(); + make_fcontext( &my_context, sf ); + } + + context( fc::thread* t) + :caller_context(0), + stack_alloc(0), + next_blocked(0), + next(0), + ctx_thread(t), + canceled(false), + complete(false), + cur_task(0) + {} + + ~context() { + if(stack_alloc) { + stack_alloc->deallocate( my_context.fc_stack.base, bc::minimum_stacksize() ); + // slog("deallocate stack" ); + } + } + + struct blocked_promise { + blocked_promise( promise_base* p=0, bool r=true ) + :prom(p),required(r){} + + promise_base* prom; + bool required; + }; + + /** + * @todo Have a list of promises so that we can wait for + * P1 or P2 and either will unblock instead of requiring both + * @param req - require this promise to 'unblock', otherwise try_unblock + * will allow it to be one of many that could 'unblock' + */ + void add_blocking_promise( promise_base* p, bool req = true ) { + for( auto i = blocking_prom.begin(); i != blocking_prom.end(); ++i ) { + if( i->prom == p ) { + i->required = req; + return; + } + } + blocking_prom.push_back( blocked_promise(p,req) ); + } + /** + * If all of the required promises and any optional promises then + * return true, else false. 
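+ *
+ * As written below: if p is one of the promises this context is blocked
+ * on it is marked no-longer-required and the context may unblock;
+ * otherwise the context may only unblock if no required promise remains.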
+ * @todo check list + */ + bool try_unblock( promise_base* p ) { + if( blocking_prom.size() == 0 ) { + return true; + } + bool req = false; + for( uint32_t i = 0; i < blocking_prom.size(); ++i ) { + if( blocking_prom[i].prom == p ) { + blocking_prom[i].required = false; + return true; + } + req = req || blocking_prom[i].required; + } + return !req; + } + + void remove_blocking_promise( promise_base* p ) { + for( auto i = blocking_prom.begin(); i != blocking_prom.end(); ++i ) { + if( i->prom == p ) { + blocking_prom.erase(i); + return; + } + } + } + + void timeout_blocking_promises() { + for( auto i = blocking_prom.begin(); i != blocking_prom.end(); ++i ) { + i->prom->set_exception( boost::copy_exception( future_wait_timeout() ) ); + } + } + template + void except_blocking_promises( const Exception& e ) { + for( auto i = blocking_prom.begin(); i != blocking_prom.end(); ++i ) { + i->prom->set_exception( boost::copy_exception( e ) ); + } + } + void clear_blocking_promises() { + blocking_prom.clear(); + } + + bool is_complete()const { return complete; } + + + + bc::fcontext_t my_context; + fc::context* caller_context; + bc::stack_allocator* stack_alloc; + priority prio; + //promise_base* prom; + std::vector blocking_prom; + time_point resume_time; + fc::context* next_blocked; + fc::context* next; + fc::thread* ctx_thread; + bool canceled; + bool complete; + task_base* cur_task; + }; + +} // naemspace fc + +#endif // _FC_CONTEXT_HPP_ diff --git a/src/exception.cpp b/src/exception.cpp new file mode 100644 index 0000000..3b8bc6a --- /dev/null +++ b/src/exception.cpp @@ -0,0 +1,69 @@ +#include +#include + +namespace fc { + #define bexcept void* e = &my[0]; (*((boost::exception_ptr*)e)) + #define cbexcept const void* e = &my[0]; (*((const boost::exception_ptr*)e)) + + exception_ptr::exception_ptr() { + new (&my[0]) boost::exception_ptr(); + } + exception_ptr::exception_ptr( const boost::exception_ptr& c ){ + static_assert( sizeof(my) >= sizeof(c), "boost::exception_ptr is larger than space reserved for it" ); + new (&my[0]) boost::exception_ptr(c); + } + exception_ptr::exception_ptr( boost::exception_ptr&& c ){ + new (&my[0]) boost::exception_ptr(fc::move(c)); + } + exception_ptr::exception_ptr( const exception_ptr& c ){ + new (&my[0]) boost::exception_ptr(c); + } + exception_ptr::exception_ptr( exception_ptr&& c ){ + new (&my[0]) boost::exception_ptr(fc::move(c)); + } + exception_ptr::~exception_ptr(){ + bexcept.~exception_ptr(); + } + exception_ptr& exception_ptr::operator=(const boost::exception_ptr& c){ + bexcept = c; + return *this; + } + exception_ptr& exception_ptr::operator=(boost::exception_ptr&& c){ + bexcept = fc::move(c); + return *this; + } + + exception_ptr& exception_ptr::operator=(const exception_ptr& c){ + bexcept = c; + return *this; + } + exception_ptr& exception_ptr::operator=(exception_ptr&& c){ + bexcept = fc::move(c); + return *this; + } + + fc::string exception_ptr::diagnostic_information()const{ + const void* e = &my[0]; + return boost::diagnostic_information( *((const boost::exception_ptr*)e) ).c_str(); + } + + exception_ptr::operator const boost::exception_ptr& ()const{ + const void* e = &my[0]; + return (*((const boost::exception_ptr*)e)); + } + exception_ptr::operator boost::exception_ptr& (){ + void* e = &my[0]; + return (*((boost::exception_ptr*)e)); + } + + exception_ptr current_exception() { + return boost::current_exception(); + } + void rethrow_exception( const exception_ptr& e ) { + boost::rethrow_exception( static_cast(e) ); + } + exception_ptr::operator 
bool()const { + const void* e = &my[0]; + return (*((boost::exception_ptr*)e)); + } +} diff --git a/src/future.cpp b/src/future.cpp new file mode 100644 index 0000000..b6b6126 --- /dev/null +++ b/src/future.cpp @@ -0,0 +1,91 @@ +#include +#include +#include +#include +#include +#include + +#include + +namespace fc { + + promise_base::promise_base( const char* desc ) + : _ready(false), + _blocked_thread(nullptr), + _timeout(time_point::max()), + _canceled(false), + _desc(desc), + _compl(nullptr) + { + } + + const char* promise_base::get_desc()const{ + return _desc; + } + + void promise_base::cancel(){ + _canceled = true; + } + bool promise_base::ready()const { + return _ready; + } + bool promise_base::error()const { + { synchronized(_spin_yield) + return _except; + } + } + + void promise_base::set_exception( const fc::exception_ptr& e ){ + _except = e; + _set_value(nullptr); + } + + void promise_base::_wait( const microseconds& timeout_us ){ + if( timeout_us == microseconds::max() ) _wait_until( time_point::max() ); + else _wait_until( time_point::now() + timeout_us ); + } + void promise_base::_wait_until( const time_point& timeout_us ){ + { synchronized(_spin_yield) + if( _ready ) { + if( _except ) fc::rethrow_exception( _except ); + return; + } + _enqueue_thread(); + } + thread::current().wait_until( ptr(this,true), timeout_us ); + if( _ready ) { + if( _except ) fc::rethrow_exception( _except ); + return; + } + FC_THROW( future_wait_timeout() ); + } + void promise_base::_enqueue_thread(){ + _blocked_thread =&thread::current(); + } + void promise_base::_notify(){ + if( _blocked_thread ) + _blocked_thread->notify(ptr(this,true)); + } + void promise_base::_set_timeout(){ + if( _ready ) + return; + set_exception( fc::copy_exception( future_wait_timeout() ) ); + } + void promise_base::_set_value(const void* s){ + BOOST_ASSERT( !_ready ); + { synchronized(_spin_yield) + _ready = true; + } + _notify(); + if( _compl ) { + _compl->on_complete(s,_except); + } + } + void promise_base::_on_complete( detail::completion_handler* c ) { + { synchronized(_spin_yield) + delete _compl; + _compl = c; + } + } +} + diff --git a/src/json.cpp b/src/json.cpp new file mode 100644 index 0000000..299108c --- /dev/null +++ b/src/json.cpp @@ -0,0 +1,573 @@ +#include +#include +#include +#include +#include + +#include + +// TODO: replace sstream with light/fast compiling version +#include + +namespace fc { namespace json { + + + struct range { + range( const char* s, const char* e ) + :start(s),end(e){ } + + operator bool()const { return start < end; } + char operator*()const { return *start; } + + range& operator++() { ++start; return *this; } + range& operator++(int) { ++start; return *this; } + + operator string() { return string(start,end); } + + const char* start; + const char* end; + }; + + + + class const_visitor : public fc::abstract_const_visitor { + public: + fc::ostream& out; + const_visitor( fc::ostream& o ):out(o){} + + virtual void visit()const{} + virtual void visit( const char& c )const{ out << '"' << c << '"'; } + virtual void visit( const uint8_t& c )const{ out << int(c); } + virtual void visit( const uint16_t& c )const{ out << c; } + virtual void visit( const uint32_t& c )const{ out << c; } + virtual void visit( const uint64_t& c )const{ out << c; } + virtual void visit( const int8_t& c )const{ out << int(c); } + virtual void visit( const int16_t& c )const{ out << c;} + virtual void visit( const int32_t& c )const{ out << c;} + virtual void visit( const int64_t& c )const{ out << c;} + virtual void 
visit( const double& c )const{ out << c;}
+      virtual void visit( const float& c )const{ out << c;}
+      virtual void visit( const bool& c )const{ out << (c?"true":"false"); }
+      virtual void visit( const fc::string& c )const{
+        out << '"'<< escape_string(c)<<'"';
+      }
+      virtual void array_size( int size )const {
+        if( size == 0 ) { out <<"[]"; }
+      }
+      virtual void object_size( int size )const {
+        if( size == 0 ) { out <<"{}"; }
+      }
+      virtual void visit( const char* member, int idx, int size, const cref& v)const{
+        if( !idx ) out <<"{";
+        out<<'"'<<escape_string(member)<<"\":";
+        v._reflector.visit( v._obj, *this );
+        out << ( (idx < size-1) ? "," : "}" );
+      }
+      virtual void visit( int idx, int size, const cref& v)const{
+        if( !idx ) out <<"[";
+        v._reflector.visit( v._obj, *this );
+        out << ( (idx < size-1) ? "," : "]" );
+      }
+  };
+
+  uint8_t from_hex( char c ) {
+    if( c >= '0' && c <= '9' )
+      return c - '0';
+    if( c >= 'a' && c <= 'f' )
+      return c - 'a' + 10;
+    if( c >= 'A' && c <= 'F' )
+      return c - 'A' + 10;
+    return c;
+  }
+
+  value to_value( char* start, char* end/*, error_collector& ec*/ );
+
+  /**
+   *  Any unescaped quotes are dropped.
+   *  Because unescaped strings are always shorter, we can simply reuse
+   *  the memory of s.
+   *
+   *  @param s a null terminated string that contains one or more escape chars
+   */
+  char* inplace_unescape_string( char* s ) {
+    while( *s == '"' ) ++s;
+    char* out = s;
+
+    for( auto i = s; *i != '\0'; ++i ) {
+      if( *i != '\\' ) {
+        if( *i != '"' ) {
+          *out = *i;
+          ++out;
+        }
+      }
+      else {
+        ++i;
+        if( *i == '\0' ) { *out = '\0'; return s; }
+        switch( *i ) {
+          case 't' : *out = '\t'; ++out; break;
+          case 'n' : *out = '\n'; ++out; break;
+          case 'r' : *out = '\r'; ++out; break;
+          case '\\': *out = '\\'; ++out; break;
+          case '"' : *out = '"';  ++out; break;
+          case 'x' : {
+            ++i; if( *i == '\0' ){ *out = '\0'; return s; }
+            char c = from_hex(*i);
+            ++i; if( *i == '\0' ){ *out = c; ++out; *out = '\0'; return s; }
+            c = c<<4 | from_hex(*i);
+            *out = c;
+            ++out;
+            break;
+          }
+          default:
+            *out = '\\';
+            ++out;
+            *out = *i;
+            ++out;
+        }
+      }
+    }
+    *out = '\0';
+    return s;
+  }
+
+
+  string to_string( const cref& o ) {
+    std::stringstream ss;
+    {
+      fc::ostream os(ss);
+      o._reflector.visit( o._obj, fc::json::const_visitor(os) );
+    }
+    return ss.str().c_str();
+  }
+
+
+  string pretty_print( const char* v, size_t s, uint8_t indent ) {
+    int level = 0;
+    const char* e = v + s;
+    std::stringstream ss;
+    bool first  = false;
+    bool quote  = false;
+    bool escape = false;
+    while( v < e ) {
+      switch( *v ) {
+        case '\\':
+          if( !escape ) {
+            if( quote )
+              escape = true;
+          } else { escape = false; }
+          ss<<*v;
+          break;
+        case ':':
+          if( !quote ) {
+            ss<<": ";
+          } else {
+            ss<<':';
+          }
+          break;
+        case '"':
+          if( first ) {
+            ss<<'\n';
+            for( int i = 0; i < level*indent; ++i ) ss<<' ';
+            first = false;
+          }
+          if( !escape ) {
+            quote = !quote;
+          }
+          escape = false;
+          ss<<'"';
+          break;
+        case '{':
+        case '[':
+          ss<<*v;
+          if( !quote ) {
+            ++level;
+            first = true;
+          } else {
+            escape = false;
+          }
+          break;
+        case '}':
+        case ']':
+          if( !quote ) {
+            if( *(v-1) != '[' && *(v-1) != '{' ) {
+              ss<<'\n';
+            }
+            --level;
+            if( !first ) {
+              for( int i = 0; i < level*indent; ++i ) ss<<' ';
+            }
+            ss<<*v;
+          } else {
+            escape = false;
+            ss<<*v;
+          }
+          break;
+        case ',':
+          if( !quote ) {
+            ss<<',';
+            first = true;
+          } else {
+            escape = false;
+            ss<<',';
+          }
+          break;
+        default:
+          if( first ) {
+            ss<<'\n';
+            for( int i = 0; i < level*indent; ++i ) ss<<' ';
+            first = false;
+          }
+          ss << *v;
+      }
+      ++v; // advance to the next input character
+    }
+    return ss.str().c_str();
+  }
+
+  string to_pretty_string( const cref& o, uint8_t indent ) {
+    auto s = to_string(o);
+    return pretty_print( s.c_str(), s.size(), indent );
+  }
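+
+  /**
+   *  For example (illustrative, assuming 'cfg' is a reflected struct):
+   *    fc::string s = to_pretty_string( cfg, 2 );
+   *  emits the same JSON as to_string() but with newlines and two
+   *  spaces of indentation per nesting level.
+   */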
+
+  /**
+   *  Ignores leading white space.
+   *  If it starts with '[', '{', or '"' it reads until the matching
+   *  ']', '}', or '"'.
+   *  If it starts with anything else it reads until it hits one of
+   *  [ { " , } ] : or whitespace, allowing a leading '-' and a
+   *  single '.'.
+   *
+   *  @note internal json syntax errors are not caught, only bracket errors
+   *     are caught by this method.  This makes it easy for error recovery
+   *     when values are read recursively.
+   *
+   *  @param in  start of input
+   *
+   *  @return the range of inputs for the value
+   */
+  range read_value( const range& in ) {
+    range start = in;
+    // ignore leading whitespace
+    bool done = false;
+    while( !done && start ) {
+      switch( *start ) {
+        case ' ':
+        case '\t':
+        case '\n':
+        case '\r':
+        case ',':
+          ++start;
+          break; // keep skipping separators
+        default:
+          done = true;
+      }
+    }
+    if( !start ) return start;
+    range out = start;
+
+    bool found_dot = false;
+    // check for literal vs object, array or string
+    switch( *start ) {
+      case ':':
+      case ',':
+      case '=':
+        out.end = start.start + 1;
+        return out;
+      case '[':
+      case '{':
+      case '"':
+        break;
+      default: { // literal
+        // read until non-literal character
+        // allow it to start with -
+        // allow only one '.'
+        while( start ) {
+          switch( *start ) {
+            case '[': case ']':
+            case '{': case '}':
+            case ':': case '=':
+            case ',': case '"':
+            case ' ': case '\t': case '\n': case '\r': {
+              out.end = start.start;
+              return out;
+            }
+            case '.':
+              if( found_dot ) {
+                out.end = start.start;
+                return out;
+              }
+              found_dot = true;
+              break;
+            case '-':
+              if( out.start-start.start ){
+                out.end = start.start;
+                return out;
+              }
+          }
+          ++start;
+        }
+        out.end = start.start;
+        return out;
+      }
+    } // end literal check
+
+    int  depth     = 0;
+    bool in_quote  = false;
+    bool in_escape = false;
+    // read until closing ] or " ignoring escaped "
+    while( start ) {
+      if( !in_quote ) {
+        switch( *start) {
+          case '[':
+          case '{': ++depth; break;
+          case ']':
+          case '}': --depth; break;
+          case '"':
+            ++depth;
+            in_quote = true;
+            break;
+          default: // do nothing
+            break;
+        }
+      } else { // in quote
+        switch( *start ) {
+          case '"': if( !in_escape ) {
+            --depth;
+            in_quote = false;
+            break;
+          }
+          case '\\':
+            in_escape = !in_escape;
+            break;
+          default:
+            in_escape = false;
+        }
+      }
+      ++start;
+      if( !depth ) { return range( out.start, start.start ); }
+    }
+    if( depth != 0 ) {
+      // TODO: Throw Parse Error!
+      elog("Parse Error!!");
+    }
+    return range( out.start, start.start );
+  }
+
+
+
+
+  void write( ostream& out, const cref& val ) {
+    val._reflector.visit( val._obj, fc::json::const_visitor(out) );
+  }
+
+  void read( istream& in, ref& val ) {
+
+  }
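+
+  /**
+   *  For example (illustrative): escape_string( "line1\nline2" )
+   *  yields "line1\\nline2" and unescape_string() reverses it;
+   *  bytes outside the printable ASCII range become \xNN sequences.
+   */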
+  string escape_string( const string& s ) {
+    // calculate escape string size.
+    uint32_t ecount = 0;
+    for( auto i = s.begin(); i != s.end(); ++i ) {
+      if( ' ' <= *i && *i <= '~' && *i != '\\' && *i != '"' ) {
+        ecount+=1;
+      } else {
+        switch( *i ) {
+          case '\t' :
+          case '\n' :
+          case '\r' :
+          case '\\' :
+          case '"' :
+            ecount += 2; break;
+          default:
+            ecount += 4;
+        }
+      }
+    }
+    // unless the size changed, just return it.
+    if( ecount == s.size() ) { return s; }
+
+    // reserve the bytes
+    string out; out.reserve(ecount);
+
+    // print it out.
+    for( auto i = s.begin(); i != s.end(); ++i ) {
+      if( ' ' <= *i && *i <= '~' && *i != '\\' && *i != '"' ) {
+        out += *i;
+      } else {
+        out += '\\';
+        switch( *i ) {
+          case '\t' : out += 't'; break;
+          case '\n' : out += 'n'; break;
+          case '\r' : out += 'r'; break;
+          case '\\' : out += '\\'; break;
+          case '"'  : out += '"'; break;
+          default:
+            out += "x";
+            const char* const hexdig = "0123456789abcdef";
+            // index with an unsigned value so bytes >= 0x80 cannot go negative
+            out += hexdig[ uint8_t(*i) >> 4 ];
+            out += hexdig[ uint8_t(*i) & 0xF ];
+        }
+      }
+    }
+    return out;
+  }
+  string unescape_string( const string& s ) {
+    string out; out.reserve(s.size());
+    for( auto i = s.begin(); i != s.end(); ++i ) {
+      if( *i != '\\' ) {
+        if( *i != '"' ) out += *i;
+      }
+      else {
+        ++i;
+        // compare against s.end(); out.end() belongs to a different string
+        if( i == s.end() ) return out;
+        switch( *i ) {
+          case 't'  : out += '\t'; break;
+          case 'n'  : out += '\n'; break;
+          case 'r'  : out += '\r'; break;
+          case '\\' : out += '\\'; break;
+          case '"'  : out += '"'; break;
+          case 'x'  : {
+            ++i; if( i == s.end() ) return out;
+            char c = from_hex(*i);
+            ++i; if( i == s.end() ) { out += c; return out; }
+            c = c<<4 | from_hex(*i);
+            out += c;
+            break;
+          }
+          default:
+            out += '\\';
+            out += *i;
+        }
+      }
+    }
+    return out;
+  }
+
+  range skip_separator( range r, char c ) {
+    while( r ) {
+      switch( *r ) {
+        case ' ': case '\n': case '\t': case '\r':
+          ++r;
+          continue;
+        default:
+          if( *r == c ) { ++r; }
+          else {
+            // wlog( "Expected ',' but found '%c'", *r );
+          }
+          return r;
+      }
+    }
+    return r; // ran out of input while skipping whitespace
+  }
+
+  /**
+   *  [A,B,C]
+   */
+  value read_array( const range& r ) {
+    BOOST_ASSERT( *r == '[' );
+    BOOST_ASSERT( *(r.end-1) == ']' );
+    value out;
+    range cur_range = read_value( r );
+
+    while( cur_range ) {
+      out.push_back( *from_string( cur_range.start, cur_range.end ) );
+      cur_range = read_value( range( cur_range.end, r.end) );
+    }
+    return out;
+  }
+
+  /**
+   *  @pre *input == {
+   *  @pre *input.end-1 == }
+   */
+  value read_object( const range& in ) {
+    BOOST_ASSERT( *in == '{' );
+    BOOST_ASSERT( *(in.end-1) == '}' );
+    value v;
+    range key = read_value( ++range(in) );
+    range rest(key.end,in.end-1);
+    while( rest ) {
+      range val = skip_separator( range(key.end,in.end), ':' );
+      val = read_value( val );
+      v[string(key)] = *from_string( val.start, val.end );
+      key = read_value( rest ); // update the outer key; do not shadow it
+      rest.start = key.end;
+    }
+    return v;
+  }
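+
+  /**
+   *  For example (illustrative): from_string( "{\"a\":[1,2]}" ) yields
+   *  an object value whose key "a" holds the array [1,2], while
+   *  from_string( "-1.5" ) parses as a double and "true" as a bool.
+   */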
+  value_fwd from_string( const string& s ) {
+    return from_string( s.c_str(), s.c_str() + s.size() );
+  }
+
+  value_fwd from_string( const char* start, const char* end ) {
+    if( start == end ) return value();
+
+    range s = read_value( range(start,end) );
+    switch( s.start[0] ) {
+      case '[':
+        return read_array( s );
+      case '{':
+        return read_object( s );
+      case '0': case '1': case '2': case '3': case '4':
+      case '5': case '6': case '7': case '8': case '9': {
+        for( const char* n = s.start+1; n != s.end; ++n ) {
+          if( *n == '.' ) {
+            return boost::lexical_cast<double>(std::string(s.start,s.end));
+          }
+        }
+        return boost::lexical_cast<uint64_t>(std::string(s.start,s.end));
+      }
+      case '-': {
+        for( const char* n = s.start+1; n != s.end; ++n ) {
+          if( *n == '.' ) {
+            return (value)boost::lexical_cast<double>(std::string(s.start,s.end));
+          }
+        }
+        return (value)boost::lexical_cast<int64_t>(std::string(s.start,s.end));
+      }
+      case '.': {
+        return (value)boost::lexical_cast<double>(std::string(s.start,s.end));
+      }
+      case '\"': {
+        return (value)unescape_string( string(s.start,s.end) );
+      }
+      case 'n': {
+        // strncmp returns 0 on a match
+        if( strncmp(s.start,"null",4 ) == 0 ) return value();
+      }
+      case 't': {
+        if( strncmp(s.start,"true",4 ) == 0 ) return true;
+      }
+      case 'f': {
+        if( strncmp(s.start,"false",5 ) == 0 ) return false;
+      }
+      default:
+        wlog( "unable to parse, returning as string" );
+        return value( string( s.start, s.end) );
+    }
+  }
+
+} } // fc::json
diff --git a/src/json_rpc_connection.cpp b/src/json_rpc_connection.cpp
new file mode 100644
index 0000000..3013900
--- /dev/null
+++ b/src/json_rpc_connection.cpp
@@ -0,0 +1,63 @@
+#include
+
+namespace fc { namespace json {
+
+  class rpc_connection_d {
+    public:
+      rpc_connection_d()
+      :_in(0),_out(0),_next_req_id(0){}
+      istream*  _in;
+      ostream*  _out;
+      int64_t   _next_req_id;
+      detail::pending_result::ptr _pr_head;
+      detail::pending_result::ptr _pr_tail;
+  };
+  rpc_connection::rpc_connection() {
+    my = new rpc_connection_d();
+  }
+  rpc_connection::rpc_connection( istream& i, ostream& o ) {
+    my = new rpc_connection_d();
+    init( i, o );
+  }
+  rpc_connection::rpc_connection( rpc_connection&& c )
+  :my(c.my) {
+    c.my = 0;
+  }
+  rpc_connection::~rpc_connection() {
+    delete my;
+  }
+
+  rpc_connection& rpc_connection::operator=(rpc_connection&& m) {
+    fc::swap(m.my,my);
+    return *this;
+  }
+
+  void rpc_connection::init( istream& i, ostream& o ) {
+    my->_in  = &i;
+    my->_out = &o;
+  }
+  void rpc_connection::invoke( detail::pending_result::ptr&& p, const fc::string& m,
+                               uint16_t nparam, const cptr* param ) {
+    p->id = ++my->_next_req_id;
+    int64_t id = p->id; // remember the id; p is moved into the pending list below
+
+    if( !my->_pr_tail ) { // guard against dereferencing an empty pending list
+      my->_pr_head = fc::move(p);
+      my->_pr_tail = my->_pr_head;
+    } else {
+      my->_pr_tail->next = fc::move(p);
+      my->_pr_tail = my->_pr_tail->next;
+    }
+
+    ostream& out = *my->_out;
+    out << "{\"id\":"<<id<<",\"method\":\""<<m<<'"';
+    if( nparam > 0 ) {
+      out <<",\"params\":[";
+      uint16_t back = nparam -1;
+      for( uint16_t i = 0; i < back; ++i ) {
+        fc::json::write( out, *(param[i]) );
+        out <<',';
+      }
+      fc::json::write( out, *(param[back]) );
+      out<<']';
+    }
+    out<<"}\n";
+    out.flush();
+  }
+
+
+} } // fc::json
diff --git a/src/log.cpp b/src/log.cpp
new file mode 100644
index 0000000..6dfcc30
--- /dev/null
+++ b/src/log.cpp
@@ -0,0 +1,45 @@
+#include
+#include
+#include
+#include
+#include
+
+namespace fc {
+  const char* short_name( const char* file_name ) {
+    const char* end = file_name + strlen(file_name);
+    --end;
+    while( end >= file_name ) {
+      if( *end == '/' || *end == '\\' ) {
+        return end + 1;
+      }
+      --end;
+    }
+    return file_name;
+  }
+
+  #ifdef WIN32
+  #define isatty _isatty
+  #define fileno _fileno
+  #endif // WIN32
+
+  void log( const char* color, const char* file_name, size_t line_num, const char* method_name, const char* format, ...
) { + if(isatty(fileno(stderr))) + fprintf( stderr, "%s", color ); + + fprintf( stderr, "%s:%zd %s ", short_name(file_name), line_num, method_name ); + va_list args; + va_start(args,format); + vfprintf( stderr, format, args ); + va_end(args); + if (isatty(fileno(stderr))) + fprintf( stderr, "%s", CONSOLE_DEFAULT ); + fprintf( stderr, "\n" ); + } + + /** used to add extra fields to be printed (thread,fiber,time,etc) */ + void add_log_field( void (*f)( ) ) { + } + + void remove_log_field( void (*f)( ) ) { + } +} diff --git a/src/shared_ptr.cpp b/src/shared_ptr.cpp new file mode 100644 index 0000000..9100d29 --- /dev/null +++ b/src/shared_ptr.cpp @@ -0,0 +1,23 @@ +#include +#include +#include + +namespace fc { + retainable::retainable() + :_ref_count(1) { } + + void retainable::retain() { + ((boost::atomic*)&_ref_count)->fetch_add(1, boost::memory_order_relaxed ); + } + + void retainable::release() { + if( 1 == ((boost::atomic*)&_ref_count)->fetch_sub(1, boost::memory_order_release ) ) { + boost::atomic_thread_fence(boost::memory_order_acquire); + delete this; + } + } + + int32_t retainable::retain_count()const { + return _ref_count; + } +} diff --git a/src/spin_lock.cpp b/src/spin_lock.cpp new file mode 100644 index 0000000..8bf72c6 --- /dev/null +++ b/src/spin_lock.cpp @@ -0,0 +1,46 @@ +#include +#include +#include +#include +#include + +namespace fc { + #define define_self boost::atomic* self = (boost::atomic*)&_lock + spin_lock::spin_lock() + { + define_self; + new (self) boost::atomic(); + static_assert( sizeof(boost::atomic) == sizeof(_lock), "" ); + self->store(unlocked); + } + + bool spin_lock::try_lock() { + define_self; + return self->exchange(locked, boost::memory_order_acquire)!=locked; + } + + bool spin_lock::try_lock_for( const fc::microseconds& us ) { + return try_lock_until( fc::time_point::now() + us ); + } + + bool spin_lock::try_lock_until( const fc::time_point& abs_time ) { + while( abs_time > time_point::now() ) { + if( try_lock() ) + return true; + } + return false; + } + + void spin_lock::lock() { + define_self; + while( self->exchange(locked, boost::memory_order_acquire)==locked) { } + } + + void spin_lock::unlock() { + define_self; + self->store(unlocked, boost::memory_order_release); + } + + #undef define_self + +} // namespace fc diff --git a/src/spin_yield_lock.cpp b/src/spin_yield_lock.cpp new file mode 100644 index 0000000..07d2560 --- /dev/null +++ b/src/spin_yield_lock.cpp @@ -0,0 +1,51 @@ +#include +#include +#include +#include +#include + +namespace fc { + void yield(); + + #define define_self boost::atomic* self = (boost::atomic*)&_lock + + spin_yield_lock::spin_yield_lock() + { + define_self; + new (self) boost::atomic(); + static_assert( sizeof(boost::atomic) == sizeof(_lock), "" ); + self->store(unlocked); + } + + bool spin_yield_lock::try_lock() { + define_self; + return self->exchange(locked, boost::memory_order_acquire)!=locked; + } + + bool spin_yield_lock::try_lock_for( const fc::microseconds& us ) { + return try_lock_until( fc::time_point::now() + us ); + } + + bool spin_yield_lock::try_lock_until( const fc::time_point& abs_time ) { + while( abs_time > time_point::now() ) { + if( try_lock() ) + return true; + yield(); + } + return false; + } + + void spin_yield_lock::lock() { + define_self; + while( self->exchange(locked, boost::memory_order_acquire)==locked) { + yield(); + } + } + + void spin_yield_lock::unlock() { + define_self; + self->store(unlocked, boost::memory_order_release); + } + #undef define_self + +} // namespace fc diff --git 
a/src/stream.cpp b/src/stream.cpp new file mode 100644 index 0000000..efc4423 --- /dev/null +++ b/src/stream.cpp @@ -0,0 +1,187 @@ +#include +#include +#include +#include + +namespace fc { + namespace detail { + namespace io = boost::iostreams; + class cin_source : public io::source { + public: + typedef char type; + + template + cin_source(T):_cin_thread(NULL){} + cin_source():_cin_thread(NULL){} + + cin_source( const cin_source& s ):_cin_thread(s._cin_thread){} + + std::streamsize read( char* s, std::streamsize n ) { + //if( !_cin_thread ) _cin_thread = new fc::thread("cin"); + + //if( _cin_thread != &fc::thread::current() ) { + // return _cin_thread->async( [=](){ return this->read( s, n ); } ).wait(); + // } + int r = std::cin.readsome(s,n); + if( std::cin && r <= 0 ) { + std::cin.read( s, 1 ); + if( std::cin.eof() ) return -1; + return 1; + } + return r; + } + private: + fc::thread* _cin_thread; + }; + std::istream& get_cin_stream() { + static io::stream cin_stream;// = cin_source(); + cin_stream.open(NULL); + return cin_stream; + } + + class abstract_source : public io::source { + public: + typedef char type; + abstract_source( abstract_istream& ais ):_ais(ais){} + + std::streamsize read( char* s, std::streamsize n ) { + return _ais.readsome_impl( s, n ); + } + abstract_istream& _ais; + }; + abstract_istream::abstract_istream() { + static_assert( sizeof(_store) >= sizeof( io::stream ), "Failed to allocate enough space" ); + (new (&_store[0]) io::stream( *this )); + } + size_t abstract_istream::readsome( char* buf, size_t len ) { + auto iost = (io::stream*)(&_store[0]); + iost->read(buf,len); + return len; + } + + class abstract_sink : public io::sink { + public: + struct category : io::sink::category, io::flushable_tag {}; + typedef char type; + abstract_sink( abstract_ostream& aos ):_aos(aos){} + + std::streamsize write( const char* s, std::streamsize n ) { + return _aos.write_impl( s, n ); + } + void close() { _aos.close_impl(); } + bool flush() { _aos.flush_impl(); return true; } + + abstract_ostream& _aos; + }; + + abstract_ostream::abstract_ostream() { + static_assert( sizeof(_store) >= sizeof( io::stream ), "Failed to allocate enough space" ); + (new (&_store[0]) io::stream( *this )); + } + size_t abstract_ostream::write( const char* buf, size_t len ) { + auto iost = (io::stream*)(&_store[0]); + iost->write(buf,len); + return len; + } + void abstract_ostream::flush() { + auto iost = (io::stream*)(&_store[0]); + iost->flush(); + } + void abstract_ostream::close() { + auto iost = (io::stream*)(&_store[0]); + iost->close(); + } + + + abstract_istream::~abstract_istream() { + } + abstract_ostream::~abstract_ostream() { + auto iost = (io::stream*)(&_store[0]); + iost->~stream(); + } + } + + istream::~istream(){ + detail::abstract_istream* i = (detail::abstract_istream*)&_store[0]; + i->~abstract_istream(); + } + + size_t istream::readsome( char* buf, size_t len ){ + detail::abstract_istream* i = (detail::abstract_istream*)&_store[0]; + return i->readsome(buf,len); + } + #define read_help \ + detail::abstract_istream* aos = (detail::abstract_istream*)&i._store[0];\ + auto iist = (detail::io::stream*)(&aos->_store[0]); \ + (*iist) >> s; \ + return i; + + istream& operator>>( istream& i, int64_t& s){ read_help } + istream& operator>>( istream& i, uint64_t& s){ read_help } + istream& operator>>( istream& i, int32_t& s){ read_help } + istream& operator>>( istream& i, uint32_t& s){ read_help } + istream& operator>>( istream& i, int16_t& s){ read_help } + istream& operator>>( istream& i, 
uint16_t& s){ read_help } + istream& operator>>( istream& i, int8_t& s){ read_help } + istream& operator>>( istream& i, uint8_t& s){ read_help } + istream& operator>>( istream& i, float& s){ read_help } + istream& operator>>( istream& i, double& s){ read_help } + istream& operator>>( istream& i, bool& s){ read_help } + istream& operator>>( istream& i, char& s){ read_help } + istream& operator>>( istream& i, fc::string& s){ + std::string ss; + detail::abstract_istream* aos = (detail::abstract_istream*)&i._store[0]; + auto iist = (detail::io::stream*)(&aos->_store[0]); + (*iist) >> ss; + s = ss.c_str(); + return i; + } + + #undef read_help + + + ostream::~ostream(){ + detail::abstract_ostream* o = (detail::abstract_ostream*)&_store[0]; + close(); + o->~abstract_ostream(); + } + + size_t ostream::write( const char* buf, size_t len ){ + detail::abstract_ostream* o = (detail::abstract_ostream*)&_store[0]; + return o->write(buf,len); + } + void ostream::close(){ + detail::abstract_ostream* o = (detail::abstract_ostream*)&_store[0]; + o->close(); + } + void ostream::flush(){ + detail::abstract_ostream* o = (detail::abstract_ostream*)&_store[0]; + o->flush(); + } + #define print_help \ + detail::abstract_ostream* aos = (detail::abstract_ostream*)&o._store[0];\ + auto iost = (detail::io::stream*)(&aos->_store[0]); \ + (*iost) << s; \ + return o; + + ostream& operator<<( ostream& o, int64_t s ){ print_help } + ostream& operator<<( ostream& o, uint64_t s ){ print_help } + ostream& operator<<( ostream& o, int32_t s ){ print_help } + ostream& operator<<( ostream& o, uint32_t s ){ print_help } + ostream& operator<<( ostream& o, int16_t s ){ print_help } + ostream& operator<<( ostream& o, uint16_t s ){ print_help } + ostream& operator<<( ostream& o, int8_t s ){ print_help } + ostream& operator<<( ostream& o, uint8_t s ){ print_help } + ostream& operator<<( ostream& o, float s ){ print_help } + ostream& operator<<( ostream& o, double s ){ print_help } + ostream& operator<<( ostream& o, bool s ){ print_help } + ostream& operator<<( ostream& o, char s ){ print_help } + ostream& operator<<( ostream& o, const char* s ){ print_help } + ostream& operator<<( ostream& o, const fc::string& s ){ return o << s.c_str(); } + + #undef print_help + + ostream cout( std::cout ); + ostream cerr( std::cerr ); + istream cin( detail::get_cin_stream() ); +} //namespace fc diff --git a/src/string.cpp b/src/string.cpp new file mode 100644 index 0000000..169ed86 --- /dev/null +++ b/src/string.cpp @@ -0,0 +1,101 @@ +#include +#include + +#include +namespace detail { + void destroy( void* t ) { + using namespace std; + reinterpret_cast(t)->~string(); + } +} + +/** + * Implemented with std::string for now. 
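+ *  Each fc::string method reinterpret_casts its own storage as the
+ *  std::string that was placement-new'ed into it, so no extra
+ *  allocation or indirection is involved.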
+ */ + +namespace fc { + + string::string() { + static_assert( sizeof(*this) >= sizeof(std::string), "failed to reserve enough space" ); + new (this) std::string(); + } + + string::string( const string& c ) { + static_assert( sizeof(my) >= sizeof(std::string), "failed to reserve enough space" ); + new (this) std::string(reinterpret_cast(c)); + } + + + string::string( string&& m ) { + static_assert( sizeof(my) >= sizeof(std::string), "failed to reserve enough space" ); + new (this) std::string(reinterpret_cast(m)); + } + + string::string( const char* c ){ + static_assert( sizeof(my) >= sizeof(std::string), "failed to reserve enough space" ); + new (this) std::string(c); + } + + string::string( const_iterator b, const_iterator e ) { + static_assert( sizeof(my) >= sizeof(std::string), "failed to reserve enough space" ); + new (this) std::string(b,e); + } + + + string::~string() { + ::detail::destroy( this ); + } + + string::iterator string::begin() { return &reinterpret_cast(this)->front(); } + string::iterator string::end() { return &reinterpret_cast(this)->back() +1; }// my->str.size(); } + string::const_iterator string::begin()const { return reinterpret_cast(this)->c_str(); } + string::const_iterator string::end()const { return reinterpret_cast(this)->c_str() + reinterpret_cast(this)->size(); } + + char& string::operator[](uint64_t idx) { return reinterpret_cast(this)->at(idx); } + const char& string::operator[](uint64_t idx)const { return reinterpret_cast(this)->at(idx); } + + void string::reserve(uint64_t r) { reinterpret_cast(this)->reserve(r); } + uint64_t string::size()const { return reinterpret_cast(this)->size(); } + void string::clear() { return reinterpret_cast(this)->clear(); } + void string::resize( uint64_t s ) { reinterpret_cast(this)->resize(s); } + + const char* string::c_str()const { return reinterpret_cast(this)->c_str(); } + + bool string::operator == ( const char* s )const { + return reinterpret_cast(*this) == s; + } + bool string::operator == ( const string& s )const { + return reinterpret_cast(*this) == reinterpret_cast(s); + } + bool string::operator != ( const string& s )const { + return reinterpret_cast(*this) != reinterpret_cast(s); + } + + string& string::operator =( const string& c ) { + reinterpret_cast(*this) = reinterpret_cast(c); + return *this; + } + string& string::operator =( string&& c ) { + reinterpret_cast(*this) = fc::move( reinterpret_cast(c) ); + return *this; + } + + string& string::operator+=( const string& s ) { + reinterpret_cast(*this) += reinterpret_cast(s); + return *this; + } + string& string::operator+=( char c ) { + reinterpret_cast(*this) += c; + return *this; + } + + string operator + ( const string& s, const string& c ) { + return string(s) += c; + } + string operator + ( const string& s, char c ) { + return string(s) += c; + } + +} // namespace fc + + diff --git a/src/task.cpp b/src/task.cpp new file mode 100644 index 0000000..f798665 --- /dev/null +++ b/src/task.cpp @@ -0,0 +1,29 @@ +#include +#include +#include +#include + +namespace fc { + task_base::task_base(void* func) + :_functor(func){ + new (&_spinlock_store[0]) fc::spin_lock(); + } + + void task_base::run() { + try { + _run_functor( _functor, _promise_impl ); + } catch ( ... 
) {
+      _promise_impl->set_exception( current_exception() );
+    }
+  }
+  task_base::~task_base() {
+    _destroy_functor( _functor );
+  }
+
+  void task_base::_set_active_context(context* c) {
+    void* p = &_spinlock_store[0];
+    { synchronized( *((fc::spin_lock*)p))
+      _active_context = c;
+    }
+  }
+}
diff --git a/src/thread.cpp b/src/thread.cpp
new file mode 100644
index 0000000..da13d54
--- /dev/null
+++ b/src/thread.cpp
@@ -0,0 +1,330 @@
+#include
+#include
+#include "thread_d.hpp"
+
+namespace fc {
+  boost::mutex& log_mutex() {
+    static boost::mutex m; return m;
+  }
+
+  thread::thread( const char* name ) {
+    promise<void>::ptr p(new promise<void>());
+    boost::thread* t = new boost::thread( [this,p]() {
+      try {
+        this->my = new thread_d(*this);
+        p->set_value();
+        exec();
+      } catch ( ... ) {
+        elog( "Caught unhandled exception" );
+      }
+    } );
+    p->wait();
+    my->boost_thread = t;
+    set_name(name);
+  }
+  thread::thread( thread_d* ) {
+    my = new thread_d(*this);
+  }
+
+  thread::thread( thread&& m ) {
+    my = m.my;
+    m.my = 0;
+  }
+
+  thread& thread::operator=(thread&& t ) {
+    fc::swap(t.my,my);
+    return *this;
+  }
+
+  thread::~thread() {
+    delete my;
+  }
+
+  thread& thread::current() {
+    // Apple does not support __thread by default, but some custom gcc builds
+    // for Mac OS X support it.  Backup use boost::thread_specific_ptr
+    #if defined(__APPLE__) && (__GNUC__ <= 4 && __GNUC_MINOR__ < 4)
+    #warning using boost::thread_specific_ptr instead of __thread, use gcc 4.5 or newer for better performance.
+    static boost::thread_specific_ptr<thread> t;
+    if( !t.get() ) t.reset( new thread((thread_d*)0) );
+    return *t.get();
+    #else
+      #ifdef _MSC_VER
+        static __declspec(thread) thread* t = NULL;
+      #else
+        static __thread thread* t = NULL;
+      #endif
+      if( !t ) t = new thread((thread_d*)0);
+      return *t;
+    #endif
+  }
+  const string& thread::name()const { return my->name; }
+  void thread::set_name( const fc::string& n ) { my->name = n; }
+  void thread::debug( const fc::string& d ) { my->debug(d); }
+
+  void thread::quit() {
+    wlog( "quit!" );
+    if( &current() != this ) {
+      async( boost::bind( &thread::quit, this ) ).wait();
+      if( my->boost_thread ) {
+        my->boost_thread->join();
+      }
+      return;
+    }
+
+    // break all promises, thread quit!
+    fc::context* cur = my->blocked;
+    while( cur ) {
+      fc::context* n = cur->next;
+      // this will move the context into the ready list.
+      //cur->prom->set_exception( boost::copy_exception( error::thread_quit() ) );
+      cur->except_blocking_promises( thread_quit() );
+      cur = n;
+    }
+    BOOST_ASSERT( my->blocked == 0 );
+    //my->blocked = 0;
+
+
+    // move all sleep tasks to ready
+    for( uint32_t i = 0; i < my->sleep_pqueue.size(); ++i ) {
+      my->ready_push_front( my->sleep_pqueue[i] );
+    }
+    my->sleep_pqueue.clear();
+
+    // move all idle tasks to ready
+    cur = my->pt_head;
+    while( cur ) {
+      fc::context* n = cur->next;
+      cur->next = 0;
+      my->ready_push_front( cur );
+      cur = n;
+    }
+
+    // mark all ready tasks (should be everyone)... as canceled
+    cur = my->ready_head;
+    while( cur ) {
+      cur->canceled = true;
+      cur = cur->next;
+    }
+
+    my->done = true;
+
+    // now that we have poked all fibers... switch to the next one and
+    // let them all quit.
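+    // e.g. a fiber that was blocked on a promise sees thread_quit()
+    // rethrown from its wait(), while ready fibers observe 'canceled'
+    // and throw task_canceled from check_fiber_exceptions().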
+    while( my->ready_head ) {
+      my->start_next_fiber(true);
+      my->check_for_timeouts();
+    }
+    my->clear_free_list();
+  }
+
+  void thread::exec() {
+    if( !my->current ) my->current = new fc::context(&fc::thread::current());
+    my->process_tasks();
+    delete my->current;
+    my->current = 0;
+  }
+
+  bool thread::is_running()const {
+    return !my->done;
+  }
+
+  priority thread::current_priority()const {
+    BOOST_ASSERT(my);
+    if( my->current ) return my->current->prio;
+    return priority();
+  }
+
+  void thread::yield(bool reschedule ) {
+    my->check_fiber_exceptions();
+    my->start_next_fiber(reschedule);
+    my->check_fiber_exceptions();
+  }
+  void thread::sleep_until( const time_point& tp ) {
+    my->check_fiber_exceptions();
+
+    BOOST_ASSERT( &current() == this );
+    if( !my->current ) {
+      my->current = new fc::context(&fc::thread::current());
+    }
+
+    my->current->resume_time = tp;
+    my->current->clear_blocking_promises();
+
+    my->sleep_pqueue.push_back(my->current);
+    std::push_heap( my->sleep_pqueue.begin(),
+                    my->sleep_pqueue.end(), sleep_priority_less() );
+
+    my->start_next_fiber();
+    my->current->resume_time = time_point::max();
+
+    my->check_fiber_exceptions();
+  }
+  int thread::wait_any_until( fc::vector<promise_base::ptr>&& p, const time_point& timeout) {
+    for( size_t i = 0; i < p.size(); ++i ) {
+      if( p[i]->ready() ) return i;
+    }
+
+    if( timeout < time_point::now() )
+      BOOST_THROW_EXCEPTION( future_wait_timeout() );
+
+    if( !my->current ) {
+      my->current = new fc::context(&fc::thread::current());
+    }
+
+    for( uint32_t i = 0; i < p.size(); ++i ) {
+      my->current->add_blocking_promise(p[i].get(),false);
+    };
+
+    // if not max timeout, added to sleep pqueue
+    if( timeout != time_point::max() ) {
+      my->current->resume_time = timeout;
+      my->sleep_pqueue.push_back(my->current);
+      std::push_heap( my->sleep_pqueue.begin(),
+                      my->sleep_pqueue.end(),
+                      sleep_priority_less() );
+    }
+    my->add_to_blocked( my->current );
+    my->start_next_fiber();
+
+    for( auto i = p.begin(); i != p.end(); ++i ) {
+      my->current->remove_blocking_promise(i->get());
+    }
+
+    my->check_fiber_exceptions();
+
+    for( uint32_t i = 0; i < p.size(); ++i ) {
+      if( p[i]->ready() ) return i;
+    }
+    BOOST_THROW_EXCEPTION( wait_any_error() );
+    return -1;
+  }
+
+  void thread::async_task( task_base* t, const priority& p, const char* desc ) {
+    async_task( t, p, time_point::max(), desc );
+  }
+
+  void thread::async_task( task_base* t, const priority& p, const time_point& tp, const char* desc ) {
+    task_base* stale_head = my->task_in_queue.load(boost::memory_order_relaxed);
+    do { t->_next = stale_head;
+    }while( !my->task_in_queue.compare_exchange_weak( stale_head, t, boost::memory_order_release ) );
+
+    // Because only one thread can post the 'first task', only that thread will attempt
+    // to acquire the lock and therefore there should be no contention on this lock except
+    // when *this thread is about to block on a wait condition.
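+    // e.g. two producers pushing concurrently both CAS their task onto
+    // the head; dequeue() later drains the whole chain with a single
+    // exchange() and re-orders it into the priority/scheduled heaps.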
+ if( this != ¤t() && !stale_head ) { + boost::unique_lock lock(my->task_ready_mutex); + my->task_ready.notify_one(); + } + } + + void yield() { + thread::current().yield(); + } + void usleep( const microseconds& u ) { + thread::current().sleep_until( time_point::now() + u); + } + void sleep_until( const time_point& tp ) { + thread::current().sleep_until(tp); + } + + void exec() { + return thread::current().exec(); + } + + int wait_any( fc::vector&& v, const microseconds& timeout_us ) { + return thread::current().wait_any_until( fc::move(v), time_point::now() + timeout_us ); + } + int wait_any_until( fc::vector&& v, const time_point& tp ) { + return thread::current().wait_any_until( fc::move(v), tp ); + } + void thread::wait_until( promise_base::ptr&& p, const time_point& timeout ) { + if( p->ready() ) return; + if( timeout < time_point::now() ) + BOOST_THROW_EXCEPTION( future_wait_timeout() ); + + if( !my->current ) { + my->current = new fc::context(&fc::thread::current()); + } + + //slog( " %1% blocking on %2%", my->current, p.get() ); + my->current->add_blocking_promise(p.get(),true); + + // if not max timeout, added to sleep pqueue + if( timeout != time_point::max() ) { + my->current->resume_time = timeout; + my->sleep_pqueue.push_back(my->current); + std::push_heap( my->sleep_pqueue.begin(), + my->sleep_pqueue.end(), + sleep_priority_less() ); + } + + // elog( "blocking %1%", my->current ); + my->add_to_blocked( my->current ); + // my->debug("swtiching fibers..." ); + + + my->start_next_fiber(); + // slog( "resuming %1%", my->current ); + + //slog( " %1% unblocking blocking on %2%", my->current, p.get() ); + my->current->remove_blocking_promise(p.get()); + + my->check_fiber_exceptions(); + } + + void thread::notify( const promise_base::ptr& p ) { + BOOST_ASSERT(p->ready()); + if( ¤t() != this ) { + this->async( boost::bind( &thread::notify, this, p ) ); + return; + } + //slog( " notify task complete %1%", p.get() ); + //debug( "begin notify" ); + // TODO: store a list of blocked contexts with the promise + // to accelerate the lookup.... unless it introduces contention... + + // iterate over all blocked contexts + + + fc::context* cur_blocked = my->blocked; + fc::context* prev_blocked = 0; + while( cur_blocked ) { + // if the blocked context is waiting on this promise + // slog( "try unblock ctx %1% from prom %2%", cur_blocked, p.get() ); + if( cur_blocked->try_unblock( p.get() ) ) { + //slog( "unblock!" ); + // remove it from the blocked list. + + // remove this context from the sleep queue... 
+ for( uint32_t i = 0; i < my->sleep_pqueue.size(); ++i ) { + if( my->sleep_pqueue[i] == cur_blocked ) { + my->sleep_pqueue[i]->blocking_prom.clear(); + my->sleep_pqueue[i] = my->sleep_pqueue.back(); + my->sleep_pqueue.pop_back(); + std::make_heap( my->sleep_pqueue.begin(),my->sleep_pqueue.end(), sleep_priority_less() ); + break; + } + } + auto cur = cur_blocked; + if( prev_blocked ) { + prev_blocked->next_blocked = cur_blocked->next_blocked; + cur_blocked = prev_blocked->next_blocked; + } else { + my->blocked = cur_blocked->next_blocked; + cur_blocked = my->blocked; + } + cur->next_blocked = 0; + my->ready_push_front( cur ); + } else { // goto the next blocked task + prev_blocked = cur_blocked; + cur_blocked = cur_blocked->next_blocked; + } + } + //debug( "end notify" ); + + + } + + +} diff --git a/src/thread_d.hpp b/src/thread_d.hpp new file mode 100644 index 0000000..dad5658 --- /dev/null +++ b/src/thread_d.hpp @@ -0,0 +1,432 @@ +#include +#include +#include + +#include +#include "context.hpp" +#include +#include +#include +#include +#include + +namespace fc { + struct sleep_priority_less { + bool operator()( const context::ptr& a, const context::ptr& b ) { + return a->resume_time > b->resume_time; + } + }; + class thread_d { + public: + thread_d(fc::thread& s) + :self(s), boost_thread(0), + task_in_queue(0), + done(false), + current(0), + pt_head(0), + ready_head(0), + ready_tail(0), + blocked(0) + { + static char cnt = 0; + name = fc::string("th_") + char('a'+cnt); + cnt++; + } + fc::thread& self; + boost::thread* boost_thread; + bc::stack_allocator stack_alloc; + boost::mutex task_ready_mutex; + boost::condition_variable task_ready; + + boost::atomic task_in_queue; + std::vector task_pqueue; + std::vector task_sch_queue; + std::vector sleep_pqueue; + std::vector free_list; + + bool done; + fc::string name; + fc::context* current; + + fc::context* pt_head; + + fc::context* ready_head; + fc::context* ready_tail; + + fc::context* blocked; + + + + void debug( const fc::string& s ) { + boost::unique_lock lock(log_mutex()); + + std::cerr<<"--------------------- "<cur_task ) std::cerr<<'('<cur_task->get_desc()<<')'; + std::cerr<<" ---------------------------\n"; + std::cerr<<" Ready\n"; + fc::context* c = ready_head; + while( c ) { + std::cerr<<" "<cur_task ) std::cerr<<'('<cur_task->get_desc()<<')'; + fc::context* p = c->caller_context; + while( p ) { + std::cerr<<" -> "<caller_context; + } + std::cerr<<"\n"; + c = c->next; + } + std::cerr<<" Blocked\n"; + c = blocked; + while( c ) { + std::cerr<<" ctx: "<< c; + if( c->cur_task ) std::cerr<<'('<cur_task->get_desc()<<')'; + std::cerr << " blocked on prom: "; + for( uint32_t i = 0; i < c->blocking_prom.size(); ++i ) { + std::cerr<blocking_prom[i].prom<<'('<blocking_prom[i].prom->get_desc()<<')'; + if( i + 1 < c->blocking_prom.size() ) { + std::cerr<<","; + } + } + + fc::context* p = c->caller_context; + while( p ) { + std::cerr<<" -> "<caller_context; + } + std::cerr<<"\n"; + c = c->next_blocked; + } + std::cerr<<"-------------------------------------------------\n"; + } + + // insert at from of blocked linked list + inline void add_to_blocked( fc::context* c ) { + c->next_blocked = blocked; + blocked = c; + } + + void pt_push_back(fc::context* c) { + c->next = pt_head; + pt_head = c; + /* + fc::context* n = pt_head; + int i = 0; + while( n ) { + ++i; + n = n->next; + } + wlog( "idle context...%2% %1%", c, i ); + */ + } + fc::context::ptr ready_pop_front() { + fc::context::ptr tmp = 0; + if( ready_head ) { + tmp = ready_head; + ready_head = 
tmp->next; + if( !ready_head ) + ready_tail = 0; + tmp->next = 0; + } + return tmp; + } + void ready_push_front( const fc::context::ptr& c ) { + c->next = ready_head; + ready_head = c; + if( !ready_tail ) + ready_tail = c; + } + void ready_push_back( const fc::context::ptr& c ) { + c->next = 0; + if( ready_tail ) { + ready_tail->next = c; + } else { + ready_head = c; + } + ready_tail = c; + } + struct task_priority_less { + bool operator()( task_base* a, task_base* b ) { + return a->_prio.value < b->_prio.value ? true : (a->_prio.value > b->_prio.value ? false : a->_posted_num > b->_posted_num ); + } + }; + struct task_when_less { + bool operator()( task_base* a, task_base* b ) { + return a->_when < b->_when; + } + }; + + void enqueue( task_base* t ) { + time_point now = time_point::now(); + task_base* cur = t; + while( cur ) { + if( cur->_when > now ) { + task_sch_queue.push_back(cur); + std::push_heap( task_sch_queue.begin(), + task_sch_queue.end(), task_when_less() ); + } else { + task_pqueue.push_back(cur); + BOOST_ASSERT( this == thread::current().my ); + std::push_heap( task_pqueue.begin(), + task_pqueue.end(), task_priority_less() ); + } + cur = cur->_next; + } + } + task_base* dequeue() { + // get a new task + BOOST_ASSERT( this == thread::current().my ); + + task_base* pending = 0; + + pending = task_in_queue.exchange(0,boost::memory_order_consume); + if( pending ) { enqueue( pending ); } + + task_base* p(0); + if( task_sch_queue.size() ) { + if( task_sch_queue.front()->_when <= time_point::now() ) { + p = task_sch_queue.front(); + std::pop_heap(task_sch_queue.begin(), task_sch_queue.end(), task_when_less() ); + task_sch_queue.pop_back(); + return p; + } + } + if( task_pqueue.size() ) { + p = task_pqueue.front(); + std::pop_heap(task_pqueue.begin(), task_pqueue.end(), task_priority_less() ); + task_pqueue.pop_back(); + } + return p; + } + + /** + * This should be before or after a context switch to + * detect quit/cancel operations and throw an exception. + */ + void check_fiber_exceptions() { + if( current && current->canceled ) { + BOOST_THROW_EXCEPTION( task_canceled() ); + } else if( done ) { + BOOST_THROW_EXCEPTION( thread_quit() ); + } + } + + /** + * Find the next available context and switch to it. + * If none are available then create a new context and + * have it wait for something to do. + */ + bool start_next_fiber( bool reschedule = false ) { + check_for_timeouts(); + if( !current ) current = new fc::context( &fc::thread::current() ); + + // check to see if any other contexts are ready + if( ready_head ) { + fc::context* next = ready_pop_front(); + BOOST_ASSERT( next != current ); + if( reschedule ) ready_push_back(current); + + // jump to next context, saving current context + fc::context* prev = current; + current = next; + bc::jump_fcontext( &prev->my_context, &next->my_context, 0 ); + current = prev; + BOOST_ASSERT( current ); + } else { // all contexts are blocked, create a new context + // that will process posted tasks... 
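+        // e.g. when every fiber is blocked on a promise, this branch
+        // reuses an idle context from pt_head or creates a fresh one
+        // running start_process_tasks.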
+        if( reschedule ) ready_push_back(current);
+
+        fc::context* next;
+        if( pt_head ) {
+          next = pt_head;
+          pt_head = pt_head->next;
+          next->next = 0;
+        } else {
+          next = new fc::context( &thread_d::start_process_tasks, stack_alloc,
+                                  &fc::thread::current() );
+        }
+        fc::context* prev = current;
+        current = next;
+        bc::jump_fcontext( &prev->my_context, &next->my_context, (intptr_t)this );
+        current = prev;
+        BOOST_ASSERT( current );
+      }
+
+      if( current->canceled )
+        BOOST_THROW_EXCEPTION( task_canceled() );
+
+      return true;
+    }
+
+    static void start_process_tasks( intptr_t my ) {
+      thread_d* self = (thread_d*)my;
+      try {
+        self->process_tasks();
+      } catch ( ... ) {
+        std::cerr<<"fiber exited with uncaught exception:\n "<<
+          boost::current_exception_diagnostic_information()<<std::endl;
+      }
+      self->free_list.push_back(self->current);
+      self->start_next_fiber( false );
+    }
+
+    bool run_next_task() {
+      check_for_timeouts();
+      task_base* next = dequeue();
+      if( next ) {
+        next->_set_active_context( current );
+        current->cur_task = next;
+        next->run();
+        current->cur_task = 0;
+        next->_set_active_context(0);
+        delete next;
+        return true;
+      }
+      return false;
+    }
+    bool has_next_task() {
+      if( task_pqueue.size() ||
+          (task_sch_queue.size() && task_sch_queue.front()->_when <= time_point::now()) ||
+          task_in_queue.load( boost::memory_order_relaxed ) )
+        return true;
+      return false;
+    }
+    void clear_free_list() {
+      for( uint32_t i = 0; i < free_list.size(); ++i ) {
+        delete free_list[i];
+      }
+      free_list.clear();
+    }
+    void process_tasks() {
+      while( !done || blocked ) {
+        if( run_next_task() ) continue;
+
+        // if I have something else to do other than
+        // process tasks... do it.
+        if( ready_head ) {
+          pt_push_back( current );
+          start_next_fiber(false);
+          continue;
+        }
+
+        clear_free_list();
+
+        { // lock scope
+          boost::unique_lock<boost::mutex> lock(task_ready_mutex);
+          if( has_next_task() ) continue;
+          time_point timeout_time = check_for_timeouts();
+
+          if( timeout_time == time_point::max() ) {
+            task_ready.wait( lock );
+          } else if( timeout_time != time_point::min() ) {
+            task_ready.wait_until( lock, boost::chrono::system_clock::time_point() +
+                                         boost::chrono::microseconds(timeout_time.time_since_epoch().count()) );
+          }
+        }
+      }
+    }
+    /**
+     *  Return system_clock::time_point::min() if tasks have timed out
+     *  Return system_clock::time_point::max() if there are no scheduled tasks
+     *  Return the time the next task needs to be run if there is anything scheduled.
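+     *
+     *  e.g. with one task scheduled 5ms from now and no expired
+     *  sleepers, process_tasks() waits on task_ready until that time.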
+ */ + time_point check_for_timeouts() { + if( !sleep_pqueue.size() && !task_sch_queue.size() ) { + return time_point::max(); + } + + + time_point next = time_point::max(); + if( task_sch_queue.size() && next > task_sch_queue.front()->_when ) + next = task_sch_queue.front()->_when; + if( sleep_pqueue.size() && next > sleep_pqueue.front()->resume_time ) + next = sleep_pqueue.front()->resume_time; + + time_point now = time_point::now(); + if( now < next ) { return next; } + + // move all expired sleeping tasks to the ready queue + while( sleep_pqueue.size() && sleep_pqueue.front()->resume_time < now ) { + fc::context::ptr c = sleep_pqueue.front(); + std::pop_heap(sleep_pqueue.begin(), sleep_pqueue.end(), sleep_priority_less() ); + sleep_pqueue.pop_back(); + + if( c->blocking_prom.size() ) { + c->timeout_blocking_promises(); + } + else { ready_push_back( c ); } + } + return time_point::min(); + } + + + void yield_until( const time_point& tp, bool reschedule ) { + check_fiber_exceptions(); + + if( tp <= time_point::now() ) + return; + + if( !current ) { + current = new fc::context(&fc::thread::current()); + } + + current->resume_time = tp; + current->clear_blocking_promises(); + + sleep_pqueue.push_back(current); + std::push_heap( sleep_pqueue.begin(), + sleep_pqueue.end(), sleep_priority_less() ); + + start_next_fiber(reschedule); + + // clear current context from sleep queue... + for( uint32_t i = 0; i < sleep_pqueue.size(); ++i ) { + if( sleep_pqueue[i] == current ) { + sleep_pqueue[i] = sleep_pqueue.back(); + sleep_pqueue.pop_back(); + std::make_heap( sleep_pqueue.begin(), + sleep_pqueue.end(), sleep_priority_less() ); + break; + } + } + + current->resume_time = time_point::max(); + check_fiber_exceptions(); + } + + void wait( const promise_base::ptr& p, const time_point& timeout ) { + if( p->ready() ) return; + if( timeout < time_point::now() ) + BOOST_THROW_EXCEPTION( future_wait_timeout() ); + + if( !current ) { + current = new fc::context(&fc::thread::current()); + } + + //slog( " %1% blocking on %2%", current, p.get() ); + current->add_blocking_promise(p.get(),true); + + // if not max timeout, added to sleep pqueue + if( timeout != time_point::max() ) { + current->resume_time = timeout; + sleep_pqueue.push_back(current); + std::push_heap( sleep_pqueue.begin(), + sleep_pqueue.end(), + sleep_priority_less() ); + } + + // elog( "blocking %1%", current ); + add_to_blocked( current ); + // debug("swtiching fibers..." 
); + + + start_next_fiber(); + // slog( "resuming %1%", current ); + + //slog( " %1% unblocking blocking on %2%", current, p.get() ); + current->remove_blocking_promise(p.get()); + + check_fiber_exceptions(); + } + }; +} // namespace fc diff --git a/src/time.cpp b/src/time.cpp new file mode 100644 index 0000000..b55d767 --- /dev/null +++ b/src/time.cpp @@ -0,0 +1,9 @@ +#include +#include + +namespace fc { + namespace bch = boost::chrono; + time_point time_point::now() { + return time_point(microseconds(bch::duration_cast(bch::system_clock::now().time_since_epoch()).count())); + } +} diff --git a/src/value.cpp b/src/value.cpp new file mode 100644 index 0000000..98e991b --- /dev/null +++ b/src/value.cpp @@ -0,0 +1,375 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +FC_REFLECT( fc::value::member, (_val) ) + + +namespace fc { + value::member::member(){} + value::member::member( const char* c ) + :_key(c){ } + value::member::member( string&& c ) + :_key(fc::move(c)){ } + + const string& value::member::key()const { return *_key; } + value& value::member::val() { return *_val; } + const value& value::member::val()const { return *_val; } + + value::value() + :_obj(nullptr),_obj_type(nullptr){ slog( "%p", this ); } + + value::value( value&& v ) + :_obj(v._obj),_obj_type(v._obj_type) + { + slog( "move construct value" ); + v._obj_type = nullptr; + v._obj = nullptr; + } + + value::~value() { + slog( "~%p", this ); + if( nullptr != _obj_type ) { + if( _obj != nullptr ) { + slog( "obj_type %p", _obj_type ); + slog( "obj_type %s", _obj_type->name() ); + slog(".. obj type %p %s", _obj, _obj_type->name() ); + size_t s = _obj_type->size_of(); + if( s > sizeof(_obj) ) { + slog( "destroy! %p", _obj ); + _obj_type->destroy( _obj ); + } else { + slog( "destructor! %p", &_obj ); + _obj_type->destructor( &_obj ); + } + } + } + } + + value::value( const cref& v ) { + slog( "this:%p %s from cref" , this, v._reflector.name()); + _obj_type = &v._reflector; + size_t s = _obj_type->size_of(); + if( s > sizeof(_obj) ) { + slog( "construct %s heap of size %d",_obj_type->name(),_obj_type->size_of() ); + _obj = new char[_obj_type->size_of()]; + slog( "v._obj %p", v._obj ); + _obj_type->copy_construct( _obj, v._obj ); + } else { + slog( "construct %s in place %p type p %p", _obj_type->name(), _obj,_obj_type ); + _obj_type->copy_construct( &_obj, v._obj ); + } + } + + value::value( const value& v ) { + slog( "%p", this ); + // slog( "copy v %s", v.type()->name() ); + _obj_type = v._obj_type; + if( nullptr != _obj_type ) { + size_t s = _obj_type->size_of(); + if( s > sizeof(_obj) ) { + _obj = new char[_obj_type->size_of()]; + _obj_type->copy_construct( _obj, v._obj ); + } else { + _obj_type->copy_construct( &_obj, &v._obj ); + } + } + } + value& value::operator=( value&& v ) { + swap( v._obj, _obj); + swap( v._obj_type, _obj_type); + return *this; + if( v.type() == nullptr ) { + return *this; + } + slog( "move assign v %s", v.type()->name() ); + size_t s = _obj_type->size_of(); + if( s > sizeof(_obj) ) { + slog( "swap pointers to heap.." 
); + fc::swap( _obj, v._obj ); + fc::swap( _obj_type, v._obj_type ); + } else { + slog( "move construct in place %p %s", this, v._obj_type->name() ); + int64_t tmp; + if( nullptr != _obj_type && nullptr != v._obj_type ) { + slog( "swaping objs %s and %s", _obj_type->name(), v._obj_type->name() ); + slog( "swaping objs %p and %p", &_obj, &v._obj ); + slog( "&tmp = %p", &tmp ); + _obj_type->move_construct( &tmp, &_obj ); + slog( "move to tmp" ); + v._obj_type->move_construct( &_obj, &v._obj ); + slog( "move to dest" ); + _obj_type->move_construct( &v._obj, &tmp ); + slog( "move to src" ); + } else { + fc::swap( _obj, v._obj ); + fc::swap( _obj_type, v._obj_type ); + } + } + + /* + value tmp(std::move(v)); + return *this; + size_t s = _obj_type->size_of(); + if( s > sizeof(_obj) ) { + slog( "" ); + fc::swap( _obj, v._obj ); + } else { + slog( "swap..." ); + void* tmp; + _obj_type->move( &tmp, &_obj ); + _obj_type->move( &_obj, &v._obj ); + _obj_type->move( &v._obj, &tmp ); + } + fc::swap( _obj_type, v._obj_type ); + */ + return *this; + } + value& value::operator=( const value& v ) { + slog( "assign copy" ); + value t(v); fc::swap(t,*this); + return *this; + } + value& value::operator=( const cref& v ) { + //slog( "assign copy this %p %p %s obj_type %s",_obj,_obj_type, v._reflector.name(),_obj_type->name() ); + //if( _obj_type != null_ptr ) { + //} + wlog( ".." ); + value t(v); + + wlog( "swap" ); + //swap( t._obj, _obj ); + //swap( t._obj_type, _obj_type ); + fc::swap(t,*this); + slog( "done swap" ); + return *this; + } + /** + * @pre value is null or an object + */ + value& value::operator[]( const string& key ) { + return (*this)[key.c_str()]; + } + + value& value::operator[]( string&& key ) { + if( is_null() ) { + *this = vector(1); + } + if( _obj_type == &reflector< vector >::instance() ) { + vector& vec = *static_cast*>(ptr()); + for( uint32_t i = 0; i < vec.size(); ++i ) { + if( vec[i].key() == key ) { return vec[i].val(); } + } + vec.push_back(member(fc::move(key))); + return vec.back().val(); + } + FC_THROW( bad_cast() ); + } + + + const value& value::operator[]( const string& key )const { + return (*this)[key.c_str()]; + } + + value& value::operator[]( const char* key ) { + if( is_null() ) { + *this = vector(); + } + if( _obj_type == &reflector >::instance() ) { + slog( "sizeof vector: %d", sizeof( vector ) ); + vector& v = *static_cast*>((void*)&_obj); + vector::iterator i = v.begin(); + while( i != v.end() ) { + // todo convert to string cmp to prevent temporary string?? + if( i->key() == key ) { + return i->val(); + } + ++i; + } + v.push_back( member( key ) ); + return v.back().val(); + } + // visit the native struct looking for key and return a ref to the value + // + // if not found, then convert native struct into vector and recurse + } + const value& value::operator[]( const char* key )const { + if( is_null() ) { + // TODO: throw! 
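+      // (a null value currently falls through and triggers the
+      //  FC_THROW( bad_cast() ) at the end of this function)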
+ } + if( _obj_type == &reflector >::instance() ) { + const vector& v = *static_cast*>((void*)&_obj); + vector::const_iterator i = v.begin(); + while( i != v.end() ) { + if( i->key() == key ) { + return i->val(); + } + ++i; + } + FC_THROW( range_error() ); + } + FC_THROW( bad_cast() ); + } + + value& value::operator[]( int index ) { + return (*this)[uint64_t(index)]; + } + value& value::operator[]( uint64_t index ) { + if( is_null() ) { + slog( "init from vector of size %d", index+1 ); + //static_assert( sizeof(_obj) >= sizeof(vector), "sanity check" ); + *this = vector(index+1); + //new (&_obj) vector(index+1); + //_obj_type = &reflector >::instance(); + } + if( _obj_type == &reflector >::instance() ) { + slog( "return ref to index..." ); + vector& v = *static_cast*>(ptr()); + if( v.size() <= index ) { v.resize(index+1); } + slog( "index %d vs size %d", index, v.size() ); + return v.at(index); + } + // visit the native struct looking for index... + // + // + } + const value& value::operator[]( uint64_t index )const { + if( is_null() ) { + // THROW + while(1) ; + } + if( _obj_type == &reflector >::instance() ) { + const vector& v = *static_cast*>(ptr()); + return v[index]; + } + // visit the native struct looking for index... throw if not found. + } + + bool value::key_exists( const string& key ) { + return key_exists(key.c_str()); + } + bool value::key_exists( const char* key ) { + return false; + } + bool value::is_array()const { + return _obj_type == &reflector >::instance(); + } + bool value::is_object()const { + return _obj_type == &reflector >::instance(); + } + bool value::is_null()const { + return _obj_type == nullptr; + } + bool value::is_string()const { + return _obj_type == &reflector::instance(); + } + bool value::is_float()const { + return _obj_type == &reflector::instance(); + } + bool value::is_double()const { + return _obj_type == &reflector::instance(); + } + bool value::is_real()const { + return is_float() || is_double(); + } + bool value::is_integer()const { + return false; + } + bool value::is_boolean()const { + return _obj_type == &reflector::instance(); + } + fwd,8> value::get_keys()const { + fwd,8> s; + return s; + } + + value& value::push_back( const value& v ) { + slog("here I go again... type %p %s", _obj_type, _obj_type ? _obj_type->name(): "null" ); + if( is_null() ) { + wlog( "converting this to vector..." ); + *this = vector(); + } + if( _obj_type == &reflector >::instance() ) { + vector& vec = *static_cast*>(ptr()); + vec.push_back(v); + } else { + FC_THROW( bad_cast() ); + } + return *this; + } + value& value::push_back( value&& v ) { + slog("here I go again... type %p %s", _obj_type, _obj_type ? 
+  value& value::push_back( value&& v ) {
+    slog("here I go again... type %p %s", _obj_type, _obj_type ? _obj_type->name(): "null" );
+    if( is_null() ) {
+      *this = vector<value>();
+    }
+    if( _obj_type == &reflector< vector<value> >::instance() ) {
+      vector<value>& vec = *static_cast<vector<value>*>(ptr());
+      vec.push_back(fc::move(v));
+    } else {
+      FC_THROW( bad_cast() );
+    }
+    return *this;
+  }
+
+  void* value::ptr(){
+    if( nullptr != _obj_type ) {
+      if( _obj_type->size_of() > sizeof(_obj) )
+        return _obj;  // large objects live on the heap; _obj holds the pointer
+      return &_obj;   // small objects are stored in place
+    }
+    return nullptr;
+  }
+  const void* value::ptr()const {
+    if( _obj_type ) {
+      if( _obj_type->size_of() > sizeof(_obj) )
+        return _obj;
+      return &_obj;
+    }
+    return nullptr;
+  }
+
+  abstract_reflector* value::type()const { return _obj_type; }
+} // namespace fc
+
+
+namespace fc {
+  const char* reflector<value>::name()const { return "value"; }
+  void reflector<value>::visit( void* s, const abstract_visitor& v )const {
+    // TODO: mutable visitation is not implemented yet
+  }
+  void reflector<value>::visit( const void* s, const abstract_const_visitor& v )const {
+    const value& val = *((const value*)s);
+    if( val.is_null() ) { v.visit(); }
+    else if( val.is_array() ) {
+      const vector<value>& vec = *static_cast<const vector<value>*>(val.ptr());
+      auto size = vec.size();
+      auto e = vec.end();
+      int idx = 0;
+      for( auto i = vec.begin(); i != e; ++i ) {
+        v.visit( idx, size, *i );
+        ++idx;
+      }
+    } else if( val.is_object() ) {
+      const vector<member>& vec = *static_cast<const vector<member>*>(val.ptr());
+      auto size = vec.size();
+      auto e = vec.end();
+      int idx = 0;
+      for( auto i = vec.begin(); i != e; ++i ) {
+        v.visit( i->key().c_str(), idx, size, i->val() );
+        ++idx;
+      }
+    } else {
+      slog( "val type %s", val.type()->name() );
+      val.type()->visit(val.ptr(), v );
+    }
+  }
+  reflector<value>& reflector<value>::instance() { static reflector<value> inst; return inst; }
+} // namespace fc
+
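Note: reflector<value>::visit is what lets a dynamic value participate in the same
visitation protocol as reflected native types: null, array, and object are walked here,
and any other payload is re-dispatched through its own reflector. The dispatch shape,
reduced to a self-contained sketch (these visitor and node types are illustrative, not
the fc declarations):

    #include <string>
    #include <vector>

    struct node; // tree node standing in for fc::value

    struct const_visitor {
      virtual ~const_visitor() {}
      virtual void visit() const = 0;                                  // null
      virtual void visit( const std::string& leaf ) const = 0;         // leaf payload
      virtual void visit( int idx, int size, const node& ) const = 0;  // array slot
    };

    struct node {
      std::vector<node> children;
      std::string       text; // leaf payload when children is empty

      void accept( const const_visitor& v ) const {
        if( children.empty() && text.empty() ) { v.visit(); return; }
        if( children.empty() )                 { v.visit( text ); return; }
        const int size = (int)children.size();
        for( int i = 0; i < size; ++i )
          v.visit( i, size, children[i] ); // the visitor recurses via accept()
      }
    };

A concrete visitor overrides the three cases and calls accept() again from the array
hook, mirroring how abstract_const_visitor receives each element and continues the walk.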
diff --git a/src/value_cast.cpp b/src/value_cast.cpp
new file mode 100644
index 0000000..abaeff1
--- /dev/null
+++ b/src/value_cast.cpp
@@ -0,0 +1,95 @@
+#include <fc/value_cast.hpp>
+#include <fc/value.hpp>
+#include <fc/exception.hpp>
+#include <boost/lexical_cast.hpp>
+
+
+namespace fc {
+
+  #define CAST_VISITOR_IMPL(X) \
+  void reinterpret_value_visitor<X>::visit()const{\
+    FC_THROW( bad_cast() );\
+  } \
+  void reinterpret_value_visitor<X>::visit( const char& c )const{ _s = c; } \
+  void reinterpret_value_visitor<X>::visit( const uint8_t& c )const{ _s = c; } \
+  void reinterpret_value_visitor<X>::visit( const uint16_t& c )const{ _s = c; } \
+  void reinterpret_value_visitor<X>::visit( const uint32_t& c )const{ _s = c; } \
+  void reinterpret_value_visitor<X>::visit( const uint64_t& c )const{ _s = c; } \
+  void reinterpret_value_visitor<X>::visit( const int8_t& c )const{ _s = c; } \
+  void reinterpret_value_visitor<X>::visit( const int16_t& c )const{ _s = c; } \
+  void reinterpret_value_visitor<X>::visit( const int32_t& c )const{ _s = c; } \
+  void reinterpret_value_visitor<X>::visit( const int64_t& c )const{ _s = c; } \
+  void reinterpret_value_visitor<X>::visit( const double& c )const{ _s = c; } \
+  void reinterpret_value_visitor<X>::visit( const float& c )const{ _s = c; } \
+  void reinterpret_value_visitor<X>::visit( const bool& c )const{ _s = c; } \
+  void reinterpret_value_visitor<X>::visit( const string& c )const{\
+    _s = boost::lexical_cast<X>( reinterpret_cast<const std::string&>(c) ); \
+  } \
+  void reinterpret_value_visitor<X>::visit( const char* member, int idx, int size, \
+                                            const cref& v)const{\
+    FC_THROW( bad_cast() );\
+  }\
+  void reinterpret_value_visitor<X>::visit( int idx, int size, const cref& v)const{\
+    FC_THROW( bad_cast() );\
+  }
+
+  CAST_VISITOR_IMPL(int64_t);
+  CAST_VISITOR_IMPL(int32_t);
+  CAST_VISITOR_IMPL(int16_t);
+  CAST_VISITOR_IMPL(int8_t);
+  CAST_VISITOR_IMPL(uint64_t);
+  CAST_VISITOR_IMPL(uint32_t);
+  CAST_VISITOR_IMPL(uint16_t);
+  CAST_VISITOR_IMPL(uint8_t);
+  CAST_VISITOR_IMPL(double);
+  CAST_VISITOR_IMPL(float);
+  CAST_VISITOR_IMPL(bool);
+
+  #undef CAST_VISITOR_IMPL
+
+  // string targets: numeric sources are formatted via boost::lexical_cast
+  void reinterpret_value_visitor<string>::visit()const{
+    FC_THROW( bad_cast() );
+  }
+  void reinterpret_value_visitor<string>::visit( const char& c )const{
+    slog("" );
+    reinterpret_cast<std::string&>(_s) = boost::lexical_cast<std::string>(c);
+  }
+  void reinterpret_value_visitor<string>::visit( const uint8_t& c )const{
+    reinterpret_cast<std::string&>(_s) = boost::lexical_cast<std::string>(c); }
+  void reinterpret_value_visitor<string>::visit( const uint16_t& c )const{
+    reinterpret_cast<std::string&>(_s) = boost::lexical_cast<std::string>(c); }
+  void reinterpret_value_visitor<string>::visit( const uint32_t& c )const{
+    reinterpret_cast<std::string&>(_s) = boost::lexical_cast<std::string>(c); }
+  void reinterpret_value_visitor<string>::visit( const uint64_t& c )const{
+    reinterpret_cast<std::string&>(_s) = boost::lexical_cast<std::string>(c); }
+  void reinterpret_value_visitor<string>::visit( const int8_t& c )const{
+    reinterpret_cast<std::string&>(_s) = boost::lexical_cast<std::string>(c); }
+  void reinterpret_value_visitor<string>::visit( const int16_t& c )const{
+    reinterpret_cast<std::string&>(_s) = boost::lexical_cast<std::string>(c); }
+  void reinterpret_value_visitor<string>::visit( const int32_t& c )const{
+    reinterpret_cast<std::string&>(_s) = boost::lexical_cast<std::string>(c); }
+  void reinterpret_value_visitor<string>::visit( const int64_t& c )const{
+    reinterpret_cast<std::string&>(_s) = boost::lexical_cast<std::string>(c); }
+  void reinterpret_value_visitor<string>::visit( const double& c )const{
+    reinterpret_cast<std::string&>(_s) = boost::lexical_cast<std::string>(c); }
+  void reinterpret_value_visitor<string>::visit( const float& c )const{
+    reinterpret_cast<std::string&>(_s) = boost::lexical_cast<std::string>(c); }
+  void reinterpret_value_visitor<string>::visit( const bool& c )const{
+    reinterpret_cast<std::string&>(_s) = boost::lexical_cast<std::string>(c); }
+
+  void reinterpret_value_visitor<string>::visit( const string& c )const{
+    slog( "" );
+    _s = c;
+  }
+  void reinterpret_value_visitor<string>::visit( const char* member, int idx, int size,
+                                                 const cref& v)const{
+    elog( "%s", member );
+    FC_THROW( bad_cast() );
+  }
+  void reinterpret_value_visitor<string>::visit( int idx, int size, const cref& v)const{
+    elog( "%d of %d", idx, size );
+    FC_THROW( bad_cast() );
+  }
+
+
+}
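Note: the conversion matrix above leans on two behaviors of boost::lexical_cast: any
streamable value formats to and from a string, and malformed input throws
boost::bad_lexical_cast instead of yielding a partial result. In isolation:

    #include <boost/lexical_cast.hpp>
    #include <cstdint>
    #include <cstdio>
    #include <string>

    int main() {
      int64_t     i = boost::lexical_cast<int64_t>( std::string("42") );
      std::string s = boost::lexical_cast<std::string>( 3.14 );
      std::printf( "%lld %s\n", (long long)i, s.c_str() );

      try {
        boost::lexical_cast<int>( std::string("not a number") );
      } catch( const boost::bad_lexical_cast& ) {
        std::puts( "throws rather than truncating" );
      }
      return 0;
    }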
diff --git a/src/vector.cpp b/src/vector.cpp
new file mode 100644
index 0000000..48b88e8
--- /dev/null
+++ b/src/vector.cpp
@@ -0,0 +1,186 @@
+#include <fc/vector.hpp>
+
+namespace fc {
+  struct vector_impl_d {
+    abstract_value_type& _vtbl;
+    char*                _data;
+    size_t               _size;
+    size_t               _capacity;
+
+    vector_impl_d( abstract_value_type& r, size_t size, size_t cap = 0 )
+    :_vtbl(r),_data(0),_size(size),_capacity(cap) {
+      if( _size > _capacity ) _capacity = _size;
+      if( _capacity ) {
+        const unsigned int so = _vtbl.size_of();
+        _data = new char[_capacity*so];
+        char* end = _data + _size*so;
+        for( char* idx = _data; idx < end; idx += so ) {
+          _vtbl.construct( idx );
+        }
+      }
+    }
+
+    vector_impl_d( const vector_impl_d& cpy )
+    :_vtbl(cpy._vtbl),_data(0),_size(cpy._size),_capacity(cpy._size) {
+      if( _size ) {
+        _data = new char[_size*_vtbl.size_of()];
+        copy_from( cpy._data, cpy._size );
+      }
+    }
+
+    void copy_from( char* src, int cnt ) {
+      const unsigned int so = _vtbl.size_of();
+      char* end = _data + cnt * so;
+      char* cpy_idx = src;
+      for( char* idx = _data; idx < end; idx += so ) {
+        _vtbl.copy_construct( idx, cpy_idx );
+        cpy_idx += so;
+      }
+    }
+    void move_from( char* src, int cnt ) {
+      const unsigned int so = _vtbl.size_of();
+      char* end = _data + cnt * so;
+      char* cpy_idx = src;
+      for( char* idx = _data; idx < end; idx += so ) {
+        _vtbl.move_construct( idx, cpy_idx );
+        cpy_idx += so;
+      }
+    }
+    void destruct( char* src, int cnt ) {
+      const unsigned int so = _vtbl.size_of();
+      char* end = src + cnt * so;
+      for( char* idx = src; idx < end; idx += so ) {
+        _vtbl.destructor( idx );
+      }
+    }
+
+    ~vector_impl_d() {
+      clear();
+      delete[] _data;
+    }
+
+    void clear() {
+      const unsigned int so = _vtbl.size_of();
+      char* end = _data + _size * so;
+      for( char* idx = _data; idx < end; idx += so ) {
+        _vtbl.destructor( idx );
+      }
+      _size = 0;
+    }
+  };
+
+  vector_impl::vector_impl( abstract_value_type& r, size_t s ) {
+    my = new vector_impl_d(r,s);
+  }
+
+  vector_impl::vector_impl( const vector_impl& cpy ) {
+    my = new vector_impl_d(*cpy.my);
+  }
+
+  vector_impl::vector_impl( vector_impl&& cpy ){
+    my = cpy.my;
+    cpy.my = 0;
+  }
+
+  vector_impl::~vector_impl() {
+    delete my;
+  }
+
+  vector_impl& vector_impl::operator=( const vector_impl& v ) {
+    clear();
+    reserve(v.size());
+    size_t s = v.size();
+    for( size_t i = 0; i < s; ++i ) {
+      _push_back( v._at(i) );
+    }
+    return *this;
+  }
+  vector_impl& vector_impl::operator=( vector_impl&& v ) {
+    fc::swap(my,v.my);
+    return *this;
+  }
+
+  void vector_impl::_push_back( const void* v ) {
+    reserve( my->_size + 1 );
+    my->_vtbl.copy_construct( _at(my->_size), v ); // construct past the current end
+    my->_size++;
+  }
+  void vector_impl::_push_back_m( void* v ) {
+    reserve( my->_size + 1 );
+    my->_vtbl.move_construct( _at(my->_size), v );
+    my->_size++;
+  }
+
+  void* vector_impl::_back() {
+    return my->_data + my->_vtbl.size_of() * (my->_size-1);
+  }
+  const void* vector_impl::_back()const {
+    return my->_data + my->_vtbl.size_of() * (my->_size-1);
+  }
+
+  void* vector_impl::_at(size_t p) {
+    return my->_data + my->_vtbl.size_of() * p;
+  }
+  const void* vector_impl::_at(size_t p)const {
+    return my->_data + my->_vtbl.size_of() * p;
+  }
+
+  void vector_impl::pop_back() {
+    my->_vtbl.destructor( _back() );
+    my->_size--;
+  }
+  void vector_impl::clear() {
+    my->clear();
+  }
+
+  size_t vector_impl::size()const {
+    return my->_size;
+  }
+
+  void vector_impl::reserve( size_t s ) {
+    if( s <= my->_capacity ) {
+      return;
+    }
+    char* new_data = new char[s*my->_vtbl.size_of()];
+    fc::swap(new_data,my->_data);
+    my->move_from(new_data,my->_size); // move the elements into the new buffer
+    my->destruct(new_data,my->_size);  // destroy the moved-from elements
+    delete[] new_data;
+    my->_capacity = s;
+  }
+  void vector_impl::resize( size_t s ) {
+    const unsigned int so = my->_vtbl.size_of();
+    if( s <= my->_size ) {
+      // shrinking: destroy the tail elements
+      for( size_t i = s; i < my->_size; ++i ) {
+        my->_vtbl.destructor( _at(i) );
+      }
+      my->_size = s;
+      return;
+    }
+    if( s > my->_capacity ) {
+      char* new_data = new char[s*so];
+      fc::swap(new_data,my->_data);
+      my->_capacity = s;
+      // move from old to new location
+      my->move_from(new_data,my->_size);
+      // destroy old location
+      my->destruct(new_data,my->_size);
+      delete[] new_data;
+    }
+
+    // default construct any left overs.
+    char* cur = my->_data + my->_size * so;
+    char* end = my->_data + s * so;
+    while( cur < end ) {
+      my->_vtbl.construct(cur);
+      cur += so;
+      my->_size++;
+    }
+  }
+
+  size_t vector_impl::capacity()const {
+    return my->_capacity;
+  }
+
+} // namespace fc
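Note: vector_impl is the non-template core shared by every fc::vector<T>: all element
operations go through abstract_value_type, a per-type vtable recording size_of plus the
construct/copy/move/destroy operations, so the growth and bookkeeping logic above is
compiled only once. A minimal sketch of how such a vtable can be generated per type
(names are illustrative, not the fc declarations):

    #include <cstddef>
    #include <new>
    #include <utility>

    struct value_vtable {
      std::size_t size;
      void (*construct)( void* dst );
      void (*copy_construct)( void* dst, const void* src );
      void (*move_construct)( void* dst, void* src );
      void (*destroy)( void* obj );

      // one static table per T; captureless lambdas decay to function pointers
      template<typename T>
      static const value_vtable& of() {
        static const value_vtable vt = {
          sizeof(T),
          []( void* d )                { new (d) T(); },
          []( void* d, const void* s ) { new (d) T( *static_cast<const T*>(s) ); },
          []( void* d, void* s )       { new (d) T( std::move(*static_cast<T*>(s)) ); },
          []( void* o )                { static_cast<T*>(o)->~T(); }
        };
        return vt;
      }
    };

    // a type-erased container stores &value_vtable::of<T>() next to a raw char
    // buffer, exactly as vector_impl_d pairs _vtbl with _data.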