From e2e50173312ff1034805552ebf839da1bbfa7ec7 Mon Sep 17 00:00:00 2001 From: Paul Hollinsky Date: Mon, 10 Sep 2018 20:28:29 -0400 Subject: [PATCH] Initial commit --- .gitignore | 7 + .vscode/c_cpp_properties.json | 43 + CMakeLists.txt | 69 + HARDWARE.md | 14 + api/icsneoc/icsneoc.cpp | 130 + api/icsneoc/include/icsneoc.h | 109 + api/icsneocpp/icsneocpp.cpp | 11 + api/icsneocpp/include/icsneocpp.h | 13 + communication/communication.cpp | 105 + communication/icommunication.cpp | 42 + communication/include/communication.h | 59 + communication/include/icommunication.h | 43 + communication/include/messagecallback.h | 46 + communication/include/messagedecoder.h | 258 + communication/include/messagefilter.h | 72 + .../include/multichannelcommunication.h | 102 + communication/include/network.h | 340 ++ communication/message/include/canmessage.h | 15 + communication/message/include/message.h | 18 + communication/messagedecoder.cpp | 139 + communication/multichannelcommunication.cpp | 128 + device/device.cpp | 156 + device/devicefinder.cpp | 34 + device/include/device.h | 75 + device/include/devicefinder.h | 17 + device/include/neodevice.h | 25 + device/neovifire/include/neovifire.h | 55 + device/neovifire2/include/neovifire2.h | 31 + device/plasion/include/neoviion.h | 30 + device/plasion/include/neoviplasma.h | 30 + device/plasion/include/plasion.h | 19 + device/radstar2/include/radstar2.h | 32 + device/radsupermoon/include/radsupermoon.h | 34 + device/valuecan3/include/valuecan3.h | 31 + device/valuecan4/include/valuecan4.h | 32 + device/vividcan/include/vividcan.h | 34 + platform/include/dynamiclib.h | 12 + platform/include/ftdi.h | 14 + platform/include/registry.h | 10 + platform/include/stm32.h | 14 + platform/linux/ftdi.cpp | 125 + platform/linux/include/dynamiclib.h | 19 + platform/linux/include/ftdi.h | 51 + platform/linux/include/stm32.h | 31 + platform/linux/stm32.cpp | 283 + platform/windows/include/dynamiclib.h | 19 + platform/windows/include/ftdi.h | 16 + platform/windows/include/registry.h | 33 + platform/windows/include/stm32.h | 16 + platform/windows/include/vcp.h | 48 + platform/windows/registry.cpp | 68 + platform/windows/vcp.cpp | 329 ++ third-party/concurrentqueue/.gitignore | 26 + third-party/concurrentqueue/LICENSE.md | 61 + third-party/concurrentqueue/README.md | 486 ++ .../concurrentqueue/blockingconcurrentqueue.h | 981 ++++ third-party/concurrentqueue/concurrentqueue.h | 3635 +++++++++++++ .../internal/concurrentqueue_internal_debug.h | 87 + third-party/concurrentqueue/samples.md | 375 ++ third-party/libftdi/.gitignore | 53 + third-party/libftdi/AUTHORS | 79 + third-party/libftdi/CMakeLists.txt | 244 + third-party/libftdi/COPYING-CMAKE-SCRIPTS | 22 + third-party/libftdi/COPYING.GPL | 339 ++ third-party/libftdi/COPYING.LIB | 481 ++ third-party/libftdi/ChangeLog | 251 + third-party/libftdi/FindUSB1.cmake | 38 + third-party/libftdi/LICENSE | 25 + third-party/libftdi/README | 52 + third-party/libftdi/README.build | 96 + third-party/libftdi/README.mingw | 38 + third-party/libftdi/TODO | 3 + third-party/libftdi/cmake/FindConfuse.cmake | 74 + third-party/libftdi/cmake/FindLibintl.cmake | 47 + third-party/libftdi/cmake/FindUSB1.cmake | 37 + .../libftdi/cmake/LibFTDI1Config.cmake.in | 53 + .../cmake/LibFTDI1ConfigVersion.cmake.in | 31 + .../cmake/Toolchain-Crossbuild32.cmake | 4 + .../cmake/Toolchain-i686-w64-mingw32.cmake | 17 + .../libftdi/cmake/Toolchain-mingw32.cmake | 16 + .../cmake/Toolchain-x86_64-w64-mingw32.cmake | 17 + third-party/libftdi/cmake/UseLibFTDI1.cmake | 18 + 
third-party/libftdi/doc/Doxyfile.in | 2393 +++++++++ third-party/libftdi/doc/Doxyfile.xml.in | 26 + third-party/libftdi/doc/EEPROM-structure | 110 + third-party/libftdi/doc/astyle_reformat.sh | 5 + third-party/libftdi/doc/release-checklist.txt | 29 + third-party/libftdi/examples/CMakeLists.txt | 55 + third-party/libftdi/examples/baud_test.c | 224 + third-party/libftdi/examples/bitbang.c | 84 + third-party/libftdi/examples/bitbang2.c | 89 + third-party/libftdi/examples/bitbang_cbus.c | 94 + .../bitbang_cbus_eeprom_for_windows.ept | 62 + third-party/libftdi/examples/bitbang_ft2232.c | 106 + .../examples/cmake_example/CMakeLists.txt | 13 + .../libftdi/examples/cmake_example/main.c | 24 + third-party/libftdi/examples/eeprom.c | 299 ++ third-party/libftdi/examples/find_all.c | 54 + third-party/libftdi/examples/find_all_pp.cpp | 72 + third-party/libftdi/examples/serial_test.c | 179 + third-party/libftdi/examples/simple.c | 53 + third-party/libftdi/examples/stream_test.c | 358 ++ .../libftdi/ftdi_eeprom/CMakeLists.txt | 55 + third-party/libftdi/ftdi_eeprom/example.conf | 59 + .../ftdi_eeprom/ftdi_eeprom_version.h.in | 8 + third-party/libftdi/ftdi_eeprom/main.c | 666 +++ third-party/libftdi/ftdipp/CMakeLists.txt | 68 + third-party/libftdi/ftdipp/ftdi.cpp | 675 +++ third-party/libftdi/ftdipp/ftdi.hpp | 221 + third-party/libftdi/libftdi-1.0.kdev4 | 3 + third-party/libftdi/libftdi.lnt | 28 + third-party/libftdi/libftdi1-config.in | 79 + third-party/libftdi/libftdi1.pc.in | 11 + third-party/libftdi/libftdi1.spec.in | 100 + third-party/libftdi/libftdipp1.pc.in | 11 + third-party/libftdi/packages/99-libftdi.rules | 14 + third-party/libftdi/packages/CMakeLists.txt | 21 + third-party/libftdi/python/CMakeLists.txt | 81 + third-party/libftdi/python/doxy2swig.py | 457 ++ .../libftdi/python/examples/CMakeLists.txt | 5 + third-party/libftdi/python/examples/cbus.py | 173 + .../libftdi/python/examples/complete.py | 121 + third-party/libftdi/python/examples/simple.py | 34 + third-party/libftdi/python/ftdi1.i | 170 + third-party/libftdi/src/CMakeLists.txt | 55 + third-party/libftdi/src/ftdi.c | 4602 +++++++++++++++++ third-party/libftdi/src/ftdi.h | 585 +++ third-party/libftdi/src/ftdi_i.h | 143 + third-party/libftdi/src/ftdi_stream.c | 300 ++ third-party/libftdi/src/ftdi_version_i.h.in | 11 + third-party/libftdi/test/CMakeLists.txt | 38 + third-party/libftdi/test/basic.cpp | 33 + third-party/libftdi/test/baudrate.cpp | 269 + 133 files changed, 24597 insertions(+) create mode 100644 .gitignore create mode 100644 .vscode/c_cpp_properties.json create mode 100644 CMakeLists.txt create mode 100644 HARDWARE.md create mode 100644 api/icsneoc/icsneoc.cpp create mode 100644 api/icsneoc/include/icsneoc.h create mode 100644 api/icsneocpp/icsneocpp.cpp create mode 100644 api/icsneocpp/include/icsneocpp.h create mode 100644 communication/communication.cpp create mode 100644 communication/icommunication.cpp create mode 100644 communication/include/communication.h create mode 100644 communication/include/icommunication.h create mode 100644 communication/include/messagecallback.h create mode 100644 communication/include/messagedecoder.h create mode 100644 communication/include/messagefilter.h create mode 100644 communication/include/multichannelcommunication.h create mode 100644 communication/include/network.h create mode 100644 communication/message/include/canmessage.h create mode 100644 communication/message/include/message.h create mode 100644 communication/messagedecoder.cpp create mode 100644 
communication/multichannelcommunication.cpp create mode 100644 device/device.cpp create mode 100644 device/devicefinder.cpp create mode 100644 device/include/device.h create mode 100644 device/include/devicefinder.h create mode 100644 device/include/neodevice.h create mode 100644 device/neovifire/include/neovifire.h create mode 100644 device/neovifire2/include/neovifire2.h create mode 100644 device/plasion/include/neoviion.h create mode 100644 device/plasion/include/neoviplasma.h create mode 100644 device/plasion/include/plasion.h create mode 100644 device/radstar2/include/radstar2.h create mode 100644 device/radsupermoon/include/radsupermoon.h create mode 100644 device/valuecan3/include/valuecan3.h create mode 100644 device/valuecan4/include/valuecan4.h create mode 100644 device/vividcan/include/vividcan.h create mode 100644 platform/include/dynamiclib.h create mode 100644 platform/include/ftdi.h create mode 100644 platform/include/registry.h create mode 100644 platform/include/stm32.h create mode 100644 platform/linux/ftdi.cpp create mode 100644 platform/linux/include/dynamiclib.h create mode 100644 platform/linux/include/ftdi.h create mode 100644 platform/linux/include/stm32.h create mode 100644 platform/linux/stm32.cpp create mode 100644 platform/windows/include/dynamiclib.h create mode 100644 platform/windows/include/ftdi.h create mode 100644 platform/windows/include/registry.h create mode 100644 platform/windows/include/stm32.h create mode 100644 platform/windows/include/vcp.h create mode 100644 platform/windows/registry.cpp create mode 100644 platform/windows/vcp.cpp create mode 100644 third-party/concurrentqueue/.gitignore create mode 100644 third-party/concurrentqueue/LICENSE.md create mode 100644 third-party/concurrentqueue/README.md create mode 100644 third-party/concurrentqueue/blockingconcurrentqueue.h create mode 100644 third-party/concurrentqueue/concurrentqueue.h create mode 100644 third-party/concurrentqueue/internal/concurrentqueue_internal_debug.h create mode 100644 third-party/concurrentqueue/samples.md create mode 100644 third-party/libftdi/.gitignore create mode 100644 third-party/libftdi/AUTHORS create mode 100644 third-party/libftdi/CMakeLists.txt create mode 100644 third-party/libftdi/COPYING-CMAKE-SCRIPTS create mode 100644 third-party/libftdi/COPYING.GPL create mode 100644 third-party/libftdi/COPYING.LIB create mode 100644 third-party/libftdi/ChangeLog create mode 100644 third-party/libftdi/FindUSB1.cmake create mode 100644 third-party/libftdi/LICENSE create mode 100644 third-party/libftdi/README create mode 100644 third-party/libftdi/README.build create mode 100644 third-party/libftdi/README.mingw create mode 100644 third-party/libftdi/TODO create mode 100644 third-party/libftdi/cmake/FindConfuse.cmake create mode 100644 third-party/libftdi/cmake/FindLibintl.cmake create mode 100644 third-party/libftdi/cmake/FindUSB1.cmake create mode 100644 third-party/libftdi/cmake/LibFTDI1Config.cmake.in create mode 100644 third-party/libftdi/cmake/LibFTDI1ConfigVersion.cmake.in create mode 100644 third-party/libftdi/cmake/Toolchain-Crossbuild32.cmake create mode 100644 third-party/libftdi/cmake/Toolchain-i686-w64-mingw32.cmake create mode 100644 third-party/libftdi/cmake/Toolchain-mingw32.cmake create mode 100644 third-party/libftdi/cmake/Toolchain-x86_64-w64-mingw32.cmake create mode 100644 third-party/libftdi/cmake/UseLibFTDI1.cmake create mode 100644 third-party/libftdi/doc/Doxyfile.in create mode 100644 third-party/libftdi/doc/Doxyfile.xml.in create mode 100644 
third-party/libftdi/doc/EEPROM-structure create mode 100644 third-party/libftdi/doc/astyle_reformat.sh create mode 100644 third-party/libftdi/doc/release-checklist.txt create mode 100644 third-party/libftdi/examples/CMakeLists.txt create mode 100644 third-party/libftdi/examples/baud_test.c create mode 100644 third-party/libftdi/examples/bitbang.c create mode 100644 third-party/libftdi/examples/bitbang2.c create mode 100644 third-party/libftdi/examples/bitbang_cbus.c create mode 100644 third-party/libftdi/examples/bitbang_cbus_eeprom_for_windows.ept create mode 100644 third-party/libftdi/examples/bitbang_ft2232.c create mode 100644 third-party/libftdi/examples/cmake_example/CMakeLists.txt create mode 100644 third-party/libftdi/examples/cmake_example/main.c create mode 100644 third-party/libftdi/examples/eeprom.c create mode 100644 third-party/libftdi/examples/find_all.c create mode 100644 third-party/libftdi/examples/find_all_pp.cpp create mode 100644 third-party/libftdi/examples/serial_test.c create mode 100644 third-party/libftdi/examples/simple.c create mode 100644 third-party/libftdi/examples/stream_test.c create mode 100644 third-party/libftdi/ftdi_eeprom/CMakeLists.txt create mode 100644 third-party/libftdi/ftdi_eeprom/example.conf create mode 100644 third-party/libftdi/ftdi_eeprom/ftdi_eeprom_version.h.in create mode 100644 third-party/libftdi/ftdi_eeprom/main.c create mode 100644 third-party/libftdi/ftdipp/CMakeLists.txt create mode 100644 third-party/libftdi/ftdipp/ftdi.cpp create mode 100644 third-party/libftdi/ftdipp/ftdi.hpp create mode 100644 third-party/libftdi/libftdi-1.0.kdev4 create mode 100644 third-party/libftdi/libftdi.lnt create mode 100644 third-party/libftdi/libftdi1-config.in create mode 100644 third-party/libftdi/libftdi1.pc.in create mode 100644 third-party/libftdi/libftdi1.spec.in create mode 100644 third-party/libftdi/libftdipp1.pc.in create mode 100644 third-party/libftdi/packages/99-libftdi.rules create mode 100644 third-party/libftdi/packages/CMakeLists.txt create mode 100644 third-party/libftdi/python/CMakeLists.txt create mode 100644 third-party/libftdi/python/doxy2swig.py create mode 100644 third-party/libftdi/python/examples/CMakeLists.txt create mode 100644 third-party/libftdi/python/examples/cbus.py create mode 100644 third-party/libftdi/python/examples/complete.py create mode 100644 third-party/libftdi/python/examples/simple.py create mode 100644 third-party/libftdi/python/ftdi1.i create mode 100644 third-party/libftdi/src/CMakeLists.txt create mode 100644 third-party/libftdi/src/ftdi.c create mode 100644 third-party/libftdi/src/ftdi.h create mode 100644 third-party/libftdi/src/ftdi_i.h create mode 100644 third-party/libftdi/src/ftdi_stream.c create mode 100644 third-party/libftdi/src/ftdi_version_i.h.in create mode 100644 third-party/libftdi/test/CMakeLists.txt create mode 100644 third-party/libftdi/test/basic.cpp create mode 100644 third-party/libftdi/test/baudrate.cpp diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..fc13cc1 --- /dev/null +++ b/.gitignore @@ -0,0 +1,7 @@ +build/ +build*/ +.DS_Store +Thumbs.db +.vscode/settings.json +third-party/concurrentqueue/benchmarks +third-party/concurrentqueue/tests \ No newline at end of file diff --git a/.vscode/c_cpp_properties.json b/.vscode/c_cpp_properties.json new file mode 100644 index 0000000..3f24bd1 --- /dev/null +++ b/.vscode/c_cpp_properties.json @@ -0,0 +1,43 @@ +{ + "configurations": [ + { + "name": "Win32", + "includePath": [ + ], + "defines": [ + "_WIN32", + "__WIN32", + 
"_DEBUG", + "UNICODE", + "_UNICODE" + ], + "windowsSdkVersion": "10.0.17134.0", + "compilerPath": "C:/Program Files (x86)/Microsoft Visual Studio/2017/Community/VC/Tools/MSVC/14.14.26428/bin/Hostx64/x64/cl.exe", + "cStandard": "c11", + "cppStandard": "c++17", + "intelliSenseMode": "msvc-x64", + "configurationProvider": "vector-of-bool.cmake-tools", + "compileCommands": "${workspaceFolder}/build/compile_commands.json" + }, + { + "name": "Linux", + "includePath": [ + "/usr/include/**", + "/usr/local/include/**", + "${workspaceRoot}/**" + ], + "defines": [ + "_DEBUG", + "UNICODE", + "_UNICODE" + ], + "compilerPath": "/usr/bin/gcc", + "cStandard": "c11", + "cppStandard": "c++17", + "intelliSenseMode": "gcc-x64", + "configurationProvider": "vector-of-bool.cmake-tools", + "compileCommands": "${workspaceFolder}/build/compile_commands.json" + } + ], + "version": 4 +} \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..ad344ab --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,69 @@ +cmake_minimum_required(VERSION 3.2) +project(icsneonext VERSION 0.1.0) + +include(GNUInstallDirs) + +include_directories(${CMAKE_SOURCE_DIR}) + +# Enable Warnings +if(MSVC) + # Force to always compile with W4 + if(CMAKE_CXX_FLAGS MATCHES "/W[0-4]") + string(REGEX REPLACE "/W[0-4]" "/W4" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + else() + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4") + endif() +else() #if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wno-nested-anon-types -Wno-gnu-anonymous-struct -Wno-unknown-pragmas -Wno-zero-length-array -pedantic") +endif() + +# libftdi +if(NOT WIN32) + include_directories(${CMAKE_SOURCE_DIR} third-party/libftdi/src third-party/libftdi/ftdipp) + add_subdirectory(third-party/libftdi) +endif(NOT WIN32) + +if(WIN32) + file(GLOB PLATFORM_SRC ${CMAKE_SOURCE_DIR}/platform/windows/*.cpp) +else() + file(GLOB PLATFORM_SRC ${CMAKE_SOURCE_DIR}/platform/linux/*.cpp) +endif() + +set(COMMON_SRC + communication/messagedecoder.cpp + communication/multichannelcommunication.cpp + communication/communication.cpp + communication/icommunication.cpp + device/devicefinder.cpp + device/device.cpp +) + +set(SRC_FILES ${COMMON_SRC} ${PLATFORM_SRC}) + +add_library(icsneocpp + api/icsneocpp/icsneocpp.cpp + ${SRC_FILES} +) + +add_library(icsneoc SHARED + api/icsneoc/icsneoc.cpp + api/icsneocpp/icsneocpp.cpp + ${SRC_FILES} +) + +# libftdi +if(NOT WIN32) + find_package(Threads) + set_property(TARGET ftdi1-static PROPERTY POSITION_INDEPENDENT_CODE ON) + set_property(TARGET ftdipp1-static PROPERTY POSITION_INDEPENDENT_CODE ON) + target_link_libraries(icsneocpp ftdipp1-static) + target_link_libraries(icsneoc ftdipp1-static) + target_link_libraries(icsneocpp ftdi1-static) + target_link_libraries(icsneoc ftdi1-static) + target_link_libraries(icsneocpp ${CMAKE_THREAD_LIBS_INIT}) + target_link_libraries(icsneoc ${CMAKE_THREAD_LIBS_INIT}) +endif() + +set(CPACK_PROJECT_NAME ${PROJECT_NAME}) +set(CPACK_PROJECT_VERSION ${PROJECT_VERSION}) +include(CPack) diff --git a/HARDWARE.md b/HARDWARE.md new file mode 100644 index 0000000..56669fd --- /dev/null +++ b/HARDWARE.md @@ -0,0 +1,14 @@ +Hardware +========= + +STM32 devices + ValueCAN 4 + CAN 2.0 works + +FTDI devices + neoVI FIRE + HSCAN works + ValueCAN 3 + CAN works + RADStar 2 + CAN works \ No newline at end of file diff --git a/api/icsneoc/icsneoc.cpp b/api/icsneoc/icsneoc.cpp new file mode 100644 index 0000000..b2b0bea --- /dev/null +++ b/api/icsneoc/icsneoc.cpp @@ 
-0,0 +1,130 @@ +#ifndef __cplusplus +#error "icsneoc.cpp must be compiled with a C++ compiler!" +#endif + +#define ICSNEOC_MAKEDLL + +#include "include/icsneoc.h" +#include "api/icsneocpp/include/icsneocpp.h" +#include "platform/include/dynamiclib.h" +#include +#include +#include +#include +#include + +using namespace icsneo; + +// Holds references for the shared_ptrs so they do not get freed until we're ready +static std::vector> connectableFoundDevices, connectedDevices; + +// Any shared_ptrs we've let go should be placed here so they're not accessed +static std::vector freedDevices; + +void icsneoFindAllDevices(neodevice_t* devices, size_t* count) { + icsneoFreeUnconnectedDevices(); // Mark previous results as freed so they can no longer be connected to + auto foundDevices = icsneo::FindAllDevices(); + + auto inputSize = *count; + *count = foundDevices.size(); + auto outputSize = *count; + if(outputSize > inputSize) { + // TODO an error should be returned that the data was truncated + outputSize = inputSize; + } + + for(size_t i = 0; i < outputSize; i++) { + connectableFoundDevices.push_back(foundDevices[i]); + devices[i] = foundDevices[i]->getNeoDevice(); + } +} + +void icsneoFreeUnconnectedDevices() { + for(auto& devptr : connectableFoundDevices) { + freedDevices.push_back(devptr.get()); + } + connectableFoundDevices.clear(); +} + +bool icsneoSerialNumToString(uint32_t num, char* str, size_t* count) { + auto result = Device::SerialNumToString(num); + if(*count <= result.length()) { + *count = result.length() + 1; // This is how big of a buffer we need + return false; + } + strcpy(str, result.c_str()); // TODO bad + *count = result.length(); + return true; +} + +uint32_t icsneoSerialStringToNum(const char* str) { + return Device::SerialStringToNum(str); +} + +bool icsneoIsValidNeoDevice(const neodevice_t* device) { + // If this neodevice_t was returned by a previous search, it will no longer be valid (as the underlying icsneo::Device is freed) + return std::find(freedDevices.begin(), freedDevices.end(), device->device) == freedDevices.end(); +} + +bool icsneoOpenDevice(const neodevice_t* device) { + if(!icsneoIsValidNeoDevice(device)) + return false; + + if(!device->device->open()) + return false; + + // We connected successfully, move the device to connected devices + std::vector>::iterator> itemsToMove; + for(auto it = connectableFoundDevices.begin(); it < connectableFoundDevices.end(); it++) { + if((*it).get() == device->device) + itemsToMove.push_back(it); + } + for(auto it : itemsToMove) { + connectedDevices.push_back(*it); + connectableFoundDevices.erase(it); + } + + return true; +} + +bool icsneoCloseDevice(const neodevice_t* device) { + if(!icsneoIsValidNeoDevice(device)) + return false; + + if(!device->device->close()) + return false; + + // We disconnected successfully, free the device and mark it as freed + std::vector>::iterator> itemsToDelete; + for(auto it = connectedDevices.begin(); it < connectedDevices.end(); it++) { + if((*it).get() == device->device) + itemsToDelete.push_back(it); + } + for(auto it : itemsToDelete) + connectedDevices.erase(it); + + freedDevices.push_back(device->device); + + return true; +} + +bool icsneoGoOnline(const neodevice_t* device) { + if(!icsneoIsValidNeoDevice(device)) + return false; + + return device->device->goOnline(); +} + +bool icsneoGoOffline(const neodevice_t* device) { + if(!icsneoIsValidNeoDevice(device)) + return false; + + return device->device->goOffline(); +} + +bool icsneoIsOnline(const neodevice_t* device) { + 
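
To ground the C wrapper above, here is a minimal usage sketch. It assumes the application links the icsneoc library directly (the non-ICSNEOC_DYNAMICLOAD path) and uses repository-rooted include paths the way the rest of this patch does; the count handling mirrors the truncation behavior noted in the TODO above.

// Hedged sketch: exercises the C API implemented in icsneoc.cpp above.
// Assumes direct linking against icsneoc and repo-rooted include paths.
#include "api/icsneoc/include/icsneoc.h"
#include <cstddef>
#include <iostream>

int main() {
	neodevice_t devices[10];
	size_t count = 10; // in: size of the buffer above
	icsneoFindAllDevices(devices, &count);
	// Note: count now holds the total number found, which can exceed the buffer size
	if(count > 10)
		count = 10;
	std::cout << "Found " << count << " device(s)" << std::endl;

	for(size_t i = 0; i < count; i++) {
		if(!icsneoOpenDevice(&devices[i]))
			continue; // could not open this device, move on

		if(icsneoGoOnline(&devices[i]) && icsneoIsOnline(&devices[i]))
			std::cout << "Device " << i << " is online" << std::endl;

		icsneoGoOffline(&devices[i]);
		icsneoCloseDevice(&devices[i]);
	}
	return 0;
}
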
if(!icsneoIsValidNeoDevice(device)) + return false; + + return device->device->isOnline(); +} \ No newline at end of file diff --git a/api/icsneoc/include/icsneoc.h b/api/icsneoc/include/icsneoc.h new file mode 100644 index 0000000..a734a46 --- /dev/null +++ b/api/icsneoc/include/icsneoc.h @@ -0,0 +1,109 @@ +#ifndef __ICSNEOC_H_ +#define __ICSNEOC_H_ + +#include +#include "device/include/neodevice.h" // For neodevice_t +#include "platform/include/dynamiclib.h" // Dynamic library loading and exporting + +#ifndef ICSNEOC_DYNAMICLOAD + +#ifdef __cplusplus +extern "C" { +#endif + +extern void DLLExport icsneoFindAllDevices(neodevice_t* devices, size_t* count); + +extern void DLLExport icsneoFreeUnconnectedDevices(); + +extern bool DLLExport icsneoSerialNumToString(uint32_t num, char* str, size_t* count); + +extern uint32_t DLLExport icsneoSerialStringToNum(const char* str); + +extern bool DLLExport icsneoIsValidNeoDevice(const neodevice_t* device); + +extern bool DLLExport icsneoOpenDevice(const neodevice_t* device); + +extern bool DLLExport icsneoCloseDevice(const neodevice_t* device); + +extern bool DLLExport icsneoGoOnline(const neodevice_t* device); + +extern bool DLLExport icsneoGoOffline(const neodevice_t* device); + +extern bool DLLExport icsneoIsOnline(const neodevice_t* device); + +#ifdef __cplusplus +} +#endif + +#else // ICSNEOC_DYNAMICLOAD + +typedef void(*fn_icsneoFindAllDevices)(neodevice_t* devices, size_t* count); +fn_icsneoFindAllDevices icsneoFindAllDevices; + +typedef void(*fn_icsneoFreeUnconnectedDevices)(); +fn_icsneoFreeUnconnectedDevices icsneoFreeUnconnectedDevices; + +typedef bool(*fn_icsneoSerialNumToString)(uint32_t num, char* str, size_t* count); +fn_icsneoSerialNumToString icsneoSerialNumToString; + +typedef uint32_t(*fn_icsneoSerialStringToNum)(const char* str); +fn_icsneoSerialStringToNum icsneoSerialStringToNum; + +typedef bool(*fn_icsneoIsValidNeoDevice)(const neodevice_t* device); +fn_icsneoIsValidNeoDevice icsneoIsValidNeoDevice; + +typedef bool(*fn_icsneoOpenDevice)(const neodevice_t* device); +fn_icsneoOpenDevice icsneoOpenDevice; + +typedef bool(*fn_icsneoCloseDevice)(const neodevice_t* device); +fn_icsneoCloseDevice icsneoCloseDevice; + +typedef bool(*fn_icsneoGoOnline)(const neodevice_t* device); +fn_icsneoGoOnline icsneoGoOnline; + +typedef bool(*fn_icsneoGoOffline)(const neodevice_t* device); +fn_icsneoGoOffline icsneoGoOffline; + +typedef bool(*fn_icsneoIsOnline)(const neodevice_t* device); +fn_icsneoIsOnline icsneoIsOnline; + +#define ICSNEO_IMPORT(func) func = (fn_##func)icsneoDynamicLibraryGetFunction(icsneoLibraryHandle, #func) +#define ICSNEO_IMPORTASSERT(func) if((ICSNEO_IMPORT(func)) == NULL) return 3 +void* icsneoLibraryHandle = NULL; +bool icsneoInitialized = false; +bool icsneoDestroyed = false; +int icsneoInit() { + icsneoDestroyed = false; + if(icsneoInitialized) + return 1; + + icsneoLibraryHandle = icsneoDynamicLibraryLoad(); + if(icsneoLibraryHandle == NULL) + return 2; + + ICSNEO_IMPORTASSERT(icsneoFindAllDevices); + ICSNEO_IMPORTASSERT(icsneoFreeUnconnectedDevices); + ICSNEO_IMPORTASSERT(icsneoSerialNumToString); + ICSNEO_IMPORTASSERT(icsneoSerialStringToNum); + ICSNEO_IMPORTASSERT(icsneoIsValidNeoDevice); + ICSNEO_IMPORTASSERT(icsneoOpenDevice); + ICSNEO_IMPORTASSERT(icsneoCloseDevice); + ICSNEO_IMPORTASSERT(icsneoGoOnline); + ICSNEO_IMPORTASSERT(icsneoGoOffline); + ICSNEO_IMPORTASSERT(icsneoIsOnline); + + icsneoInitialized = true; + return 0; +} + +bool icsneoClose() ICSNEO_DESTRUCTOR { + icsneoInitialized = false; + 
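
The alternative ICSNEOC_DYNAMICLOAD path declared in this header resolves the same functions at runtime; below is a minimal sketch of that flow, assuming the icsneoc shared library is discoverable by icsneoDynamicLibraryLoad() on the target platform.

// Hedged sketch of the ICSNEOC_DYNAMICLOAD path declared in icsneoc.h above.
// Per icsneoInit() above: 0 = success, 1 = already initialized,
// 2 = shared library could not be loaded, 3 = a function import failed.
#define ICSNEOC_DYNAMICLOAD
#include "api/icsneoc/include/icsneoc.h"
#include <cstddef>
#include <iostream>

int main() {
	int err = icsneoInit();
	if(err != 0) {
		std::cerr << "icsneoInit failed with code " << err << std::endl;
		return err;
	}

	neodevice_t devices[10];
	size_t count = 10;
	icsneoFindAllDevices(devices, &count); // now a loaded function pointer
	std::cout << "Found " << count << " device(s)" << std::endl;

	icsneoClose(); // unload the library when finished
	return 0;
}
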
if(icsneoDestroyed) + return true; + + return icsneoDestroyed = icsneoDynamicLibraryClose(icsneoLibraryHandle); +} + +#endif // ICSNEOC_DYNAMICLOAD + +#endif // __ICSNEOC_H_ \ No newline at end of file diff --git a/api/icsneocpp/icsneocpp.cpp b/api/icsneocpp/icsneocpp.cpp new file mode 100644 index 0000000..fa950b5 --- /dev/null +++ b/api/icsneocpp/icsneocpp.cpp @@ -0,0 +1,11 @@ +#include +#include + +#include "include/icsneocpp.h" +#include "device/include/devicefinder.h" + +using namespace icsneo; + +std::vector> icsneo::FindAllDevices() { + return DeviceFinder::FindAll(); +} \ No newline at end of file diff --git a/api/icsneocpp/include/icsneocpp.h b/api/icsneocpp/include/icsneocpp.h new file mode 100644 index 0000000..3755dbf --- /dev/null +++ b/api/icsneocpp/include/icsneocpp.h @@ -0,0 +1,13 @@ +#ifndef __ICSNEOCPP_H_ +#define __ICSNEOCPP_H_ + +#include +#include + +#include "device/include/device.h" + +namespace icsneo { + std::vector> FindAllDevices(); +}; + +#endif \ No newline at end of file diff --git a/communication/communication.cpp b/communication/communication.cpp new file mode 100644 index 0000000..96c2e71 --- /dev/null +++ b/communication/communication.cpp @@ -0,0 +1,105 @@ +#include "communication/include/communication.h" +#include +#include +#include +#include +#include +#include "communication/include/messagedecoder.h" + +using namespace icsneo; + +int Communication::messageCallbackIDCounter = 1; + +uint8_t Communication::ICSChecksum(const std::vector& data) { + uint32_t checksum = 0; + for(auto i = 0; i < data.size(); i++) + checksum += data[i]; + checksum = ~checksum; + checksum++; + return (uint8_t)checksum; +} + +std::vector& Communication::packetWrap(std::vector& data, bool addChecksum) { + if(addChecksum) + data.push_back(ICSChecksum(data)); + data.insert(data.begin(), 0xAA); + if(align16bit && data.size() % 2 == 1) + data.push_back('A'); + return data; +} + +bool Communication::open() { + if(isOpen) + return true; + + spawnThreads(); + isOpen = true; + return impl->open(); +} + +void Communication::spawnThreads() { + readTaskThread = std::thread(&Communication::readTask, this); +} + +void Communication::joinThreads() { + if(readTaskThread.joinable()) + readTaskThread.join(); +} + +bool Communication::close() { + if(!isOpen) + return false; + + isOpen = false; + closing = true; + joinThreads(); + + return impl->close(); +} + +bool Communication::sendPacket(std::vector& bytes) { + return impl->write(Communication::packetWrap(bytes)); +} + +bool Communication::sendCommand(Communication::Command cmd, std::vector arguments) { + std::vector bytes; + bytes.push_back((uint8_t)cmd); + for(auto& b : arguments) + bytes.push_back(b); + bytes.insert(bytes.begin(), 0xB | ((uint8_t)bytes.size() << 4)); + return sendPacket(bytes); +} + +int Communication::addMessageCallback(const MessageCallback& cb) { + messageCallbacks.insert(std::make_pair(messageCallbackIDCounter, cb)); + return messageCallbackIDCounter++; +} + +bool Communication::removeMessageCallback(int id) { + try { + messageCallbacks.erase(id); + return true; + } catch(...) 
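
For comparison, the same discovery and connection flow through the C++ API exposed by icsneocpp.h; Device::open(), goOnline(), and close() are assumed here from their use in the C wrapper earlier in this patch rather than from device.h itself.

// Hedged sketch: the find/open/online flow via the C++ API above.
#include "api/icsneocpp/include/icsneocpp.h"
#include <iostream>
#include <memory>
#include <vector>

int main() {
	std::vector<std::shared_ptr<icsneo::Device>> devices = icsneo::FindAllDevices();
	std::cout << "Found " << devices.size() << " device(s)" << std::endl;

	for(auto& device : devices) {
		if(!device->open())
			continue; // could not open, try the next one
		if(device->goOnline())
			std::cout << "Device is online" << std::endl;
		device->close();
	}
	return 0;
}
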
{ + return false; + } +} + +void Communication::readTask() { + std::vector readBytes; + MessageDecoder decoder; + + while(!closing) { + readBytes.clear(); + if(impl->readWait(readBytes)) { + if(decoder.input(readBytes)) { + for(auto& msg : decoder.output()) { + for(auto& cb : messageCallbacks) { // We might have closed while reading or processing + if(!closing) { + cb.second.callIfMatch(msg); + } + } + } + } + } + } +} diff --git a/communication/icommunication.cpp b/communication/icommunication.cpp new file mode 100644 index 0000000..146cdf5 --- /dev/null +++ b/communication/icommunication.cpp @@ -0,0 +1,42 @@ +#include "communication/include/icommunication.h" + +using namespace icsneo; + +bool ICommunication::read(std::vector& bytes, size_t limit) { + // A limit of zero indicates no limit + if(limit == 0) + limit = (size_t)-1; + + if(limit > (readQueue.size_approx() + 4)) + limit = (readQueue.size_approx() + 4); + + if(bytes.capacity() < limit) + bytes.resize(limit); + + size_t actuallyRead = readQueue.try_dequeue_bulk(bytes.data(), limit); + + bytes.resize(actuallyRead); + + return true; +} + +bool ICommunication::readWait(std::vector& bytes, std::chrono::milliseconds timeout, size_t limit) { + // A limit of zero indicates no limit + if(limit == 0) + limit = (size_t)-1; + + if(limit > (readQueue.size_approx() + 4)) + limit = (readQueue.size_approx() + 4); + + bytes.resize(limit); + + size_t actuallyRead = readQueue.wait_dequeue_bulk_timed(bytes.data(), limit, timeout); + + bytes.resize(actuallyRead); + + return actuallyRead > 0; +} + +bool ICommunication::write(const std::vector& bytes) { + return writeQueue.enqueue(WriteOperation(bytes)); +} \ No newline at end of file diff --git a/communication/include/communication.h b/communication/include/communication.h new file mode 100644 index 0000000..bb5643d --- /dev/null +++ b/communication/include/communication.h @@ -0,0 +1,59 @@ +#ifndef __COMMUNICATION_H_ +#define __COMMUNICATION_H_ + +#include "communication/include/icommunication.h" +#include "communication/include/network.h" +#include "communication/include/messagecallback.h" +#include +#include +#include +#include +#include +#include + +namespace icsneo { + +class Communication { +public: + static uint8_t ICSChecksum(const std::vector& data); + + Communication(std::shared_ptr com) : impl(com) {} + virtual ~Communication() { close(); } + + bool open(); + bool close(); + virtual void spawnThreads(); + virtual void joinThreads(); + bool rawWrite(const std::vector& bytes) { return impl->write(bytes); } + std::vector& packetWrap(std::vector& data, bool addChecksum = true); + bool sendPacket(std::vector& bytes); + + enum class Command : uint8_t { + EnableNetworkCommunication = 0x07, + RequestSerialNumber = 0xA1 + }; + virtual bool sendCommand(Command cmd, bool boolean) { return sendCommand(cmd, std::vector({ (uint8_t)boolean })); } + virtual bool sendCommand(Command cmd, std::vector arguments = {}); + + int addMessageCallback(const MessageCallback& cb); + bool removeMessageCallback(int id); + + void setAlign16Bit(bool enable) { align16bit = enable; } + +protected: + std::shared_ptr impl; + static int messageCallbackIDCounter; + std::map messageCallbacks; + std::atomic closing{false}; + +private: + bool isOpen = false; + bool align16bit = true; // Not needed for Gigalog, Galaxy, etc and newer + + std::thread readTaskThread; + void readTask(); +}; + +}; + +#endif \ No newline at end of file diff --git a/communication/include/icommunication.h b/communication/include/icommunication.h new file 
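
To make the framing in Communication concrete, here is a standalone sketch that reproduces what sendCommand() followed by packetWrap() would emit for a RequestSerialNumber command. The checksum() helper is local to the sketch, and the byte values in the comments follow from the code above, not from captured device traffic.

// Hedged sketch: standalone reconstruction of the short-packet framing done by
// Communication::sendCommand() and Communication::packetWrap() above.
#include <cstdint>
#include <cstdio>
#include <vector>

// Two's-complement checksum, as in Communication::ICSChecksum()
static uint8_t checksum(const std::vector<uint8_t>& data) {
	uint32_t sum = 0;
	for(uint8_t b : data)
		sum += b;
	return (uint8_t)(~sum + 1);
}

int main() {
	// sendCommand(Command::RequestSerialNumber) builds { header, 0xA1 }, where the
	// header byte has 0xB in the low nibble and the byte count in the high nibble.
	std::vector<uint8_t> packet = { 0xA1 }; // command byte
	packet.insert(packet.begin(), (uint8_t)(0xB | (packet.size() << 4))); // -> 0x1B

	// packetWrap(): append checksum, then prepend the 0xAA start-of-packet marker
	packet.push_back(checksum(packet)); // ~(0x1B + 0xA1) + 1 = 0x44
	packet.insert(packet.begin(), 0xAA);
	if(packet.size() % 2 == 1)
		packet.push_back('A'); // 16-bit alignment padding (not needed here)

	for(uint8_t b : packet)
		printf("%02X ", (unsigned)b); // expected: AA 1B A1 44
	printf("\n");
	return 0;
}
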
mode 100644 index 0000000..5b1c06b --- /dev/null +++ b/communication/include/icommunication.h @@ -0,0 +1,43 @@ +#ifndef __ICOMMUNICATION_H_ +#define __ICOMMUNICATION_H_ + +#include +#include +#include +#include +#include "third-party/concurrentqueue/blockingconcurrentqueue.h" + +namespace icsneo { + +class ICommunication { +public: + virtual ~ICommunication() {} + virtual bool open() = 0; + virtual bool isOpen() = 0; + virtual bool close() = 0; + virtual bool read(std::vector& bytes, size_t limit = 0); + virtual bool readWait(std::vector& bytes, std::chrono::milliseconds timeout = std::chrono::milliseconds(100), size_t limit = 0); + virtual bool write(const std::vector& bytes); + +protected: + class WriteOperation { + public: + WriteOperation() {} + WriteOperation(std::vector b) { bytes = b; } + std::vector bytes; + }; + enum IOTaskState { + LAUNCH, + WAIT + }; + virtual void readTask() = 0; + virtual void writeTask() = 0; + moodycamel::BlockingConcurrentQueue readQueue; + moodycamel::BlockingConcurrentQueue writeQueue; + std::thread readThread, writeThread; + std::atomic closing{false}; +}; + +}; + +#endif \ No newline at end of file diff --git a/communication/include/messagecallback.h b/communication/include/messagecallback.h new file mode 100644 index 0000000..20ce11c --- /dev/null +++ b/communication/include/messagecallback.h @@ -0,0 +1,46 @@ +#ifndef __MESSAGECALLBACK_H_ +#define __MESSAGECALLBACK_H_ + +#include "communication/message/include/message.h" +#include "communication/include/messagefilter.h" +#include +#include +#include + +namespace icsneo { + +class MessageCallback { +public: + typedef std::function< void( std::shared_ptr ) > fn_messageCallback; + + MessageCallback(fn_messageCallback cb, std::shared_ptr f) : callback(cb), filter(f) {} + MessageCallback(fn_messageCallback cb, MessageFilter f = MessageFilter()) : callback(cb), filter(std::make_shared(f)) {} + + // Allow the filter to be placed first if the user wants (maybe in the case of a lambda) + MessageCallback(MessageFilter f, fn_messageCallback cb) { MessageCallback(cb, f); } + + virtual bool callIfMatch(const std::shared_ptr& message) const { + bool ret = filter->match(message); + if(ret) + callback(message); + return ret; + } + const MessageFilter& getFilter() const { return *filter; } + const fn_messageCallback& getCallback() const { return callback; } + +protected: + fn_messageCallback callback; + std::shared_ptr filter; +}; + +class CANMessageCallback : public MessageCallback { +public: + CANMessageCallback(fn_messageCallback cb, CANMessageFilter f = CANMessageFilter()) : MessageCallback(cb, std::make_shared(f)) {} + + // Allow the filter to be placed first if the user wants (maybe in the case of a lambda) + CANMessageCallback(CANMessageFilter f, fn_messageCallback cb) : MessageCallback(cb, std::make_shared(f)) {} +}; + +}; + +#endif \ No newline at end of file diff --git a/communication/include/messagedecoder.h b/communication/include/messagedecoder.h new file mode 100644 index 0000000..53f596f --- /dev/null +++ b/communication/include/messagedecoder.h @@ -0,0 +1,258 @@ +#ifndef __MESSAGEDECODER_H_ +#define __MESSAGEDECODER_H_ + +#include "communication/message/include/message.h" +#include "communication/message/include/canmessage.h" +#include "communication/include/network.h" +#include +#include +#include + +namespace icsneo { + +class MessageDecoder { +public: + bool input(const std::vector& bytes); + std::vector> output(); + +private: + enum class ReadState { + SearchForHeader, + ParseHeader, + 
ParseLongStylePacketHeader, + GetData + }; + ReadState state = ReadState::SearchForHeader; + + int currentIndex = 0; + int messageLength = 0; + int headerSize = 0; + bool checksum = false; + bool gotGoodMessages = false; // Tracks whether we've ever gotten a good message + Message message; + std::deque bytes; + + void processMessage(const Message& message); + + std::vector> processedMessages; + + typedef uint16_t icscm_bitfield; + struct CoreMiniMsg { + CANMessage toCANMessage(Network netid); + union { + uint16_t CxTRB0SID16; + struct + { + icscm_bitfield IDE : 1; + icscm_bitfield SRR : 1; + icscm_bitfield SID : 11; + icscm_bitfield NETWORKINDEX : 3;//DO NOT CLOBBER THIS + } CxTRB0SID; + struct + { + icscm_bitfield : 13; + icscm_bitfield EDL : 1; + icscm_bitfield BRS : 1; + icscm_bitfield ESI : 1; + } CxTRB0FD; + struct + { + icscm_bitfield ErrRxOnlyBreak : 1; + icscm_bitfield ErrRxOnlyBreakSync : 1; + icscm_bitfield ID : 11; + icscm_bitfield NETWORKINDEX : 3;//DO NOT CLOBBER THIS + } CxLIN3; + struct + { + uint8_t D8; + uint8_t options : 4; + uint8_t TXMSG : 1; + uint8_t NETWORKINDEX : 3;//DO NOT CLOBBER THIS + } C1xJ1850; + struct + { + uint8_t D8; + uint8_t options : 4; + uint8_t TXMSG : 1; + uint8_t NETWORKINDEX : 3;//DO NOT CLOBBER THIS + } C1xISO; + struct + { + uint8_t D8; + uint8_t options : 4; + uint8_t TXMSG : 1; + uint8_t NETWORKINDEX : 3;//DO NOT CLOBBER THIS + } C1xJ1708; + struct + { + icscm_bitfield FCS_AVAIL : 1; + icscm_bitfield RUNT_FRAME : 1; + icscm_bitfield DISABLE_PADDING : 1; + icscm_bitfield PREEMPTION_ENABLED : 1; + icscm_bitfield MPACKET_TYPE : 4; + icscm_bitfield MPACKET_FRAG_CNT : 2; + icscm_bitfield : 6; + } C1xETH; + struct + { + uint16_t ID : 11; + uint16_t STARTUP : 1; + uint16_t SYNC : 1; + uint16_t NULL_FRAME : 1; + uint16_t PAYLOAD_PREAMBLE : 1; + uint16_t RESERVED_0 : 1; + } C1xFlex; + struct + { + uint8_t daqType; + uint8_t ethDaqRes1; + } C1xETHDAQ; + }; + union { + uint16_t CxTRB0EID16; + struct + { + icscm_bitfield EID : 12; + icscm_bitfield TXMSG : 1; + icscm_bitfield TXAborted : 1; + icscm_bitfield TXLostArb : 1; + icscm_bitfield TXError : 1; + } CxTRB0EID; + struct + { + uint8_t LINByte9; + uint8_t ErrTxRxMismatch : 1; + uint8_t TxChkSumEnhanced : 1; + uint8_t TXMaster : 1; + uint8_t TXSlave : 1; + uint8_t ErrRxBreakNot0 : 1; + uint8_t ErrRxBreakTooShort : 1; + uint8_t ErrRxSyncNot55 : 1; + uint8_t ErrRxDataGreater8 : 1; + } CxLIN; + struct + { + uint8_t D9; + uint8_t D10; + } C2xJ1850; + struct + { + uint8_t D9; + uint8_t D10; + } C2xISO; + struct + { + uint8_t D9; + uint8_t D10; + } C2xJ1708; + struct + { + uint16_t txlen : 12; + uint16_t TXMSG : 1; + uint16_t : 3; + } C2xETH; + struct + { + uint16_t HDR_CRC_10 : 1; + uint16_t PAYLOAD_LEN : 7; + uint16_t RESERVED_1 : 4; + uint16_t TXMSG : 1; + uint16_t RESERVED_2 : 3; + } C2xFlex; + }; + union { + // For use by CAN + uint16_t CxTRB0DLC16; + struct + { + icscm_bitfield DLC : 4; + icscm_bitfield RB0 : 1; + icscm_bitfield IVRIF : 1; + icscm_bitfield HVEnable : 1;// must be cleared before passing into CAN driver + icscm_bitfield ExtendedNetworkIndexBit : 1;//DO NOT CLOBBER THIS + icscm_bitfield RB1 : 1; + icscm_bitfield RTR : 1; + icscm_bitfield EID : 6; + } CxTRB0DLC; + struct + { + icscm_bitfield len : 4; + icscm_bitfield ExtendedNetworkIndexBit2 : 1;//DO NOT CLOBBER THIS + icscm_bitfield UpdateSlaveOnce : 1; + icscm_bitfield HasUpdatedSlaveOnce : 1; + icscm_bitfield ExtendedNetworkIndexBit : 1;//DO NOT CLOBBER THIS + icscm_bitfield BusRecovered : 1; + icscm_bitfield SyncFerr : 1;//!< We got 
framing error in our sync byte. + icscm_bitfield MidFerr : 1;//!< We got framing error in our message id. + icscm_bitfield SlaveByteFerr : 1;//!< We got framing error in one of our slave bytes. + icscm_bitfield TxAborted : 1;//!< This transmit was aborted. + icscm_bitfield breakOnly : 1; + icscm_bitfield : 2; + } CxLIN2; + // For use by JVPW + struct + { + icscm_bitfield len : 4; + icscm_bitfield ExtendedNetworkIndexBit2 : 1;//DO NOT CLOBBER THIS + icscm_bitfield just_tx_timestamp : 1; + icscm_bitfield first_seg : 1; + icscm_bitfield ExtendedNetworkIndexBit : 1;// do not clobber ExtendedNetworkIndexBit + icscm_bitfield D11 : 8; + } C3xJ1850; + // For use by the ISO/KEYWORD + struct + { + icscm_bitfield len : 4; + icscm_bitfield ExtendedNetworkIndexBit2 : 1;//DO NOT CLOBBER THIS + icscm_bitfield FRM : 1; + icscm_bitfield INIT : 1; + icscm_bitfield ExtendedNetworkIndexBit : 1;// do not clobber ExtendedNetworkIndexBit + icscm_bitfield D11 : 8; + } C3xISO; + struct + { + icscm_bitfield len : 4; + icscm_bitfield ExtendedNetworkIndexBit2 : 1;//DO NOT CLOBBER THIS + icscm_bitfield FRM : 1; + icscm_bitfield : 1; + icscm_bitfield ExtendedNetworkIndexBit : 1;// do not clobber ExtendedNetworkIndexBit + icscm_bitfield pri : 8; + } C3xJ1708; + struct + { + uint16_t rsvd; + } C3xETH; + struct + { + uint16_t CYCLE : 6; + uint16_t HDR_CRC_9_0 : 10; + } C3xFlex; + }; + unsigned char CxTRB0Dall[8]; + union { + uint16_t CxTRB0STAT; + uint16_t J1850_TX_ID; + }; + union { + struct + { + uint32_t uiTimeStamp10uS; + union { + uint32_t uiTimeStamp10uSMSB; + struct + { + unsigned : 28; + unsigned res_0s : 3;// must be 0!!! + unsigned bIsExtended : 1;// always 1 for CoreMiniMsgExtended. + }; + }; + }; + int64_t uiTimeStampLarge; + uint8_t uiTimeStampBytes[8]; + }; + }; +}; + +} + +#endif \ No newline at end of file diff --git a/communication/include/messagefilter.h b/communication/include/messagefilter.h new file mode 100644 index 0000000..10be675 --- /dev/null +++ b/communication/include/messagefilter.h @@ -0,0 +1,72 @@ +#ifndef __MESSAGEFILTER_H_ +#define __MESSAGEFILTER_H_ + +#include "communication/include/network.h" +#include "communication/message/include/message.h" +#include "communication/message/include/canmessage.h" +#include + +namespace icsneo { + +class MessageFilter { +public: + MessageFilter() : matchAny(true) {} + MessageFilter(Network::Type type) : type(type) {} + MessageFilter(Network::NetID netid) : netid(netid) {} + virtual ~MessageFilter() {} + + virtual bool match(const std::shared_ptr& message) const { + if(matchAny) + return true; + if(!matchType(message->network.getType())) + return false; + if(!matchNetID(message->network.getNetID())) + return false; + return true; + } + +private: + bool matchAny = false; + + Network::Type type = Network::Type::Invalid; // Matching a type of invalid will match any + bool matchType(Network::Type mtype) const { + if(type == Network::Type::Invalid) + return true; + return type == mtype; + } + + Network::NetID netid = Network::NetID::Invalid; // Matching a netid of invalid will match any + bool matchNetID(Network::NetID mnetid) const { + if(netid == Network::NetID::Invalid) + return true; + return netid == mnetid; + } +}; + +class CANMessageFilter : public MessageFilter { +public: + CANMessageFilter() : MessageFilter(Network::Type::CAN), arbid(INVALID_ARBID) {} + CANMessageFilter(uint32_t arbid) : MessageFilter(Network::Type::CAN), arbid(arbid) {} + + bool match(const std::shared_ptr& message) const { + if(!MessageFilter::match(message)) + return false; + 
const auto canMessage = std::dynamic_pointer_cast(message); + if(canMessage == nullptr || !matchArbID(canMessage->arbid)) + return false; + return true; + } + +private: + static constexpr uint32_t INVALID_ARBID = 0xffffffff; + uint32_t arbid; + bool matchArbID(uint32_t marbid) const { + if(arbid == INVALID_ARBID) + return true; + return arbid == marbid; + } +}; + +}; + +#endif \ No newline at end of file diff --git a/communication/include/multichannelcommunication.h b/communication/include/multichannelcommunication.h new file mode 100644 index 0000000..43d915b --- /dev/null +++ b/communication/include/multichannelcommunication.h @@ -0,0 +1,102 @@ +#ifndef __MULTICHANNELCOMMUNICATION_H_ +#define __MULTICHANNELCOMMUNICATION_H_ + +#include "communication/include/communication.h" +#include "communication/include/icommunication.h" + +namespace icsneo { + +class MultiChannelCommunication : public Communication { +public: + MultiChannelCommunication(std::shared_ptr com) : Communication(com) {} + void spawnThreads(); + void joinThreads(); + bool sendCommand(Communication::Command cmd, std::vector arguments); + +protected: + bool preprocessPacket(std::deque& usbReadFifo); + +private: + enum class CommandType : uint8_t { + PlasmaReadRequest = 0x10, // Status read request to HSC + PlasmaStatusResponse = 0x11, // Status response by HSC + HostPC_to_Vnet1 = 0x20, // Host PC data to Vnet module-1 + Vnet1_to_HostPC = 0x21, // Vnet module-1 data to host PC + HostPC_to_Vnet2 = 0x30, // Host PC data to Vnet module-2 + Vnet2_to_HostPC = 0x31, // Vnet module-2 data to host PC + HostPC_to_Vnet3 = 0x40, // Host PC data to Vnet module-3 + Vnet3_to_HostPC = 0x41, // Vnet module-3 data to host PC + HostPC_to_SDCC1 = 0x50, // Host PC data to write to SDCC-1 + HostPC_from_SDCC1 = 0x51, // Host PC wants data read from SDCC-1 + SDCC1_to_HostPC = 0x52, // SDCC-1 data to host PC + HostPC_to_SDCC2 = 0x60, // Host PC data to write to SDCC-2 + HostPC_from_SDCC2 = 0x61, // Host PC wants data read from SDCC-2 + SDCC2_to_HostPC = 0x62, // SDCC-2 data to host PC + PC_to_LSOC = 0x70, // Host PC data to LSOCC + LSOCC_to_PC = 0x71, // LSOCC data to host PC + HostPC_to_Microblaze = 0x80, // Host PC data to microblaze processor + Microblaze_to_HostPC = 0x81 // Microblaze processor data to host PC + }; + static bool CommandTypeIsValid(CommandType cmd) { + switch(cmd) { + case CommandType::PlasmaReadRequest: + case CommandType::PlasmaStatusResponse: + case CommandType::HostPC_to_Vnet1: + case CommandType::Vnet1_to_HostPC: + case CommandType::HostPC_to_Vnet2: + case CommandType::Vnet2_to_HostPC: + case CommandType::HostPC_to_Vnet3: + case CommandType::Vnet3_to_HostPC: + case CommandType::HostPC_to_SDCC1: + case CommandType::HostPC_from_SDCC1: + case CommandType::SDCC1_to_HostPC: + case CommandType::HostPC_to_SDCC2: + case CommandType::HostPC_from_SDCC2: + case CommandType::SDCC2_to_HostPC: + case CommandType::PC_to_LSOC: + case CommandType::LSOCC_to_PC: + case CommandType::HostPC_to_Microblaze: + case CommandType::Microblaze_to_HostPC: + return true; + default: + return false; + } + } + static bool CommandTypeHasAddress(CommandType cmd) { + // Check CommandTypeIsValid before this, you will get false on an invalid command + switch(cmd) { + case CommandType::SDCC1_to_HostPC: + case CommandType::SDCC2_to_HostPC: + return true; + default: + return false; + } + } + static uint16_t CommandTypeDefinesLength(CommandType cmd) { + // Check CommandTypeIsValid before this, you will get 0 on an invalid command + switch(cmd) { + case 
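
A short sketch of how the filter and callback types above cooperate once a message has been decoded; it drives callIfMatch() directly instead of registering with a Communication instance, so no hardware is needed.

// Hedged sketch: dispatching a decoded CAN frame through CANMessageCallback/CANMessageFilter.
#include "communication/include/messagecallback.h"
#include "communication/message/include/canmessage.h"
#include <iostream>
#include <memory>

int main() {
	using namespace icsneo;

	// Only call back for CAN traffic with arbitration ID 0x123
	CANMessageCallback callback([](std::shared_ptr<Message> message) {
		auto can = std::static_pointer_cast<CANMessage>(message);
		std::cout << "Got CAN frame 0x" << std::hex << can->arbid
			<< " with " << std::dec << can->data.size() << " data bytes" << std::endl;
	}, CANMessageFilter(0x123));

	auto frame = std::make_shared<CANMessage>();
	frame->network = Network::NetID::HSCAN; // a CAN-type network, so the type filter passes
	frame->arbid = 0x123;
	frame->data = { 0xDE, 0xAD };

	callback.callIfMatch(frame); // matches: the lambda runs
	frame->arbid = 0x456;
	callback.callIfMatch(frame); // filtered out: returns false, lambda does not run
	return 0;
}
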
CommandType::PlasmaStatusResponse: + return 2; + default: + return 0; // Length is defined by following bytes in message + } + } + + enum class PreprocessState { + SearchForCommand, + ParseAddress, + ParseLength, + GetData + }; + PreprocessState state = PreprocessState::SearchForCommand; + uint16_t currentCommandLength; + CommandType currentCommandType; + size_t currentReadIndex = 0; + + std::thread mainChannelReadThread; + void readTask(); +}; + +}; + +#endif \ No newline at end of file diff --git a/communication/include/network.h b/communication/include/network.h new file mode 100644 index 0000000..1bb5491 --- /dev/null +++ b/communication/include/network.h @@ -0,0 +1,340 @@ +#ifndef __NETWORKID_H_ +#define __NETWORKID_H_ + +#include +#include + +namespace icsneo { + +class Network { +public: + enum class NetID : uint16_t { + Device = 0, + HSCAN = 1, + MSCAN = 2, + SWCAN = 3, + LSFTCAN = 4, + FordSCP = 5, + J1708 = 6, + Aux = 7, + J1850VPW = 8, + ISO = 9, + ISOPIC = 10, + Main51 = 11, + RED = 12, + SCI = 13, + ISO2 = 14, + ISO14230 = 15, + LIN = 16, + OP_Ethernet1 = 17, + OP_Ethernet2 = 18, + OP_Ethernet3 = 19, + ISO3 = 41, + HSCAN2 = 42, + HSCAN3 = 44, + OP_Ethernet4 = 45, + OP_Ethernet5 = 46, + ISO4 = 47, + LIN2 = 48, + LIN3 = 49, + LIN4 = 50, + MOST = 51, + Red_App_Error = 52, + CGI = 53, + Reset_Status = 54, + FB_Status = 55, + App_Signal_Status = 56, + Read_Datalink_Cm_Tx_Msg = 57, + Read_Datalink_Cm_Rx_Msg = 58, + Logging_Overflow = 59, + Read_Settings_Ex = 60, + HSCAN4 = 61, + HSCAN5 = 62, + RS232 = 63, + UART = 64, + UART2 = 65, + UART3 = 66, + UART4 = 67, + SWCAN2 = 68, + Ethernet_DAQ = 69, + Data_To_Host = 70, + TextAPI_To_Host = 71, + OP_Ethernet6 = 73, + Red_VBat = 74, + OP_Ethernet7 = 75, + OP_Ethernet8 = 76, + OP_Ethernet9 = 77, + OP_Ethernet10 = 78, + OP_Ethernet11 = 79, + FlexRay1a = 80, + FlexRay1b = 81, + FlexRay2a = 82, + FlexRay2b = 83, + LIN5 = 84, + FlexRay = 85, + FlexRay2 = 86, + OP_Ethernet12 = 87, + MOST25 = 90, + MOST50 = 91, + MOST150 = 92, + Ethernet = 93, + GMFSA = 94, + TCP = 95, + HSCAN6 = 96, + HSCAN7 = 97, + LIN6 = 98, + LSFTCAN2 = 99, + HW_COM_Latency_Test = 512, + Device_Status = 513, + Invalid = 0xffff + }; + enum class Type { + CAN, + LIN, + FlexRay, + MOST, + Ethernet, + Other, + Invalid + }; + static constexpr const char* GetTypeString(Type type) { + switch(type) { + case Type::CAN: + return "CAN"; + case Type::LIN: + return "LIN"; + case Type::FlexRay: + return "FlexRay"; + case Type::MOST: + return "MOST"; + case Type::Other: + return "Other"; + case Type::Invalid: + default: + return "Invalid Type"; + } + } + static constexpr Type GetTypeOfNetID(NetID netid) { + switch(netid) { + case NetID::HSCAN: + case NetID::MSCAN: + case NetID::SWCAN: + case NetID::LSFTCAN: + case NetID::HSCAN2: + case NetID::HSCAN3: + case NetID::HSCAN4: + case NetID::HSCAN5: + case NetID::SWCAN2: + case NetID::HSCAN6: + case NetID::HSCAN7: + case NetID::LSFTCAN2: + return Type::CAN; + case NetID::LIN: + case NetID::LIN2: + case NetID::LIN3: + case NetID::LIN4: + case NetID::LIN5: + case NetID::LIN6: + return Type::LIN; + case NetID::FlexRay: + case NetID::FlexRay1a: + case NetID::FlexRay1b: + case NetID::FlexRay2: + case NetID::FlexRay2a: + case NetID::FlexRay2b: + return Type::FlexRay; + case NetID::MOST: + case NetID::MOST25: + case NetID::MOST50: + case NetID::MOST150: + return Type::MOST; + case NetID::Invalid: + return Type::Invalid; + default: + return Type::Other; + } + } + static constexpr const char* GetNetIDString(NetID netid) { + switch(netid) { + case 
NetID::Device: + return "Device"; + case NetID::HSCAN: + return "HSCAN"; + case NetID::MSCAN: + return "MSCAN"; + case NetID::SWCAN: + return "SWCAN"; + case NetID::LSFTCAN: + return "LSFTCAN"; + case NetID::FordSCP: + return "FordSCP"; + case NetID::J1708: + return "J1708"; + case NetID::Aux: + return "Aux"; + case NetID::J1850VPW: + return "J1850 VPW"; + case NetID::ISO: + return "ISO"; + case NetID::ISOPIC: + return "ISOPIC"; + case NetID::Main51: + return "Main51"; + case NetID::RED: + return "RED"; + case NetID::SCI: + return "SCI"; + case NetID::ISO2: + return "ISO 2"; + case NetID::ISO14230: + return "ISO 14230"; + case NetID::LIN: + return "LIN"; + case NetID::OP_Ethernet1: + return "Ethernet 1"; + case NetID::OP_Ethernet2: + return "Ethernet 2"; + case NetID::OP_Ethernet3: + return "Ethernet 3"; + case NetID::ISO3: + return "ISO 3"; + case NetID::HSCAN2: + return "HSCAN 2"; + case NetID::HSCAN3: + return "HSCAN 3"; + case NetID::OP_Ethernet4: + return "Ethernet 4"; + case NetID::OP_Ethernet5: + return "Ethernet 5"; + case NetID::ISO4: + return "ISO 4"; + case NetID::LIN2: + return "LIN 2"; + case NetID::LIN3: + return "LIN 3"; + case NetID::LIN4: + return "LIN 4"; + case NetID::MOST: + return "MOST"; + case NetID::Red_App_Error: + return "Red App Error"; + case NetID::CGI: + return "CGI"; + case NetID::Reset_Status: + return "Reset Status"; + case NetID::FB_Status: + return "FB Status"; + case NetID::App_Signal_Status: + return "App Signal Status"; + case NetID::Read_Datalink_Cm_Tx_Msg: + return "Read Datalink Cm Tx Msg"; + case NetID::Read_Datalink_Cm_Rx_Msg: + return "Read Datalink Cm Rx Msg"; + case NetID::Logging_Overflow: + return "Logging Overflow"; + case NetID::Read_Settings_Ex: + return "Read Settings Ex"; + case NetID::HSCAN4: + return "HSCAN 4"; + case NetID::HSCAN5: + return "HSCAN 5"; + case NetID::RS232: + return "RS232"; + case NetID::UART: + return "UART"; + case NetID::UART2: + return "UART 2"; + case NetID::UART3: + return "UART 3"; + case NetID::UART4: + return "UART 4"; + case NetID::SWCAN2: + return "SWCAN 2"; + case NetID::Ethernet_DAQ: + return "Ethernet DAQ"; + case NetID::Data_To_Host: + return "Data To Host"; + case NetID::TextAPI_To_Host: + return "TextAPI To Host"; + case NetID::OP_Ethernet6: + return "Ethernet 6"; + case NetID::Red_VBat: + return "Red VBat"; + case NetID::OP_Ethernet7: + return "Ethernet 7"; + case NetID::OP_Ethernet8: + return "Ethernet 8"; + case NetID::OP_Ethernet9: + return "Ethernet 9"; + case NetID::OP_Ethernet10: + return "Ethernet 10"; + case NetID::OP_Ethernet11: + return "Ethernet 11"; + case NetID::FlexRay1a: + return "FlexRay 1a"; + case NetID::FlexRay1b: + return "FlexRay 1b"; + case NetID::FlexRay2a: + return "FlexRay 2a"; + case NetID::FlexRay2b: + return "FlexRay 2b"; + case NetID::LIN5: + return "LIN 5"; + case NetID::FlexRay: + return "FlexRay"; + case NetID::FlexRay2: + return "FlexRay 2"; + case NetID::OP_Ethernet12: + return "Ethernet 12"; + case NetID::MOST25: + return "MOST25"; + case NetID::MOST50: + return "MOST50"; + case NetID::MOST150: + return "MOST150"; + case NetID::Ethernet: + return "Ethernet"; + case NetID::GMFSA: + return "GMFSA"; + case NetID::TCP: + return "TCP"; + case NetID::HSCAN6: + return "HSCAN 6"; + case NetID::HSCAN7: + return "HSCAN 7"; + case NetID::LIN6: + return "LIN 6"; + case NetID::LSFTCAN2: + return "LSFTCAN 2"; + case NetID::HW_COM_Latency_Test: + return "HW COM Latency Test"; + case NetID::Device_Status: + return "Device Status"; + case NetID::Invalid: + default: + return "Invalid 
Network"; + } + } + + Network() { setValue(NetID::Invalid); } + Network(uint16_t netid) { setValue((NetID)netid); } + Network(NetID netid) { setValue(netid); } + NetID getNetID() const { return value; } + Type getType() const { return type; } + friend std::ostream& operator<<(std::ostream& os, const Network& network) { + os << GetNetIDString(network.getNetID()); + return os; + } + +private: + NetID value; // Always use setValue so that value and type stay in sync + Type type; + void setValue(NetID id) { + value = id; + type = GetTypeOfNetID(value); + } +}; + +}; + +#endif \ No newline at end of file diff --git a/communication/message/include/canmessage.h b/communication/message/include/canmessage.h new file mode 100644 index 0000000..35aa877 --- /dev/null +++ b/communication/message/include/canmessage.h @@ -0,0 +1,15 @@ +#ifndef __CANMESSAGE_H_ +#define __CANMESSAGE_H_ + +#include "communication/message/include/message.h" + +namespace icsneo { + +class CANMessage : public Message { +public: + uint32_t arbid; +}; + +}; + +#endif \ No newline at end of file diff --git a/communication/message/include/message.h b/communication/message/include/message.h new file mode 100644 index 0000000..fab82c6 --- /dev/null +++ b/communication/message/include/message.h @@ -0,0 +1,18 @@ +#ifndef __MESSAGE_H_ +#define __MESSAGE_H_ + +#include "communication/include/network.h" +#include + +namespace icsneo { + +class Message { +public: + virtual ~Message() {} + Network network; + std::vector data; +}; + +}; + +#endif \ No newline at end of file diff --git a/communication/messagedecoder.cpp b/communication/messagedecoder.cpp new file mode 100644 index 0000000..fd5251a --- /dev/null +++ b/communication/messagedecoder.cpp @@ -0,0 +1,139 @@ +#include "communication/include/messagedecoder.h" +#include "communication/include/communication.h" +#include + +using namespace icsneo; + +CANMessage MessageDecoder::CoreMiniMsg::toCANMessage(Network network) { + CANMessage msg; + msg.network = network; + msg.arbid = CxTRB0SID.SID; + msg.data.reserve(CxTRB0DLC.DLC); + for(auto i = 0; i < CxTRB0DLC.DLC; i++) + msg.data.push_back(CxTRB0Dall[i]); + return msg; +} + +bool MessageDecoder::input(const std::vector& inputBytes) { + bool haveEnoughData = true; + bytes.insert(bytes.end(), inputBytes.begin(), inputBytes.end()); + + while(haveEnoughData) { + switch(state) { + case ReadState::SearchForHeader: + if(bytes.size() < 1) { + haveEnoughData = false; + break; + } + + if(bytes[0] == 0xAA) { // 0xAA denotes the beginning of a packet + state = ReadState::ParseHeader; + currentIndex = 1; + } else { + //std::cout << (int)bytes[0] << " "; + bytes.pop_front(); // Discard + } + break; + case ReadState::ParseHeader: + if(bytes.size() < 2) { + haveEnoughData = false; + break; + } + + messageLength = bytes[1] >> 4 & 0xf; // Upper nibble of the second byte denotes the message length + message.network = Network(bytes[1] & 0xf); // Lower nibble of the second byte is the network ID + if(messageLength == 0) { // A length of zero denotes a long style packet + state = ReadState::ParseLongStylePacketHeader; + checksum = false; + headerSize = 6; + } else { + state = ReadState::GetData; + checksum = true; + headerSize = 2; + messageLength += 2; // The message length given in short messages does not include header + } + currentIndex++; + break; + case ReadState::ParseLongStylePacketHeader: + if(bytes.size() < 6) { + haveEnoughData = false; + break; + } + + messageLength = bytes[2]; // Long messages have a little endian length on bytes 3 and 4 + 
messageLength |= bytes[3] << 8; + message.network = Network((bytes[5] << 8) | bytes[4]); // Long messages have their netid stored as little endian on bytes 5 and 6 + currentIndex += 4; + + /* Long messages can't have a length less than 4, because that would indicate a negative payload size. + * Unlike the short message length, the long message length encompasses everything from the 0xAA to the + * end of the payload. The short message length, for reference, only encompasses the length of the actual + * payload, and not the header or checksum. + */ + if(messageLength < 4 || messageLength > 4000) { + bytes.pop_front(); + //std::cout << "skipping long message with length " << messageLength << std::endl; + state = ReadState::SearchForHeader; + } else { + state = ReadState::GetData; + } + break; + case ReadState::GetData: + // We do not include the checksum in messageLength so it doesn't get copied into the payload buffer + if(bytes.size() < messageLength + (checksum ? 1 : 0)) { // Read until we have the rest of the message + haveEnoughData = false; + break; + } + + message.data.clear(); + if(messageLength > 0) + message.data.reserve(messageLength - headerSize); + + while(currentIndex < messageLength) + message.data.push_back(bytes[currentIndex++]); + + if(!checksum || bytes[currentIndex] == Communication::ICSChecksum(message.data)) { + // Got a good packet + gotGoodMessages = true; + processMessage(message); + for (auto i = 0; i < messageLength; i++) + bytes.pop_front(); + + } else { + if(gotGoodMessages) // Don't complain unless we've already gotten a good message, in case we started in the middle of a stream + std::cout << "Dropping message due to bad checksum" << std::endl; + bytes.pop_front(); // Drop the first byte so it doesn't get picked up again + } + + // Reset for the next packet + currentIndex = 0; + state = ReadState::SearchForHeader; + break; + } + } + + return processedMessages.size() > 0; +} + +std::vector> MessageDecoder::output() { + auto ret = std::move(processedMessages); + processedMessages = std::vector>(); // Reset the vector + return ret; +} + +void MessageDecoder::processMessage(const Message& msg) { + switch(msg.network.getType()) { + case Network::Type::CAN: + if(msg.data.size() >= 24) { + CoreMiniMsg* cmsg = (CoreMiniMsg*)msg.data.data(); + processedMessages.push_back(std::make_shared(cmsg->toCANMessage(msg.network))); + } else { + //std::cout << "bad CAN frame " << msg.data.size() << std::endl; + } + break; + default: + // if(msg.network.getNetID() != Network::NetID::Device) + // std::cout << "Message: " << msg.network << " with data length " << msg.data.size() << std::endl; + processedMessages.push_back(std::make_shared(msg)); + } +} \ No newline at end of file diff --git a/communication/multichannelcommunication.cpp b/communication/multichannelcommunication.cpp new file mode 100644 index 0000000..96bb354 --- /dev/null +++ b/communication/multichannelcommunication.cpp @@ -0,0 +1,128 @@ +#include "communication/include/multichannelcommunication.h" +#include "communication/include/messagedecoder.h" +#include +#include + +using namespace icsneo; + +void MultiChannelCommunication::spawnThreads() { + mainChannelReadThread = std::thread(&MultiChannelCommunication::readTask, this); +} + +void MultiChannelCommunication::joinThreads() { + if(mainChannelReadThread.joinable()) + mainChannelReadThread.join(); +} + +bool MultiChannelCommunication::sendCommand(Communication::Command cmd, std::vector arguments) { + std::vector bytes; + bytes.push_back((uint8_t)cmd); + for(auto& b 
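
Here is a sketch of the decoder above consuming one hand-built short-style packet; the frame uses NetID::Device in the low nibble of the header byte, so it surfaces as a generic Message rather than a CANMessage.

// Hedged sketch: feed one short-style packet through MessageDecoder.
// Header byte 0x20 = payload length 2 (upper nibble) | NetID::Device (lower nibble);
// trailing 0xFD is the two's-complement checksum of the two payload bytes.
#include "communication/include/messagedecoder.h"
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
	icsneo::MessageDecoder decoder;
	std::vector<uint8_t> packet = { 0xAA, 0x20, 0x01, 0x02, 0xFD };

	if(decoder.input(packet)) {
		for(auto& msg : decoder.output())
			std::cout << msg->network << " message with "
				<< msg->data.size() << " data bytes" << std::endl;
		// Prints: "Device message with 2 data bytes"
	}
	return 0;
}
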
: arguments) + bytes.push_back(b); + bytes.insert(bytes.begin(), 0xB | ((uint8_t)bytes.size() << 4)); + bytes = Communication::packetWrap(bytes); + bytes.insert(bytes.begin(), {(uint8_t)CommandType::HostPC_to_Vnet1, (uint8_t)bytes.size(), (uint8_t)(bytes.size() >> 8)}); + return rawWrite(bytes); +} + +void MultiChannelCommunication::readTask() { + bool readMore = true; + std::deque usbReadFifo; + std::vector readBytes; + std::vector payloadBytes; + MessageDecoder decoder; + + while(!closing) { + if(readMore) { + readBytes.clear(); + if(impl->readWait(readBytes)) { + readMore = false; + usbReadFifo.insert(usbReadFifo.end(), std::make_move_iterator(readBytes.begin()), std::make_move_iterator(readBytes.end())); + } + } else { + switch(state) { + case PreprocessState::SearchForCommand: + if(usbReadFifo.size() < 1) { + readMore = true; + continue; + } + + currentCommandType = (CommandType)usbReadFifo[0]; + + if(!CommandTypeIsValid(currentCommandType)) { + std::cout << "cnv" << std::hex << (int)currentCommandType << ' ' << std::dec; + usbReadFifo.pop_front(); + continue; + } + + currentReadIndex = 1; + + if(CommandTypeHasAddress(currentCommandType)) { + state = PreprocessState::ParseAddress; + continue; // No commands which define an address also define a length, so we can just continue from there + } + + currentCommandLength = CommandTypeDefinesLength(currentCommandType); + if(currentCommandLength == 0) { + state = PreprocessState::ParseLength; + continue; + } + + state = PreprocessState::GetData; + continue; + case PreprocessState::ParseAddress: + // The address is represented by a 4 byte little endian + // Don't care about it yet + currentReadIndex += 4; + // Intentionally fall through + case PreprocessState::ParseLength: + state = PreprocessState::ParseLength; // Set state in case we've fallen through, but later need to go around again + + if(usbReadFifo.size() < currentReadIndex + 2) { // Come back we have more data + readMore = true; + continue; + } + + // The length is represented by a 2 byte little endian + currentCommandLength = usbReadFifo[currentReadIndex++]; + currentCommandLength |= usbReadFifo[currentReadIndex++] << 8; + // Intentionally fall through + case PreprocessState::GetData: + state = PreprocessState::GetData; // Set state in case we've fallen through, but later need to go around again + + if(usbReadFifo.size() <= currentReadIndex + currentCommandLength) { // Come back we have more data + readMore = true; + continue; + } + + //std::cout << std::dec << "Got a multichannel message! 
Size: " << currentCommandLength << std::hex << std::setfill('0') << std::setw(2) << " Cmd: 0x" << (int)currentCommandType << std::endl; + for(auto i = 0; i < currentReadIndex; i++) + usbReadFifo.pop_front(); + + payloadBytes.clear(); + payloadBytes.reserve(currentCommandLength); + for(auto i = 0; i < currentCommandLength; i++) { + //std::cout << (int)usbReadFifo[0] << ' '; + payloadBytes.push_back(usbReadFifo[0]); + // if(i % 16 == 15) + // std::cout << std::endl; + usbReadFifo.pop_front(); + } + //std::cout << std::dec << std::endl; + + if(decoder.input(payloadBytes)) { + for(auto& msg : decoder.output()) { + for(auto& cb : messageCallbacks) { + if(!closing) { // We might have closed while reading or processing + cb.second.callIfMatch(msg); + } + } + } + } + + state = PreprocessState::SearchForCommand; + } + } + + } +} \ No newline at end of file diff --git a/device/device.cpp b/device/device.cpp new file mode 100644 index 0000000..f53ab0f --- /dev/null +++ b/device/device.cpp @@ -0,0 +1,156 @@ +#include "include/device.h" +#include "communication/include/messagecallback.h" +#include +#include +#include + +using namespace icsneo; + +static const uint8_t fromBase36Table[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 0, 0, 0, 0, 0, 0, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35 }; + +static const char toBase36Table[36] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', + 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z' }; + +static const uint32_t toBase36Powers[7] = { 1, 36, 1296, 46656, 1679616, 60466176, 2176782336 }; + +#define MIN_BASE36_SERIAL (16796160) +#define MAX_SERIAL (2176782335) + +std::string Device::SerialNumToString(uint32_t serial) { + if(serial == 0 || serial > MAX_SERIAL) + return "0"; + + std::stringstream ss; + if(serial >= MIN_BASE36_SERIAL) { + for (auto i = 5; i >= 0; i--) { + ss << toBase36Table[serial / toBase36Powers[i]]; + serial = serial % toBase36Powers[i]; + } + } else { + ss << serial; + } + return ss.str(); +} + +uint32_t Device::SerialStringToNum(const std::string& serial) { + if(Device::SerialStringIsNumeric(serial)) { + try { + return std::stoi(serial); + } catch(...) 
{ + return 0; + } + } + + if(serial.length() != 6) + return 0; // Non-numeric serial numbers should be 6 characters + + uint32_t ret = 0; + for (auto i = 0; i < 6; i++) { + ret *= 36; + ret += fromBase36Table[(unsigned char)serial[i]]; + } + return ret; +} + +bool Device::SerialStringIsNumeric(const std::string& serial) { + if(serial.length() == 0) + return false; + + if(serial.length() == 1) + return isdigit(serial[0]); + + // Check the first two characters, at least one should be a character if we need to do a base36 conversion + return isdigit(serial[0]) && isdigit(serial[1]); +} + +void Device::enableMessagePolling() { + if(messagePollingCallbackID != 0) // We are already polling + return; + + messagePollingCallbackID = com->addMessageCallback(MessageCallback([this](std::shared_ptr message) { + pollingContainer.enqueue(message); + enforcePollingMessageLimit(); + })); +} + +bool Device::disableMessagePolling() { + if(messagePollingCallbackID == 0) + return true; // Not currently polling + + auto ret = com->removeMessageCallback(messagePollingCallbackID); + getMessages(); // Flush any messages still in the container + messagePollingCallbackID = 0; + return ret; +} + +std::vector> Device::getMessages() { + std::vector> ret; + getMessages(ret); + return ret; +} + +bool Device::getMessages(std::vector>& container, size_t limit) { + // A limit of zero indicates no limit + auto oglimit = limit; + if(limit == 0) + limit = (size_t)-1; + + if(limit > (pollingContainer.size_approx() + 4)) + limit = (pollingContainer.size_approx() + 4); + + if(container.capacity() < limit) + container.resize(limit); + + size_t actuallyRead = pollingContainer.try_dequeue_bulk(container.data(), limit); + + container.resize(actuallyRead); + + return actuallyRead <= oglimit; +} + +void Device::enforcePollingMessageLimit() { + while(pollingContainer.size_approx() > pollingMessageLimit) { + std::shared_ptr throwAway; + pollingContainer.try_dequeue(throwAway); + // TODO Flag an error for the user! 
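+ // Usage sketch (illustrative only, not part of the original file): a caller would
+ // typically pair the polling helpers above with DeviceFinder, roughly as follows,
+ // assuming a supported device is attached and <thread>/<chrono>/<iostream> are included:
+ //
+ //     auto devices = icsneo::DeviceFinder::FindAll();
+ //     if(!devices.empty()) {
+ //         auto& device = devices.front();
+ //         if(device->open()) {
+ //             device->enableMessagePolling(); // Messages now accumulate in pollingContainer
+ //             device->goOnline();             // Enable network communication
+ //             std::this_thread::sleep_for(std::chrono::seconds(1));
+ //             for(auto& msg : device->getMessages())
+ //                 std::cout << "Received " << msg->data.size() << " bytes" << std::endl;
+ //             device->goOffline();
+ //             device->close();
+ //         }
+ //     }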
+ } +} + +bool Device::open() { + if(!com) + return false; + + return com->open(); +} + +bool Device::close() { + if(!com) + return false; + + return com->close(); +} + +bool Device::goOnline() { + if(!com->sendCommand(Communication::Command::EnableNetworkCommunication, true)) + return false; + + if(!com->sendCommand(Communication::Command::RequestSerialNumber)) + return false; + + com->addMessageCallback(CANMessageCallback([](std::shared_ptr message) { + std::shared_ptr canMessage = std::static_pointer_cast(message); + std::cout << "CAN 0x" << std::hex << canMessage->arbid << std::dec << " [" << canMessage->data.size() << "] " << std::hex; + for(const auto& b : canMessage->data) + std::cout << (int)b << ' '; + std::cout << std::dec << std::endl; + })); + + return online = true; +} + +bool Device::goOffline() { + return com->sendCommand(Communication::Command::EnableNetworkCommunication, false); +} \ No newline at end of file diff --git a/device/devicefinder.cpp b/device/devicefinder.cpp new file mode 100644 index 0000000..7438c60 --- /dev/null +++ b/device/devicefinder.cpp @@ -0,0 +1,34 @@ +#include "device/include/devicefinder.h" +#include "device/neovifire/include/neovifire.h" +#include "device/neovifire2/include/neovifire2.h" +#include "device/plasion/include/neoviion.h" +#include "device/plasion/include/neoviplasma.h" +#include "device/radstar2/include/radstar2.h" +#include "device/radsupermoon/include/radsupermoon.h" +#include "device/valuecan3/include/valuecan3.h" +#include "device/valuecan4/include/valuecan4.h" +#include "device/vividcan/include/vividcan.h" + +using namespace icsneo; + +std::vector> DeviceFinder::FindAll() { + std::vector> foundDevices; + std::vector>> findResults; + + findResults.push_back(NeoVIFIRE::Find()); + findResults.push_back(NeoVIFIRE2::Find()); + findResults.push_back(NeoVIION::Find()); + findResults.push_back(NeoVIPLASMA::Find()); + findResults.push_back(RADStar2::Find()); + findResults.push_back(RADSupermoon::Find()); + findResults.push_back(ValueCAN3::Find()); + findResults.push_back(ValueCAN4::Find()); + findResults.push_back(VividCAN::Find()); + + for(auto& results : findResults) { + if(results.size()) + foundDevices.insert(foundDevices.end(), std::make_move_iterator(results.begin()), std::make_move_iterator(results.end())); + } + + return foundDevices; +} \ No newline at end of file diff --git a/device/include/device.h b/device/include/device.h new file mode 100644 index 0000000..7dec685 --- /dev/null +++ b/device/include/device.h @@ -0,0 +1,75 @@ +#ifndef __DEVICE_H__ +#define __DEVICE_H__ + +#include +#include +#include +#include "device/include/neodevice.h" +#include "communication/include/communication.h" +#include "third-party/concurrentqueue/concurrentqueue.h" + +namespace icsneo { + +class Device { +public: + Device(neodevice_t neodevice = {}) { + data = neodevice; + data.device = this; + setProductName("undefined"); + } + virtual ~Device() { + disableMessagePolling(); + close(); + } + + static std::string SerialNumToString(uint32_t serial); + static uint32_t SerialStringToNum(const std::string& serial); + static bool SerialStringIsNumeric(const std::string& serial); + + std::string getProductName() const { return data.type; } + uint16_t getUSBProductId() const { return usbProductId; } + std::string getSerial() const { return data.serial; } + uint32_t getSerialNumber() const { return Device::SerialStringToNum(getSerial()); } + const neodevice_t& getNeoDevice() const { return data; } + + virtual bool open(); + virtual bool close(); + virtual 
bool isOnline() const { return online; } + virtual bool goOnline(); + virtual bool goOffline(); + + // Message polling related functions + void enableMessagePolling(); + bool disableMessagePolling(); + std::vector> getMessages(); + bool getMessages(std::vector>& container, size_t limit = 0); + size_t getPollingMessageLimit() { return pollingMessageLimit; } + void setPollingMessageLimit(size_t newSize) { + pollingMessageLimit = newSize; + enforcePollingMessageLimit(); + } + +protected: + uint16_t usbProductId = 0; + bool online = false; + int messagePollingCallbackID = 0; + std::shared_ptr com; + + neodevice_t& getWritableNeoDevice() { return data; } + void setProductName(const std::string& newName) { + #pragma warning( disable : 4996 ) + auto copied = newName.copy(data.type, sizeof(data.type) - 1); + data.type[copied] = '\0'; + } + +private: + neodevice_t data; + + size_t pollingMessageLimit = 20000; + moodycamel::ConcurrentQueue> pollingContainer; + void enforcePollingMessageLimit(); +}; + +}; + +#endif \ No newline at end of file diff --git a/device/include/devicefinder.h b/device/include/devicefinder.h new file mode 100644 index 0000000..0bfeb34 --- /dev/null +++ b/device/include/devicefinder.h @@ -0,0 +1,17 @@ +#ifndef __DEVICEFINDER_H_ +#define __DEVICEFINDER_H_ + +#include "device/include/device.h" +#include +#include + +namespace icsneo { + +class DeviceFinder { +public: + static std::vector> FindAll(); +}; + +}; + +#endif \ No newline at end of file diff --git a/device/include/neodevice.h b/device/include/neodevice.h new file mode 100644 index 0000000..bca52f7 --- /dev/null +++ b/device/include/neodevice.h @@ -0,0 +1,25 @@ +#ifndef __NEODEVICE_H_ +#define __NEODEVICE_H_ + +#include + +#ifdef __cplusplus +// A forward declaration is needed as there is a circular dependency +namespace icsneo { + class Device; +}; +typedef icsneo::Device* devicehandle_t; +#else +typedef void* devicehandle_t; +#endif + +typedef int32_t neodevice_handle_t; + +typedef struct { + devicehandle_t device; + neodevice_handle_t handle; + char serial[7]; + char type[64]; +} neodevice_t; + +#endif \ No newline at end of file diff --git a/device/neovifire/include/neovifire.h b/device/neovifire/include/neovifire.h new file mode 100644 index 0000000..bae223f --- /dev/null +++ b/device/neovifire/include/neovifire.h @@ -0,0 +1,55 @@ +#ifndef __NEOVIFIRE_H_ +#define __NEOVIFIRE_H_ + +#include "device/include/device.h" +#include "platform/include/ftdi.h" + +namespace icsneo { + +class NeoVIFIRE : public Device { +public: + static constexpr const char* PRODUCT_NAME = "neoVI FIRE"; + static constexpr const uint16_t USB_PRODUCT_ID = 0x0701; + NeoVIFIRE(neodevice_t neodevice) : Device(neodevice) { + com = std::make_shared(std::make_shared(getWritableNeoDevice())); + setProductName(PRODUCT_NAME); + usbProductId = USB_PRODUCT_ID; + } + + enum class Mode : char { + Application = 'A', + Bootloader = 'B' + }; + + bool goOnline() { + // Enter mode is only needed on very old FIRE devices, will be ignored by newer devices + if(!enterMode(Mode::Application)) + return false; + + return Device::goOnline(); + } + + bool enterMode(Mode mode) { + // Included for compatibility with bootloaders on very old FIRE devices + // Mode will be a uppercase char like 'A' + if(!com->rawWrite({ (uint8_t)mode })) + return false; + + // We then expect to see that same mode back in lowercase + // This won't happen in the case of new devices, though, so we assume it worked + return true; + } + + static std::vector> Find() { + std::vector> found; + + 
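+ // Each neodevice_t handed back by FTDI::FindByProduct already carries the serial string
+ // and a platform handle; wrapping it in a NeoVIFIRE here is what lets Device::open()
+ // locate the same physical device again later through that handle.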
for(auto neodevice : FTDI::FindByProduct(USB_PRODUCT_ID)) + found.push_back(std::make_shared(neodevice)); + + return found; + } +}; + +}; + +#endif \ No newline at end of file diff --git a/device/neovifire2/include/neovifire2.h b/device/neovifire2/include/neovifire2.h new file mode 100644 index 0000000..c840a0a --- /dev/null +++ b/device/neovifire2/include/neovifire2.h @@ -0,0 +1,31 @@ +#ifndef __NEOVIFIRE2_H_ +#define __NEOVIFIRE2_H_ + +#include "device/include/device.h" +#include "platform/include/ftdi.h" + +namespace icsneo { + +class NeoVIFIRE2 : public Device { +public: + static constexpr const char* PRODUCT_NAME = "neoVI FIRE 2"; + static constexpr const uint16_t USB_PRODUCT_ID = 0x1000; + NeoVIFIRE2(neodevice_t neodevice) : Device(neodevice) { + com = std::make_shared(std::make_shared(getWritableNeoDevice())); + setProductName(PRODUCT_NAME); + usbProductId = USB_PRODUCT_ID; + } + + static std::vector> Find() { + std::vector> found; + + for(auto neodevice : FTDI::FindByProduct(USB_PRODUCT_ID)) + found.push_back(std::make_shared(neodevice)); + + return found; + } +}; + +}; + +#endif \ No newline at end of file diff --git a/device/plasion/include/neoviion.h b/device/plasion/include/neoviion.h new file mode 100644 index 0000000..c62aa6d --- /dev/null +++ b/device/plasion/include/neoviion.h @@ -0,0 +1,30 @@ +#ifndef __NEOVIION_H_ +#define __NEOVIION_H_ + +#include "device/plasion/include/plasion.h" +#include "platform/include/ftdi.h" + +namespace icsneo { + +class NeoVIION : public Plasion { +public: + static constexpr const char* PRODUCT_NAME = "neoVI ION"; + static constexpr const uint16_t USB_PRODUCT_ID = 0x0901; + NeoVIION(neodevice_t neodevice) : Plasion(neodevice) { + setProductName(PRODUCT_NAME); + usbProductId = USB_PRODUCT_ID; + } + + static std::vector> Find() { + std::vector> found; + + for(auto neodevice : FTDI::FindByProduct(USB_PRODUCT_ID)) + found.push_back(std::make_shared(neodevice)); + + return found; + } +}; + +}; + +#endif \ No newline at end of file diff --git a/device/plasion/include/neoviplasma.h b/device/plasion/include/neoviplasma.h new file mode 100644 index 0000000..a5f2561 --- /dev/null +++ b/device/plasion/include/neoviplasma.h @@ -0,0 +1,30 @@ +#ifndef __NEOVIPLASMA_H_ +#define __NEOVIPLASMA_H_ + +#include "device/plasion/include/plasion.h" +#include "platform/include/ftdi.h" + +namespace icsneo { + +class NeoVIPLASMA : public Plasion { +public: + static constexpr const char* PRODUCT_NAME = "neoVI PLASMA"; + static constexpr const uint16_t USB_PRODUCT_ID = 0x0801; + NeoVIPLASMA(neodevice_t neodevice) : Plasion(neodevice) { + setProductName(PRODUCT_NAME); + usbProductId = USB_PRODUCT_ID; + } + + static std::vector> Find() { + std::vector> found; + + for(auto neodevice : FTDI::FindByProduct(USB_PRODUCT_ID)) + found.push_back(std::make_shared(neodevice)); + + return found; + } +}; + +}; + +#endif \ No newline at end of file diff --git a/device/plasion/include/plasion.h b/device/plasion/include/plasion.h new file mode 100644 index 0000000..9fc3ad0 --- /dev/null +++ b/device/plasion/include/plasion.h @@ -0,0 +1,19 @@ +#ifndef __PLASION_H_ +#define __PLASION_H_ + +#include "device/include/device.h" +#include "communication/include/multichannelcommunication.h" +#include "platform/include/ftdi.h" + +namespace icsneo { + +class Plasion : public Device { +public: + Plasion(neodevice_t neodevice) : Device(neodevice) { + com = std::make_shared(std::make_shared(getWritableNeoDevice())); + } +}; + +}; + +#endif \ No newline at end of file diff --git 
a/device/radstar2/include/radstar2.h b/device/radstar2/include/radstar2.h new file mode 100644 index 0000000..33b9c1b --- /dev/null +++ b/device/radstar2/include/radstar2.h @@ -0,0 +1,32 @@ +#ifndef __RADSTAR2_H_ +#define __RADSTAR2_H_ + +#include "device/include/device.h" +#include "platform/include/ftdi.h" + +namespace icsneo { + +class RADStar2 : public Device { +public: + // Serial numbers start with RS + static constexpr const char* PRODUCT_NAME = "RADStar 2"; + static constexpr const uint16_t USB_PRODUCT_ID = 0x0005; + RADStar2(neodevice_t neodevice) : Device(neodevice) { + com = std::make_shared(std::make_shared(getWritableNeoDevice())); + setProductName(PRODUCT_NAME); + usbProductId = USB_PRODUCT_ID; + } + + static std::vector> Find() { + std::vector> found; + + for(auto neodevice : FTDI::FindByProduct(USB_PRODUCT_ID)) + found.push_back(std::make_shared(neodevice)); + + return found; + } +}; + +}; + +#endif \ No newline at end of file diff --git a/device/radsupermoon/include/radsupermoon.h b/device/radsupermoon/include/radsupermoon.h new file mode 100644 index 0000000..b688366 --- /dev/null +++ b/device/radsupermoon/include/radsupermoon.h @@ -0,0 +1,34 @@ +#ifndef __RADSUPERMOON_H_ +#define __RADSUPERMOON_H_ + +#include "device/include/device.h" +#include "platform/include/ftdi.h" + +namespace icsneo { + +class RADSupermoon : public Device { +public: + // Serial numbers start with VV + static constexpr const char* PRODUCT_NAME = "RADSupermoon"; + static constexpr const uint16_t USB_PRODUCT_ID = 0x1201; + RADSupermoon(neodevice_t neodevice) : Device(neodevice) { + com = std::make_shared(std::make_shared(getWritableNeoDevice())); + com->setAlign16Bit(false); + setProductName(PRODUCT_NAME); + usbProductId = USB_PRODUCT_ID; + } + // RSM does not connect at all yet (needs FTDI D3xx driver, not the 2xx compatible one) + + static std::vector> Find() { + std::vector> found; + + for(auto neodevice : FTDI::FindByProduct(USB_PRODUCT_ID)) + found.push_back(std::make_shared(neodevice)); + + return found; + } +}; + +}; + +#endif \ No newline at end of file diff --git a/device/valuecan3/include/valuecan3.h b/device/valuecan3/include/valuecan3.h new file mode 100644 index 0000000..6b68065 --- /dev/null +++ b/device/valuecan3/include/valuecan3.h @@ -0,0 +1,31 @@ +#ifndef __VALUECAN3_H_ +#define __VALUECAN3_H_ + +#include "device/include/device.h" +#include "platform/include/ftdi.h" + +namespace icsneo { + +class ValueCAN3 : public Device { +public: + static constexpr const char* PRODUCT_NAME = "ValueCAN 3"; + static constexpr const uint16_t USB_PRODUCT_ID = 0x0601; + ValueCAN3(neodevice_t neodevice) : Device(neodevice) { + com = std::make_shared(std::make_shared(getWritableNeoDevice())); + setProductName(PRODUCT_NAME); + usbProductId = USB_PRODUCT_ID; + } + + static std::vector> Find() { + std::vector> found; + + for(auto neodevice : FTDI::FindByProduct(USB_PRODUCT_ID)) + found.push_back(std::make_shared(neodevice)); + + return found; + } +}; + +}; + +#endif \ No newline at end of file diff --git a/device/valuecan4/include/valuecan4.h b/device/valuecan4/include/valuecan4.h new file mode 100644 index 0000000..e663f6d --- /dev/null +++ b/device/valuecan4/include/valuecan4.h @@ -0,0 +1,32 @@ +#ifndef __VALUECAN4_H_ +#define __VALUECAN4_H_ + +#include "device/include/device.h" +#include "platform/include/stm32.h" + +namespace icsneo { + +class ValueCAN4 : public Device { +public: + // Serial numbers are V0 for 4-4, VE for 4-2EL, V2 for 4-2, and V1 for 4-1 + static constexpr const char* PRODUCT_NAME = 
"ValueCAN 4"; + static constexpr const uint16_t USB_PRODUCT_ID = 0x1101; + ValueCAN4(neodevice_t neodevice) : Device(neodevice) { + com = std::make_shared(std::make_shared(getWritableNeoDevice())); + setProductName(PRODUCT_NAME); + usbProductId = USB_PRODUCT_ID; + } + + static std::vector> Find() { + std::vector> found; + + for(auto neodevice : STM32::FindByProduct(USB_PRODUCT_ID)) + found.push_back(std::make_shared(neodevice)); + + return found; + } +}; + +}; + +#endif \ No newline at end of file diff --git a/device/vividcan/include/vividcan.h b/device/vividcan/include/vividcan.h new file mode 100644 index 0000000..902bc0a --- /dev/null +++ b/device/vividcan/include/vividcan.h @@ -0,0 +1,34 @@ +#ifndef __VIVIDCAN_H_ +#define __VIVIDCAN_H_ + +#include "device/include/device.h" +#include "platform/include/stm32.h" + +namespace icsneo { + +class VividCAN : public Device { +public: + // Serial numbers start with VV + static constexpr const char* PRODUCT_NAME = "VividCAN"; + static constexpr const uint16_t USB_PRODUCT_ID = 0x1102; + VividCAN(neodevice_t neodevice) : Device(neodevice) { + com = std::make_shared(std::make_shared(getWritableNeoDevice())); + setProductName(PRODUCT_NAME); + usbProductId = USB_PRODUCT_ID; + } + + bool goOnline() { return false; } + + static std::vector> Find() { + std::vector> found; + + for(auto neodevice : STM32::FindByProduct(USB_PRODUCT_ID)) + found.push_back(std::make_shared(neodevice)); + + return found; + } +}; + +}; + +#endif \ No newline at end of file diff --git a/platform/include/dynamiclib.h b/platform/include/dynamiclib.h new file mode 100644 index 0000000..4152230 --- /dev/null +++ b/platform/include/dynamiclib.h @@ -0,0 +1,12 @@ +#ifndef __DYNAMICLIB_H_ +#define __DYNAMICLIB_H_ + +#if defined _WIN32 +#include "platform/windows/include/dynamiclib.h" +#elif defined __linux__ +#include "platform/linux/include/dynamiclib.h" +#else +#warning "This platform is not supported by the dynamic library driver" +#endif + +#endif \ No newline at end of file diff --git a/platform/include/ftdi.h b/platform/include/ftdi.h new file mode 100644 index 0000000..b35d465 --- /dev/null +++ b/platform/include/ftdi.h @@ -0,0 +1,14 @@ +#ifndef __FTDI_H_ +#define __FTDI_H_ + +#define INTREPID_USB_VENDOR_ID (0x093c) + +#if defined _WIN32 +#include "platform/windows/include/ftdi.h" +#elif defined __linux__ +#include "platform/linux/include/ftdi.h" +#else +#warning "This platform is not supported by the FTDI driver" +#endif + +#endif \ No newline at end of file diff --git a/platform/include/registry.h b/platform/include/registry.h new file mode 100644 index 0000000..0995e49 --- /dev/null +++ b/platform/include/registry.h @@ -0,0 +1,10 @@ +#ifndef __REGISTRY_H_ +#define __REGISTRY_H_ + +#if defined _WIN32 +#include "platform/windows/include/registry.h" +#else +#warning "This platform is not supported by the registry driver" +#endif + +#endif \ No newline at end of file diff --git a/platform/include/stm32.h b/platform/include/stm32.h new file mode 100644 index 0000000..ef2ecc1 --- /dev/null +++ b/platform/include/stm32.h @@ -0,0 +1,14 @@ +#ifndef __STM32_H_ +#define __STM32_H_ + +#define INTREPID_USB_VENDOR_ID (0x093c) + +#if defined _WIN32 +#include "platform/windows/include/stm32.h" +#elif defined __linux__ +#include "platform/linux/include/stm32.h" +#else +#warning "This platform is not supported by the STM32 driver" +#endif + +#endif \ No newline at end of file diff --git a/platform/linux/ftdi.cpp b/platform/linux/ftdi.cpp new file mode 100644 index 0000000..ec4ae2e --- /dev/null 
+++ b/platform/linux/ftdi.cpp @@ -0,0 +1,125 @@ +#include "platform/linux/include/ftdi.h" +#include "platform/include/ftdi.h" +#include +#include +#include +#include + +using namespace icsneo; + +// Instantiate static variables +neodevice_handle_t FTDI::handleCounter = 1; +Ftdi::Context FTDI::context; +std::vector FTDI::searchResultDevices; + +/* Theory: Ftdi::List::find_all gives us back Ftdi::Context objects, but these can't be passed + * back and forth with C nicely. So we wrap the Ftdi::Context objects in FTDIDevice classes which + * will give it a nice neodevice_handle_t handle that we can reference it by. These FTDIDevice objects are + * stored in searchResultDevices, and then moved into the instantiated FTDI class by the constructor. + */ +std::vector FTDI::FindByProduct(int product) { + constexpr size_t deviceSerialBufferLength = sizeof(device.serial); + std::vector found; + + auto devlist = std::unique_ptr(Ftdi::List::find_all(context, INTREPID_USB_VENDOR_ID, product)); + searchResultDevices.clear(); + for(auto it = devlist->begin(); it != devlist->end(); it++) + searchResultDevices.push_back(*it); // The upconversion to FTDIDevice will assign a handle + + for(auto& dev : searchResultDevices) { + neodevice_t d; + auto& serial = dev.serial(); + strncpy(d.serial, serial.c_str(), deviceSerialBufferLength - 1); + d.serial[deviceSerialBufferLength - 1] = '\0'; // strncpy does not write a null terminator if serial is too long + d.handle = dev.handle; + found.push_back(d); + } + + return found; +} + +bool FTDI::IsHandleValid(neodevice_handle_t handle) { + for(auto& dev : searchResultDevices) { + if(dev.handle != handle) + continue; + + return true; + } + return false; +} + +bool FTDI::GetDeviceForHandle(neodevice_handle_t handle, FTDIDevice& device) { + for(auto& dev : searchResultDevices) { + if(dev.handle != handle) + continue; + + device = dev; + return true; + } + return false; +} + +FTDI::FTDI(neodevice_t& forDevice) : device(forDevice) { + openable = GetDeviceForHandle(forDevice.handle, ftdiDevice); +} + +bool FTDI::open() { + if(isOpen() || !openable) + return false; + + if(ftdiDevice.open()) + return false; + + ftdiDevice.set_usb_read_timeout(100); + ftdiDevice.set_usb_write_timeout(1000); + ftdiDevice.reset(); + ftdiDevice.set_baud_rate(500000); + ftdiDevice.flush(); + + // Create threads + closing = false; + readThread = std::thread(&FTDI::readTask, this); + writeThread = std::thread(&FTDI::writeTask, this); + + return true; +} + +bool FTDI::close() { + if(!isOpen()) + return false; + + closing = true; + + if(readThread.joinable()) + readThread.join(); + + if(writeThread.joinable()) + writeThread.join(); + + ftdiDevice.set_dtr(false); + + if(ftdiDevice.close()) + return false; + + return true; +} + +void FTDI::readTask() { + constexpr size_t READ_BUFFER_SIZE = 8; + uint8_t readbuf[READ_BUFFER_SIZE]; + while(!closing) { + auto readBytes = ftdiDevice.read(readbuf, READ_BUFFER_SIZE); + if(readBytes > 0) + readQueue.enqueue_bulk(readbuf, readBytes); + } +} + +void FTDI::writeTask() { + WriteOperation writeOp; + while(!closing) { + if(!writeQueue.wait_dequeue_timed(writeOp, std::chrono::milliseconds(100))) + continue; + + ftdiDevice.write(writeOp.bytes.data(), (int)writeOp.bytes.size()); + } +} \ No newline at end of file diff --git a/platform/linux/include/dynamiclib.h b/platform/linux/include/dynamiclib.h new file mode 100644 index 0000000..5a0e6ee --- /dev/null +++ b/platform/linux/include/dynamiclib.h @@ -0,0 +1,19 @@ +#ifndef __DYNAMICLIB_H_LINUX_ +#define __DYNAMICLIB_H_LINUX_ 
+ +#include + +// Nothing special is needed to export +#define DLLExport + +// #ifndef ICSNEO_NO_AUTO_DESTRUCT +// #define ICSNEO_DESTRUCTOR __attribute__((destructor)); +// #else +#define ICSNEO_DESTRUCTOR +// #endif + +#define icsneoDynamicLibraryLoad() dlopen("/media/paulywog/Windows 10/Users/phollinsky/Code/icsneonext/build/libicsneoc.so", RTLD_LAZY) +#define icsneoDynamicLibraryGetFunction(handle, func) dlsym(handle, func) +#define icsneoDynamicLibraryClose(handle) (dlclose(handle) == 0) + +#endif \ No newline at end of file diff --git a/platform/linux/include/ftdi.h b/platform/linux/include/ftdi.h new file mode 100644 index 0000000..6f52cd1 --- /dev/null +++ b/platform/linux/include/ftdi.h @@ -0,0 +1,51 @@ +#ifndef __FTDI_H_LINUX_ +#define __FTDI_H_LINUX_ + +#include +#include +#include +#include +#include +#include "device/include/neodevice.h" +#include "communication/include/icommunication.h" +#include "third-party/concurrentqueue/blockingconcurrentqueue.h" + +namespace icsneo { + +class FTDI : public ICommunication { +public: + static constexpr neodevice_handle_t INVALID_HANDLE = 0x7fffffff; // int32_t max value + static std::vector FindByProduct(int product); + static bool IsHandleValid(neodevice_handle_t handle); + + FTDI(neodevice_t& forDevice); + ~FTDI() { close(); } + bool open(); + bool close(); + bool isOpen() { return ftdiDevice.is_open(); } + +private: + static Ftdi::Context context; + static neodevice_handle_t handleCounter; + class FTDIDevice : public Ftdi::Context { + public: + FTDIDevice() {} + FTDIDevice(const Ftdi::Context &x) : Ftdi::Context(x) { + handle = handleCounter++; + } + neodevice_handle_t handle = INVALID_HANDLE; + }; + static std::vector searchResultDevices; + static bool GetDeviceForHandle(neodevice_handle_t handle, FTDIDevice& device); + + void readTask(); + void writeTask(); + bool openable; // Set to false in the constructor if the object has not been found in searchResultDevices + + neodevice_t& device; + FTDIDevice ftdiDevice; +}; + +}; + +#endif \ No newline at end of file diff --git a/platform/linux/include/stm32.h b/platform/linux/include/stm32.h new file mode 100644 index 0000000..9b1e64b --- /dev/null +++ b/platform/linux/include/stm32.h @@ -0,0 +1,31 @@ +#ifndef __STM32_LINUX_H_ +#define __STM32_LINUX_H_ + +#include "communication/include/icommunication.h" +#include "device/include/neodevice.h" +#include +#include + +namespace icsneo { + +class STM32 : public ICommunication { +public: + STM32(neodevice_t& forDevice) : device(forDevice) {} + static std::vector FindByProduct(int product); + + bool open(); + bool isOpen(); + bool close(); + +private: + neodevice_t& device; + int fd = -1; + static constexpr neodevice_handle_t HANDLE_OFFSET = 10; + + void readTask(); + void writeTask(); +}; + +}; + +#endif \ No newline at end of file diff --git a/platform/linux/stm32.cpp b/platform/linux/stm32.cpp new file mode 100644 index 0000000..234688a --- /dev/null +++ b/platform/linux/stm32.cpp @@ -0,0 +1,283 @@ +#include "platform/include/stm32.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace icsneo; + +class Directory { +public: + class Listing { + public: + Listing(std::string newName, uint8_t newType) : name(newName), type(newType) {} + const std::string& getName() const { return name; } + uint8_t getType() const { return type; } + private: + std::string name; + uint8_t type; + }; + Directory(std::string directory) { + dir = opendir(directory.c_str()); + } + ~Directory() 
{ + if(openedSuccessfully()) + closedir(dir); + dir = nullptr; + } + bool openedSuccessfully() { return dir != nullptr; } + std::vector ls() { + std::vector results; + struct dirent* entry; + while((entry = readdir(dir)) != nullptr) { + std::string name = entry->d_name; + if(name != "." && name != "..") // Ignore parent and self + results.emplace_back(name, entry->d_type); + } + return results; + } +private: + DIR* dir; +}; + +class USBSerialGetter { +public: + USBSerialGetter(std::string usbid) { + std::stringstream ss; + auto colonpos = usbid.find(":"); + if(colonpos == std::string::npos) { + succeeded = false; + return; + } + + ss << "/sys/bus/usb/devices/" << usbid.substr(0, colonpos) << "/serial"; + try { + std::ifstream reader(ss.str()); + std::getline(reader, serial); + } catch(...) { + succeeded = false; + return; + } + + succeeded = true; + } + bool success() const { return succeeded; } + const std::string& getSerial() const { return serial; } +private: + bool succeeded; + std::string serial; +}; + +std::vector STM32::FindByProduct(int product) { + std::vector found; + + Directory directory("/sys/bus/usb/drivers/cdc_acm"); // Query the STM32 driver + if(!directory.openedSuccessfully()) + return found; + + std::vector foundusbs; + for(auto& entry : directory.ls()) { + /* This directory will have directories (links) for all devices using the cdc_acm driver (as STM32 devices do) + * There will also be other files and directories providing information about the driver in here. We want to ignore them. + * Devices will be named like "7-2:1.0" where 7 is the enumeration for the USB controller, 2 is the device enumeration on + * that specific controller (will change if the device is unplugged and replugged), 1 is the device itself and 0 is + * enumeration for different services provided by the device. We're looking for the service that provides TTY. + * For now we find the directories with a digit for the first character, these are likely to be our USB devices. + */ + if(isdigit(entry.getName()[0]) && entry.getType() == DT_LNK) + foundusbs.emplace_back(entry.getName()); + } + + // Pair the USB and TTY if found + std::map foundttys; + for(auto& usb : foundusbs) { + std::stringstream ss; + ss << "/sys/bus/usb/drivers/cdc_acm/" << usb << "/tty"; + Directory devicedir(ss.str()); + if(!devicedir.openedSuccessfully()) // The tty directory doesn't exist, because this is not the tty service we want + continue; + + auto listing = devicedir.ls(); + if(listing.size() != 1) // We either got no serial ports or multiple, either way no good + continue; + + foundttys.insert(std::make_pair(usb, listing[0].getName())); + } + + // We're going to remove from the map if this is not the product we're looking for + for(auto iter = foundttys.begin(); iter != foundttys.end(); ) { + const auto& dev = *iter; + const std::string matchString = "PRODUCT="; + std::stringstream ss; + ss << "/sys/class/tty/" << dev.second << "/device/uevent"; // Read the uevent file, which contains should have a line like "PRODUCT=93c/1101/100" + std::ifstream fs(ss.str()); + std::string productLine; + size_t pos = std::string::npos; + do { + std::getline(fs, productLine, '\n'); + } while(((pos = productLine.find(matchString)) == std::string::npos) && !fs.eof()); + + if(pos != 0) { // We did not find a product line... 
weird + iter = foundttys.erase(iter); // Remove the element, this also moves iter forward for us + continue; + } + + size_t firstSlashPos = productLine.find('/', matchString.length()); + if(firstSlashPos == std::string::npos) { + iter = foundttys.erase(iter); + continue; + } + size_t pidpos = firstSlashPos + 1; + + std::string vidstr = productLine.substr(matchString.length(), firstSlashPos - matchString.length()); + std::string pidstr = productLine.substr(pidpos, productLine.find('/', pidpos) - pidpos); // In hex like "1101" or "93c" + + uint16_t vid, pid; + try { + vid = (uint16_t)std::stoul(vidstr, nullptr, 16); + pid = (uint16_t)std::stoul(pidstr, nullptr, 16); + } catch(...) { + iter = foundttys.erase(iter); // We could not parse the numbers + continue; + } + + if(vid != INTREPID_USB_VENDOR_ID || pid != product) { + iter = foundttys.erase(iter); // Not the right VID or PID, remove + continue; + } + iter++; // If the loop ends without erasing the iter from the map, the item is good + } + + // At this point, foundttys contains the the devices we want + + // Get the serial number, create the neodevice_t + for(auto& dev : foundttys) { + neodevice_t device; + + USBSerialGetter getter(dev.first); + if(!getter.success()) + continue; // Failure, could not get serial number + + // In ttyACM0, we want the i to be the first character of the number + size_t i; + for(i = 0; i < dev.second.length(); i++) { + if(isdigit(dev.second[i])) + break; + } + // Now we try to parse the number so we have a handle for later + try { + device.handle = (neodevice_handle_t)std::stoul(dev.second.substr(i)); + /* The TTY numbering starts at zero, but we want to keep zero for an undefined + * handle, so add a constant, and we'll subtract that constant in the open function. + */ + device.handle += HANDLE_OFFSET; + } catch(...) { + continue; // Somehow this failed, have to toss the device + } + + device.serial[getter.getSerial().copy(device.serial, sizeof(device.serial)-1)] = '\0'; + + found.push_back(device); // Finally, add device to search results + } + + return found; +} + +bool STM32::open() { + std::stringstream ss; + ss << "/dev/ttyACM" << (int)(device.handle - HANDLE_OFFSET); + fd = ::open(ss.str().c_str(), O_RDWR | O_NOCTTY | O_SYNC); + if(!isOpen()) { + std::cout << "Open of " << ss.str().c_str() << " failed with " << strerror(errno) << ' '; + return false; + } + + struct termios tty; + + if(tcgetattr(fd, &tty) < 0) { + close(); + return false; + } + + cfsetspeed(&tty, B500000); // Set speed to 500kbaud + + tty.c_cflag |= (CLOCAL | CREAD); // Ignore modem controls + tty.c_cflag &= ~CSIZE; + tty.c_cflag |= CS8; // 8-bit characters + tty.c_cflag &= ~PARENB; // No parity bit + tty.c_cflag &= ~CSTOPB; // One stop bit + tty.c_cflag &= ~CRTSCTS; // No hardware flow control + + // Non-canonical mode + tty.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON); + tty.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN); + tty.c_oflag &= ~OPOST; + + // Fetch bytes as they become available + // See http://man7.org/linux/man-pages/man3/termios.3.html + tty.c_cc[VMIN] = 0; + tty.c_cc[VTIME] = 1; // 100ms timeout (1 decisecond, what?) 
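+ // (VTIME is counted in tenths of a second, so 1 means a ::read() with no data pending
+ //  returns after roughly 100ms; combined with VMIN = 0 it also returns as soon as any
+ //  bytes arrive, which is what the read thread below relies on to poll for shutdown.)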
+ + if(tcsetattr(fd, TCSAFLUSH, &tty) != 0) { // Flushes input and output buffers as well as setting settings + close(); + return false; + } + + // Create threads + readThread = std::thread(&STM32::readTask, this); + writeThread = std::thread(&STM32::writeTask, this); + + return true; +} + +bool STM32::isOpen() { + return fd >= 0; // Negative fd indicates error or not opened yet +} + +bool STM32::close() { + if(!isOpen()) + return false; + + closing = true; + + if(readThread.joinable()) + readThread.join(); + + if(writeThread.joinable()) + writeThread.join(); + + int ret = ::close(fd); + fd = -1; + + return ret == 0; +} + +void STM32::readTask() { + constexpr size_t READ_BUFFER_SIZE = 8; + uint8_t readbuf[READ_BUFFER_SIZE]; + while(!closing) { + auto bytesRead = ::read(fd, readbuf, READ_BUFFER_SIZE); + if(bytesRead > 0) + readQueue.enqueue_bulk(readbuf, bytesRead); + } +} + +void STM32::writeTask() { + WriteOperation writeOp; + while(!closing) { + if(!writeQueue.wait_dequeue_timed(writeOp, std::chrono::milliseconds(100))) + continue; + + const auto writeSize = writeOp.bytes.size(); + int actualWritten = ::write(fd, writeOp.bytes.data(), writeSize); + if(actualWritten != writeSize) + std::cout << "Failure to write " << writeSize << " bytes, wrote " << actualWritten << std::endl; + } +} \ No newline at end of file diff --git a/platform/windows/include/dynamiclib.h b/platform/windows/include/dynamiclib.h new file mode 100644 index 0000000..6f38c7d --- /dev/null +++ b/platform/windows/include/dynamiclib.h @@ -0,0 +1,19 @@ +#ifndef __DYNAMICLIB_H_WINDOWS_ +#define __DYNAMICLIB_H_WINDOWS_ + +#include + +#ifdef ICSNEOC_MAKEDLL +#define DLLExport __declspec(dllexport) +#else +#define DLLExport __declspec(dllimport) +#endif + +// MSVC does not have the ability to specify a destructor +#define ICSNEO_DESTRUCTOR + +#define icsneoDynamicLibraryLoad() LoadLibrary(L"C:\\Users\\Phollinsky\\Code\\icsneonext\\build\\icsneoc.dll") +#define icsneoDynamicLibraryGetFunction(handle, func) GetProcAddress((HMODULE) handle, func) +#define icsneoDynamicLibraryClose(handle) FreeLibrary((HMODULE) handle) + +#endif \ No newline at end of file diff --git a/platform/windows/include/ftdi.h b/platform/windows/include/ftdi.h new file mode 100644 index 0000000..033488a --- /dev/null +++ b/platform/windows/include/ftdi.h @@ -0,0 +1,16 @@ +#ifndef __FTDI_WINDOWS_H_ +#define __FTDI_WINDOWS_H_ + +#include "platform/windows/include/vcp.h" + +namespace icsneo { + +class FTDI : public VCP { +public: + FTDI(neodevice_t& forDevice) : VCP(forDevice) {} + static std::vector FindByProduct(int product) { return VCP::FindByProduct(product, L"serenum"); } +}; + +}; + +#endif \ No newline at end of file diff --git a/platform/windows/include/registry.h b/platform/windows/include/registry.h new file mode 100644 index 0000000..c4159cf --- /dev/null +++ b/platform/windows/include/registry.h @@ -0,0 +1,33 @@ +#ifndef __REGISTRY_H_WINDOWS_ +#define __REGISTRY_H_WINDOWS_ + +#include +#include + +namespace icsneo { + +class Registry { +public: + // Get string value + static bool Get(std::wstring path, std::wstring key, std::wstring& value); + static bool Get(std::string path, std::string key, std::string& value); + + // Get DWORD value + static bool Get(std::wstring path, std::wstring key, uint32_t& value); + static bool Get(std::string path, std::string key, uint32_t& value); + +private: + class Key { + public: + Key(std::wstring path, bool readwrite = false); + ~Key(); + HKEY GetKey() { return key; } + bool IsOpen() { return key != nullptr; } + 
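+ // RAII helper: the constructor opens (or, when readwrite is set, creates) the key under
+ // HKEY_LOCAL_MACHINE, the destructor closes it, and IsOpen() reports whether the open succeeded.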
private: + HKEY key; + }; +}; + +}; + +#endif \ No newline at end of file diff --git a/platform/windows/include/stm32.h b/platform/windows/include/stm32.h new file mode 100644 index 0000000..fc44a7a --- /dev/null +++ b/platform/windows/include/stm32.h @@ -0,0 +1,16 @@ +#ifndef __STM32_WINDOWS_H_ +#define __STM32_WINDOWS_H_ + +#include "platform/windows/include/vcp.h" + +namespace icsneo { + +class STM32 : public VCP { +public: + STM32(neodevice_t& forDevice) : VCP(forDevice) {} + static std::vector FindByProduct(int product) { return VCP::FindByProduct(product, L"usbser"); } +}; + +}; + +#endif \ No newline at end of file diff --git a/platform/windows/include/vcp.h b/platform/windows/include/vcp.h new file mode 100644 index 0000000..df4a237 --- /dev/null +++ b/platform/windows/include/vcp.h @@ -0,0 +1,48 @@ +#ifndef __VCP_H_WINDOWS_ +#define __VCP_H_WINDOWS_ + +#include +#include +#include +#include +#include +#include +#include "device/include/neodevice.h" +#include "communication/include/icommunication.h" + +namespace icsneo { + +// Virtual COM Port Communication +class VCP : public ICommunication { +public: + static std::vector FindByProduct(int product, wchar_t* driverName); + static bool IsHandleValid(neodevice_handle_t handle); + typedef void(*fn_boolCallback)(bool success); + + VCP(neodevice_t& forDevice) : device(forDevice) { + overlappedRead.hEvent = INVALID_HANDLE_VALUE; + overlappedWrite.hEvent = INVALID_HANDLE_VALUE; + overlappedWait.hEvent = INVALID_HANDLE_VALUE; + } + ~VCP() { close(); } + bool open() { return open(false); } + void openAsync(fn_boolCallback callback); + bool close(); + bool isOpen() { return handle != INVALID_HANDLE_VALUE; } + +private: + bool open(bool fromAsync); + bool opening = false; + neodevice_t& device; + HANDLE handle = INVALID_HANDLE_VALUE; + OVERLAPPED overlappedRead = {}; + OVERLAPPED overlappedWrite = {}; + OVERLAPPED overlappedWait = {}; + std::vector> threads; + void readTask(); + void writeTask(); +}; + +}; + +#endif \ No newline at end of file diff --git a/platform/windows/registry.cpp b/platform/windows/registry.cpp new file mode 100644 index 0000000..93dd0f0 --- /dev/null +++ b/platform/windows/registry.cpp @@ -0,0 +1,68 @@ +#include "platform/windows/include/registry.h" +#include +#include + +using namespace icsneo; + +static std::wstring_convert> converter; + +Registry::Key::Key(std::wstring path, bool readwrite) { + DWORD dwDisposition; + if(readwrite) + RegCreateKeyExW(HKEY_LOCAL_MACHINE, path.c_str(), 0, nullptr, 0, KEY_QUERY_VALUE | KEY_WRITE, nullptr, &key, &dwDisposition); + else + RegOpenKeyExW(HKEY_LOCAL_MACHINE, path.c_str(), 0, KEY_READ, &key); +} + +Registry::Key::~Key() { + if(IsOpen()) + RegCloseKey(key); +} + +bool Registry::Get(std::wstring path, std::wstring key, std::wstring& value) { + Key regKey(path); + if(!regKey.IsOpen()) + return false; + + // Query for the type and size of the data + DWORD type, size; + auto ret = RegQueryValueExW(regKey.GetKey(), key.c_str(), nullptr, &type, (LPBYTE)nullptr, &size); + if(ret != ERROR_SUCCESS) + return false; + + // Query for the data itself + std::vector data(size / 2 + 1); + DWORD bytesRead = size; // We want to read up to the size we got earlier + ret = RegQueryValueExW(regKey.GetKey(), key.c_str(), nullptr, &type, (LPBYTE)data.data(), &bytesRead); + if(ret != ERROR_SUCCESS) + return false; + + value = data.data(); + return true; +} + +bool Registry::Get(std::string path, std::string key, std::string& value) { + std::wstring wvalue; + bool ret = Get(converter.from_bytes(path), 
converter.from_bytes(key), wvalue); + value = converter.to_bytes(wvalue); + return ret; +} + +bool Registry::Get(std::wstring path, std::wstring key, uint32_t& value) { + Key regKey(path); + if(!regKey.IsOpen()) + return false; + + // Query for the data + DWORD type, size, kvalue; + auto ret = RegQueryValueExW(regKey.GetKey(), key.c_str(), nullptr, &type, (LPBYTE)&kvalue, &size); + if(ret != ERROR_SUCCESS || type != REG_DWORD) + return false; + + value = kvalue; + return true; +} + +bool Registry::Get(std::string path, std::string key, uint32_t& value) { + return Get(converter.from_bytes(path), converter.from_bytes(key), value); +} \ No newline at end of file diff --git a/platform/windows/vcp.cpp b/platform/windows/vcp.cpp new file mode 100644 index 0000000..3d19f5a --- /dev/null +++ b/platform/windows/vcp.cpp @@ -0,0 +1,329 @@ +#include "platform/windows/include/ftdi.h" +#include "platform/include/ftdi.h" +#include "platform/include/registry.h" +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace icsneo; + +static std::wstring_convert> converter; +static const std::wstring DRIVER_SERVICES_REG_KEY = L"SYSTEM\\CurrentControlSet\\services\\"; +static const std::wstring ALL_ENUM_REG_KEY = L"SYSTEM\\CurrentControlSet\\Enum\\"; +static constexpr unsigned int RETRY_TIMES = 5; +static constexpr unsigned int RETRY_DELAY = 50; + +std::vector VCP::FindByProduct(int product, wchar_t* driverName) { + std::vector found; + + std::wstringstream regss; + regss << DRIVER_SERVICES_REG_KEY << driverName << L"\\Enum\\"; + std::wstring driverEnumRegKey = regss.str(); + + uint32_t deviceCount = 0; + if(!Registry::Get(driverEnumRegKey, L"Count", deviceCount)) { + return found; + } + + for(uint32_t i = 0; i < deviceCount; i++) { + neodevice_t device = {}; + + // First we want to look at what devices FTDI is enumerating (inside driverEnumRegKey) + // The entry for a ValueCAN 3 with SN 138635 looks like "FTDIBUS\VID_093C+PID_0601+138635A\0000" + // The entry for a ValueCAN 4 with SN V20227 looks like "USB\VID_093C&PID_1101\V20227" + std::wstringstream ss; + ss << i; + std::wstring entry; + if(!Registry::Get(driverEnumRegKey, ss.str(), entry)) + continue; + + std::transform(entry.begin(), entry.end(), entry.begin(), std::towupper); + + std::wstringstream vss; + vss << "VID_" << std::setfill(L'0') << std::setw(4) << std::uppercase << std::hex << INTREPID_USB_VENDOR_ID; // Intrepid Vendor ID + if(entry.find(vss.str()) == std::wstring::npos) + continue; + + std::wstringstream pss; + pss << "PID_" << std::setfill(L'0') << std::setw(4) << std::uppercase << std::hex << product; + auto pidpos = entry.find(pss.str()); + if(pidpos == std::wstring::npos) + continue; + + // Okay, this is a device we want + // Get the serial number + auto startchar = entry.find(L"+", pidpos + 1); + if(startchar == std::wstring::npos) + startchar = entry.find(L"\\", pidpos + 1); + bool conversionError = false; + int sn = 0; + try { + sn = std::stoi(entry.substr(startchar + 1)); + } catch(...) 
{ + conversionError = true; + } + + std::wstringstream oss; + if(!sn || conversionError) { + // This is a device with characters in the serial number + oss << entry.substr(startchar + 1, 6); + } else { + oss << sn; + } + + strcpy_s(device.serial, sizeof(device.serial), converter.to_bytes(oss.str()).c_str()); + + // Serial number is saved, we want the COM port number now + // This will be stored under ALL_ENUM_REG_KEY\entry\Device Parameters\PortName (entry from the FTDI_ENUM) + std::wstringstream dpss; + dpss << ALL_ENUM_REG_KEY << entry << L"\\Device Parameters"; + std::wstring port; + Registry::Get(dpss.str(), L"PortName", port); // TODO If error do something else (Plasma maybe?) + std::transform(port.begin(), port.end(), port.begin(), std::towupper); + auto compos = port.find(L"COM"); + device.handle = 0; + if(compos != std::wstring::npos) { + try { + device.handle = std::stoi(port.substr(compos + 3)); + } catch(...) {} // In case of this, or any other error, handle has already been initialized to 0 + } + + found.push_back(device); + } + + return found; +} + +bool VCP::IsHandleValid(neodevice_handle_t handle) { + if(handle < 1) + return false; + + if(handle > 256) // Windows default max COM port is COM256 + return false; // TODO Enumerate subkeys of HKLM\HARDWARE\DEVICEMAP\SERIALCOMM as a user might have more serial ports somehow + + return true; +} + +bool VCP::open(bool fromAsync) { + if(isOpen() || (!fromAsync && opening)) + return false; + + if(!IsHandleValid(device.handle)) + return false; + + opening = true; + + std::wstringstream comss; + comss << L"\\\\.\\COM" << device.handle; + + // We're going to attempt to open 5 (RETRY_TIMES) times in a row + for(int i = 0; !isOpen() && i < RETRY_TIMES; i++) { + handle = CreateFileW(comss.str().c_str(), GENERIC_READ | GENERIC_WRITE, 0, nullptr, OPEN_EXISTING, FILE_FLAG_OVERLAPPED, nullptr); + if(GetLastError() == ERROR_SUCCESS) + break; // We have the file handle + + std::this_thread::sleep_for(std::chrono::milliseconds(RETRY_DELAY)); + } + + opening = false; + + if(!isOpen()) + return false; + + // Set the timeouts + COMMTIMEOUTS timeouts; + if(!GetCommTimeouts(handle, &timeouts)) { + close(); + return false; + } + + timeouts.WriteTotalTimeoutConstant = 10000; + timeouts.WriteTotalTimeoutMultiplier = 0; + + if(!SetCommTimeouts(handle, &timeouts)) { + close(); + return false; + } + + // Set the COM state + DCB comstate; + if(!GetCommState(handle, &comstate)) { + close(); + return false; + } + + comstate.BaudRate = 115200; + comstate.ByteSize = 8; + comstate.Parity = NOPARITY; + comstate.StopBits = 0; + comstate.fDtrControl = DTR_CONTROL_ENABLE; + comstate.fRtsControl = RTS_CONTROL_ENABLE; + + if(!SetCommState(handle, &comstate)) { + close(); + return false; + } + + PurgeComm(handle, PURGE_RXCLEAR); + + // Set up events so that overlapped IO can work with them + overlappedRead.hEvent = CreateEvent(nullptr, false, false, nullptr); + overlappedWrite.hEvent = CreateEvent(nullptr, false, false, nullptr); + overlappedWait.hEvent = CreateEvent(nullptr, true, false, nullptr); + if (overlappedRead.hEvent == nullptr || overlappedWrite.hEvent == nullptr || overlappedWait.hEvent == nullptr) { + close(); + return false; + } + + // Set up event so that we will satisfy overlappedWait when a character comes in + if(!SetCommMask(handle, EV_RXCHAR)) { + close(); + return false; + } + + // TODO Set up some sort of shared memory, save which COM port we have open so we don't try to open it again + + // Create threads + readThread = std::thread(&VCP::readTask, 
this); + writeThread = std::thread(&VCP::writeTask, this); + + return true; +} + +void VCP::openAsync(fn_boolCallback callback) { + threads.push_back(std::make_shared([&]() { + callback(open(true)); + })); +} + +bool VCP::close() { + if(!isOpen()) + return false; + + closing = true; // Signal the threads that we are closing + for(auto& t : threads) + t->join(); // Wait for the threads to close + readThread.join(); + writeThread.join(); + + if(!CloseHandle(handle)) + return false; + + handle = INVALID_HANDLE_VALUE; + + bool ret = true; // If one of the events fails closing, we probably still want to try and close the others + if(overlappedRead.hEvent != INVALID_HANDLE_VALUE) { + if(!CloseHandle(overlappedRead.hEvent)) + ret = false; + } + if(overlappedWrite.hEvent != INVALID_HANDLE_VALUE) { + if(!CloseHandle(overlappedWrite.hEvent)) + ret = false; + } + if(overlappedWait.hEvent != INVALID_HANDLE_VALUE) { + if(!CloseHandle(overlappedWait.hEvent)) + ret = false; + } + + // TODO Set up some sort of shared memory, free which COM port we had open so we can try to open it again + + return ret; +} + +void VCP::readTask() { + constexpr size_t READ_BUFFER_SIZE = 8; + uint8_t readbuf[READ_BUFFER_SIZE]; + IOTaskState state = LAUNCH; + DWORD bytesRead = 0; + while(!closing) { + switch(state) { + case LAUNCH: { + COMSTAT comStatus; + unsigned long errorCodes; + if(!ClearCommError(handle, &errorCodes, &comStatus)) + std::cout << "Error clearing com err" << std::endl; + + bytesRead = 0; + if(ReadFile(handle, readbuf, READ_BUFFER_SIZE, nullptr, &overlappedRead)) { + if(GetOverlappedResult(handle, &overlappedRead, &bytesRead, FALSE)) { + if(bytesRead) + readQueue.enqueue_bulk(readbuf, bytesRead); + } else { + std::cout <<"Readfile succeeded but not enqueued " << GetLastError() << std::endl; + } + continue; + } + + auto err = GetLastError(); + if(err == ERROR_SUCCESS) + std::cout << "Error was success?" 
<< std::endl; + + if(err == ERROR_IO_PENDING) + state = WAIT; + else + std::cout << "ReadFile failed " << err << std::endl; + } + break; + case WAIT: { + auto ret = WaitForSingleObject(overlappedRead.hEvent, 100); + if(ret == WAIT_OBJECT_0) { + auto err = GetLastError(); + if(GetOverlappedResult(handle, &overlappedRead, &bytesRead, FALSE)) { + readQueue.enqueue_bulk(readbuf, bytesRead); + state = LAUNCH; + } else + std::cout << "ReadFile deferred failed " << err << std::endl; + } + if(ret == WAIT_ABANDONED) { + state = LAUNCH; + std::cout << "Readfile abandoned" << std::endl; + } + } + } + } +} + +void VCP::writeTask() { + IOTaskState state = LAUNCH; + VCP::WriteOperation writeOp; + DWORD bytesWritten = 0; + while(!closing) { + switch(state) { + case LAUNCH: { + if(!writeQueue.wait_dequeue_timed(writeOp, std::chrono::milliseconds(100))) + continue; + + bytesWritten = 0; + if(WriteFile(handle, writeOp.bytes.data(), (DWORD)writeOp.bytes.size(), nullptr, &overlappedWrite)) + continue; + + auto err = GetLastError(); + if(err == ERROR_IO_PENDING) { + state = WAIT; + } + else + std::cout << "Writefile failed " << err << std::endl; + } + break; + case WAIT: { + auto ret = WaitForSingleObject(overlappedWrite.hEvent, 50); + if(ret == WAIT_OBJECT_0) { + if(!GetOverlappedResult(handle, &overlappedWrite, &bytesWritten, FALSE)) { + std::cout << "Writefile deferred failed " << GetLastError() << std::endl; + } + state = LAUNCH; + } + + if(ret == WAIT_ABANDONED) { + std::cout << "Writefile deferred abandoned" << std::endl; + state = LAUNCH; + } + } + } + } +} \ No newline at end of file diff --git a/third-party/concurrentqueue/.gitignore b/third-party/concurrentqueue/.gitignore new file mode 100644 index 0000000..accfe6e --- /dev/null +++ b/third-party/concurrentqueue/.gitignore @@ -0,0 +1,26 @@ +*.ipch +*.suo +*.user +*.sdf +*.opensdf +*.exe +*.pdb +*.vs +*.VC.db +build/bin/ +build/*.log +build/msvc14/*.log +build/msvc14/obj/ +build/msvc12/*.log +build/msvc12/obj/ +build/msvc11/*.log +build/msvc11/obj/ +build/xcode/build/ +tests/fuzztests/fuzztests.log +benchmarks/benchmarks.log +tests/CDSChecker/*.o +tests/CDSChecker/*.log +tests/CDSChecker/model-checker/ +tests/relacy/freelist.exe +tests/relacy/spmchash.exe +tests/relacy/log.txt diff --git a/third-party/concurrentqueue/LICENSE.md b/third-party/concurrentqueue/LICENSE.md new file mode 100644 index 0000000..c4e7588 --- /dev/null +++ b/third-party/concurrentqueue/LICENSE.md @@ -0,0 +1,61 @@ +This license file applies to everything in this repository except that which +is explicitly annotated as being written by other authors, i.e. the Boost +queue (included in the benchmarks for comparison), Intel's TBB library (ditto), +the CDSChecker tool (used for verification), the Relacy model checker (ditto), +and Jeff Preshing's semaphore implementation (used in the blocking queue) which +has a zlib license (embedded in blockingconcurrentqueue.h). + +--- + +Simplified BSD License: + +Copyright (c) 2013-2016, Cameron Desrochers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +- Redistributions of source code must retain the above copyright notice, this list of +conditions and the following disclaimer. +- Redistributions in binary form must reproduce the above copyright notice, this list of +conditions and the following disclaimer in the documentation and/or other materials +provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT +OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +--- + +I have also chosen to dual-license under the Boost Software License as an alternative to +the Simplified BSD license above: + +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third-party/concurrentqueue/README.md b/third-party/concurrentqueue/README.md new file mode 100644 index 0000000..852ccf9 --- /dev/null +++ b/third-party/concurrentqueue/README.md @@ -0,0 +1,486 @@ +# moodycamel::ConcurrentQueue + +An industrial-strength lock-free queue for C++. + +Note: If all you need is a single-producer, single-consumer queue, I have [one of those too][spsc]. + +## Features + +- Knock-your-socks-off [blazing fast performance][benchmarks]. +- Single-header implementation. Just drop it in your project. +- Fully thread-safe lock-free queue. Use concurrently from any number of threads. +- C++11 implementation -- elements are moved (instead of copied) where possible. +- Templated, obviating the need to deal exclusively with pointers -- memory is managed for you. +- No artificial limitations on element types or maximum count. +- Memory can be allocated once up-front, or dynamically as needed. +- Fully portable (no assembly; all is done through standard C++11 primitives). +- Supports super-fast bulk operations. +- Includes a low-overhead blocking version (BlockingConcurrentQueue). +- Exception safe. + +## Reasons to use + +There are not that many full-fledged lock-free queues for C++. 
Boost has one, but it's limited to objects with trivial +assignment operators and trivial destructors, for example. +Intel's TBB queue isn't lock-free, and requires trivial constructors too. +There're many academic papers that implement lock-free queues in C++, but usable source code is +hard to find, and tests even more so. + +This queue not only has less limitations than others (for the most part), but [it's also faster][benchmarks]. +It's been fairly well-tested, and offers advanced features like **bulk enqueueing/dequeueing** +(which, with my new design, is much faster than one element at a time, approaching and even surpassing +the speed of a non-concurrent queue even under heavy contention). + +In short, there was a lock-free queue shaped hole in the C++ open-source universe, and I set out +to fill it with the fastest, most complete, and well-tested design and implementation I could. +The result is `moodycamel::ConcurrentQueue` :-) + +## Reasons *not* to use + +The fastest synchronization of all is the kind that never takes place. Fundamentally, +concurrent data structures require some synchronization, and that takes time. Every effort +was made, of course, to minimize the overhead, but if you can avoid sharing data between +threads, do so! + +Why use concurrent data structures at all, then? Because they're gosh darn convenient! (And, indeed, +sometimes sharing data concurrently is unavoidable.) + +My queue is **not linearizable** (see the next section on high-level design). The foundations of +its design assume that producers are independent; if this is not the case, and your producers +co-ordinate amongst themselves in some fashion, be aware that the elements won't necessarily +come out of the queue in the same order they were put in *relative to the ordering formed by that co-ordination* +(but they will still come out in the order they were put in by any *individual* producer). If this affects +your use case, you may be better off with another implementation; either way, it's an important limitation +to be aware of. + +My queue is also **not NUMA aware**, and does a lot of memory re-use internally, meaning it probably doesn't +scale particularly well on NUMA architectures; however, I don't know of any other lock-free queue that *is* +NUMA aware (except for [SALSA][salsa], which is very cool, but has no publicly available implementation that I know of). + +Finally, the queue is **not sequentially consistent**; there *is* a happens-before relationship between when an element is put +in the queue and when it comes out, but other things (such as pumping the queue until it's empty) require more thought +to get right in all eventualities, because explicit memory ordering may have to be done to get the desired effect. In other words, +it can sometimes be difficult to use the queue correctly. This is why it's a good idea to follow the [samples][samples.md] where possible. +On the other hand, the upside of this lack of sequential consistency is better performance. + +## High-level design + +Elements are stored internally using contiguous blocks instead of linked lists for better performance. +The queue is made up of a collection of sub-queues, one for each producer. When a consumer +wants to dequeue an element, it checks all the sub-queues until it finds one that's not empty. +All of this is largely transparent to the user of the queue, however -- it mostly just worksTM. 
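For illustration, here is a minimal sketch (the element type and values are arbitrary) of what that sub-queue structure means in practice: each producer thread feeds its own sub-queue, so a given producer's elements are dequeued in the order that producer enqueued them, while the interleaving between different producers is unspecified.

    #include "concurrentqueue.h"
    #include <thread>
    #include <vector>

    moodycamel::ConcurrentQueue<int> q;

    std::thread producerA([&]() { for (int i = 0; i < 3; ++i) q.enqueue(i); });       // enqueues 0, 1, 2
    std::thread producerB([&]() { for (int i = 100; i < 103; ++i) q.enqueue(i); });   // enqueues 100, 101, 102
    producerA.join();
    producerB.join();

    std::vector<int> out;
    int item;
    while (q.try_dequeue(item))
        out.push_back(item);
    // `out` holds 0, 1, 2 in order and 100, 101, 102 in order,
    // but the two runs may be interleaved arbitrarily.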
+ +One particular consequence of this design, however, (which seems to be non-intuitive) is that if two producers +enqueue at the same time, there is no defined ordering between the elements when they're later dequeued. +Normally this is fine, because even with a fully linearizable queue there'd be a race between the producer +threads and so you couldn't rely on the ordering anyway. However, if for some reason you do extra explicit synchronization +between the two producer threads yourself, thus defining a total order between enqueue operations, you might expect +that the elements would come out in the same total order, which is a guarantee my queue does not offer. At that +point, though, there semantically aren't really two separate producers, but rather one that happens to be spread +across multiple threads. In this case, you can still establish a total ordering with my queue by creating +a single producer token, and using that from both threads to enqueue (taking care to synchronize access to the token, +of course, but there was already extra synchronization involved anyway). + +I've written a more detailed [overview of the internal design][blog], as well as [the full +nitty-gritty details of the design][design], on my blog. Finally, the +[source][source] itself is available for perusal for those interested in its implementation. + +## Basic use + +The entire queue's implementation is contained in **one header**, [`concurrentqueue.h`][concurrentqueue.h]. +Simply download and include that to use the queue. The blocking version is in a separate header, +[`blockingconcurrentqueue.h`][blockingconcurrentqueue.h], that depends on the first. +The implementation makes use of certain key C++11 features, so it requires a fairly recent compiler +(e.g. VS2012+ or g++ 4.8; note that g++ 4.6 has a known bug with `std::atomic` and is thus not supported). +The algorithm implementations themselves are platform independent. + +Use it like you would any other templated queue, with the exception that you can use +it from many threads at once :-) + +Simple example: + + #include "concurrentqueue.h" + + moodycamel::ConcurrentQueue<int> q; + q.enqueue(25); + + int item; + bool found = q.try_dequeue(item); + assert(found && item == 25); + +Description of basic methods: +- `ConcurrentQueue(size_t initialSizeEstimate)` + Constructor which optionally accepts an estimate of the number of elements the queue will hold +- `enqueue(T&& item)` + Enqueues one item, allocating extra space if necessary +- `try_enqueue(T&& item)` + Enqueues one item, but only if enough memory is already allocated +- `try_dequeue(T& item)` + Dequeues one item, returning true if an item was found or false if the queue appeared empty + +Note that it is up to the user to ensure that the queue object is completely constructed before +being used by any other threads (this includes making the memory effects of construction +visible, possibly via a memory barrier). Similarly, it's important that all threads have +finished using the queue (and the memory effects have fully propagated) before it is +destructed. + +There's usually two versions of each method, one "explicit" version that takes a user-allocated per-producer or +per-consumer token, and one "implicit" version that works without tokens. Using the explicit methods is almost +always faster (though not necessarily by a huge factor).
Apart from performance, the primary distinction between them +is their sub-queue allocation behaviour for enqueue operations: Using the implicit enqueue methods causes an +automatically-allocated thread-local producer sub-queue to be allocated (it is marked for reuse once the thread exits). +Explicit producers, on the other hand, are tied directly to their tokens' lifetimes (and are also recycled as needed). + +Full API (pseudocode): + + # Allocates more memory if necessary + enqueue(item) : bool + enqueue(prod_token, item) : bool + enqueue_bulk(item_first, count) : bool + enqueue_bulk(prod_token, item_first, count) : bool + + # Fails if not enough memory to enqueue + try_enqueue(item) : bool + try_enqueue(prod_token, item) : bool + try_enqueue_bulk(item_first, count) : bool + try_enqueue_bulk(prod_token, item_first, count) : bool + + # Attempts to dequeue from the queue (never allocates) + try_dequeue(item&) : bool + try_dequeue(cons_token, item&) : bool + try_dequeue_bulk(item_first, max) : size_t + try_dequeue_bulk(cons_token, item_first, max) : size_t + + # If you happen to know which producer you want to dequeue from + try_dequeue_from_producer(prod_token, item&) : bool + try_dequeue_bulk_from_producer(prod_token, item_first, max) : size_t + + # A not-necessarily-accurate count of the total number of elements + size_approx() : size_t + +## Blocking version + +As mentioned above, a full blocking wrapper of the queue is provided that adds +`wait_dequeue` and `wait_dequeue_bulk` methods in addition to the regular interface. +This wrapper is extremely low-overhead, but slightly less fast than the non-blocking +queue (due to the necessary bookkeeping involving a lightweight semaphore). + +There are also timed versions that allow a timeout to be specified (either in microseconds +or with a `std::chrono` object). + +The only major caveat with the blocking version is that you must be careful not to +destroy the queue while somebody is waiting on it. This generally means you need to +know for certain that another element is going to come along before you call one of +the blocking methods. (To be fair, the non-blocking version cannot be destroyed while +in use either, but it can be easier to coordinate the cleanup.) + +Blocking example: + + #include "blockingconcurrentqueue.h" + + moodycamel::BlockingConcurrentQueue<int> q; + std::thread producer([&]() { + for (int i = 0; i != 100; ++i) { + std::this_thread::sleep_for(std::chrono::milliseconds(i % 10)); + q.enqueue(i); + } + }); + std::thread consumer([&]() { + for (int i = 0; i != 100; ++i) { + int item; + q.wait_dequeue(item); + assert(item == i); + + if (q.wait_dequeue_timed(item, std::chrono::milliseconds(5))) { + ++i; + assert(item == i); + } + } + }); + producer.join(); + consumer.join(); + + assert(q.size_approx() == 0); + +## Advanced features + +#### Tokens + +The queue can take advantage of extra per-producer and per-consumer storage if +it's available to speed up its operations. This takes the form of "tokens": +You can create a consumer token and/or a producer token for each thread or task +(tokens themselves are not thread-safe), and use the methods that accept a token +as their first parameter: + + moodycamel::ConcurrentQueue<int> q; + + moodycamel::ProducerToken ptok(q); + q.enqueue(ptok, 17); + + moodycamel::ConsumerToken ctok(q); + int item; + q.try_dequeue(ctok, item); + assert(item == 17); + +If you happen to know which producer you want to consume from (e.g.
in +a single-producer, multi-consumer scenario), you can use the `try_dequeue_from_producer` +methods, which accept a producer token instead of a consumer token, and cut some overhead. + +Note that tokens work with the blocking version of the queue too. + +When producing or consuming many elements, the most efficient way is to: + +1. Use the bulk methods of the queue with tokens +2. Failing that, use the bulk methods without tokens +3. Failing that, use the single-item methods with tokens +4. Failing that, use the single-item methods without tokens + +Having said that, don't create tokens willy-nilly -- ideally there would be +one token (of each kind) per thread. The queue will work with what it is +given, but it performs best when used with tokens. + +Note that tokens aren't actually tied to any given thread; it's not technically +required that they be local to the thread, only that they be used by a single +producer/consumer at a time. + +#### Bulk operations + +Thanks to the [novel design][blog] of the queue, it's just as easy to enqueue/dequeue multiple +items as it is to do one at a time. This means that overhead can be cut drastically for +bulk operations. Example syntax: + + moodycamel::ConcurrentQueue<int> q; + + int items[] = { 1, 2, 3, 4, 5 }; + q.enqueue_bulk(items, 5); + + int results[5]; // Could also be any iterator + size_t count = q.try_dequeue_bulk(results, 5); + for (size_t i = 0; i != count; ++i) { + assert(results[i] == items[i]); + } + +#### Preallocation (correctly using `try_enqueue`) + +`try_enqueue`, unlike just plain `enqueue`, will never allocate memory. If there's not enough room in the +queue, it simply returns false. The key to using this method properly, then, is to ensure enough space is +pre-allocated for your desired maximum element count. + +The constructor accepts a count of the number of elements that it should reserve space for. Because the +queue works with blocks of elements, however, and not individual elements themselves, the value to pass +in order to obtain an effective number of pre-allocated element slots is non-obvious. + +First, be aware that the count passed is rounded up to the next multiple of the block size. Note that the +default block size is 32 (this can be changed via the traits). Second, once a slot in a block has been +enqueued to, that slot cannot be re-used until the rest of the block has completely been filled +up and then completely emptied. This affects the number of blocks you need in order to account for the +overhead of partially-filled blocks. Third, each producer (whether implicit or explicit) claims and recycles +blocks in a different manner, which again affects the number of blocks you need to account for a desired number of +usable slots. + +Suppose you want the queue to be able to hold at least `N` elements at any given time. Without delving too +deep into the rather arcane implementation details, here are some simple formulas for the number of elements +to request for pre-allocation in such a case. Note the division is intended to be arithmetic division and not +integer division (in order for `ceil()` to work).
+ +For explicit producers (using tokens to enqueue): + + (ceil(N / BLOCK_SIZE) + 1) * MAX_NUM_PRODUCERS * BLOCK_SIZE + +For implicit producers (no tokens): + + (ceil(N / BLOCK_SIZE) - 1 + 2 * MAX_NUM_PRODUCERS) * BLOCK_SIZE + +When using mixed producer types: + + ((ceil(N / BLOCK_SIZE) - 1) * (MAX_EXPLICIT_PRODUCERS + 1) + 2 * (MAX_IMPLICIT_PRODUCERS + MAX_EXPLICIT_PRODUCERS)) * BLOCK_SIZE + +If these formulas seem rather inconvenient, you can use the constructor overload that accepts the minimum +number of elements (`N`) and the maximum number of explicit and implicit producers directly, and let it do the +computation for you. + +Finally, it's important to note that because the queue is only eventually consistent and takes advantage of +weak memory ordering for speed, there's always a possibility that under contention `try_enqueue` will fail +even if the queue is correctly pre-sized for the desired number of elements. (e.g. A given thread may think that +the queue's full even when that's no longer the case.) So no matter what, you still need to handle the failure +case (perhaps looping until it succeeds), unless you don't mind dropping elements. + +#### Exception safety + +The queue is exception safe, and will never become corrupted if used with a type that may throw exceptions. +The queue itself never throws any exceptions (operations fail gracefully (return false) if memory allocation +fails instead of throwing `std::bad_alloc`). + +It is important to note that the guarantees of exception safety only hold if the element type never throws +from its destructor, and that any iterators passed into the queue (for bulk operations) never throw either. +Note that in particular this means `std::back_inserter` iterators must be used with care, since the vector +being inserted into may need to allocate and throw a `std::bad_alloc` exception from inside the iterator; +so be sure to reserve enough capacity in the target container first if you do this. + +The guarantees are presently as follows: +- Enqueue operations are rolled back completely if an exception is thrown from an element's constructor. + For bulk enqueue operations, this means that elements are copied instead of moved (in order to avoid + having only some of the objects be moved in the event of an exception). Non-bulk enqueues always use + the move constructor if one is available. +- If the assignment operator throws during a dequeue operation (both single and bulk), the element(s) are + considered dequeued regardless. In such a case, the dequeued elements are all properly destructed before + the exception is propagated, but there's no way to get the elements themselves back. +- Any exception that is thrown is propagated up the call stack, at which point the queue is in a consistent + state. + +Note: If any of your type's copy constructors/move constructors/assignment operators don't throw, be sure +to annotate them with `noexcept`; this will avoid the exception-checking overhead in the queue where possible +(even with zero-cost exceptions, there's still a code size impact that has to be taken into account). + +#### Traits + +The queue also supports a traits template argument which defines various types, constants, +and the memory allocation and deallocation functions that are to be used by the queue. The typical pattern +to providing your own traits is to create a class that inherits from the default traits +and override only the values you wish to change. 
Example: + + struct MyTraits : public moodycamel::ConcurrentQueueDefaultTraits + { + static const size_t BLOCK_SIZE = 256; // Use bigger blocks + }; + + moodycamel::ConcurrentQueue<int, MyTraits> q; + +#### How to dequeue types without calling the constructor + +The normal way to dequeue an item is to pass in an existing object by reference, which +is then assigned to internally by the queue (using the move-assignment operator if possible). +This can pose a problem for types that are +expensive to construct or don't have a default constructor; fortunately, there is a simple +workaround: Create a wrapper class that copies the memory contents of the object when it +is assigned by the queue (a poor man's move, essentially). Note that this only works if +the object contains no internal pointers. Example: + + struct MyObjectMover { + inline void operator=(MyObject&& obj) + { + std::memcpy(data, &obj, sizeof(MyObject)); + + // TODO: Cleanup obj so that when it's destructed by the queue + // it doesn't corrupt the data of the object we just moved it into + } + + inline MyObject& obj() { return *reinterpret_cast<MyObject*>(data); } + + private: + alignas(alignof(MyObject)) char data[sizeof(MyObject)]; + }; + +A less dodgy alternative, if moves are cheap but default construction is not, is to use a +wrapper that defers construction until the object is assigned, enabling use of the move +constructor: + + struct MyObjectMover { + inline void operator=(MyObject&& x) { + new (data) MyObject(std::move(x)); + created = true; + } + + inline MyObject& obj() { + assert(created); + return *reinterpret_cast<MyObject*>(data); + } + + ~MyObjectMover() { + if (created) + obj().~MyObject(); + } + + private: + alignas(alignof(MyObject)) char data[sizeof(MyObject)]; + bool created = false; + }; + + +## Samples + +There are some more detailed samples [here][samples.md]. The source of +the [unit tests][unittest-src] and [benchmarks][benchmark-src] are available for reference as well. + +## Benchmarks + +See my blog post for some [benchmark results][benchmarks] (including versus `boost::lockfree::queue` and `tbb::concurrent_queue`), +or run the benchmarks yourself (requires MinGW and certain GnuWin32 utilities to build on Windows, or a recent +g++ on Linux): + + cd build + make benchmarks + bin/benchmarks + +The short version of the benchmarks is that it's so fast (especially the bulk methods), that if you're actually +using the queue to *do* anything, the queue won't be your bottleneck. + +## Tests (and bugs) + +I've written quite a few unit tests as well as a randomized long-running fuzz tester. I also ran the +core queue algorithm through the [CDSChecker][cdschecker] C++11 memory model model checker. Some of the +inner algorithms were tested separately using the [Relacy][relacy] model checker, and full integration +tests were also performed with Relacy. +I've tested +on Linux (Fedora 19) and Windows (7), but only on x86 processors so far (Intel and AMD). The code was +written to be platform-independent, however, and should work across all processors and OSes. + +Due to the complexity of the implementation and the difficult-to-test nature of lock-free code in general, +there may still be bugs. If anyone is seeing buggy behaviour, I'd like to hear about it! (Especially if +a unit test for it can be cooked up.) Just open an issue on GitHub. + +## License + +I'm releasing the source of this repository (with the exception of third-party code, i.e.
the Boost queue +(used in the benchmarks for comparison), Intel's TBB library (ditto), CDSChecker, Relacy, and Jeff Preshing's +cross-platform semaphore, which all have their own licenses) +under a simplified BSD license. I'm also dual-licensing under the Boost Software License. +See the [LICENSE.md][license] file for more details. + +Note that lock-free programming is a patent minefield, and this code may very +well violate a pending patent (I haven't looked), though it does not to my present knowledge. +I did design and implement this queue from scratch. + +## Diving into the code + +If you're interested in the source code itself, it helps to have a rough idea of how it's laid out. This +section attempts to describe that. + +The queue is formed of several basic parts (listed here in roughly the order they appear in the source). There's the +helper functions (e.g. for rounding to a power of 2). There's the default traits of the queue, which contain the +constants and malloc/free functions used by the queue. There's the producer and consumer tokens. Then there's the queue's +public API itself, starting with the constructor, destructor, and swap/assignment methods. There's the public enqueue methods, +which are all wrappers around a small set of private enqueue methods found later on. There's the dequeue methods, which are +defined inline and are relatively straightforward. + +Then there's all the main internal data structures. First, there's a lock-free free list, used for recycling spent blocks (elements +are enqueued to blocks internally). Then there's the block structure itself, which has two different ways of tracking whether +it's fully emptied or not (remember, given two parallel consumers, there's no way to know which one will finish first) depending on where it's used. +Then there's a small base class for the two types of internal SPMC producer queues (one for explicit producers that holds onto memory +but attempts to be faster, and one for implicit ones which attempt to recycle more memory back into the parent but is a little slower). +The explicit producer is defined first, then the implicit one. They both contain the same general four methods: One to enqueue, one to +dequeue, one to enqueue in bulk, and one to dequeue in bulk. (Obviously they have constructors and destructors too, and helper methods.) +The main difference between them is how the block handling is done (they both use the same blocks, but in different ways, and map indices +to them in different ways). + +Finally, there's the miscellaneous internal methods: There's the ones that handle the initial block pool (populated when the queue is constructed), +and an abstract block pool that comprises the initial pool and any blocks on the free list. There's ones that handle the producer list +(a lock-free add-only linked list of all the producers in the system). There's ones that handle the implicit producer lookup table (which +is really a sort of specialized TLS lookup). And then there's some helper methods for allocating and freeing objects, and the data members +of the queue itself, followed lastly by the free-standing swap functions. 
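As a worked sketch of the preallocation guidance above (the element type, capacity, and producer counts here are arbitrary assumptions), the constructor overload that takes the minimum element count plus the maximum number of explicit and implicit producers can do the block arithmetic for you, after which a `try_enqueue` failure is handled as a normal outcome:

    #include "concurrentqueue.h"

    // Room for at least 1024 ints, with at most 4 explicit and 4 implicit producers.
    moodycamel::ConcurrentQueue<int> q(1024, 4, 4);

    moodycamel::ProducerToken ptok(q);
    if (!q.try_enqueue(ptok, 42)) {
        // Under contention even a correctly pre-sized queue can transiently
        // report "full", so retry, drop, or fall back to enqueue() here.
    }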
+ + +[blog]: http://moodycamel.com/blog/2014/a-fast-general-purpose-lock-free-queue-for-c++ +[design]: http://moodycamel.com/blog/2014/detailed-design-of-a-lock-free-queue +[samples.md]: https://github.com/cameron314/concurrentqueue/blob/master/samples.md +[source]: https://github.com/cameron314/concurrentqueue +[concurrentqueue.h]: https://github.com/cameron314/concurrentqueue/blob/master/concurrentqueue.h +[blockingconcurrentqueue.h]: https://github.com/cameron314/concurrentqueue/blob/master/blockingconcurrentqueue.h +[unittest-src]: https://github.com/cameron314/concurrentqueue/tree/master/tests/unittests +[benchmarks]: http://moodycamel.com/blog/2014/a-fast-general-purpose-lock-free-queue-for-c++#benchmarks +[benchmark-src]: https://github.com/cameron314/concurrentqueue/tree/master/benchmarks +[license]: https://github.com/cameron314/concurrentqueue/blob/master/LICENSE.md +[cdschecker]: http://demsky.eecs.uci.edu/c11modelchecker.html +[relacy]: http://www.1024cores.net/home/relacy-race-detector +[spsc]: https://github.com/cameron314/readerwriterqueue +[salsa]: http://webee.technion.ac.il/~idish/ftp/spaa049-gidron.pdf diff --git a/third-party/concurrentqueue/blockingconcurrentqueue.h b/third-party/concurrentqueue/blockingconcurrentqueue.h new file mode 100644 index 0000000..c855f9d --- /dev/null +++ b/third-party/concurrentqueue/blockingconcurrentqueue.h @@ -0,0 +1,981 @@ +// Provides an efficient blocking version of moodycamel::ConcurrentQueue. +// ©2015-2016 Cameron Desrochers. Distributed under the terms of the simplified +// BSD license, available at the top of concurrentqueue.h. +// Uses Jeff Preshing's semaphore implementation (under the terms of its +// separate zlib license, embedded below). + +#pragma once + +#include "concurrentqueue.h" +#include +#include +#include +#include +#include + +#if defined(_WIN32) +// Avoid including windows.h in a header; we only need a handful of +// items, so we'll redeclare them here (this is relatively safe since +// the API generally has to remain stable between Windows versions). +// I know this is an ugly hack but it still beats polluting the global +// namespace with thousands of generic names or adding a .cpp for nothing. +extern "C" { + struct _SECURITY_ATTRIBUTES; + __declspec(dllimport) void* __stdcall CreateSemaphoreW(_SECURITY_ATTRIBUTES* lpSemaphoreAttributes, long lInitialCount, long lMaximumCount, const wchar_t* lpName); + __declspec(dllimport) int __stdcall CloseHandle(void* hObject); + __declspec(dllimport) unsigned long __stdcall WaitForSingleObject(void* hHandle, unsigned long dwMilliseconds); + __declspec(dllimport) int __stdcall ReleaseSemaphore(void* hSemaphore, long lReleaseCount, long* lpPreviousCount); +} +#elif defined(__MACH__) +#include +#elif defined(__unix__) +#include +#endif + +namespace moodycamel +{ +namespace details +{ + // Code in the mpmc_sema namespace below is an adaptation of Jeff Preshing's + // portable + lightweight semaphore implementations, originally from + // https://github.com/preshing/cpp11-on-multicore/blob/master/common/sema.h + // LICENSE: + // Copyright (c) 2015 Jeff Preshing + // + // This software is provided 'as-is', without any express or implied + // warranty. In no event will the authors be held liable for any damages + // arising from the use of this software. + // + // Permission is granted to anyone to use this software for any purpose, + // including commercial applications, and to alter it and redistribute it + // freely, subject to the following restrictions: + // + // 1. 
The origin of this software must not be misrepresented; you must not + // claim that you wrote the original software. If you use this software + // in a product, an acknowledgement in the product documentation would be + // appreciated but is not required. + // 2. Altered source versions must be plainly marked as such, and must not be + // misrepresented as being the original software. + // 3. This notice may not be removed or altered from any source distribution. + namespace mpmc_sema + { +#if defined(_WIN32) + class Semaphore + { + private: + void* m_hSema; + + Semaphore(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; + Semaphore& operator=(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; + + public: + Semaphore(int initialCount = 0) + { + assert(initialCount >= 0); + const long maxLong = 0x7fffffff; + m_hSema = CreateSemaphoreW(nullptr, initialCount, maxLong, nullptr); + } + + ~Semaphore() + { + CloseHandle(m_hSema); + } + + void wait() + { + const unsigned long infinite = 0xffffffff; + WaitForSingleObject(m_hSema, infinite); + } + + bool try_wait() + { + const unsigned long RC_WAIT_TIMEOUT = 0x00000102; + return WaitForSingleObject(m_hSema, 0) != RC_WAIT_TIMEOUT; + } + + bool timed_wait(std::uint64_t usecs) + { + const unsigned long RC_WAIT_TIMEOUT = 0x00000102; + return WaitForSingleObject(m_hSema, (unsigned long)(usecs / 1000)) != RC_WAIT_TIMEOUT; + } + + void signal(int count = 1) + { + ReleaseSemaphore(m_hSema, count, nullptr); + } + }; +#elif defined(__MACH__) + //--------------------------------------------------------- + // Semaphore (Apple iOS and OSX) + // Can't use POSIX semaphores due to http://lists.apple.com/archives/darwin-kernel/2009/Apr/msg00010.html + //--------------------------------------------------------- + class Semaphore + { + private: + semaphore_t m_sema; + + Semaphore(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; + Semaphore& operator=(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; + + public: + Semaphore(int initialCount = 0) + { + assert(initialCount >= 0); + semaphore_create(mach_task_self(), &m_sema, SYNC_POLICY_FIFO, initialCount); + } + + ~Semaphore() + { + semaphore_destroy(mach_task_self(), m_sema); + } + + void wait() + { + semaphore_wait(m_sema); + } + + bool try_wait() + { + return timed_wait(0); + } + + bool timed_wait(std::uint64_t timeout_usecs) + { + mach_timespec_t ts; + ts.tv_sec = static_cast(timeout_usecs / 1000000); + ts.tv_nsec = (timeout_usecs % 1000000) * 1000; + + // added in OSX 10.10: https://developer.apple.com/library/prerelease/mac/documentation/General/Reference/APIDiffsMacOSX10_10SeedDiff/modules/Darwin.html + kern_return_t rc = semaphore_timedwait(m_sema, ts); + + return rc != KERN_OPERATION_TIMED_OUT && rc != KERN_ABORTED; + } + + void signal() + { + semaphore_signal(m_sema); + } + + void signal(int count) + { + while (count-- > 0) + { + semaphore_signal(m_sema); + } + } + }; +#elif defined(__unix__) + //--------------------------------------------------------- + // Semaphore (POSIX, Linux) + //--------------------------------------------------------- + class Semaphore + { + private: + sem_t m_sema; + + Semaphore(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; + Semaphore& operator=(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; + + public: + Semaphore(int initialCount = 0) + { + assert(initialCount >= 0); + sem_init(&m_sema, 0, initialCount); + } + + ~Semaphore() + { + sem_destroy(&m_sema); + } + + void wait() + { + // 
http://stackoverflow.com/questions/2013181/gdb-causes-sem-wait-to-fail-with-eintr-error + int rc; + do { + rc = sem_wait(&m_sema); + } while (rc == -1 && errno == EINTR); + } + + bool try_wait() + { + int rc; + do { + rc = sem_trywait(&m_sema); + } while (rc == -1 && errno == EINTR); + return !(rc == -1 && errno == EAGAIN); + } + + bool timed_wait(std::uint64_t usecs) + { + struct timespec ts; + const int usecs_in_1_sec = 1000000; + const int nsecs_in_1_sec = 1000000000; + clock_gettime(CLOCK_REALTIME, &ts); + ts.tv_sec += usecs / usecs_in_1_sec; + ts.tv_nsec += (usecs % usecs_in_1_sec) * 1000; + // sem_timedwait bombs if you have more than 1e9 in tv_nsec + // so we have to clean things up before passing it in + if (ts.tv_nsec >= nsecs_in_1_sec) { + ts.tv_nsec -= nsecs_in_1_sec; + ++ts.tv_sec; + } + + int rc; + do { + rc = sem_timedwait(&m_sema, &ts); + } while (rc == -1 && errno == EINTR); + return !(rc == -1 && errno == ETIMEDOUT); + } + + void signal() + { + sem_post(&m_sema); + } + + void signal(int count) + { + while (count-- > 0) + { + sem_post(&m_sema); + } + } + }; +#else +#error Unsupported platform! (No semaphore wrapper available) +#endif + + //--------------------------------------------------------- + // LightweightSemaphore + //--------------------------------------------------------- + class LightweightSemaphore + { + public: + typedef std::make_signed::type ssize_t; + + private: + std::atomic m_count; + Semaphore m_sema; + + bool waitWithPartialSpinning(std::int64_t timeout_usecs = -1) + { + ssize_t oldCount; + // Is there a better way to set the initial spin count? + // If we lower it to 1000, testBenaphore becomes 15x slower on my Core i7-5930K Windows PC, + // as threads start hitting the kernel semaphore. + int spin = 10000; + while (--spin >= 0) + { + oldCount = m_count.load(std::memory_order_relaxed); + if ((oldCount > 0) && m_count.compare_exchange_strong(oldCount, oldCount - 1, std::memory_order_acquire, std::memory_order_relaxed)) + return true; + std::atomic_signal_fence(std::memory_order_acquire); // Prevent the compiler from collapsing the loop. + } + oldCount = m_count.fetch_sub(1, std::memory_order_acquire); + if (oldCount > 0) + return true; + if (timeout_usecs < 0) + { + m_sema.wait(); + return true; + } + if (m_sema.timed_wait((std::uint64_t)timeout_usecs)) + return true; + // At this point, we've timed out waiting for the semaphore, but the + // count is still decremented indicating we may still be waiting on + // it. So we have to re-adjust the count, but only if the semaphore + // wasn't signaled enough times for us too since then. If it was, we + // need to release the semaphore too. + while (true) + { + oldCount = m_count.load(std::memory_order_acquire); + if (oldCount >= 0 && m_sema.try_wait()) + return true; + if (oldCount < 0 && m_count.compare_exchange_strong(oldCount, oldCount + 1, std::memory_order_relaxed, std::memory_order_relaxed)) + return false; + } + } + + ssize_t waitManyWithPartialSpinning(ssize_t max, std::int64_t timeout_usecs = -1) + { + assert(max > 0); + ssize_t oldCount; + int spin = 10000; + while (--spin >= 0) + { + oldCount = m_count.load(std::memory_order_relaxed); + if (oldCount > 0) + { + ssize_t newCount = oldCount > max ? 
oldCount - max : 0; + if (m_count.compare_exchange_strong(oldCount, newCount, std::memory_order_acquire, std::memory_order_relaxed)) + return oldCount - newCount; + } + std::atomic_signal_fence(std::memory_order_acquire); + } + oldCount = m_count.fetch_sub(1, std::memory_order_acquire); + if (oldCount <= 0) + { + if (timeout_usecs < 0) + m_sema.wait(); + else if (!m_sema.timed_wait((std::uint64_t)timeout_usecs)) + { + while (true) + { + oldCount = m_count.load(std::memory_order_acquire); + if (oldCount >= 0 && m_sema.try_wait()) + break; + if (oldCount < 0 && m_count.compare_exchange_strong(oldCount, oldCount + 1, std::memory_order_relaxed, std::memory_order_relaxed)) + return 0; + } + } + } + if (max > 1) + return 1 + tryWaitMany(max - 1); + return 1; + } + + public: + LightweightSemaphore(ssize_t initialCount = 0) : m_count(initialCount) + { + assert(initialCount >= 0); + } + + bool tryWait() + { + ssize_t oldCount = m_count.load(std::memory_order_relaxed); + while (oldCount > 0) + { + if (m_count.compare_exchange_weak(oldCount, oldCount - 1, std::memory_order_acquire, std::memory_order_relaxed)) + return true; + } + return false; + } + + void wait() + { + if (!tryWait()) + waitWithPartialSpinning(); + } + + bool wait(std::int64_t timeout_usecs) + { + return tryWait() || waitWithPartialSpinning(timeout_usecs); + } + + // Acquires between 0 and (greedily) max, inclusive + ssize_t tryWaitMany(ssize_t max) + { + assert(max >= 0); + ssize_t oldCount = m_count.load(std::memory_order_relaxed); + while (oldCount > 0) + { + ssize_t newCount = oldCount > max ? oldCount - max : 0; + if (m_count.compare_exchange_weak(oldCount, newCount, std::memory_order_acquire, std::memory_order_relaxed)) + return oldCount - newCount; + } + return 0; + } + + // Acquires at least one, and (greedily) at most max + ssize_t waitMany(ssize_t max, std::int64_t timeout_usecs) + { + assert(max >= 0); + ssize_t result = tryWaitMany(max); + if (result == 0 && max > 0) + result = waitManyWithPartialSpinning(max, timeout_usecs); + return result; + } + + ssize_t waitMany(ssize_t max) + { + ssize_t result = waitMany(max, -1); + assert(result > 0); + return result; + } + + void signal(ssize_t count = 1) + { + assert(count >= 0); + ssize_t oldCount = m_count.fetch_add(count, std::memory_order_release); + ssize_t toRelease = -oldCount < count ? -oldCount : count; + if (toRelease > 0) + { + m_sema.signal((int)toRelease); + } + } + + ssize_t availableApprox() const + { + ssize_t count = m_count.load(std::memory_order_relaxed); + return count > 0 ? count : 0; + } + }; + } // end namespace mpmc_sema +} // end namespace details + + +// This is a blocking version of the queue. It has an almost identical interface to +// the normal non-blocking version, with the addition of various wait_dequeue() methods +// and the removal of producer-specific dequeue methods. 
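// A minimal usage sketch (assuming an int element type; any movable type works):
//
//     moodycamel::BlockingConcurrentQueue<int> q;
//     q.enqueue(42);
//     int item;
//     q.wait_dequeue(item);    // blocks until an element is available; item == 42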
+template +class BlockingConcurrentQueue +{ +private: + typedef ::moodycamel::ConcurrentQueue ConcurrentQueue; + typedef details::mpmc_sema::LightweightSemaphore LightweightSemaphore; + +public: + typedef typename ConcurrentQueue::producer_token_t producer_token_t; + typedef typename ConcurrentQueue::consumer_token_t consumer_token_t; + + typedef typename ConcurrentQueue::index_t index_t; + typedef typename ConcurrentQueue::size_t size_t; + typedef typename std::make_signed::type ssize_t; + + static const size_t BLOCK_SIZE = ConcurrentQueue::BLOCK_SIZE; + static const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = ConcurrentQueue::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD; + static const size_t EXPLICIT_INITIAL_INDEX_SIZE = ConcurrentQueue::EXPLICIT_INITIAL_INDEX_SIZE; + static const size_t IMPLICIT_INITIAL_INDEX_SIZE = ConcurrentQueue::IMPLICIT_INITIAL_INDEX_SIZE; + static const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = ConcurrentQueue::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE; + static const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = ConcurrentQueue::EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE; + static const size_t MAX_SUBQUEUE_SIZE = ConcurrentQueue::MAX_SUBQUEUE_SIZE; + +public: + // Creates a queue with at least `capacity` element slots; note that the + // actual number of elements that can be inserted without additional memory + // allocation depends on the number of producers and the block size (e.g. if + // the block size is equal to `capacity`, only a single block will be allocated + // up-front, which means only a single producer will be able to enqueue elements + // without an extra allocation -- blocks aren't shared between producers). + // This method is not thread safe -- it is up to the user to ensure that the + // queue is fully constructed before it starts being used by other threads (this + // includes making the memory effects of construction visible, possibly with a + // memory barrier). + explicit BlockingConcurrentQueue(size_t capacity = 6 * BLOCK_SIZE) + : inner(capacity), sema(create(), &BlockingConcurrentQueue::template destroy) + { + assert(reinterpret_cast((BlockingConcurrentQueue*)1) == &((BlockingConcurrentQueue*)1)->inner && "BlockingConcurrentQueue must have ConcurrentQueue as its first member"); + if (!sema) { + MOODYCAMEL_THROW(std::bad_alloc()); + } + } + + BlockingConcurrentQueue(size_t minCapacity, size_t maxExplicitProducers, size_t maxImplicitProducers) + : inner(minCapacity, maxExplicitProducers, maxImplicitProducers), sema(create(), &BlockingConcurrentQueue::template destroy) + { + assert(reinterpret_cast((BlockingConcurrentQueue*)1) == &((BlockingConcurrentQueue*)1)->inner && "BlockingConcurrentQueue must have ConcurrentQueue as its first member"); + if (!sema) { + MOODYCAMEL_THROW(std::bad_alloc()); + } + } + + // Disable copying and copy assignment + BlockingConcurrentQueue(BlockingConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION; + BlockingConcurrentQueue& operator=(BlockingConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION; + + // Moving is supported, but note that it is *not* a thread-safe operation. + // Nobody can use the queue while it's being moved, and the memory effects + // of that move must be propagated to other threads before they can use it. + // Note: When a queue is moved, its tokens are still valid but can only be + // used with the destination queue (i.e. semantically they are moved along + // with the queue itself). 
+ BlockingConcurrentQueue(BlockingConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT + : inner(std::move(other.inner)), sema(std::move(other.sema)) + { } + + inline BlockingConcurrentQueue& operator=(BlockingConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT + { + return swap_internal(other); + } + + // Swaps this queue's state with the other's. Not thread-safe. + // Swapping two queues does not invalidate their tokens, however + // the tokens that were created for one queue must be used with + // only the swapped queue (i.e. the tokens are tied to the + // queue's movable state, not the object itself). + inline void swap(BlockingConcurrentQueue& other) MOODYCAMEL_NOEXCEPT + { + swap_internal(other); + } + +private: + BlockingConcurrentQueue& swap_internal(BlockingConcurrentQueue& other) + { + if (this == &other) { + return *this; + } + + inner.swap(other.inner); + sema.swap(other.sema); + return *this; + } + +public: + // Enqueues a single item (by copying it). + // Allocates memory if required. Only fails if memory allocation fails (or implicit + // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0, + // or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). + // Thread-safe. + inline bool enqueue(T const& item) + { + if ((details::likely)(inner.enqueue(item))) { + sema->signal(); + return true; + } + return false; + } + + // Enqueues a single item (by moving it, if possible). + // Allocates memory if required. Only fails if memory allocation fails (or implicit + // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0, + // or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). + // Thread-safe. + inline bool enqueue(T&& item) + { + if ((details::likely)(inner.enqueue(std::move(item)))) { + sema->signal(); + return true; + } + return false; + } + + // Enqueues a single item (by copying it) using an explicit producer token. + // Allocates memory if required. Only fails if memory allocation fails (or + // Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). + // Thread-safe. + inline bool enqueue(producer_token_t const& token, T const& item) + { + if ((details::likely)(inner.enqueue(token, item))) { + sema->signal(); + return true; + } + return false; + } + + // Enqueues a single item (by moving it, if possible) using an explicit producer token. + // Allocates memory if required. Only fails if memory allocation fails (or + // Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). + // Thread-safe. + inline bool enqueue(producer_token_t const& token, T&& item) + { + if ((details::likely)(inner.enqueue(token, std::move(item)))) { + sema->signal(); + return true; + } + return false; + } + + // Enqueues several items. + // Allocates memory if required. Only fails if memory allocation fails (or + // implicit production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE + // is 0, or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). + // Note: Use std::make_move_iterator if the elements should be moved instead of copied. + // Thread-safe. + template + inline bool enqueue_bulk(It itemFirst, size_t count) + { + if ((details::likely)(inner.enqueue_bulk(std::forward(itemFirst), count))) { + sema->signal((LightweightSemaphore::ssize_t)(ssize_t)count); + return true; + } + return false; + } + + // Enqueues several items using an explicit producer token. + // Allocates memory if required. 
Only fails if memory allocation fails + // (or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). + // Note: Use std::make_move_iterator if the elements should be moved + // instead of copied. + // Thread-safe. + template + inline bool enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count) + { + if ((details::likely)(inner.enqueue_bulk(token, std::forward(itemFirst), count))) { + sema->signal((LightweightSemaphore::ssize_t)(ssize_t)count); + return true; + } + return false; + } + + // Enqueues a single item (by copying it). + // Does not allocate memory. Fails if not enough room to enqueue (or implicit + // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE + // is 0). + // Thread-safe. + inline bool try_enqueue(T const& item) + { + if (inner.try_enqueue(item)) { + sema->signal(); + return true; + } + return false; + } + + // Enqueues a single item (by moving it, if possible). + // Does not allocate memory (except for one-time implicit producer). + // Fails if not enough room to enqueue (or implicit production is + // disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0). + // Thread-safe. + inline bool try_enqueue(T&& item) + { + if (inner.try_enqueue(std::move(item))) { + sema->signal(); + return true; + } + return false; + } + + // Enqueues a single item (by copying it) using an explicit producer token. + // Does not allocate memory. Fails if not enough room to enqueue. + // Thread-safe. + inline bool try_enqueue(producer_token_t const& token, T const& item) + { + if (inner.try_enqueue(token, item)) { + sema->signal(); + return true; + } + return false; + } + + // Enqueues a single item (by moving it, if possible) using an explicit producer token. + // Does not allocate memory. Fails if not enough room to enqueue. + // Thread-safe. + inline bool try_enqueue(producer_token_t const& token, T&& item) + { + if (inner.try_enqueue(token, std::move(item))) { + sema->signal(); + return true; + } + return false; + } + + // Enqueues several items. + // Does not allocate memory (except for one-time implicit producer). + // Fails if not enough room to enqueue (or implicit production is + // disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0). + // Note: Use std::make_move_iterator if the elements should be moved + // instead of copied. + // Thread-safe. + template + inline bool try_enqueue_bulk(It itemFirst, size_t count) + { + if (inner.try_enqueue_bulk(std::forward(itemFirst), count)) { + sema->signal((LightweightSemaphore::ssize_t)(ssize_t)count); + return true; + } + return false; + } + + // Enqueues several items using an explicit producer token. + // Does not allocate memory. Fails if not enough room to enqueue. + // Note: Use std::make_move_iterator if the elements should be moved + // instead of copied. + // Thread-safe. + template + inline bool try_enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count) + { + if (inner.try_enqueue_bulk(token, std::forward(itemFirst), count)) { + sema->signal((LightweightSemaphore::ssize_t)(ssize_t)count); + return true; + } + return false; + } + + + // Attempts to dequeue from the queue. + // Returns false if all producer streams appeared empty at the time they + // were checked (so, the queue is likely but not guaranteed to be empty). + // Never allocates. Thread-safe. 
+ template + inline bool try_dequeue(U& item) + { + if (sema->tryWait()) { + while (!inner.try_dequeue(item)) { + continue; + } + return true; + } + return false; + } + + // Attempts to dequeue from the queue using an explicit consumer token. + // Returns false if all producer streams appeared empty at the time they + // were checked (so, the queue is likely but not guaranteed to be empty). + // Never allocates. Thread-safe. + template + inline bool try_dequeue(consumer_token_t& token, U& item) + { + if (sema->tryWait()) { + while (!inner.try_dequeue(token, item)) { + continue; + } + return true; + } + return false; + } + + // Attempts to dequeue several elements from the queue. + // Returns the number of items actually dequeued. + // Returns 0 if all producer streams appeared empty at the time they + // were checked (so, the queue is likely but not guaranteed to be empty). + // Never allocates. Thread-safe. + template + inline size_t try_dequeue_bulk(It itemFirst, size_t max) + { + size_t count = 0; + max = (size_t)sema->tryWaitMany((LightweightSemaphore::ssize_t)(ssize_t)max); + while (count != max) { + count += inner.template try_dequeue_bulk(itemFirst, max - count); + } + return count; + } + + // Attempts to dequeue several elements from the queue using an explicit consumer token. + // Returns the number of items actually dequeued. + // Returns 0 if all producer streams appeared empty at the time they + // were checked (so, the queue is likely but not guaranteed to be empty). + // Never allocates. Thread-safe. + template + inline size_t try_dequeue_bulk(consumer_token_t& token, It itemFirst, size_t max) + { + size_t count = 0; + max = (size_t)sema->tryWaitMany((LightweightSemaphore::ssize_t)(ssize_t)max); + while (count != max) { + count += inner.template try_dequeue_bulk(token, itemFirst, max - count); + } + return count; + } + + + + // Blocks the current thread until there's something to dequeue, then + // dequeues it. + // Never allocates. Thread-safe. + template + inline void wait_dequeue(U& item) + { + sema->wait(); + while (!inner.try_dequeue(item)) { + continue; + } + } + + // Blocks the current thread until either there's something to dequeue + // or the timeout (specified in microseconds) expires. Returns false + // without setting `item` if the timeout expires, otherwise assigns + // to `item` and returns true. + // Using a negative timeout indicates an indefinite timeout, + // and is thus functionally equivalent to calling wait_dequeue. + // Never allocates. Thread-safe. + template + inline bool wait_dequeue_timed(U& item, std::int64_t timeout_usecs) + { + if (!sema->wait(timeout_usecs)) { + return false; + } + while (!inner.try_dequeue(item)) { + continue; + } + return true; + } + + // Blocks the current thread until either there's something to dequeue + // or the timeout expires. Returns false without setting `item` if the + // timeout expires, otherwise assigns to `item` and returns true. + // Never allocates. Thread-safe. + template + inline bool wait_dequeue_timed(U& item, std::chrono::duration const& timeout) + { + return wait_dequeue_timed(item, std::chrono::duration_cast(timeout).count()); + } + + // Blocks the current thread until there's something to dequeue, then + // dequeues it using an explicit consumer token. + // Never allocates. Thread-safe. 
+ template + inline void wait_dequeue(consumer_token_t& token, U& item) + { + sema->wait(); + while (!inner.try_dequeue(token, item)) { + continue; + } + } + + // Blocks the current thread until either there's something to dequeue + // or the timeout (specified in microseconds) expires. Returns false + // without setting `item` if the timeout expires, otherwise assigns + // to `item` and returns true. + // Using a negative timeout indicates an indefinite timeout, + // and is thus functionally equivalent to calling wait_dequeue. + // Never allocates. Thread-safe. + template + inline bool wait_dequeue_timed(consumer_token_t& token, U& item, std::int64_t timeout_usecs) + { + if (!sema->wait(timeout_usecs)) { + return false; + } + while (!inner.try_dequeue(token, item)) { + continue; + } + return true; + } + + // Blocks the current thread until either there's something to dequeue + // or the timeout expires. Returns false without setting `item` if the + // timeout expires, otherwise assigns to `item` and returns true. + // Never allocates. Thread-safe. + template + inline bool wait_dequeue_timed(consumer_token_t& token, U& item, std::chrono::duration const& timeout) + { + return wait_dequeue_timed(token, item, std::chrono::duration_cast(timeout).count()); + } + + // Attempts to dequeue several elements from the queue. + // Returns the number of items actually dequeued, which will + // always be at least one (this method blocks until the queue + // is non-empty) and at most max. + // Never allocates. Thread-safe. + template + inline size_t wait_dequeue_bulk(It itemFirst, size_t max) + { + size_t count = 0; + max = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max); + while (count != max) { + count += inner.template try_dequeue_bulk(itemFirst, max - count); + } + return count; + } + + // Attempts to dequeue several elements from the queue. + // Returns the number of items actually dequeued, which can + // be 0 if the timeout expires while waiting for elements, + // and at most max. + // Using a negative timeout indicates an indefinite timeout, + // and is thus functionally equivalent to calling wait_dequeue_bulk. + // Never allocates. Thread-safe. + template + inline size_t wait_dequeue_bulk_timed(It itemFirst, size_t max, std::int64_t timeout_usecs) + { + size_t count = 0; + max = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max, timeout_usecs); + while (count != max) { + count += inner.template try_dequeue_bulk(itemFirst, max - count); + } + return count; + } + + // Attempts to dequeue several elements from the queue. + // Returns the number of items actually dequeued, which can + // be 0 if the timeout expires while waiting for elements, + // and at most max. + // Never allocates. Thread-safe. + template + inline size_t wait_dequeue_bulk_timed(It itemFirst, size_t max, std::chrono::duration const& timeout) + { + return wait_dequeue_bulk_timed(itemFirst, max, std::chrono::duration_cast(timeout).count()); + } + + // Attempts to dequeue several elements from the queue using an explicit consumer token. + // Returns the number of items actually dequeued, which will + // always be at least one (this method blocks until the queue + // is non-empty) and at most max. + // Never allocates. Thread-safe. 
+ template + inline size_t wait_dequeue_bulk(consumer_token_t& token, It itemFirst, size_t max) + { + size_t count = 0; + max = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max); + while (count != max) { + count += inner.template try_dequeue_bulk(token, itemFirst, max - count); + } + return count; + } + + // Attempts to dequeue several elements from the queue using an explicit consumer token. + // Returns the number of items actually dequeued, which can + // be 0 if the timeout expires while waiting for elements, + // and at most max. + // Using a negative timeout indicates an indefinite timeout, + // and is thus functionally equivalent to calling wait_dequeue_bulk. + // Never allocates. Thread-safe. + template + inline size_t wait_dequeue_bulk_timed(consumer_token_t& token, It itemFirst, size_t max, std::int64_t timeout_usecs) + { + size_t count = 0; + max = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max, timeout_usecs); + while (count != max) { + count += inner.template try_dequeue_bulk(token, itemFirst, max - count); + } + return count; + } + + // Attempts to dequeue several elements from the queue using an explicit consumer token. + // Returns the number of items actually dequeued, which can + // be 0 if the timeout expires while waiting for elements, + // and at most max. + // Never allocates. Thread-safe. + template + inline size_t wait_dequeue_bulk_timed(consumer_token_t& token, It itemFirst, size_t max, std::chrono::duration const& timeout) + { + return wait_dequeue_bulk_timed(token, itemFirst, max, std::chrono::duration_cast(timeout).count()); + } + + + // Returns an estimate of the total number of elements currently in the queue. This + // estimate is only accurate if the queue has completely stabilized before it is called + // (i.e. all enqueue and dequeue operations have completed and their memory effects are + // visible on the calling thread, and no further operations start while this method is + // being called). + // Thread-safe. + inline size_t size_approx() const + { + return (size_t)sema->availableApprox(); + } + + + // Returns true if the underlying atomic variables used by + // the queue are lock-free (they should be on most platforms). + // Thread-safe. + static bool is_lock_free() + { + return ConcurrentQueue::is_lock_free(); + } + + +private: + template + static inline U* create() + { + auto p = (Traits::malloc)(sizeof(U)); + return p != nullptr ? new (p) U : nullptr; + } + + template + static inline U* create(A1&& a1) + { + auto p = (Traits::malloc)(sizeof(U)); + return p != nullptr ? new (p) U(std::forward(a1)) : nullptr; + } + + template + static inline void destroy(U* p) + { + if (p != nullptr) { + p->~U(); + } + (Traits::free)(p); + } + +private: + ConcurrentQueue inner; + std::unique_ptr sema; +}; + + +template +inline void swap(BlockingConcurrentQueue& a, BlockingConcurrentQueue& b) MOODYCAMEL_NOEXCEPT +{ + a.swap(b); +} + +} // end namespace moodycamel diff --git a/third-party/concurrentqueue/concurrentqueue.h b/third-party/concurrentqueue/concurrentqueue.h new file mode 100644 index 0000000..aa046e5 --- /dev/null +++ b/third-party/concurrentqueue/concurrentqueue.h @@ -0,0 +1,3635 @@ +// Provides a C++11 implementation of a multi-producer, multi-consumer lock-free queue. 
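+//
+// For illustration only (a minimal usage sketch, not upstream documentation):
+//
+//     #include "concurrentqueue.h"
+//     #include <thread>
+//
+//     int main() {
+//         moodycamel::ConcurrentQueue<int> q;
+//         std::thread producer([&] { for (int i = 0; i != 100; ++i) q.enqueue(i); });
+//         std::thread consumer([&] {
+//             int item;
+//             for (int received = 0; received != 100; )
+//                 if (q.try_dequeue(item))    // non-blocking; spin until all 100 arrive
+//                     ++received;
+//         });
+//         producer.join();
+//         consumer.join();
+//     }
+//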
+// An overview, including benchmark results, is provided here: +// http://moodycamel.com/blog/2014/a-fast-general-purpose-lock-free-queue-for-c++ +// The full design is also described in excruciating detail at: +// http://moodycamel.com/blog/2014/detailed-design-of-a-lock-free-queue + +// Simplified BSD license: +// Copyright (c) 2013-2016, Cameron Desrochers. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// - Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, this list of +// conditions and the following disclaimer in the documentation and/or other materials +// provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT +// OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +// TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +#pragma once + +#if defined(__GNUC__) +// Disable -Wconversion warnings (spuriously triggered when Traits::size_t and +// Traits::index_t are set to < 32 bits, causing integer promotion, causing warnings +// upon assigning any computed values) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" + +#ifdef MCDBGQ_USE_RELACY +#pragma GCC diagnostic ignored "-Wint-to-pointer-cast" +#endif +#endif + +#if defined(__APPLE__) +#include "TargetConditionals.h" +#endif + +#ifdef MCDBGQ_USE_RELACY +#include "relacy/relacy_std.hpp" +#include "relacy_shims.h" +// We only use malloc/free anyway, and the delete macro messes up `= delete` method declarations. +// We'll override the default trait malloc ourselves without a macro. +#undef new +#undef delete +#undef malloc +#undef free +#else +#include // Requires C++11. Sorry VS2010. 
+#include <cassert>
+#endif
+#include <cstddef>              // for max_align_t
+#include <cstdint>
+#include <cstdlib>
+#include <type_traits>
+#include <algorithm>
+#include <utility>
+#include <limits>
+#include <climits>              // for CHAR_BIT
+#include <array>
+#include <thread>               // partly for __WINPTHREADS_VERSION if on MinGW-w64 w/ POSIX threading
+
+// Platform-specific definitions of a numeric thread ID type and an invalid value
+namespace moodycamel { namespace details {
+	template<typename thread_id_t> struct thread_id_converter {
+		typedef thread_id_t thread_id_numeric_size_t;
+		typedef thread_id_t thread_id_hash_t;
+		static thread_id_hash_t prehash(thread_id_t const& x) { return x; }
+	};
+} }
+#if defined(MCDBGQ_USE_RELACY)
+namespace moodycamel { namespace details {
+	typedef std::uint32_t thread_id_t;
+	static const thread_id_t invalid_thread_id  = 0xFFFFFFFFU;
+	static const thread_id_t invalid_thread_id2 = 0xFFFFFFFEU;
+	static inline thread_id_t thread_id() { return rl::thread_index(); }
+} }
+#elif defined(_WIN32) || defined(__WINDOWS__) || defined(__WIN32__)
+// No sense pulling in windows.h in a header, we'll manually declare the function
+// we use and rely on backwards-compatibility for this not to break
+extern "C" __declspec(dllimport) unsigned long __stdcall GetCurrentThreadId(void);
+namespace moodycamel { namespace details {
+	static_assert(sizeof(unsigned long) == sizeof(std::uint32_t), "Expected size of unsigned long to be 32 bits on Windows");
+	typedef std::uint32_t thread_id_t;
+	static const thread_id_t invalid_thread_id  = 0;            // See http://blogs.msdn.com/b/oldnewthing/archive/2004/02/23/78395.aspx
+	static const thread_id_t invalid_thread_id2 = 0xFFFFFFFFU;  // Not technically guaranteed to be invalid, but is never used in practice. Note that all Win32 thread IDs are presently multiples of 4.
+	static inline thread_id_t thread_id() { return static_cast<thread_id_t>(::GetCurrentThreadId()); }
+} }
+#elif defined(__arm__) || defined(_M_ARM) || defined(__aarch64__) || (defined(__APPLE__) && TARGET_OS_IPHONE)
+namespace moodycamel { namespace details {
+	static_assert(sizeof(std::thread::id) == 4 || sizeof(std::thread::id) == 8, "std::thread::id is expected to be either 4 or 8 bytes");
+	
+	typedef std::thread::id thread_id_t;
+	static const thread_id_t invalid_thread_id;         // Default ctor creates invalid ID
+
+	// Note we don't define a invalid_thread_id2 since std::thread::id doesn't have one; it's
+	// only used if MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED is defined anyway, which it won't
+	// be.
+ static inline thread_id_t thread_id() { return std::this_thread::get_id(); } + + template struct thread_id_size { }; + template<> struct thread_id_size<4> { typedef std::uint32_t numeric_t; }; + template<> struct thread_id_size<8> { typedef std::uint64_t numeric_t; }; + + template<> struct thread_id_converter { + typedef thread_id_size::numeric_t thread_id_numeric_size_t; +#ifndef __APPLE__ + typedef std::size_t thread_id_hash_t; +#else + typedef thread_id_numeric_size_t thread_id_hash_t; +#endif + + static thread_id_hash_t prehash(thread_id_t const& x) + { +#ifndef __APPLE__ + return std::hash()(x); +#else + return *reinterpret_cast(&x); +#endif + } + }; +} } +#else +// Use a nice trick from this answer: http://stackoverflow.com/a/8438730/21475 +// In order to get a numeric thread ID in a platform-independent way, we use a thread-local +// static variable's address as a thread identifier :-) +#if defined(__GNUC__) || defined(__INTEL_COMPILER) +#define MOODYCAMEL_THREADLOCAL __thread +#elif defined(_MSC_VER) +#define MOODYCAMEL_THREADLOCAL __declspec(thread) +#else +// Assume C++11 compliant compiler +#define MOODYCAMEL_THREADLOCAL thread_local +#endif +namespace moodycamel { namespace details { + typedef std::uintptr_t thread_id_t; + static const thread_id_t invalid_thread_id = 0; // Address can't be nullptr + static const thread_id_t invalid_thread_id2 = 1; // Member accesses off a null pointer are also generally invalid. Plus it's not aligned. + static inline thread_id_t thread_id() { static MOODYCAMEL_THREADLOCAL int x; return reinterpret_cast(&x); } +} } +#endif + +// Exceptions +#ifndef MOODYCAMEL_EXCEPTIONS_ENABLED +#if (defined(_MSC_VER) && defined(_CPPUNWIND)) || (defined(__GNUC__) && defined(__EXCEPTIONS)) || (!defined(_MSC_VER) && !defined(__GNUC__)) +#define MOODYCAMEL_EXCEPTIONS_ENABLED +#endif +#endif +#ifdef MOODYCAMEL_EXCEPTIONS_ENABLED +#define MOODYCAMEL_TRY try +#define MOODYCAMEL_CATCH(...) catch(__VA_ARGS__) +#define MOODYCAMEL_RETHROW throw +#define MOODYCAMEL_THROW(expr) throw (expr) +#else +#define MOODYCAMEL_TRY if (true) +#define MOODYCAMEL_CATCH(...) else if (false) +#define MOODYCAMEL_RETHROW +#define MOODYCAMEL_THROW(expr) +#endif + +#ifndef MOODYCAMEL_NOEXCEPT +#if !defined(MOODYCAMEL_EXCEPTIONS_ENABLED) +#define MOODYCAMEL_NOEXCEPT +#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) true +#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) true +#elif defined(_MSC_VER) && defined(_NOEXCEPT) && _MSC_VER < 1800 +// VS2012's std::is_nothrow_[move_]constructible is broken and returns true when it shouldn't :-( +// We have to assume *all* non-trivial constructors may throw on VS2012! +#define MOODYCAMEL_NOEXCEPT _NOEXCEPT +#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) (std::is_rvalue_reference::value && std::is_move_constructible::value ? std::is_trivially_move_constructible::value : std::is_trivially_copy_constructible::value) +#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) ((std::is_rvalue_reference::value && std::is_move_assignable::value ? std::is_trivially_move_assignable::value || std::is_nothrow_move_assignable::value : std::is_trivially_copy_assignable::value || std::is_nothrow_copy_assignable::value) && MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr)) +#elif defined(_MSC_VER) && defined(_NOEXCEPT) && _MSC_VER < 1900 +#define MOODYCAMEL_NOEXCEPT _NOEXCEPT +#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) (std::is_rvalue_reference::value && std::is_move_constructible::value ? 
std::is_trivially_move_constructible::value || std::is_nothrow_move_constructible::value : std::is_trivially_copy_constructible::value || std::is_nothrow_copy_constructible::value) +#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) ((std::is_rvalue_reference::value && std::is_move_assignable::value ? std::is_trivially_move_assignable::value || std::is_nothrow_move_assignable::value : std::is_trivially_copy_assignable::value || std::is_nothrow_copy_assignable::value) && MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr)) +#else +#define MOODYCAMEL_NOEXCEPT noexcept +#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) noexcept(expr) +#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) noexcept(expr) +#endif +#endif + +#ifndef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED +#ifdef MCDBGQ_USE_RELACY +#define MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED +#else +// VS2013 doesn't support `thread_local`, and MinGW-w64 w/ POSIX threading has a crippling bug: http://sourceforge.net/p/mingw-w64/bugs/445 +// g++ <=4.7 doesn't support thread_local either. +// Finally, iOS/ARM doesn't have support for it either, and g++/ARM allows it to compile but it's unconfirmed to actually work +#if (!defined(_MSC_VER) || _MSC_VER >= 1900) && (!defined(__MINGW32__) && !defined(__MINGW64__) || !defined(__WINPTHREADS_VERSION)) && (!defined(__GNUC__) || __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) && (!defined(__APPLE__) || !TARGET_OS_IPHONE) && !defined(__arm__) && !defined(_M_ARM) && !defined(__aarch64__) +// Assume `thread_local` is fully supported in all other C++11 compilers/platforms +//#define MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED // always disabled for now since several users report having problems with it on +#endif +#endif +#endif + +// VS2012 doesn't support deleted functions. +// In this case, we declare the function normally but don't define it. A link error will be generated if the function is called. +#ifndef MOODYCAMEL_DELETE_FUNCTION +#if defined(_MSC_VER) && _MSC_VER < 1800 +#define MOODYCAMEL_DELETE_FUNCTION +#else +#define MOODYCAMEL_DELETE_FUNCTION = delete +#endif +#endif + +// Compiler-specific likely/unlikely hints +namespace moodycamel { namespace details { +#if defined(__GNUC__) + static inline bool (likely)(bool x) { return __builtin_expect((x), true); } + static inline bool (unlikely)(bool x) { return __builtin_expect((x), false); } +#else + static inline bool (likely)(bool x) { return x; } + static inline bool (unlikely)(bool x) { return x; } +#endif +} } + +#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG +#include "internal/concurrentqueue_internal_debug.h" +#endif + +namespace moodycamel { +namespace details { + template + struct const_numeric_max { + static_assert(std::is_integral::value, "const_numeric_max can only be used with integers"); + static const T value = std::numeric_limits::is_signed + ? (static_cast(1) << (sizeof(T) * CHAR_BIT - 1)) - static_cast(1) + : static_cast(-1); + }; + +#if defined(__GLIBCXX__) + typedef ::max_align_t std_max_align_t; // libstdc++ forgot to add it to std:: for a while +#else + typedef std::max_align_t std_max_align_t; // Others (e.g. MSVC) insist it can *only* be accessed via std:: +#endif + + // Some platforms have incorrectly set max_align_t to a type with <8 bytes alignment even while supporting + // 8-byte aligned scalar values (*cough* 32-bit iOS). Work around this with our own union. See issue #64. + typedef union { + std_max_align_t x; + long long y; + void* z; + } max_align_t; +} + +// Default traits for the ConcurrentQueue. 
To change some of the +// traits without re-implementing all of them, inherit from this +// struct and shadow the declarations you wish to be different; +// since the traits are used as a template type parameter, the +// shadowed declarations will be used where defined, and the defaults +// otherwise. +struct ConcurrentQueueDefaultTraits +{ + // General-purpose size type. std::size_t is strongly recommended. + typedef std::size_t size_t; + + // The type used for the enqueue and dequeue indices. Must be at least as + // large as size_t. Should be significantly larger than the number of elements + // you expect to hold at once, especially if you have a high turnover rate; + // for example, on 32-bit x86, if you expect to have over a hundred million + // elements or pump several million elements through your queue in a very + // short space of time, using a 32-bit type *may* trigger a race condition. + // A 64-bit int type is recommended in that case, and in practice will + // prevent a race condition no matter the usage of the queue. Note that + // whether the queue is lock-free with a 64-int type depends on the whether + // std::atomic is lock-free, which is platform-specific. + typedef std::size_t index_t; + + // Internally, all elements are enqueued and dequeued from multi-element + // blocks; this is the smallest controllable unit. If you expect few elements + // but many producers, a smaller block size should be favoured. For few producers + // and/or many elements, a larger block size is preferred. A sane default + // is provided. Must be a power of 2. + static const size_t BLOCK_SIZE = 32; + + // For explicit producers (i.e. when using a producer token), the block is + // checked for being empty by iterating through a list of flags, one per element. + // For large block sizes, this is too inefficient, and switching to an atomic + // counter-based approach is faster. The switch is made for block sizes strictly + // larger than this threshold. + static const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = 32; + + // How many full blocks can be expected for a single explicit producer? This should + // reflect that number's maximum for optimal performance. Must be a power of 2. + static const size_t EXPLICIT_INITIAL_INDEX_SIZE = 32; + + // How many full blocks can be expected for a single implicit producer? This should + // reflect that number's maximum for optimal performance. Must be a power of 2. + static const size_t IMPLICIT_INITIAL_INDEX_SIZE = 32; + + // The initial size of the hash table mapping thread IDs to implicit producers. + // Note that the hash is resized every time it becomes half full. + // Must be a power of two, and either 0 or at least 1. If 0, implicit production + // (using the enqueue methods without an explicit producer token) is disabled. + static const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = 32; + + // Controls the number of items that an explicit consumer (i.e. one with a token) + // must consume before it causes all consumers to rotate and move on to the next + // internal queue. + static const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = 256; + + // The maximum number of elements (inclusive) that can be enqueued to a sub-queue. + // Enqueue operations that would cause this limit to be surpassed will fail. Note + // that this limit is enforced at the block level (for performance reasons), i.e. + // it's rounded up to the nearest block size. 
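+	// For illustration only (a minimal sketch, not upstream documentation): traits are
+	// customized by inheriting from ConcurrentQueueDefaultTraits and shadowing only the
+	// members of interest, e.g. larger blocks plus a (block-granular) size cap:
+	//
+	//     struct MyTraits : public moodycamel::ConcurrentQueueDefaultTraits {
+	//         static const size_t BLOCK_SIZE = 256;           // must be a power of 2
+	//         static const size_t MAX_SUBQUEUE_SIZE = 4096;   // rounded up to a multiple of BLOCK_SIZE
+	//     };
+	//     moodycamel::ConcurrentQueue<int, MyTraits> q;
+	//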
+	static const size_t MAX_SUBQUEUE_SIZE = details::const_numeric_max<size_t>::value;
+	
+	
+#ifndef MCDBGQ_USE_RELACY
+	// Memory allocation can be customized if needed.
+	// malloc should return nullptr on failure, and handle alignment like std::malloc.
+#if defined(malloc) || defined(free)
+	// Gah, this is 2015, stop defining macros that break standard code already!
+	// Work around malloc/free being special macros:
+	static inline void* WORKAROUND_malloc(size_t size) { return malloc(size); }
+	static inline void WORKAROUND_free(void* ptr) { return free(ptr); }
+	static inline void* (malloc)(size_t size) { return WORKAROUND_malloc(size); }
+	static inline void (free)(void* ptr) { return WORKAROUND_free(ptr); }
+#else
+	static inline void* malloc(size_t size) { return std::malloc(size); }
+	static inline void free(void* ptr) { return std::free(ptr); }
+#endif
+#else
+	// Debug versions when running under the Relacy race detector (ignore
+	// these in user code)
+	static inline void* malloc(size_t size) { return rl::rl_malloc(size, $); }
+	static inline void free(void* ptr) { return rl::rl_free(ptr, $); }
+#endif
+};
+
+
+// When producing or consuming many elements, the most efficient way is to:
+//    1) Use one of the bulk-operation methods of the queue with a token
+//    2) Failing that, use the bulk-operation methods without a token
+//    3) Failing that, create a token and use that with the single-item methods
+//    4) Failing that, use the single-parameter methods of the queue
+// Having said that, don't create tokens willy-nilly -- ideally there should be
+// a maximum of one token per thread (of each kind).
+struct ProducerToken;
+struct ConsumerToken;
+
+template<typename T, typename Traits> class ConcurrentQueue;
+template<typename T, typename Traits> class BlockingConcurrentQueue;
+class ConcurrentQueueTests;
+
+
+namespace details
+{
+	struct ConcurrentQueueProducerTypelessBase
+	{
+		ConcurrentQueueProducerTypelessBase* next;
+		std::atomic<bool> inactive;
+		ProducerToken* token;
+		
+		ConcurrentQueueProducerTypelessBase()
+			: next(nullptr), inactive(false), token(nullptr)
+		{
+		}
+	};
+	
+	template<bool use32> struct _hash_32_or_64 {
+		static inline std::uint32_t hash(std::uint32_t h)
+		{
+			// MurmurHash3 finalizer -- see https://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp
+			// Since the thread ID is already unique, all we really want to do is propagate that
+			// uniqueness evenly across all the bits, so that we can use a subset of the bits while
+			// reducing collisions significantly
+			h ^= h >> 16;
+			h *= 0x85ebca6b;
+			h ^= h >> 13;
+			h *= 0xc2b2ae35;
+			return h ^ (h >> 16);
+		}
+	};
+	template<> struct _hash_32_or_64<1> {
+		static inline std::uint64_t hash(std::uint64_t h)
+		{
+			h ^= h >> 33;
+			h *= 0xff51afd7ed558ccd;
+			h ^= h >> 33;
+			h *= 0xc4ceb9fe1a85ec53;
+			return h ^ (h >> 33);
+		}
+	};
+	template<std::size_t size> struct hash_32_or_64 : public _hash_32_or_64<(size > 4)> { };
+	
+	static inline size_t hash_thread_id(thread_id_t id)
+	{
+		static_assert(sizeof(thread_id_t) <= 8, "Expected a platform where thread IDs are at most 64-bit values");
+		return static_cast<size_t>(hash_32_or_64<sizeof(thread_id_converter<thread_id_t>::thread_id_hash_t)>::hash(
+			thread_id_converter<thread_id_t>::prehash(id)));
+	}
+	
+	template<typename T>
+	static inline bool circular_less_than(T a, T b)
+	{
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable: 4554)
+#endif
+		static_assert(std::is_integral<T>::value && !std::numeric_limits<T>::is_signed, "circular_less_than is intended to be used only with unsigned integer types");
+		return static_cast<T>(a - b) > static_cast<T>(static_cast<T>(1) << static_cast<T>(sizeof(T) * CHAR_BIT - 1));
+#ifdef _MSC_VER
+#pragma warning(pop) +#endif + } + + template + static inline char* align_for(char* ptr) + { + const std::size_t alignment = std::alignment_of::value; + return ptr + (alignment - (reinterpret_cast(ptr) % alignment)) % alignment; + } + + template + static inline T ceil_to_pow_2(T x) + { + static_assert(std::is_integral::value && !std::numeric_limits::is_signed, "ceil_to_pow_2 is intended to be used only with unsigned integer types"); + + // Adapted from http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 + --x; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + for (std::size_t i = 1; i < sizeof(T); i <<= 1) { + x |= x >> (i << 3); + } + ++x; + return x; + } + + template + static inline void swap_relaxed(std::atomic& left, std::atomic& right) + { + T temp = std::move(left.load(std::memory_order_relaxed)); + left.store(std::move(right.load(std::memory_order_relaxed)), std::memory_order_relaxed); + right.store(std::move(temp), std::memory_order_relaxed); + } + + template + static inline T const& nomove(T const& x) + { + return x; + } + + template + struct nomove_if + { + template + static inline T const& eval(T const& x) + { + return x; + } + }; + + template<> + struct nomove_if + { + template + static inline auto eval(U&& x) + -> decltype(std::forward(x)) + { + return std::forward(x); + } + }; + + template + static inline auto deref_noexcept(It& it) MOODYCAMEL_NOEXCEPT -> decltype(*it) + { + return *it; + } + +#if defined(__clang__) || !defined(__GNUC__) || __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + template struct is_trivially_destructible : std::is_trivially_destructible { }; +#else + template struct is_trivially_destructible : std::has_trivial_destructor { }; +#endif + +#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED +#ifdef MCDBGQ_USE_RELACY + typedef RelacyThreadExitListener ThreadExitListener; + typedef RelacyThreadExitNotifier ThreadExitNotifier; +#else + struct ThreadExitListener + { + typedef void (*callback_t)(void*); + callback_t callback; + void* userData; + + ThreadExitListener* next; // reserved for use by the ThreadExitNotifier + }; + + + class ThreadExitNotifier + { + public: + static void subscribe(ThreadExitListener* listener) + { + auto& tlsInst = instance(); + listener->next = tlsInst.tail; + tlsInst.tail = listener; + } + + static void unsubscribe(ThreadExitListener* listener) + { + auto& tlsInst = instance(); + ThreadExitListener** prev = &tlsInst.tail; + for (auto ptr = tlsInst.tail; ptr != nullptr; ptr = ptr->next) { + if (ptr == listener) { + *prev = ptr->next; + break; + } + prev = &ptr->next; + } + } + + private: + ThreadExitNotifier() : tail(nullptr) { } + ThreadExitNotifier(ThreadExitNotifier const&) MOODYCAMEL_DELETE_FUNCTION; + ThreadExitNotifier& operator=(ThreadExitNotifier const&) MOODYCAMEL_DELETE_FUNCTION; + + ~ThreadExitNotifier() + { + // This thread is about to exit, let everyone know! + assert(this == &instance() && "If this assert fails, you likely have a buggy compiler! 
Change the preprocessor conditions such that MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED is no longer defined."); + for (auto ptr = tail; ptr != nullptr; ptr = ptr->next) { + ptr->callback(ptr->userData); + } + } + + // Thread-local + static inline ThreadExitNotifier& instance() + { + static thread_local ThreadExitNotifier notifier; + return notifier; + } + + private: + ThreadExitListener* tail; + }; +#endif +#endif + + template struct static_is_lock_free_num { enum { value = 0 }; }; + template<> struct static_is_lock_free_num { enum { value = ATOMIC_CHAR_LOCK_FREE }; }; + template<> struct static_is_lock_free_num { enum { value = ATOMIC_SHORT_LOCK_FREE }; }; + template<> struct static_is_lock_free_num { enum { value = ATOMIC_INT_LOCK_FREE }; }; + template<> struct static_is_lock_free_num { enum { value = ATOMIC_LONG_LOCK_FREE }; }; + template<> struct static_is_lock_free_num { enum { value = ATOMIC_LLONG_LOCK_FREE }; }; + template struct static_is_lock_free : static_is_lock_free_num::type> { }; + template<> struct static_is_lock_free { enum { value = ATOMIC_BOOL_LOCK_FREE }; }; + template struct static_is_lock_free { enum { value = ATOMIC_POINTER_LOCK_FREE }; }; +} + + +struct ProducerToken +{ + template + explicit ProducerToken(ConcurrentQueue& queue); + + template + explicit ProducerToken(BlockingConcurrentQueue& queue); + + ProducerToken(ProducerToken&& other) MOODYCAMEL_NOEXCEPT + : producer(other.producer) + { + other.producer = nullptr; + if (producer != nullptr) { + producer->token = this; + } + } + + inline ProducerToken& operator=(ProducerToken&& other) MOODYCAMEL_NOEXCEPT + { + swap(other); + return *this; + } + + void swap(ProducerToken& other) MOODYCAMEL_NOEXCEPT + { + std::swap(producer, other.producer); + if (producer != nullptr) { + producer->token = this; + } + if (other.producer != nullptr) { + other.producer->token = &other; + } + } + + // A token is always valid unless: + // 1) Memory allocation failed during construction + // 2) It was moved via the move constructor + // (Note: assignment does a swap, leaving both potentially valid) + // 3) The associated queue was destroyed + // Note that if valid() returns true, that only indicates + // that the token is valid for use with a specific queue, + // but not which one; that's up to the user to track. 
+ inline bool valid() const { return producer != nullptr; } + + ~ProducerToken() + { + if (producer != nullptr) { + producer->token = nullptr; + producer->inactive.store(true, std::memory_order_release); + } + } + + // Disable copying and assignment + ProducerToken(ProducerToken const&) MOODYCAMEL_DELETE_FUNCTION; + ProducerToken& operator=(ProducerToken const&) MOODYCAMEL_DELETE_FUNCTION; + +private: + template friend class ConcurrentQueue; + friend class ConcurrentQueueTests; + +protected: + details::ConcurrentQueueProducerTypelessBase* producer; +}; + + +struct ConsumerToken +{ + template + explicit ConsumerToken(ConcurrentQueue& q); + + template + explicit ConsumerToken(BlockingConcurrentQueue& q); + + ConsumerToken(ConsumerToken&& other) MOODYCAMEL_NOEXCEPT + : initialOffset(other.initialOffset), lastKnownGlobalOffset(other.lastKnownGlobalOffset), itemsConsumedFromCurrent(other.itemsConsumedFromCurrent), currentProducer(other.currentProducer), desiredProducer(other.desiredProducer) + { + } + + inline ConsumerToken& operator=(ConsumerToken&& other) MOODYCAMEL_NOEXCEPT + { + swap(other); + return *this; + } + + void swap(ConsumerToken& other) MOODYCAMEL_NOEXCEPT + { + std::swap(initialOffset, other.initialOffset); + std::swap(lastKnownGlobalOffset, other.lastKnownGlobalOffset); + std::swap(itemsConsumedFromCurrent, other.itemsConsumedFromCurrent); + std::swap(currentProducer, other.currentProducer); + std::swap(desiredProducer, other.desiredProducer); + } + + // Disable copying and assignment + ConsumerToken(ConsumerToken const&) MOODYCAMEL_DELETE_FUNCTION; + ConsumerToken& operator=(ConsumerToken const&) MOODYCAMEL_DELETE_FUNCTION; + +private: + template friend class ConcurrentQueue; + friend class ConcurrentQueueTests; + +private: // but shared with ConcurrentQueue + std::uint32_t initialOffset; + std::uint32_t lastKnownGlobalOffset; + std::uint32_t itemsConsumedFromCurrent; + details::ConcurrentQueueProducerTypelessBase* currentProducer; + details::ConcurrentQueueProducerTypelessBase* desiredProducer; +}; + +// Need to forward-declare this swap because it's in a namespace. +// See http://stackoverflow.com/questions/4492062/why-does-a-c-friend-class-need-a-forward-declaration-only-in-other-namespaces +template +inline void swap(typename ConcurrentQueue::ImplicitProducerKVP& a, typename ConcurrentQueue::ImplicitProducerKVP& b) MOODYCAMEL_NOEXCEPT; + + +template +class ConcurrentQueue +{ +public: + typedef ::moodycamel::ProducerToken producer_token_t; + typedef ::moodycamel::ConsumerToken consumer_token_t; + + typedef typename Traits::index_t index_t; + typedef typename Traits::size_t size_t; + + static const size_t BLOCK_SIZE = static_cast(Traits::BLOCK_SIZE); + static const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = static_cast(Traits::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD); + static const size_t EXPLICIT_INITIAL_INDEX_SIZE = static_cast(Traits::EXPLICIT_INITIAL_INDEX_SIZE); + static const size_t IMPLICIT_INITIAL_INDEX_SIZE = static_cast(Traits::IMPLICIT_INITIAL_INDEX_SIZE); + static const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = static_cast(Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE); + static const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = static_cast(Traits::EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE); +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable: 4307) // + integral constant overflow (that's what the ternary expression is for!) 
+#pragma warning(disable: 4309) // static_cast: Truncation of constant value +#endif + static const size_t MAX_SUBQUEUE_SIZE = (details::const_numeric_max::value - static_cast(Traits::MAX_SUBQUEUE_SIZE) < BLOCK_SIZE) ? details::const_numeric_max::value : ((static_cast(Traits::MAX_SUBQUEUE_SIZE) + (BLOCK_SIZE - 1)) / BLOCK_SIZE * BLOCK_SIZE); +#ifdef _MSC_VER +#pragma warning(pop) +#endif + + static_assert(!std::numeric_limits::is_signed && std::is_integral::value, "Traits::size_t must be an unsigned integral type"); + static_assert(!std::numeric_limits::is_signed && std::is_integral::value, "Traits::index_t must be an unsigned integral type"); + static_assert(sizeof(index_t) >= sizeof(size_t), "Traits::index_t must be at least as wide as Traits::size_t"); + static_assert((BLOCK_SIZE > 1) && !(BLOCK_SIZE & (BLOCK_SIZE - 1)), "Traits::BLOCK_SIZE must be a power of 2 (and at least 2)"); + static_assert((EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD > 1) && !(EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD & (EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD - 1)), "Traits::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD must be a power of 2 (and greater than 1)"); + static_assert((EXPLICIT_INITIAL_INDEX_SIZE > 1) && !(EXPLICIT_INITIAL_INDEX_SIZE & (EXPLICIT_INITIAL_INDEX_SIZE - 1)), "Traits::EXPLICIT_INITIAL_INDEX_SIZE must be a power of 2 (and greater than 1)"); + static_assert((IMPLICIT_INITIAL_INDEX_SIZE > 1) && !(IMPLICIT_INITIAL_INDEX_SIZE & (IMPLICIT_INITIAL_INDEX_SIZE - 1)), "Traits::IMPLICIT_INITIAL_INDEX_SIZE must be a power of 2 (and greater than 1)"); + static_assert((INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) || !(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE & (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE - 1)), "Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE must be a power of 2"); + static_assert(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0 || INITIAL_IMPLICIT_PRODUCER_HASH_SIZE >= 1, "Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE must be at least 1 (or 0 to disable implicit enqueueing)"); + +public: + // Creates a queue with at least `capacity` element slots; note that the + // actual number of elements that can be inserted without additional memory + // allocation depends on the number of producers and the block size (e.g. if + // the block size is equal to `capacity`, only a single block will be allocated + // up-front, which means only a single producer will be able to enqueue elements + // without an extra allocation -- blocks aren't shared between producers). + // This method is not thread safe -- it is up to the user to ensure that the + // queue is fully constructed before it starts being used by other threads (this + // includes making the memory effects of construction visible, possibly with a + // memory barrier). + explicit ConcurrentQueue(size_t capacity = 6 * BLOCK_SIZE) + : producerListTail(nullptr), + producerCount(0), + initialBlockPoolIndex(0), + nextExplicitConsumerId(0), + globalExplicitConsumerOffset(0) + { + implicitProducerHashResizeInProgress.clear(std::memory_order_relaxed); + populate_initial_implicit_producer_hash(); + populate_initial_block_list(capacity / BLOCK_SIZE + ((capacity & (BLOCK_SIZE - 1)) == 0 ? 0 : 1)); + +#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG + // Track all the producers using a fully-resolved typed list for + // each kind; this makes it possible to debug them starting from + // the root queue object (otherwise wacky casts are needed that + // don't compile in the debugger's expression evaluator). 
+ explicitProducers.store(nullptr, std::memory_order_relaxed); + implicitProducers.store(nullptr, std::memory_order_relaxed); +#endif + } + + // Computes the correct amount of pre-allocated blocks for you based + // on the minimum number of elements you want available at any given + // time, and the maximum concurrent number of each type of producer. + ConcurrentQueue(size_t minCapacity, size_t maxExplicitProducers, size_t maxImplicitProducers) + : producerListTail(nullptr), + producerCount(0), + initialBlockPoolIndex(0), + nextExplicitConsumerId(0), + globalExplicitConsumerOffset(0) + { + implicitProducerHashResizeInProgress.clear(std::memory_order_relaxed); + populate_initial_implicit_producer_hash(); + size_t blocks = (((minCapacity + BLOCK_SIZE - 1) / BLOCK_SIZE) - 1) * (maxExplicitProducers + 1) + 2 * (maxExplicitProducers + maxImplicitProducers); + populate_initial_block_list(blocks); + +#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG + explicitProducers.store(nullptr, std::memory_order_relaxed); + implicitProducers.store(nullptr, std::memory_order_relaxed); +#endif + } + + // Note: The queue should not be accessed concurrently while it's + // being deleted. It's up to the user to synchronize this. + // This method is not thread safe. + ~ConcurrentQueue() + { + // Destroy producers + auto ptr = producerListTail.load(std::memory_order_relaxed); + while (ptr != nullptr) { + auto next = ptr->next_prod(); + if (ptr->token != nullptr) { + ptr->token->producer = nullptr; + } + destroy(ptr); + ptr = next; + } + + // Destroy implicit producer hash tables + if (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE != 0) { + auto hash = implicitProducerHash.load(std::memory_order_relaxed); + while (hash != nullptr) { + auto prev = hash->prev; + if (prev != nullptr) { // The last hash is part of this object and was not allocated dynamically + for (size_t i = 0; i != hash->capacity; ++i) { + hash->entries[i].~ImplicitProducerKVP(); + } + hash->~ImplicitProducerHash(); + (Traits::free)(hash); + } + hash = prev; + } + } + + // Destroy global free list + auto block = freeList.head_unsafe(); + while (block != nullptr) { + auto next = block->freeListNext.load(std::memory_order_relaxed); + if (block->dynamicallyAllocated) { + destroy(block); + } + block = next; + } + + // Destroy initial free list + destroy_array(initialBlockPool, initialBlockPoolSize); + } + + // Disable copying and copy assignment + ConcurrentQueue(ConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION; + ConcurrentQueue& operator=(ConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION; + + // Moving is supported, but note that it is *not* a thread-safe operation. + // Nobody can use the queue while it's being moved, and the memory effects + // of that move must be propagated to other threads before they can use it. + // Note: When a queue is moved, its tokens are still valid but can only be + // used with the destination queue (i.e. semantically they are moved along + // with the queue itself). 
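+	// For illustration only (a minimal sketch, not upstream documentation): moving
+	// transfers the queue's contents and producer state to the destination:
+	//
+	//     moodycamel::ConcurrentQueue<int> a;
+	//     a.enqueue(1);
+	//     moodycamel::ConcurrentQueue<int> b(std::move(a));   // no other thread may touch `a` or `b` during the move
+	//     int item;
+	//     bool ok = b.try_dequeue(item);                      // retrieves the 1 enqueued into `a`
+	//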
+ ConcurrentQueue(ConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT + : producerListTail(other.producerListTail.load(std::memory_order_relaxed)), + producerCount(other.producerCount.load(std::memory_order_relaxed)), + initialBlockPoolIndex(other.initialBlockPoolIndex.load(std::memory_order_relaxed)), + initialBlockPool(other.initialBlockPool), + initialBlockPoolSize(other.initialBlockPoolSize), + freeList(std::move(other.freeList)), + nextExplicitConsumerId(other.nextExplicitConsumerId.load(std::memory_order_relaxed)), + globalExplicitConsumerOffset(other.globalExplicitConsumerOffset.load(std::memory_order_relaxed)) + { + // Move the other one into this, and leave the other one as an empty queue + implicitProducerHashResizeInProgress.clear(std::memory_order_relaxed); + populate_initial_implicit_producer_hash(); + swap_implicit_producer_hashes(other); + + other.producerListTail.store(nullptr, std::memory_order_relaxed); + other.producerCount.store(0, std::memory_order_relaxed); + other.nextExplicitConsumerId.store(0, std::memory_order_relaxed); + other.globalExplicitConsumerOffset.store(0, std::memory_order_relaxed); + +#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG + explicitProducers.store(other.explicitProducers.load(std::memory_order_relaxed), std::memory_order_relaxed); + other.explicitProducers.store(nullptr, std::memory_order_relaxed); + implicitProducers.store(other.implicitProducers.load(std::memory_order_relaxed), std::memory_order_relaxed); + other.implicitProducers.store(nullptr, std::memory_order_relaxed); +#endif + + other.initialBlockPoolIndex.store(0, std::memory_order_relaxed); + other.initialBlockPoolSize = 0; + other.initialBlockPool = nullptr; + + reown_producers(); + } + + inline ConcurrentQueue& operator=(ConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT + { + return swap_internal(other); + } + + // Swaps this queue's state with the other's. Not thread-safe. + // Swapping two queues does not invalidate their tokens, however + // the tokens that were created for one queue must be used with + // only the swapped queue (i.e. the tokens are tied to the + // queue's movable state, not the object itself). + inline void swap(ConcurrentQueue& other) MOODYCAMEL_NOEXCEPT + { + swap_internal(other); + } + +private: + ConcurrentQueue& swap_internal(ConcurrentQueue& other) + { + if (this == &other) { + return *this; + } + + details::swap_relaxed(producerListTail, other.producerListTail); + details::swap_relaxed(producerCount, other.producerCount); + details::swap_relaxed(initialBlockPoolIndex, other.initialBlockPoolIndex); + std::swap(initialBlockPool, other.initialBlockPool); + std::swap(initialBlockPoolSize, other.initialBlockPoolSize); + freeList.swap(other.freeList); + details::swap_relaxed(nextExplicitConsumerId, other.nextExplicitConsumerId); + details::swap_relaxed(globalExplicitConsumerOffset, other.globalExplicitConsumerOffset); + + swap_implicit_producer_hashes(other); + + reown_producers(); + other.reown_producers(); + +#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG + details::swap_relaxed(explicitProducers, other.explicitProducers); + details::swap_relaxed(implicitProducers, other.implicitProducers); +#endif + + return *this; + } + +public: + // Enqueues a single item (by copying it). + // Allocates memory if required. Only fails if memory allocation fails (or implicit + // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0, + // or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). + // Thread-safe. 
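+	// For illustration only (a minimal sketch, not upstream documentation): the simplest
+	// usage needs no tokens at all; enqueue() below allocates as needed and try_dequeue()
+	// polls without blocking:
+	//
+	//     moodycamel::ConcurrentQueue<int> q;
+	//     q.enqueue(42);                  // copies the item in; may allocate a block
+	//     int item;
+	//     if (q.try_dequeue(item)) { /* item == 42 in this single-threaded example */ }
+	//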
+ inline bool enqueue(T const& item) + { + if (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; + return inner_enqueue(item); + } + + // Enqueues a single item (by moving it, if possible). + // Allocates memory if required. Only fails if memory allocation fails (or implicit + // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0, + // or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). + // Thread-safe. + inline bool enqueue(T&& item) + { + if (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; + return inner_enqueue(std::move(item)); + } + + // Enqueues a single item (by copying it) using an explicit producer token. + // Allocates memory if required. Only fails if memory allocation fails (or + // Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). + // Thread-safe. + inline bool enqueue(producer_token_t const& token, T const& item) + { + return inner_enqueue(token, item); + } + + // Enqueues a single item (by moving it, if possible) using an explicit producer token. + // Allocates memory if required. Only fails if memory allocation fails (or + // Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). + // Thread-safe. + inline bool enqueue(producer_token_t const& token, T&& item) + { + return inner_enqueue(token, std::move(item)); + } + + // Enqueues several items. + // Allocates memory if required. Only fails if memory allocation fails (or + // implicit production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE + // is 0, or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). + // Note: Use std::make_move_iterator if the elements should be moved instead of copied. + // Thread-safe. + template + bool enqueue_bulk(It itemFirst, size_t count) + { + if (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; + return inner_enqueue_bulk(itemFirst, count); + } + + // Enqueues several items using an explicit producer token. + // Allocates memory if required. Only fails if memory allocation fails + // (or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). + // Note: Use std::make_move_iterator if the elements should be moved + // instead of copied. + // Thread-safe. + template + bool enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count) + { + return inner_enqueue_bulk(token, itemFirst, count); + } + + // Enqueues a single item (by copying it). + // Does not allocate memory. Fails if not enough room to enqueue (or implicit + // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE + // is 0). + // Thread-safe. + inline bool try_enqueue(T const& item) + { + if (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; + return inner_enqueue(item); + } + + // Enqueues a single item (by moving it, if possible). + // Does not allocate memory (except for one-time implicit producer). + // Fails if not enough room to enqueue (or implicit production is + // disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0). + // Thread-safe. + inline bool try_enqueue(T&& item) + { + if (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; + return inner_enqueue(std::move(item)); + } + + // Enqueues a single item (by copying it) using an explicit producer token. + // Does not allocate memory. Fails if not enough room to enqueue. + // Thread-safe. 
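+	// For illustration only (a minimal sketch, not upstream documentation): a producer
+	// that must not allocate after start-up can pre-size the queue, keep one ProducerToken,
+	// and use the token-taking try_enqueue overload that follows:
+	//
+	//     moodycamel::ConcurrentQueue<int> q(1024);    // pre-allocate blocks up front
+	//     moodycamel::ProducerToken ptok(q);
+	//     if (!q.try_enqueue(ptok, 42)) {
+	//         // no free block was available (try_enqueue never allocates); item not added
+	//     }
+	//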
+ inline bool try_enqueue(producer_token_t const& token, T const& item) + { + return inner_enqueue(token, item); + } + + // Enqueues a single item (by moving it, if possible) using an explicit producer token. + // Does not allocate memory. Fails if not enough room to enqueue. + // Thread-safe. + inline bool try_enqueue(producer_token_t const& token, T&& item) + { + return inner_enqueue(token, std::move(item)); + } + + // Enqueues several items. + // Does not allocate memory (except for one-time implicit producer). + // Fails if not enough room to enqueue (or implicit production is + // disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0). + // Note: Use std::make_move_iterator if the elements should be moved + // instead of copied. + // Thread-safe. + template + bool try_enqueue_bulk(It itemFirst, size_t count) + { + if (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; + return inner_enqueue_bulk(itemFirst, count); + } + + // Enqueues several items using an explicit producer token. + // Does not allocate memory. Fails if not enough room to enqueue. + // Note: Use std::make_move_iterator if the elements should be moved + // instead of copied. + // Thread-safe. + template + bool try_enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count) + { + return inner_enqueue_bulk(token, itemFirst, count); + } + + + + // Attempts to dequeue from the queue. + // Returns false if all producer streams appeared empty at the time they + // were checked (so, the queue is likely but not guaranteed to be empty). + // Never allocates. Thread-safe. + template + bool try_dequeue(U& item) + { + // Instead of simply trying each producer in turn (which could cause needless contention on the first + // producer), we score them heuristically. + size_t nonEmptyCount = 0; + ProducerBase* best = nullptr; + size_t bestSize = 0; + for (auto ptr = producerListTail.load(std::memory_order_acquire); nonEmptyCount < 3 && ptr != nullptr; ptr = ptr->next_prod()) { + auto size = ptr->size_approx(); + if (size > 0) { + if (size > bestSize) { + bestSize = size; + best = ptr; + } + ++nonEmptyCount; + } + } + + // If there was at least one non-empty queue but it appears empty at the time + // we try to dequeue from it, we need to make sure every queue's been tried + if (nonEmptyCount > 0) { + if ((details::likely)(best->dequeue(item))) { + return true; + } + for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { + if (ptr != best && ptr->dequeue(item)) { + return true; + } + } + } + return false; + } + + // Attempts to dequeue from the queue. + // Returns false if all producer streams appeared empty at the time they + // were checked (so, the queue is likely but not guaranteed to be empty). + // This differs from the try_dequeue(item) method in that this one does + // not attempt to reduce contention by interleaving the order that producer + // streams are dequeued from. So, using this method can reduce overall throughput + // under contention, but will give more predictable results in single-threaded + // consumer scenarios. This is mostly only useful for internal unit tests. + // Never allocates. Thread-safe. + template + bool try_dequeue_non_interleaved(U& item) + { + for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { + if (ptr->dequeue(item)) { + return true; + } + } + return false; + } + + // Attempts to dequeue from the queue using an explicit consumer token. 
+ // Returns false if all producer streams appeared empty at the time they + // were checked (so, the queue is likely but not guaranteed to be empty). + // Never allocates. Thread-safe. + template + bool try_dequeue(consumer_token_t& token, U& item) + { + // The idea is roughly as follows: + // Every 256 items from one producer, make everyone rotate (increase the global offset) -> this means the highest efficiency consumer dictates the rotation speed of everyone else, more or less + // If you see that the global offset has changed, you must reset your consumption counter and move to your designated place + // If there's no items where you're supposed to be, keep moving until you find a producer with some items + // If the global offset has not changed but you've run out of items to consume, move over from your current position until you find an producer with something in it + + if (token.desiredProducer == nullptr || token.lastKnownGlobalOffset != globalExplicitConsumerOffset.load(std::memory_order_relaxed)) { + if (!update_current_producer_after_rotation(token)) { + return false; + } + } + + // If there was at least one non-empty queue but it appears empty at the time + // we try to dequeue from it, we need to make sure every queue's been tried + if (static_cast(token.currentProducer)->dequeue(item)) { + if (++token.itemsConsumedFromCurrent == EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE) { + globalExplicitConsumerOffset.fetch_add(1, std::memory_order_relaxed); + } + return true; + } + + auto tail = producerListTail.load(std::memory_order_acquire); + auto ptr = static_cast(token.currentProducer)->next_prod(); + if (ptr == nullptr) { + ptr = tail; + } + while (ptr != static_cast(token.currentProducer)) { + if (ptr->dequeue(item)) { + token.currentProducer = ptr; + token.itemsConsumedFromCurrent = 1; + return true; + } + ptr = ptr->next_prod(); + if (ptr == nullptr) { + ptr = tail; + } + } + return false; + } + + // Attempts to dequeue several elements from the queue. + // Returns the number of items actually dequeued. + // Returns 0 if all producer streams appeared empty at the time they + // were checked (so, the queue is likely but not guaranteed to be empty). + // Never allocates. Thread-safe. + template + size_t try_dequeue_bulk(It itemFirst, size_t max) + { + size_t count = 0; + for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { + count += ptr->dequeue_bulk(itemFirst, max - count); + if (count == max) { + break; + } + } + return count; + } + + // Attempts to dequeue several elements from the queue using an explicit consumer token. + // Returns the number of items actually dequeued. + // Returns 0 if all producer streams appeared empty at the time they + // were checked (so, the queue is likely but not guaranteed to be empty). + // Never allocates. Thread-safe. 
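+	// For illustration only (a minimal sketch, not upstream documentation): batched,
+	// non-blocking consumption with a consumer token via the overload that follows;
+	// `handle` is a placeholder:
+	//
+	//     moodycamel::ConcurrentQueue<int> q;
+	//     moodycamel::ConsumerToken ctok(q);
+	//     int buf[32];
+	//     size_t n;
+	//     while ((n = q.try_dequeue_bulk(ctok, buf, 32)) != 0) {
+	//         for (size_t i = 0; i != n; ++i)
+	//             handle(buf[i]);
+	//     }
+	//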
+ template + size_t try_dequeue_bulk(consumer_token_t& token, It itemFirst, size_t max) + { + if (token.desiredProducer == nullptr || token.lastKnownGlobalOffset != globalExplicitConsumerOffset.load(std::memory_order_relaxed)) { + if (!update_current_producer_after_rotation(token)) { + return 0; + } + } + + size_t count = static_cast(token.currentProducer)->dequeue_bulk(itemFirst, max); + if (count == max) { + if ((token.itemsConsumedFromCurrent += static_cast(max)) >= EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE) { + globalExplicitConsumerOffset.fetch_add(1, std::memory_order_relaxed); + } + return max; + } + token.itemsConsumedFromCurrent += static_cast(count); + max -= count; + + auto tail = producerListTail.load(std::memory_order_acquire); + auto ptr = static_cast(token.currentProducer)->next_prod(); + if (ptr == nullptr) { + ptr = tail; + } + while (ptr != static_cast(token.currentProducer)) { + auto dequeued = ptr->dequeue_bulk(itemFirst, max); + count += dequeued; + if (dequeued != 0) { + token.currentProducer = ptr; + token.itemsConsumedFromCurrent = static_cast(dequeued); + } + if (dequeued == max) { + break; + } + max -= dequeued; + ptr = ptr->next_prod(); + if (ptr == nullptr) { + ptr = tail; + } + } + return count; + } + + + + // Attempts to dequeue from a specific producer's inner queue. + // If you happen to know which producer you want to dequeue from, this + // is significantly faster than using the general-case try_dequeue methods. + // Returns false if the producer's queue appeared empty at the time it + // was checked (so, the queue is likely but not guaranteed to be empty). + // Never allocates. Thread-safe. + template + inline bool try_dequeue_from_producer(producer_token_t const& producer, U& item) + { + return static_cast(producer.producer)->dequeue(item); + } + + // Attempts to dequeue several elements from a specific producer's inner queue. + // Returns the number of items actually dequeued. + // If you happen to know which producer you want to dequeue from, this + // is significantly faster than using the general-case try_dequeue methods. + // Returns 0 if the producer's queue appeared empty at the time it + // was checked (so, the queue is likely but not guaranteed to be empty). + // Never allocates. Thread-safe. + template + inline size_t try_dequeue_bulk_from_producer(producer_token_t const& producer, It itemFirst, size_t max) + { + return static_cast(producer.producer)->dequeue_bulk(itemFirst, max); + } + + + // Returns an estimate of the total number of elements currently in the queue. This + // estimate is only accurate if the queue has completely stabilized before it is called + // (i.e. all enqueue and dequeue operations have completed and their memory effects are + // visible on the calling thread, and no further operations start while this method is + // being called). + // Thread-safe. + size_t size_approx() const + { + size_t size = 0; + for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { + size += ptr->size_approx(); + } + return size; + } + + + // Returns true if the underlying atomic variables used by + // the queue are lock-free (they should be on most platforms). + // Thread-safe. 
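+	// For illustration only (a minimal sketch, not upstream documentation): a start-up
+	// sanity check when the application requires lock-free behaviour:
+	//
+	//     if (!moodycamel::ConcurrentQueue<int>::is_lock_free()) {
+	//         // fall back to another strategy, or log a warning
+	//     }
+	//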
+ static bool is_lock_free() + { + return + details::static_is_lock_free::value == 2 && + details::static_is_lock_free::value == 2 && + details::static_is_lock_free::value == 2 && + details::static_is_lock_free::value == 2 && + details::static_is_lock_free::value == 2 && + details::static_is_lock_free::thread_id_numeric_size_t>::value == 2; + } + + +private: + friend struct ProducerToken; + friend struct ConsumerToken; + struct ExplicitProducer; + friend struct ExplicitProducer; + struct ImplicitProducer; + friend struct ImplicitProducer; + friend class ConcurrentQueueTests; + + enum AllocationMode { CanAlloc, CannotAlloc }; + + + /////////////////////////////// + // Queue methods + /////////////////////////////// + + template + inline bool inner_enqueue(producer_token_t const& token, U&& element) + { + return static_cast(token.producer)->ConcurrentQueue::ExplicitProducer::template enqueue(std::forward(element)); + } + + template + inline bool inner_enqueue(U&& element) + { + auto producer = get_or_add_implicit_producer(); + return producer == nullptr ? false : producer->ConcurrentQueue::ImplicitProducer::template enqueue(std::forward(element)); + } + + template + inline bool inner_enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count) + { + return static_cast(token.producer)->ConcurrentQueue::ExplicitProducer::template enqueue_bulk(itemFirst, count); + } + + template + inline bool inner_enqueue_bulk(It itemFirst, size_t count) + { + auto producer = get_or_add_implicit_producer(); + return producer == nullptr ? false : producer->ConcurrentQueue::ImplicitProducer::template enqueue_bulk(itemFirst, count); + } + + inline bool update_current_producer_after_rotation(consumer_token_t& token) + { + // Ah, there's been a rotation, figure out where we should be! + auto tail = producerListTail.load(std::memory_order_acquire); + if (token.desiredProducer == nullptr && tail == nullptr) { + return false; + } + auto prodCount = producerCount.load(std::memory_order_relaxed); + auto globalOffset = globalExplicitConsumerOffset.load(std::memory_order_relaxed); + if ((details::unlikely)(token.desiredProducer == nullptr)) { + // Aha, first time we're dequeueing anything. + // Figure out our local position + // Note: offset is from start, not end, but we're traversing from end -- subtract from count first + std::uint32_t offset = prodCount - 1 - (token.initialOffset % prodCount); + token.desiredProducer = tail; + for (std::uint32_t i = 0; i != offset; ++i) { + token.desiredProducer = static_cast(token.desiredProducer)->next_prod(); + if (token.desiredProducer == nullptr) { + token.desiredProducer = tail; + } + } + } + + std::uint32_t delta = globalOffset - token.lastKnownGlobalOffset; + if (delta >= prodCount) { + delta = delta % prodCount; + } + for (std::uint32_t i = 0; i != delta; ++i) { + token.desiredProducer = static_cast(token.desiredProducer)->next_prod(); + if (token.desiredProducer == nullptr) { + token.desiredProducer = tail; + } + } + + token.lastKnownGlobalOffset = globalOffset; + token.currentProducer = token.desiredProducer; + token.itemsConsumedFromCurrent = 0; + return true; + } + + + /////////////////////////// + // Free list + /////////////////////////// + + template + struct FreeListNode + { + FreeListNode() : freeListRefs(0), freeListNext(nullptr) { } + + std::atomic freeListRefs; + std::atomic freeListNext; + }; + + // A simple CAS-based lock-free free list. 
Not the fastest thing in the world under heavy contention, but + // simple and correct (assuming nodes are never freed until after the free list is destroyed), and fairly + // speedy under low contention. + template // N must inherit FreeListNode or have the same fields (and initialization of them) + struct FreeList + { + FreeList() : freeListHead(nullptr) { } + FreeList(FreeList&& other) : freeListHead(other.freeListHead.load(std::memory_order_relaxed)) { other.freeListHead.store(nullptr, std::memory_order_relaxed); } + void swap(FreeList& other) { details::swap_relaxed(freeListHead, other.freeListHead); } + + FreeList(FreeList const&) MOODYCAMEL_DELETE_FUNCTION; + FreeList& operator=(FreeList const&) MOODYCAMEL_DELETE_FUNCTION; + + inline void add(N* node) + { +#if MCDBGQ_NOLOCKFREE_FREELIST + debug::DebugLock lock(mutex); +#endif + // We know that the should-be-on-freelist bit is 0 at this point, so it's safe to + // set it using a fetch_add + if (node->freeListRefs.fetch_add(SHOULD_BE_ON_FREELIST, std::memory_order_acq_rel) == 0) { + // Oh look! We were the last ones referencing this node, and we know + // we want to add it to the free list, so let's do it! + add_knowing_refcount_is_zero(node); + } + } + + inline N* try_get() + { +#if MCDBGQ_NOLOCKFREE_FREELIST + debug::DebugLock lock(mutex); +#endif + auto head = freeListHead.load(std::memory_order_acquire); + while (head != nullptr) { + auto prevHead = head; + auto refs = head->freeListRefs.load(std::memory_order_relaxed); + if ((refs & REFS_MASK) == 0 || !head->freeListRefs.compare_exchange_strong(refs, refs + 1, std::memory_order_acquire, std::memory_order_relaxed)) { + head = freeListHead.load(std::memory_order_acquire); + continue; + } + + // Good, reference count has been incremented (it wasn't at zero), which means we can read the + // next and not worry about it changing between now and the time we do the CAS + auto next = head->freeListNext.load(std::memory_order_relaxed); + if (freeListHead.compare_exchange_strong(head, next, std::memory_order_acquire, std::memory_order_relaxed)) { + // Yay, got the node. This means it was on the list, which means shouldBeOnFreeList must be false no + // matter the refcount (because nobody else knows it's been taken off yet, it can't have been put back on). + assert((head->freeListRefs.load(std::memory_order_relaxed) & SHOULD_BE_ON_FREELIST) == 0); + + // Decrease refcount twice, once for our ref, and once for the list's ref + head->freeListRefs.fetch_sub(2, std::memory_order_release); + return head; + } + + // OK, the head must have changed on us, but we still need to decrease the refcount we increased. + // Note that we don't need to release any memory effects, but we do need to ensure that the reference + // count decrement happens-after the CAS on the head. + refs = prevHead->freeListRefs.fetch_sub(1, std::memory_order_acq_rel); + if (refs == SHOULD_BE_ON_FREELIST + 1) { + add_knowing_refcount_is_zero(prevHead); + } + } + + return nullptr; + } + + // Useful for traversing the list when there's no contention (e.g. to destroy remaining nodes) + N* head_unsafe() const { return freeListHead.load(std::memory_order_relaxed); } + + private: + inline void add_knowing_refcount_is_zero(N* node) + { + // Since the refcount is zero, and nobody can increase it once it's zero (except us, and we run + // only one copy of this method per node at a time, i.e. 
the single thread case), then we know + // we can safely change the next pointer of the node; however, once the refcount is back above + // zero, then other threads could increase it (happens under heavy contention, when the refcount + // goes to zero in between a load and a refcount increment of a node in try_get, then back up to + // something non-zero, then the refcount increment is done by the other thread) -- so, if the CAS + // to add the node to the actual list fails, decrease the refcount and leave the add operation to + // the next thread who puts the refcount back at zero (which could be us, hence the loop). + auto head = freeListHead.load(std::memory_order_relaxed); + while (true) { + node->freeListNext.store(head, std::memory_order_relaxed); + node->freeListRefs.store(1, std::memory_order_release); + if (!freeListHead.compare_exchange_strong(head, node, std::memory_order_release, std::memory_order_relaxed)) { + // Hmm, the add failed, but we can only try again when the refcount goes back to zero + if (node->freeListRefs.fetch_add(SHOULD_BE_ON_FREELIST - 1, std::memory_order_release) == 1) { + continue; + } + } + return; + } + } + + private: + // Implemented like a stack, but where node order doesn't matter (nodes are inserted out of order under contention) + std::atomic freeListHead; + + static const std::uint32_t REFS_MASK = 0x7FFFFFFF; + static const std::uint32_t SHOULD_BE_ON_FREELIST = 0x80000000; + +#if MCDBGQ_NOLOCKFREE_FREELIST + debug::DebugMutex mutex; +#endif + }; + + + /////////////////////////// + // Block + /////////////////////////// + + enum InnerQueueContext { implicit_context = 0, explicit_context = 1 }; + + struct Block + { + Block() + : next(nullptr), elementsCompletelyDequeued(0), freeListRefs(0), freeListNext(nullptr), shouldBeOnFreeList(false), dynamicallyAllocated(true) + { +#if MCDBGQ_TRACKMEM + owner = nullptr; +#endif + } + + template + inline bool is_empty() const + { + if (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) { + // Check flags + for (size_t i = 0; i < BLOCK_SIZE; ++i) { + if (!emptyFlags[i].load(std::memory_order_relaxed)) { + return false; + } + } + + // Aha, empty; make sure we have all other memory effects that happened before the empty flags were set + std::atomic_thread_fence(std::memory_order_acquire); + return true; + } + else { + // Check counter + if (elementsCompletelyDequeued.load(std::memory_order_relaxed) == BLOCK_SIZE) { + std::atomic_thread_fence(std::memory_order_acquire); + return true; + } + assert(elementsCompletelyDequeued.load(std::memory_order_relaxed) <= BLOCK_SIZE); + return false; + } + } + + // Returns true if the block is now empty (does not apply in explicit context) + template + inline bool set_empty(index_t i) + { + if (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) { + // Set flag + assert(!emptyFlags[BLOCK_SIZE - 1 - static_cast(i & static_cast(BLOCK_SIZE - 1))].load(std::memory_order_relaxed)); + emptyFlags[BLOCK_SIZE - 1 - static_cast(i & static_cast(BLOCK_SIZE - 1))].store(true, std::memory_order_release); + return false; + } + else { + // Increment counter + auto prevVal = elementsCompletelyDequeued.fetch_add(1, std::memory_order_release); + assert(prevVal < BLOCK_SIZE); + return prevVal == BLOCK_SIZE - 1; + } + } + + // Sets multiple contiguous item statuses to 'empty' (assumes no wrapping and count > 0). + // Returns true if the block is now empty (does not apply in explicit context). 
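+		// [Illustrative aside; not part of the upstream moodycamel header.]
+		// A Block tracks consumption in one of two ways, selected at compile time: small
+		// blocks in the explicit-producer context keep one atomic flag per slot, while all
+		// other cases share a single atomic counter that set_empty()/set_many_empty()
+		// increment, reporting emptiness once it reaches BLOCK_SIZE. The counter variant
+		// boils down to (hypothetical names standing in for elementsCompletelyDequeued):
+		//
+		//     std::atomic<size_t> consumed{0};
+		//     bool freed_last_slot() { return consumed.fetch_add(1, std::memory_order_release) == BLOCK_SIZE - 1; }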
+ template + inline bool set_many_empty(index_t i, size_t count) + { + if (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) { + // Set flags + std::atomic_thread_fence(std::memory_order_release); + i = BLOCK_SIZE - 1 - static_cast(i & static_cast(BLOCK_SIZE - 1)) - count + 1; + for (size_t j = 0; j != count; ++j) { + assert(!emptyFlags[i + j].load(std::memory_order_relaxed)); + emptyFlags[i + j].store(true, std::memory_order_relaxed); + } + return false; + } + else { + // Increment counter + auto prevVal = elementsCompletelyDequeued.fetch_add(count, std::memory_order_release); + assert(prevVal + count <= BLOCK_SIZE); + return prevVal + count == BLOCK_SIZE; + } + } + + template + inline void set_all_empty() + { + if (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) { + // Set all flags + for (size_t i = 0; i != BLOCK_SIZE; ++i) { + emptyFlags[i].store(true, std::memory_order_relaxed); + } + } + else { + // Reset counter + elementsCompletelyDequeued.store(BLOCK_SIZE, std::memory_order_relaxed); + } + } + + template + inline void reset_empty() + { + if (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) { + // Reset flags + for (size_t i = 0; i != BLOCK_SIZE; ++i) { + emptyFlags[i].store(false, std::memory_order_relaxed); + } + } + else { + // Reset counter + elementsCompletelyDequeued.store(0, std::memory_order_relaxed); + } + } + + inline T* operator[](index_t idx) MOODYCAMEL_NOEXCEPT { return static_cast(static_cast(elements)) + static_cast(idx & static_cast(BLOCK_SIZE - 1)); } + inline T const* operator[](index_t idx) const MOODYCAMEL_NOEXCEPT { return static_cast(static_cast(elements)) + static_cast(idx & static_cast(BLOCK_SIZE - 1)); } + + private: + // IMPORTANT: This must be the first member in Block, so that if T depends on the alignment of + // addresses returned by malloc, that alignment will be preserved. Apparently clang actually + // generates code that uses this assumption for AVX instructions in some cases. Ideally, we + // should also align Block to the alignment of T in case it's higher than malloc's 16-byte + // alignment, but this is hard to do in a cross-platform way. Assert for this case: + static_assert(std::alignment_of::value <= std::alignment_of::value, "The queue does not support super-aligned types at this time"); + // Additionally, we need the alignment of Block itself to be a multiple of max_align_t since + // otherwise the appropriate padding will not be added at the end of Block in order to make + // arrays of Blocks all be properly aligned (not just the first one). We use a union to force + // this. + union { + char elements[sizeof(T) * BLOCK_SIZE]; + details::max_align_t dummy; + }; + public: + Block* next; + std::atomic elementsCompletelyDequeued; + std::atomic emptyFlags[BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD ? 
BLOCK_SIZE : 1]; + public: + std::atomic freeListRefs; + std::atomic freeListNext; + std::atomic shouldBeOnFreeList; + bool dynamicallyAllocated; // Perhaps a better name for this would be 'isNotPartOfInitialBlockPool' + +#if MCDBGQ_TRACKMEM + void* owner; +#endif + }; + static_assert(std::alignment_of::value >= std::alignment_of::value, "Internal error: Blocks must be at least as aligned as the type they are wrapping"); + + +#if MCDBGQ_TRACKMEM +public: + struct MemStats; +private: +#endif + + /////////////////////////// + // Producer base + /////////////////////////// + + struct ProducerBase : public details::ConcurrentQueueProducerTypelessBase + { + ProducerBase(ConcurrentQueue* parent_, bool isExplicit_) : + tailIndex(0), + headIndex(0), + dequeueOptimisticCount(0), + dequeueOvercommit(0), + tailBlock(nullptr), + isExplicit(isExplicit_), + parent(parent_) + { + } + + virtual ~ProducerBase() { }; + + template + inline bool dequeue(U& element) + { + if (isExplicit) { + return static_cast(this)->dequeue(element); + } + else { + return static_cast(this)->dequeue(element); + } + } + + template + inline size_t dequeue_bulk(It& itemFirst, size_t max) + { + if (isExplicit) { + return static_cast(this)->dequeue_bulk(itemFirst, max); + } + else { + return static_cast(this)->dequeue_bulk(itemFirst, max); + } + } + + inline ProducerBase* next_prod() const { return static_cast(next); } + + inline size_t size_approx() const + { + auto tail = tailIndex.load(std::memory_order_relaxed); + auto head = headIndex.load(std::memory_order_relaxed); + return details::circular_less_than(head, tail) ? static_cast(tail - head) : 0; + } + + inline index_t getTail() const { return tailIndex.load(std::memory_order_relaxed); } + protected: + std::atomic tailIndex; // Where to enqueue to next + std::atomic headIndex; // Where to dequeue from next + + std::atomic dequeueOptimisticCount; + std::atomic dequeueOvercommit; + + Block* tailBlock; + + public: + bool isExplicit; + ConcurrentQueue* parent; + + protected: +#if MCDBGQ_TRACKMEM + friend struct MemStats; +#endif + }; + + + /////////////////////////// + // Explicit queue + /////////////////////////// + + struct ExplicitProducer : public ProducerBase + { + explicit ExplicitProducer(ConcurrentQueue* parent) : + ProducerBase(parent, true), + blockIndex(nullptr), + pr_blockIndexSlotsUsed(0), + pr_blockIndexSize(EXPLICIT_INITIAL_INDEX_SIZE >> 1), + pr_blockIndexFront(0), + pr_blockIndexEntries(nullptr), + pr_blockIndexRaw(nullptr) + { + size_t poolBasedIndexSize = details::ceil_to_pow_2(parent->initialBlockPoolSize) >> 1; + if (poolBasedIndexSize > pr_blockIndexSize) { + pr_blockIndexSize = poolBasedIndexSize; + } + + new_block_index(0); // This creates an index with double the number of current entries, i.e. EXPLICIT_INITIAL_INDEX_SIZE + } + + ~ExplicitProducer() + { + // Destruct any elements not yet dequeued. + // Since we're in the destructor, we can assume all elements + // are either completely dequeued or completely not (no halfways). 
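+			// [Illustrative aside; not part of the upstream moodycamel header.]
+			// The destructor may assume single-threaded access because, by contract, the queue is
+			// destroyed only after all concurrent use has ended. This producer's blocks form a
+			// circular singly-linked list through 'next', with tailBlock->next as the logical head,
+			// so the cleanup below walks:
+			//
+			//     auto block = tailBlock;
+			//     do {
+			//         block = block->next;   // first step lands on the head block
+			//         // ...visit block...
+			//     } while (block != tailBlock);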
+ if (this->tailBlock != nullptr) { // Note this means there must be a block index too + // First find the block that's partially dequeued, if any + Block* halfDequeuedBlock = nullptr; + if ((this->headIndex.load(std::memory_order_relaxed) & static_cast(BLOCK_SIZE - 1)) != 0) { + // The head's not on a block boundary, meaning a block somewhere is partially dequeued + // (or the head block is the tail block and was fully dequeued, but the head/tail are still not on a boundary) + size_t i = (pr_blockIndexFront - pr_blockIndexSlotsUsed) & (pr_blockIndexSize - 1); + while (details::circular_less_than(pr_blockIndexEntries[i].base + BLOCK_SIZE, this->headIndex.load(std::memory_order_relaxed))) { + i = (i + 1) & (pr_blockIndexSize - 1); + } + assert(details::circular_less_than(pr_blockIndexEntries[i].base, this->headIndex.load(std::memory_order_relaxed))); + halfDequeuedBlock = pr_blockIndexEntries[i].block; + } + + // Start at the head block (note the first line in the loop gives us the head from the tail on the first iteration) + auto block = this->tailBlock; + do { + block = block->next; + if (block->ConcurrentQueue::Block::template is_empty()) { + continue; + } + + size_t i = 0; // Offset into block + if (block == halfDequeuedBlock) { + i = static_cast(this->headIndex.load(std::memory_order_relaxed) & static_cast(BLOCK_SIZE - 1)); + } + + // Walk through all the items in the block; if this is the tail block, we need to stop when we reach the tail index + auto lastValidIndex = (this->tailIndex.load(std::memory_order_relaxed) & static_cast(BLOCK_SIZE - 1)) == 0 ? BLOCK_SIZE : static_cast(this->tailIndex.load(std::memory_order_relaxed) & static_cast(BLOCK_SIZE - 1)); + while (i != BLOCK_SIZE && (block != this->tailBlock || i != lastValidIndex)) { + (*block)[i++]->~T(); + } + } while (block != this->tailBlock); + } + + // Destroy all blocks that we own + if (this->tailBlock != nullptr) { + auto block = this->tailBlock; + do { + auto nextBlock = block->next; + if (block->dynamicallyAllocated) { + destroy(block); + } + else { + this->parent->add_block_to_free_list(block); + } + block = nextBlock; + } while (block != this->tailBlock); + } + + // Destroy the block indices + auto header = static_cast(pr_blockIndexRaw); + while (header != nullptr) { + auto prev = static_cast(header->prev); + header->~BlockIndexHeader(); + (Traits::free)(header); + header = prev; + } + } + + template + inline bool enqueue(U&& element) + { + index_t currentTailIndex = this->tailIndex.load(std::memory_order_relaxed); + index_t newTailIndex = 1 + currentTailIndex; + if ((currentTailIndex & static_cast(BLOCK_SIZE - 1)) == 0) { + // We reached the end of a block, start a new one + auto startBlock = this->tailBlock; + auto originalBlockIndexSlotsUsed = pr_blockIndexSlotsUsed; + if (this->tailBlock != nullptr && this->tailBlock->next->ConcurrentQueue::Block::template is_empty()) { + // We can re-use the block ahead of us, it's empty! + this->tailBlock = this->tailBlock->next; + this->tailBlock->ConcurrentQueue::Block::template reset_empty(); + + // We'll put the block on the block index (guaranteed to be room since we're conceptually removing the + // last block from it first -- except instead of removing then adding, we can just overwrite). + // Note that there must be a valid block index here, since even if allocation failed in the ctor, + // it would have been re-attempted when adding the first block to the queue; since there is such + // a block, a block index must have been successfully allocated. 
+ } + else { + // Whatever head value we see here is >= the last value we saw here (relatively), + // and <= its current value. Since we have the most recent tail, the head must be + // <= to it. + auto head = this->headIndex.load(std::memory_order_relaxed); + assert(!details::circular_less_than(currentTailIndex, head)); + if (!details::circular_less_than(head, currentTailIndex + BLOCK_SIZE) + || (MAX_SUBQUEUE_SIZE != details::const_numeric_max::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head))) { + // We can't enqueue in another block because there's not enough leeway -- the + // tail could surpass the head by the time the block fills up! (Or we'll exceed + // the size limit, if the second part of the condition was true.) + return false; + } + // We're going to need a new block; check that the block index has room + if (pr_blockIndexRaw == nullptr || pr_blockIndexSlotsUsed == pr_blockIndexSize) { + // Hmm, the circular block index is already full -- we'll need + // to allocate a new index. Note pr_blockIndexRaw can only be nullptr if + // the initial allocation failed in the constructor. + + if (allocMode == CannotAlloc || !new_block_index(pr_blockIndexSlotsUsed)) { + return false; + } + } + + // Insert a new block in the circular linked list + auto newBlock = this->parent->ConcurrentQueue::template requisition_block(); + if (newBlock == nullptr) { + return false; + } +#if MCDBGQ_TRACKMEM + newBlock->owner = this; +#endif + newBlock->ConcurrentQueue::Block::template reset_empty(); + if (this->tailBlock == nullptr) { + newBlock->next = newBlock; + } + else { + newBlock->next = this->tailBlock->next; + this->tailBlock->next = newBlock; + } + this->tailBlock = newBlock; + ++pr_blockIndexSlotsUsed; + } + + if (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (nullptr) T(std::forward(element)))) { + // The constructor may throw. We want the element not to appear in the queue in + // that case (without corrupting the queue): + MOODYCAMEL_TRY { + new ((*this->tailBlock)[currentTailIndex]) T(std::forward(element)); + } + MOODYCAMEL_CATCH (...) { + // Revert change to the current block, but leave the new block available + // for next time + pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed; + this->tailBlock = startBlock == nullptr ? 
this->tailBlock : startBlock; + MOODYCAMEL_RETHROW; + } + } + else { + (void)startBlock; + (void)originalBlockIndexSlotsUsed; + } + + // Add block to block index + auto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront]; + entry.base = currentTailIndex; + entry.block = this->tailBlock; + blockIndex.load(std::memory_order_relaxed)->front.store(pr_blockIndexFront, std::memory_order_release); + pr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1); + + if (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (nullptr) T(std::forward(element)))) { + this->tailIndex.store(newTailIndex, std::memory_order_release); + return true; + } + } + + // Enqueue + new ((*this->tailBlock)[currentTailIndex]) T(std::forward(element)); + + this->tailIndex.store(newTailIndex, std::memory_order_release); + return true; + } + + template + bool dequeue(U& element) + { + auto tail = this->tailIndex.load(std::memory_order_relaxed); + auto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed); + if (details::circular_less_than(this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit, tail)) { + // Might be something to dequeue, let's give it a try + + // Note that this if is purely for performance purposes in the common case when the queue is + // empty and the values are eventually consistent -- we may enter here spuriously. + + // Note that whatever the values of overcommit and tail are, they are not going to change (unless we + // change them) and must be the same value at this point (inside the if) as when the if condition was + // evaluated. + + // We insert an acquire fence here to synchronize-with the release upon incrementing dequeueOvercommit below. + // This ensures that whatever the value we got loaded into overcommit, the load of dequeueOptisticCount in + // the fetch_add below will result in a value at least as recent as that (and therefore at least as large). + // Note that I believe a compiler (signal) fence here would be sufficient due to the nature of fetch_add (all + // read-modify-write operations are guaranteed to work on the latest value in the modification order), but + // unfortunately that can't be shown to be correct using only the C++11 standard. + // See http://stackoverflow.com/questions/18223161/what-are-the-c11-memory-ordering-guarantees-in-this-corner-case + std::atomic_thread_fence(std::memory_order_acquire); + + // Increment optimistic counter, then check if it went over the boundary + auto myDequeueCount = this->dequeueOptimisticCount.fetch_add(1, std::memory_order_relaxed); + + // Note that since dequeueOvercommit must be <= dequeueOptimisticCount (because dequeueOvercommit is only ever + // incremented after dequeueOptimisticCount -- this is enforced in the `else` block below), and since we now + // have a version of dequeueOptimisticCount that is at least as recent as overcommit (due to the release upon + // incrementing dequeueOvercommit and the acquire above that synchronizes with it), overcommit <= myDequeueCount. + // However, we can't assert this since both dequeueOptimisticCount and dequeueOvercommit may (independently) + // overflow; in such a case, though, the logic still holds since the difference between the two is maintained. + + // Note that we reload tail here in case it changed; it will be the same value as before or greater, since + // this load is sequenced after (happens after) the earlier load above. 
This is supported by read-read + // coherency (as defined in the standard), explained here: http://en.cppreference.com/w/cpp/atomic/memory_order + tail = this->tailIndex.load(std::memory_order_acquire); + if ((details::likely)(details::circular_less_than(myDequeueCount - overcommit, tail))) { + // Guaranteed to be at least one element to dequeue! + + // Get the index. Note that since there's guaranteed to be at least one element, this + // will never exceed tail. We need to do an acquire-release fence here since it's possible + // that whatever condition got us to this point was for an earlier enqueued element (that + // we already see the memory effects for), but that by the time we increment somebody else + // has incremented it, and we need to see the memory effects for *that* element, which is + // in such a case is necessarily visible on the thread that incremented it in the first + // place with the more current condition (they must have acquired a tail that is at least + // as recent). + auto index = this->headIndex.fetch_add(1, std::memory_order_acq_rel); + + + // Determine which block the element is in + + auto localBlockIndex = blockIndex.load(std::memory_order_acquire); + auto localBlockIndexHead = localBlockIndex->front.load(std::memory_order_acquire); + + // We need to be careful here about subtracting and dividing because of index wrap-around. + // When an index wraps, we need to preserve the sign of the offset when dividing it by the + // block size (in order to get a correct signed block count offset in all cases): + auto headBase = localBlockIndex->entries[localBlockIndexHead].base; + auto blockBaseIndex = index & ~static_cast(BLOCK_SIZE - 1); + auto offset = static_cast(static_cast::type>(blockBaseIndex - headBase) / BLOCK_SIZE); + auto block = localBlockIndex->entries[(localBlockIndexHead + offset) & (localBlockIndex->size - 1)].block; + + // Dequeue + auto& el = *((*block)[index]); + if (!MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, element = std::move(el))) { + // Make sure the element is still fully dequeued and destroyed even if the assignment + // throws + struct Guard { + Block* block; + index_t index; + + ~Guard() + { + (*block)[index]->~T(); + block->ConcurrentQueue::Block::template set_empty(index); + } + } guard = { block, index }; + + element = std::move(el); + } + else { + element = std::move(el); + el.~T(); + block->ConcurrentQueue::Block::template set_empty(index); + } + + return true; + } + else { + // Wasn't anything to dequeue after all; make the effective dequeue count eventually consistent + this->dequeueOvercommit.fetch_add(1, std::memory_order_release); // Release so that the fetch_add on dequeueOptimisticCount is guaranteed to happen before this write + } + } + + return false; + } + + template + bool enqueue_bulk(It itemFirst, size_t count) + { + // First, we need to make sure we have enough room to enqueue all of the elements; + // this means pre-allocating blocks and putting them in the block index (but only if + // all the allocations succeeded). 
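+			// [Illustrative aside; not part of the upstream moodycamel header.]
+			// The bulk enqueue is two-phase: first reserve every block that 'count' elements will
+			// need and record each in the block index; only then construct the elements block by
+			// block. When T's constructor from the iterator's value is noexcept, the new index
+			// front is published before construction; otherwise it is published only after every
+			// element has been constructed, so an exception can roll the whole operation back.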
+ index_t startTailIndex = this->tailIndex.load(std::memory_order_relaxed); + auto startBlock = this->tailBlock; + auto originalBlockIndexFront = pr_blockIndexFront; + auto originalBlockIndexSlotsUsed = pr_blockIndexSlotsUsed; + + Block* firstAllocatedBlock = nullptr; + + // Figure out how many blocks we'll need to allocate, and do so + size_t blockBaseDiff = ((startTailIndex + count - 1) & ~static_cast(BLOCK_SIZE - 1)) - ((startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1)); + index_t currentTailIndex = (startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1); + if (blockBaseDiff > 0) { + // Allocate as many blocks as possible from ahead + while (blockBaseDiff > 0 && this->tailBlock != nullptr && this->tailBlock->next != firstAllocatedBlock && this->tailBlock->next->ConcurrentQueue::Block::template is_empty()) { + blockBaseDiff -= static_cast(BLOCK_SIZE); + currentTailIndex += static_cast(BLOCK_SIZE); + + this->tailBlock = this->tailBlock->next; + firstAllocatedBlock = firstAllocatedBlock == nullptr ? this->tailBlock : firstAllocatedBlock; + + auto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront]; + entry.base = currentTailIndex; + entry.block = this->tailBlock; + pr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1); + } + + // Now allocate as many blocks as necessary from the block pool + while (blockBaseDiff > 0) { + blockBaseDiff -= static_cast(BLOCK_SIZE); + currentTailIndex += static_cast(BLOCK_SIZE); + + auto head = this->headIndex.load(std::memory_order_relaxed); + assert(!details::circular_less_than(currentTailIndex, head)); + bool full = !details::circular_less_than(head, currentTailIndex + BLOCK_SIZE) || (MAX_SUBQUEUE_SIZE != details::const_numeric_max::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head)); + if (pr_blockIndexRaw == nullptr || pr_blockIndexSlotsUsed == pr_blockIndexSize || full) { + if (allocMode == CannotAlloc || full || !new_block_index(originalBlockIndexSlotsUsed)) { + // Failed to allocate, undo changes (but keep injected blocks) + pr_blockIndexFront = originalBlockIndexFront; + pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed; + this->tailBlock = startBlock == nullptr ? firstAllocatedBlock : startBlock; + return false; + } + + // pr_blockIndexFront is updated inside new_block_index, so we need to + // update our fallback value too (since we keep the new index even if we + // later fail) + originalBlockIndexFront = originalBlockIndexSlotsUsed; + } + + // Insert a new block in the circular linked list + auto newBlock = this->parent->ConcurrentQueue::template requisition_block(); + if (newBlock == nullptr) { + pr_blockIndexFront = originalBlockIndexFront; + pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed; + this->tailBlock = startBlock == nullptr ? firstAllocatedBlock : startBlock; + return false; + } + +#if MCDBGQ_TRACKMEM + newBlock->owner = this; +#endif + newBlock->ConcurrentQueue::Block::template set_all_empty(); + if (this->tailBlock == nullptr) { + newBlock->next = newBlock; + } + else { + newBlock->next = this->tailBlock->next; + this->tailBlock->next = newBlock; + } + this->tailBlock = newBlock; + firstAllocatedBlock = firstAllocatedBlock == nullptr ? 
this->tailBlock : firstAllocatedBlock; + + ++pr_blockIndexSlotsUsed; + + auto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront]; + entry.base = currentTailIndex; + entry.block = this->tailBlock; + pr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1); + } + + // Excellent, all allocations succeeded. Reset each block's emptiness before we fill them up, and + // publish the new block index front + auto block = firstAllocatedBlock; + while (true) { + block->ConcurrentQueue::Block::template reset_empty(); + if (block == this->tailBlock) { + break; + } + block = block->next; + } + + if (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (nullptr) T(details::deref_noexcept(itemFirst)))) { + blockIndex.load(std::memory_order_relaxed)->front.store((pr_blockIndexFront - 1) & (pr_blockIndexSize - 1), std::memory_order_release); + } + } + + // Enqueue, one block at a time + index_t newTailIndex = startTailIndex + static_cast(count); + currentTailIndex = startTailIndex; + auto endBlock = this->tailBlock; + this->tailBlock = startBlock; + assert((startTailIndex & static_cast(BLOCK_SIZE - 1)) != 0 || firstAllocatedBlock != nullptr || count == 0); + if ((startTailIndex & static_cast(BLOCK_SIZE - 1)) == 0 && firstAllocatedBlock != nullptr) { + this->tailBlock = firstAllocatedBlock; + } + while (true) { + auto stopIndex = (currentTailIndex & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); + if (details::circular_less_than(newTailIndex, stopIndex)) { + stopIndex = newTailIndex; + } + if (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (nullptr) T(details::deref_noexcept(itemFirst)))) { + while (currentTailIndex != stopIndex) { + new ((*this->tailBlock)[currentTailIndex++]) T(*itemFirst++); + } + } + else { + MOODYCAMEL_TRY { + while (currentTailIndex != stopIndex) { + // Must use copy constructor even if move constructor is available + // because we may have to revert if there's an exception. + // Sorry about the horrible templated next line, but it was the only way + // to disable moving *at compile time*, which is important because a type + // may only define a (noexcept) move constructor, and so calls to the + // cctor will not compile, even if they are in an if branch that will never + // be executed + new ((*this->tailBlock)[currentTailIndex]) T(details::nomove_if<(bool)!MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (nullptr) T(details::deref_noexcept(itemFirst)))>::eval(*itemFirst)); + ++currentTailIndex; + ++itemFirst; + } + } + MOODYCAMEL_CATCH (...) { + // Oh dear, an exception's been thrown -- destroy the elements that + // were enqueued so far and revert the entire bulk operation (we'll keep + // any allocated blocks in our linked list for later, though). + auto constructedStopIndex = currentTailIndex; + auto lastBlockEnqueued = this->tailBlock; + + pr_blockIndexFront = originalBlockIndexFront; + pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed; + this->tailBlock = startBlock == nullptr ? 
firstAllocatedBlock : startBlock; + + if (!details::is_trivially_destructible::value) { + auto block = startBlock; + if ((startTailIndex & static_cast(BLOCK_SIZE - 1)) == 0) { + block = firstAllocatedBlock; + } + currentTailIndex = startTailIndex; + while (true) { + stopIndex = (currentTailIndex & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); + if (details::circular_less_than(constructedStopIndex, stopIndex)) { + stopIndex = constructedStopIndex; + } + while (currentTailIndex != stopIndex) { + (*block)[currentTailIndex++]->~T(); + } + if (block == lastBlockEnqueued) { + break; + } + block = block->next; + } + } + MOODYCAMEL_RETHROW; + } + } + + if (this->tailBlock == endBlock) { + assert(currentTailIndex == newTailIndex); + break; + } + this->tailBlock = this->tailBlock->next; + } + + if (!MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (nullptr) T(details::deref_noexcept(itemFirst))) && firstAllocatedBlock != nullptr) { + blockIndex.load(std::memory_order_relaxed)->front.store((pr_blockIndexFront - 1) & (pr_blockIndexSize - 1), std::memory_order_release); + } + + this->tailIndex.store(newTailIndex, std::memory_order_release); + return true; + } + + template + size_t dequeue_bulk(It& itemFirst, size_t max) + { + auto tail = this->tailIndex.load(std::memory_order_relaxed); + auto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed); + auto desiredCount = static_cast(tail - (this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit)); + if (details::circular_less_than(0, desiredCount)) { + desiredCount = desiredCount < max ? desiredCount : max; + std::atomic_thread_fence(std::memory_order_acquire); + + auto myDequeueCount = this->dequeueOptimisticCount.fetch_add(desiredCount, std::memory_order_relaxed);; + + tail = this->tailIndex.load(std::memory_order_acquire); + auto actualCount = static_cast(tail - (myDequeueCount - overcommit)); + if (details::circular_less_than(0, actualCount)) { + actualCount = desiredCount < actualCount ? desiredCount : actualCount; + if (actualCount < desiredCount) { + this->dequeueOvercommit.fetch_add(desiredCount - actualCount, std::memory_order_release); + } + + // Get the first index. Note that since there's guaranteed to be at least actualCount elements, this + // will never exceed tail. + auto firstIndex = this->headIndex.fetch_add(actualCount, std::memory_order_acq_rel); + + // Determine which block the first element is in + auto localBlockIndex = blockIndex.load(std::memory_order_acquire); + auto localBlockIndexHead = localBlockIndex->front.load(std::memory_order_acquire); + + auto headBase = localBlockIndex->entries[localBlockIndexHead].base; + auto firstBlockBaseIndex = firstIndex & ~static_cast(BLOCK_SIZE - 1); + auto offset = static_cast(static_cast::type>(firstBlockBaseIndex - headBase) / BLOCK_SIZE); + auto indexIndex = (localBlockIndexHead + offset) & (localBlockIndex->size - 1); + + // Iterate the blocks and dequeue + auto index = firstIndex; + do { + auto firstIndexInBlock = index; + auto endIndex = (index & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); + endIndex = details::circular_less_than(firstIndex + static_cast(actualCount), endIndex) ? 
firstIndex + static_cast(actualCount) : endIndex; + auto block = localBlockIndex->entries[indexIndex].block; + if (MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, details::deref_noexcept(itemFirst) = std::move((*(*block)[index])))) { + while (index != endIndex) { + auto& el = *((*block)[index]); + *itemFirst++ = std::move(el); + el.~T(); + ++index; + } + } + else { + MOODYCAMEL_TRY { + while (index != endIndex) { + auto& el = *((*block)[index]); + *itemFirst = std::move(el); + ++itemFirst; + el.~T(); + ++index; + } + } + MOODYCAMEL_CATCH (...) { + // It's too late to revert the dequeue, but we can make sure that all + // the dequeued objects are properly destroyed and the block index + // (and empty count) are properly updated before we propagate the exception + do { + block = localBlockIndex->entries[indexIndex].block; + while (index != endIndex) { + (*block)[index++]->~T(); + } + block->ConcurrentQueue::Block::template set_many_empty(firstIndexInBlock, static_cast(endIndex - firstIndexInBlock)); + indexIndex = (indexIndex + 1) & (localBlockIndex->size - 1); + + firstIndexInBlock = index; + endIndex = (index & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); + endIndex = details::circular_less_than(firstIndex + static_cast(actualCount), endIndex) ? firstIndex + static_cast(actualCount) : endIndex; + } while (index != firstIndex + actualCount); + + MOODYCAMEL_RETHROW; + } + } + block->ConcurrentQueue::Block::template set_many_empty(firstIndexInBlock, static_cast(endIndex - firstIndexInBlock)); + indexIndex = (indexIndex + 1) & (localBlockIndex->size - 1); + } while (index != firstIndex + actualCount); + + return actualCount; + } + else { + // Wasn't anything to dequeue after all; make the effective dequeue count eventually consistent + this->dequeueOvercommit.fetch_add(desiredCount, std::memory_order_release); + } + } + + return 0; + } + + private: + struct BlockIndexEntry + { + index_t base; + Block* block; + }; + + struct BlockIndexHeader + { + size_t size; + std::atomic front; // Current slot (not next, like pr_blockIndexFront) + BlockIndexEntry* entries; + void* prev; + }; + + + bool new_block_index(size_t numberOfFilledSlotsToExpose) + { + auto prevBlockSizeMask = pr_blockIndexSize - 1; + + // Create the new block + pr_blockIndexSize <<= 1; + auto newRawPtr = static_cast((Traits::malloc)(sizeof(BlockIndexHeader) + std::alignment_of::value - 1 + sizeof(BlockIndexEntry) * pr_blockIndexSize)); + if (newRawPtr == nullptr) { + pr_blockIndexSize >>= 1; // Reset to allow graceful retry + return false; + } + + auto newBlockIndexEntries = reinterpret_cast(details::align_for(newRawPtr + sizeof(BlockIndexHeader))); + + // Copy in all the old indices, if any + size_t j = 0; + if (pr_blockIndexSlotsUsed != 0) { + auto i = (pr_blockIndexFront - pr_blockIndexSlotsUsed) & prevBlockSizeMask; + do { + newBlockIndexEntries[j++] = pr_blockIndexEntries[i]; + i = (i + 1) & prevBlockSizeMask; + } while (i != pr_blockIndexFront); + } + + // Update everything + auto header = new (newRawPtr) BlockIndexHeader; + header->size = pr_blockIndexSize; + header->front.store(numberOfFilledSlotsToExpose - 1, std::memory_order_relaxed); + header->entries = newBlockIndexEntries; + header->prev = pr_blockIndexRaw; // we link the new block to the old one so we can free it later + + pr_blockIndexFront = j; + pr_blockIndexEntries = newBlockIndexEntries; + pr_blockIndexRaw = newRawPtr; + blockIndex.store(header, std::memory_order_release); + + return true; + } + + private: + std::atomic blockIndex; + + // To be used by producer 
only -- consumer must use the ones in referenced by blockIndex + size_t pr_blockIndexSlotsUsed; + size_t pr_blockIndexSize; + size_t pr_blockIndexFront; // Next slot (not current) + BlockIndexEntry* pr_blockIndexEntries; + void* pr_blockIndexRaw; + +#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG + public: + ExplicitProducer* nextExplicitProducer; + private: +#endif + +#if MCDBGQ_TRACKMEM + friend struct MemStats; +#endif + }; + + + ////////////////////////////////// + // Implicit queue + ////////////////////////////////// + + struct ImplicitProducer : public ProducerBase + { + ImplicitProducer(ConcurrentQueue* parent) : + ProducerBase(parent, false), + nextBlockIndexCapacity(IMPLICIT_INITIAL_INDEX_SIZE), + blockIndex(nullptr) + { + new_block_index(); + } + + ~ImplicitProducer() + { + // Note that since we're in the destructor we can assume that all enqueue/dequeue operations + // completed already; this means that all undequeued elements are placed contiguously across + // contiguous blocks, and that only the first and last remaining blocks can be only partially + // empty (all other remaining blocks must be completely full). + +#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED + // Unregister ourselves for thread termination notification + if (!this->inactive.load(std::memory_order_relaxed)) { + details::ThreadExitNotifier::unsubscribe(&threadExitListener); + } +#endif + + // Destroy all remaining elements! + auto tail = this->tailIndex.load(std::memory_order_relaxed); + auto index = this->headIndex.load(std::memory_order_relaxed); + Block* block = nullptr; + assert(index == tail || details::circular_less_than(index, tail)); + bool forceFreeLastBlock = index != tail; // If we enter the loop, then the last (tail) block will not be freed + while (index != tail) { + if ((index & static_cast(BLOCK_SIZE - 1)) == 0 || block == nullptr) { + if (block != nullptr) { + // Free the old block + this->parent->add_block_to_free_list(block); + } + + block = get_block_index_entry_for_index(index)->value.load(std::memory_order_relaxed); + } + + ((*block)[index])->~T(); + ++index; + } + // Even if the queue is empty, there's still one block that's not on the free list + // (unless the head index reached the end of it, in which case the tail will be poised + // to create a new block). 
+ if (this->tailBlock != nullptr && (forceFreeLastBlock || (tail & static_cast(BLOCK_SIZE - 1)) != 0)) { + this->parent->add_block_to_free_list(this->tailBlock); + } + + // Destroy block index + auto localBlockIndex = blockIndex.load(std::memory_order_relaxed); + if (localBlockIndex != nullptr) { + for (size_t i = 0; i != localBlockIndex->capacity; ++i) { + localBlockIndex->index[i]->~BlockIndexEntry(); + } + do { + auto prev = localBlockIndex->prev; + localBlockIndex->~BlockIndexHeader(); + (Traits::free)(localBlockIndex); + localBlockIndex = prev; + } while (localBlockIndex != nullptr); + } + } + + template + inline bool enqueue(U&& element) + { + index_t currentTailIndex = this->tailIndex.load(std::memory_order_relaxed); + index_t newTailIndex = 1 + currentTailIndex; + if ((currentTailIndex & static_cast(BLOCK_SIZE - 1)) == 0) { + // We reached the end of a block, start a new one + auto head = this->headIndex.load(std::memory_order_relaxed); + assert(!details::circular_less_than(currentTailIndex, head)); + if (!details::circular_less_than(head, currentTailIndex + BLOCK_SIZE) || (MAX_SUBQUEUE_SIZE != details::const_numeric_max::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head))) { + return false; + } +#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX + debug::DebugLock lock(mutex); +#endif + // Find out where we'll be inserting this block in the block index + BlockIndexEntry* idxEntry; + if (!insert_block_index_entry(idxEntry, currentTailIndex)) { + return false; + } + + // Get ahold of a new block + auto newBlock = this->parent->ConcurrentQueue::template requisition_block(); + if (newBlock == nullptr) { + rewind_block_index_tail(); + idxEntry->value.store(nullptr, std::memory_order_relaxed); + return false; + } +#if MCDBGQ_TRACKMEM + newBlock->owner = this; +#endif + newBlock->ConcurrentQueue::Block::template reset_empty(); + + if (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (nullptr) T(std::forward(element)))) { + // May throw, try to insert now before we publish the fact that we have this new block + MOODYCAMEL_TRY { + new ((*newBlock)[currentTailIndex]) T(std::forward(element)); + } + MOODYCAMEL_CATCH (...) 
{ + rewind_block_index_tail(); + idxEntry->value.store(nullptr, std::memory_order_relaxed); + this->parent->add_block_to_free_list(newBlock); + MOODYCAMEL_RETHROW; + } + } + + // Insert the new block into the index + idxEntry->value.store(newBlock, std::memory_order_relaxed); + + this->tailBlock = newBlock; + + if (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (nullptr) T(std::forward(element)))) { + this->tailIndex.store(newTailIndex, std::memory_order_release); + return true; + } + } + + // Enqueue + new ((*this->tailBlock)[currentTailIndex]) T(std::forward(element)); + + this->tailIndex.store(newTailIndex, std::memory_order_release); + return true; + } + + template + bool dequeue(U& element) + { + // See ExplicitProducer::dequeue for rationale and explanation + index_t tail = this->tailIndex.load(std::memory_order_relaxed); + index_t overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed); + if (details::circular_less_than(this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit, tail)) { + std::atomic_thread_fence(std::memory_order_acquire); + + index_t myDequeueCount = this->dequeueOptimisticCount.fetch_add(1, std::memory_order_relaxed); + tail = this->tailIndex.load(std::memory_order_acquire); + if ((details::likely)(details::circular_less_than(myDequeueCount - overcommit, tail))) { + index_t index = this->headIndex.fetch_add(1, std::memory_order_acq_rel); + + // Determine which block the element is in + auto entry = get_block_index_entry_for_index(index); + + // Dequeue + auto block = entry->value.load(std::memory_order_relaxed); + auto& el = *((*block)[index]); + + if (!MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, element = std::move(el))) { +#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX + // Note: Acquiring the mutex with every dequeue instead of only when a block + // is released is very sub-optimal, but it is, after all, purely debug code. + debug::DebugLock lock(producer->mutex); +#endif + struct Guard { + Block* block; + index_t index; + BlockIndexEntry* entry; + ConcurrentQueue* parent; + + ~Guard() + { + (*block)[index]->~T(); + if (block->ConcurrentQueue::Block::template set_empty(index)) { + entry->value.store(nullptr, std::memory_order_relaxed); + parent->add_block_to_free_list(block); + } + } + } guard = { block, index, entry, this->parent }; + + element = std::move(el); + } + else { + element = std::move(el); + el.~T(); + + if (block->ConcurrentQueue::Block::template set_empty(index)) { + { +#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX + debug::DebugLock lock(mutex); +#endif + // Add the block back into the global free pool (and remove from block index) + entry->value.store(nullptr, std::memory_order_relaxed); + } + this->parent->add_block_to_free_list(block); // releases the above store + } + } + + return true; + } + else { + this->dequeueOvercommit.fetch_add(1, std::memory_order_release); + } + } + + return false; + } + + template + bool enqueue_bulk(It itemFirst, size_t count) + { + // First, we need to make sure we have enough room to enqueue all of the elements; + // this means pre-allocating blocks and putting them in the block index (but only if + // all the allocations succeeded). + + // Note that the tailBlock we start off with may not be owned by us any more; + // this happens if it was filled up exactly to the top (setting tailIndex to + // the first index of the next block which is not yet allocated), then dequeued + // completely (putting it on the free list) before we enqueue again. 
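+			// [Illustrative aside; not part of the upstream moodycamel header.]
+			// Unlike an explicit producer, an implicit producer hands fully-consumed blocks back to
+			// the queue's shared free list as soon as they drain (see the set_empty/set_many_empty
+			// handling in dequeue and dequeue_bulk), so tailBlock here is only a hint; the block
+			// index consulted via get_block_index_entry_for_index() is the authoritative mapping
+			// from an element index to its block.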
+ + index_t startTailIndex = this->tailIndex.load(std::memory_order_relaxed); + auto startBlock = this->tailBlock; + Block* firstAllocatedBlock = nullptr; + auto endBlock = this->tailBlock; + + // Figure out how many blocks we'll need to allocate, and do so + size_t blockBaseDiff = ((startTailIndex + count - 1) & ~static_cast(BLOCK_SIZE - 1)) - ((startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1)); + index_t currentTailIndex = (startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1); + if (blockBaseDiff > 0) { +#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX + debug::DebugLock lock(mutex); +#endif + do { + blockBaseDiff -= static_cast(BLOCK_SIZE); + currentTailIndex += static_cast(BLOCK_SIZE); + + // Find out where we'll be inserting this block in the block index + BlockIndexEntry* idxEntry = nullptr; // initialization here unnecessary but compiler can't always tell + Block* newBlock; + bool indexInserted = false; + auto head = this->headIndex.load(std::memory_order_relaxed); + assert(!details::circular_less_than(currentTailIndex, head)); + bool full = !details::circular_less_than(head, currentTailIndex + BLOCK_SIZE) || (MAX_SUBQUEUE_SIZE != details::const_numeric_max::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head)); + if (full || !(indexInserted = insert_block_index_entry(idxEntry, currentTailIndex)) || (newBlock = this->parent->ConcurrentQueue::template requisition_block()) == nullptr) { + // Index allocation or block allocation failed; revert any other allocations + // and index insertions done so far for this operation + if (indexInserted) { + rewind_block_index_tail(); + idxEntry->value.store(nullptr, std::memory_order_relaxed); + } + currentTailIndex = (startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1); + for (auto block = firstAllocatedBlock; block != nullptr; block = block->next) { + currentTailIndex += static_cast(BLOCK_SIZE); + idxEntry = get_block_index_entry_for_index(currentTailIndex); + idxEntry->value.store(nullptr, std::memory_order_relaxed); + rewind_block_index_tail(); + } + this->parent->add_blocks_to_free_list(firstAllocatedBlock); + this->tailBlock = startBlock; + + return false; + } + +#if MCDBGQ_TRACKMEM + newBlock->owner = this; +#endif + newBlock->ConcurrentQueue::Block::template reset_empty(); + newBlock->next = nullptr; + + // Insert the new block into the index + idxEntry->value.store(newBlock, std::memory_order_relaxed); + + // Store the chain of blocks so that we can undo if later allocations fail, + // and so that we can find the blocks when we do the actual enqueueing + if ((startTailIndex & static_cast(BLOCK_SIZE - 1)) != 0 || firstAllocatedBlock != nullptr) { + assert(this->tailBlock != nullptr); + this->tailBlock->next = newBlock; + } + this->tailBlock = newBlock; + endBlock = newBlock; + firstAllocatedBlock = firstAllocatedBlock == nullptr ? 
newBlock : firstAllocatedBlock; + } while (blockBaseDiff > 0); + } + + // Enqueue, one block at a time + index_t newTailIndex = startTailIndex + static_cast(count); + currentTailIndex = startTailIndex; + this->tailBlock = startBlock; + assert((startTailIndex & static_cast(BLOCK_SIZE - 1)) != 0 || firstAllocatedBlock != nullptr || count == 0); + if ((startTailIndex & static_cast(BLOCK_SIZE - 1)) == 0 && firstAllocatedBlock != nullptr) { + this->tailBlock = firstAllocatedBlock; + } + while (true) { + auto stopIndex = (currentTailIndex & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); + if (details::circular_less_than(newTailIndex, stopIndex)) { + stopIndex = newTailIndex; + } + if (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (nullptr) T(details::deref_noexcept(itemFirst)))) { + while (currentTailIndex != stopIndex) { + new ((*this->tailBlock)[currentTailIndex++]) T(*itemFirst++); + } + } + else { + MOODYCAMEL_TRY { + while (currentTailIndex != stopIndex) { + new ((*this->tailBlock)[currentTailIndex]) T(details::nomove_if<(bool)!MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (nullptr) T(details::deref_noexcept(itemFirst)))>::eval(*itemFirst)); + ++currentTailIndex; + ++itemFirst; + } + } + MOODYCAMEL_CATCH (...) { + auto constructedStopIndex = currentTailIndex; + auto lastBlockEnqueued = this->tailBlock; + + if (!details::is_trivially_destructible::value) { + auto block = startBlock; + if ((startTailIndex & static_cast(BLOCK_SIZE - 1)) == 0) { + block = firstAllocatedBlock; + } + currentTailIndex = startTailIndex; + while (true) { + stopIndex = (currentTailIndex & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); + if (details::circular_less_than(constructedStopIndex, stopIndex)) { + stopIndex = constructedStopIndex; + } + while (currentTailIndex != stopIndex) { + (*block)[currentTailIndex++]->~T(); + } + if (block == lastBlockEnqueued) { + break; + } + block = block->next; + } + } + + currentTailIndex = (startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1); + for (auto block = firstAllocatedBlock; block != nullptr; block = block->next) { + currentTailIndex += static_cast(BLOCK_SIZE); + auto idxEntry = get_block_index_entry_for_index(currentTailIndex); + idxEntry->value.store(nullptr, std::memory_order_relaxed); + rewind_block_index_tail(); + } + this->parent->add_blocks_to_free_list(firstAllocatedBlock); + this->tailBlock = startBlock; + MOODYCAMEL_RETHROW; + } + } + + if (this->tailBlock == endBlock) { + assert(currentTailIndex == newTailIndex); + break; + } + this->tailBlock = this->tailBlock->next; + } + this->tailIndex.store(newTailIndex, std::memory_order_release); + return true; + } + + template + size_t dequeue_bulk(It& itemFirst, size_t max) + { + auto tail = this->tailIndex.load(std::memory_order_relaxed); + auto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed); + auto desiredCount = static_cast(tail - (this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit)); + if (details::circular_less_than(0, desiredCount)) { + desiredCount = desiredCount < max ? desiredCount : max; + std::atomic_thread_fence(std::memory_order_acquire); + + auto myDequeueCount = this->dequeueOptimisticCount.fetch_add(desiredCount, std::memory_order_relaxed); + + tail = this->tailIndex.load(std::memory_order_acquire); + auto actualCount = static_cast(tail - (myDequeueCount - overcommit)); + if (details::circular_less_than(0, actualCount)) { + actualCount = desiredCount < actualCount ? 
desiredCount : actualCount; + if (actualCount < desiredCount) { + this->dequeueOvercommit.fetch_add(desiredCount - actualCount, std::memory_order_release); + } + + // Get the first index. Note that since there's guaranteed to be at least actualCount elements, this + // will never exceed tail. + auto firstIndex = this->headIndex.fetch_add(actualCount, std::memory_order_acq_rel); + + // Iterate the blocks and dequeue + auto index = firstIndex; + BlockIndexHeader* localBlockIndex; + auto indexIndex = get_block_index_index_for_index(index, localBlockIndex); + do { + auto blockStartIndex = index; + auto endIndex = (index & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); + endIndex = details::circular_less_than(firstIndex + static_cast(actualCount), endIndex) ? firstIndex + static_cast(actualCount) : endIndex; + + auto entry = localBlockIndex->index[indexIndex]; + auto block = entry->value.load(std::memory_order_relaxed); + if (MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, details::deref_noexcept(itemFirst) = std::move((*(*block)[index])))) { + while (index != endIndex) { + auto& el = *((*block)[index]); + *itemFirst++ = std::move(el); + el.~T(); + ++index; + } + } + else { + MOODYCAMEL_TRY { + while (index != endIndex) { + auto& el = *((*block)[index]); + *itemFirst = std::move(el); + ++itemFirst; + el.~T(); + ++index; + } + } + MOODYCAMEL_CATCH (...) { + do { + entry = localBlockIndex->index[indexIndex]; + block = entry->value.load(std::memory_order_relaxed); + while (index != endIndex) { + (*block)[index++]->~T(); + } + + if (block->ConcurrentQueue::Block::template set_many_empty(blockStartIndex, static_cast(endIndex - blockStartIndex))) { +#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX + debug::DebugLock lock(mutex); +#endif + entry->value.store(nullptr, std::memory_order_relaxed); + this->parent->add_block_to_free_list(block); + } + indexIndex = (indexIndex + 1) & (localBlockIndex->capacity - 1); + + blockStartIndex = index; + endIndex = (index & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); + endIndex = details::circular_less_than(firstIndex + static_cast(actualCount), endIndex) ? firstIndex + static_cast(actualCount) : endIndex; + } while (index != firstIndex + actualCount); + + MOODYCAMEL_RETHROW; + } + } + if (block->ConcurrentQueue::Block::template set_many_empty(blockStartIndex, static_cast(endIndex - blockStartIndex))) { + { +#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX + debug::DebugLock lock(mutex); +#endif + // Note that the set_many_empty above did a release, meaning that anybody who acquires the block + // we're about to free can use it safely since our writes (and reads!) will have happened-before then. 
+ entry->value.store(nullptr, std::memory_order_relaxed); + } + this->parent->add_block_to_free_list(block); // releases the above store + } + indexIndex = (indexIndex + 1) & (localBlockIndex->capacity - 1); + } while (index != firstIndex + actualCount); + + return actualCount; + } + else { + this->dequeueOvercommit.fetch_add(desiredCount, std::memory_order_release); + } + } + + return 0; + } + + private: + // The block size must be > 1, so any number with the low bit set is an invalid block base index + static const index_t INVALID_BLOCK_BASE = 1; + + struct BlockIndexEntry + { + std::atomic key; + std::atomic value; + }; + + struct BlockIndexHeader + { + size_t capacity; + std::atomic tail; + BlockIndexEntry* entries; + BlockIndexEntry** index; + BlockIndexHeader* prev; + }; + + template + inline bool insert_block_index_entry(BlockIndexEntry*& idxEntry, index_t blockStartIndex) + { + auto localBlockIndex = blockIndex.load(std::memory_order_relaxed); // We're the only writer thread, relaxed is OK + if (localBlockIndex == nullptr) { + return false; // this can happen if new_block_index failed in the constructor + } + auto newTail = (localBlockIndex->tail.load(std::memory_order_relaxed) + 1) & (localBlockIndex->capacity - 1); + idxEntry = localBlockIndex->index[newTail]; + if (idxEntry->key.load(std::memory_order_relaxed) == INVALID_BLOCK_BASE || + idxEntry->value.load(std::memory_order_relaxed) == nullptr) { + + idxEntry->key.store(blockStartIndex, std::memory_order_relaxed); + localBlockIndex->tail.store(newTail, std::memory_order_release); + return true; + } + + // No room in the old block index, try to allocate another one! + if (allocMode == CannotAlloc || !new_block_index()) { + return false; + } + localBlockIndex = blockIndex.load(std::memory_order_relaxed); + newTail = (localBlockIndex->tail.load(std::memory_order_relaxed) + 1) & (localBlockIndex->capacity - 1); + idxEntry = localBlockIndex->index[newTail]; + assert(idxEntry->key.load(std::memory_order_relaxed) == INVALID_BLOCK_BASE); + idxEntry->key.store(blockStartIndex, std::memory_order_relaxed); + localBlockIndex->tail.store(newTail, std::memory_order_release); + return true; + } + + inline void rewind_block_index_tail() + { + auto localBlockIndex = blockIndex.load(std::memory_order_relaxed); + localBlockIndex->tail.store((localBlockIndex->tail.load(std::memory_order_relaxed) - 1) & (localBlockIndex->capacity - 1), std::memory_order_relaxed); + } + + inline BlockIndexEntry* get_block_index_entry_for_index(index_t index) const + { + BlockIndexHeader* localBlockIndex; + auto idx = get_block_index_index_for_index(index, localBlockIndex); + return localBlockIndex->index[idx]; + } + + inline size_t get_block_index_index_for_index(index_t index, BlockIndexHeader*& localBlockIndex) const + { +#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX + debug::DebugLock lock(mutex); +#endif + index &= ~static_cast(BLOCK_SIZE - 1); + localBlockIndex = blockIndex.load(std::memory_order_acquire); + auto tail = localBlockIndex->tail.load(std::memory_order_acquire); + auto tailBase = localBlockIndex->index[tail]->key.load(std::memory_order_relaxed); + assert(tailBase != INVALID_BLOCK_BASE); + // Note: Must use division instead of shift because the index may wrap around, causing a negative + // offset, whose negativity we want to preserve + auto offset = static_cast(static_cast::type>(index - tailBase) / BLOCK_SIZE); + size_t idx = (tail + offset) & (localBlockIndex->capacity - 1); + assert(localBlockIndex->index[idx]->key.load(std::memory_order_relaxed) 
== index && localBlockIndex->index[idx]->value.load(std::memory_order_relaxed) != nullptr); + return idx; + } + + bool new_block_index() + { + auto prev = blockIndex.load(std::memory_order_relaxed); + size_t prevCapacity = prev == nullptr ? 0 : prev->capacity; + auto entryCount = prev == nullptr ? nextBlockIndexCapacity : prevCapacity; + auto raw = static_cast((Traits::malloc)( + sizeof(BlockIndexHeader) + + std::alignment_of::value - 1 + sizeof(BlockIndexEntry) * entryCount + + std::alignment_of::value - 1 + sizeof(BlockIndexEntry*) * nextBlockIndexCapacity)); + if (raw == nullptr) { + return false; + } + + auto header = new (raw) BlockIndexHeader; + auto entries = reinterpret_cast(details::align_for(raw + sizeof(BlockIndexHeader))); + auto index = reinterpret_cast(details::align_for(reinterpret_cast(entries) + sizeof(BlockIndexEntry) * entryCount)); + if (prev != nullptr) { + auto prevTail = prev->tail.load(std::memory_order_relaxed); + auto prevPos = prevTail; + size_t i = 0; + do { + prevPos = (prevPos + 1) & (prev->capacity - 1); + index[i++] = prev->index[prevPos]; + } while (prevPos != prevTail); + assert(i == prevCapacity); + } + for (size_t i = 0; i != entryCount; ++i) { + new (entries + i) BlockIndexEntry; + entries[i].key.store(INVALID_BLOCK_BASE, std::memory_order_relaxed); + index[prevCapacity + i] = entries + i; + } + header->prev = prev; + header->entries = entries; + header->index = index; + header->capacity = nextBlockIndexCapacity; + header->tail.store((prevCapacity - 1) & (nextBlockIndexCapacity - 1), std::memory_order_relaxed); + + blockIndex.store(header, std::memory_order_release); + + nextBlockIndexCapacity <<= 1; + + return true; + } + + private: + size_t nextBlockIndexCapacity; + std::atomic blockIndex; + +#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED + public: + details::ThreadExitListener threadExitListener; + private: +#endif + +#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG + public: + ImplicitProducer* nextImplicitProducer; + private: +#endif + +#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX + mutable debug::DebugMutex mutex; +#endif +#if MCDBGQ_TRACKMEM + friend struct MemStats; +#endif + }; + + + ////////////////////////////////// + // Block pool manipulation + ////////////////////////////////// + + void populate_initial_block_list(size_t blockCount) + { + initialBlockPoolSize = blockCount; + if (initialBlockPoolSize == 0) { + initialBlockPool = nullptr; + return; + } + + initialBlockPool = create_array(blockCount); + if (initialBlockPool == nullptr) { + initialBlockPoolSize = 0; + } + for (size_t i = 0; i < initialBlockPoolSize; ++i) { + initialBlockPool[i].dynamicallyAllocated = false; + } + } + + inline Block* try_get_block_from_initial_pool() + { + if (initialBlockPoolIndex.load(std::memory_order_relaxed) >= initialBlockPoolSize) { + return nullptr; + } + + auto index = initialBlockPoolIndex.fetch_add(1, std::memory_order_relaxed); + + return index < initialBlockPoolSize ? 
(initialBlockPool + index) : nullptr; + } + + inline void add_block_to_free_list(Block* block) + { +#if MCDBGQ_TRACKMEM + block->owner = nullptr; +#endif + freeList.add(block); + } + + inline void add_blocks_to_free_list(Block* block) + { + while (block != nullptr) { + auto next = block->next; + add_block_to_free_list(block); + block = next; + } + } + + inline Block* try_get_block_from_free_list() + { + return freeList.try_get(); + } + + // Gets a free block from one of the memory pools, or allocates a new one (if applicable) + template + Block* requisition_block() + { + auto block = try_get_block_from_initial_pool(); + if (block != nullptr) { + return block; + } + + block = try_get_block_from_free_list(); + if (block != nullptr) { + return block; + } + + if (canAlloc == CanAlloc) { + return create(); + } + + return nullptr; + } + + +#if MCDBGQ_TRACKMEM + public: + struct MemStats { + size_t allocatedBlocks; + size_t usedBlocks; + size_t freeBlocks; + size_t ownedBlocksExplicit; + size_t ownedBlocksImplicit; + size_t implicitProducers; + size_t explicitProducers; + size_t elementsEnqueued; + size_t blockClassBytes; + size_t queueClassBytes; + size_t implicitBlockIndexBytes; + size_t explicitBlockIndexBytes; + + friend class ConcurrentQueue; + + private: + static MemStats getFor(ConcurrentQueue* q) + { + MemStats stats = { 0 }; + + stats.elementsEnqueued = q->size_approx(); + + auto block = q->freeList.head_unsafe(); + while (block != nullptr) { + ++stats.allocatedBlocks; + ++stats.freeBlocks; + block = block->freeListNext.load(std::memory_order_relaxed); + } + + for (auto ptr = q->producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { + bool implicit = dynamic_cast(ptr) != nullptr; + stats.implicitProducers += implicit ? 1 : 0; + stats.explicitProducers += implicit ? 
0 : 1; + + if (implicit) { + auto prod = static_cast(ptr); + stats.queueClassBytes += sizeof(ImplicitProducer); + auto head = prod->headIndex.load(std::memory_order_relaxed); + auto tail = prod->tailIndex.load(std::memory_order_relaxed); + auto hash = prod->blockIndex.load(std::memory_order_relaxed); + if (hash != nullptr) { + for (size_t i = 0; i != hash->capacity; ++i) { + if (hash->index[i]->key.load(std::memory_order_relaxed) != ImplicitProducer::INVALID_BLOCK_BASE && hash->index[i]->value.load(std::memory_order_relaxed) != nullptr) { + ++stats.allocatedBlocks; + ++stats.ownedBlocksImplicit; + } + } + stats.implicitBlockIndexBytes += hash->capacity * sizeof(typename ImplicitProducer::BlockIndexEntry); + for (; hash != nullptr; hash = hash->prev) { + stats.implicitBlockIndexBytes += sizeof(typename ImplicitProducer::BlockIndexHeader) + hash->capacity * sizeof(typename ImplicitProducer::BlockIndexEntry*); + } + } + for (; details::circular_less_than(head, tail); head += BLOCK_SIZE) { + //auto block = prod->get_block_index_entry_for_index(head); + ++stats.usedBlocks; + } + } + else { + auto prod = static_cast(ptr); + stats.queueClassBytes += sizeof(ExplicitProducer); + auto tailBlock = prod->tailBlock; + bool wasNonEmpty = false; + if (tailBlock != nullptr) { + auto block = tailBlock; + do { + ++stats.allocatedBlocks; + if (!block->ConcurrentQueue::Block::template is_empty() || wasNonEmpty) { + ++stats.usedBlocks; + wasNonEmpty = wasNonEmpty || block != tailBlock; + } + ++stats.ownedBlocksExplicit; + block = block->next; + } while (block != tailBlock); + } + auto index = prod->blockIndex.load(std::memory_order_relaxed); + while (index != nullptr) { + stats.explicitBlockIndexBytes += sizeof(typename ExplicitProducer::BlockIndexHeader) + index->size * sizeof(typename ExplicitProducer::BlockIndexEntry); + index = static_cast(index->prev); + } + } + } + + auto freeOnInitialPool = q->initialBlockPoolIndex.load(std::memory_order_relaxed) >= q->initialBlockPoolSize ? 0 : q->initialBlockPoolSize - q->initialBlockPoolIndex.load(std::memory_order_relaxed); + stats.allocatedBlocks += freeOnInitialPool; + stats.freeBlocks += freeOnInitialPool; + + stats.blockClassBytes = sizeof(Block) * stats.allocatedBlocks; + stats.queueClassBytes += sizeof(ConcurrentQueue); + + return stats; + } + }; + + // For debugging only. Not thread-safe. + MemStats getMemStats() + { + return MemStats::getFor(this); + } + private: + friend struct MemStats; +#endif + + + ////////////////////////////////// + // Producer list manipulation + ////////////////////////////////// + + ProducerBase* recycle_or_create_producer(bool isExplicit) + { + bool recycled; + return recycle_or_create_producer(isExplicit, recycled); + } + + ProducerBase* recycle_or_create_producer(bool isExplicit, bool& recycled) + { +#if MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH + debug::DebugLock lock(implicitProdMutex); +#endif + // Try to re-use one first + for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { + if (ptr->inactive.load(std::memory_order_relaxed) && ptr->isExplicit == isExplicit) { + bool expected = true; + if (ptr->inactive.compare_exchange_strong(expected, /* desired */ false, std::memory_order_acquire, std::memory_order_relaxed)) { + // We caught one! It's been marked as activated, the caller can have it + recycled = true; + return ptr; + } + } + } + + recycled = false; + return add_producer(isExplicit ? 
static_cast(create(this)) : create(this)); + } + + ProducerBase* add_producer(ProducerBase* producer) + { + // Handle failed memory allocation + if (producer == nullptr) { + return nullptr; + } + + producerCount.fetch_add(1, std::memory_order_relaxed); + + // Add it to the lock-free list + auto prevTail = producerListTail.load(std::memory_order_relaxed); + do { + producer->next = prevTail; + } while (!producerListTail.compare_exchange_weak(prevTail, producer, std::memory_order_release, std::memory_order_relaxed)); + +#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG + if (producer->isExplicit) { + auto prevTailExplicit = explicitProducers.load(std::memory_order_relaxed); + do { + static_cast(producer)->nextExplicitProducer = prevTailExplicit; + } while (!explicitProducers.compare_exchange_weak(prevTailExplicit, static_cast(producer), std::memory_order_release, std::memory_order_relaxed)); + } + else { + auto prevTailImplicit = implicitProducers.load(std::memory_order_relaxed); + do { + static_cast(producer)->nextImplicitProducer = prevTailImplicit; + } while (!implicitProducers.compare_exchange_weak(prevTailImplicit, static_cast(producer), std::memory_order_release, std::memory_order_relaxed)); + } +#endif + + return producer; + } + + void reown_producers() + { + // After another instance is moved-into/swapped-with this one, all the + // producers we stole still think their parents are the other queue. + // So fix them up! + for (auto ptr = producerListTail.load(std::memory_order_relaxed); ptr != nullptr; ptr = ptr->next_prod()) { + ptr->parent = this; + } + } + + + ////////////////////////////////// + // Implicit producer hash + ////////////////////////////////// + + struct ImplicitProducerKVP + { + std::atomic key; + ImplicitProducer* value; // No need for atomicity since it's only read by the thread that sets it in the first place + + ImplicitProducerKVP() : value(nullptr) { } + + ImplicitProducerKVP(ImplicitProducerKVP&& other) MOODYCAMEL_NOEXCEPT + { + key.store(other.key.load(std::memory_order_relaxed), std::memory_order_relaxed); + value = other.value; + } + + inline ImplicitProducerKVP& operator=(ImplicitProducerKVP&& other) MOODYCAMEL_NOEXCEPT + { + swap(other); + return *this; + } + + inline void swap(ImplicitProducerKVP& other) MOODYCAMEL_NOEXCEPT + { + if (this != &other) { + details::swap_relaxed(key, other.key); + std::swap(value, other.value); + } + } + }; + + template + friend void moodycamel::swap(typename ConcurrentQueue::ImplicitProducerKVP&, typename ConcurrentQueue::ImplicitProducerKVP&) MOODYCAMEL_NOEXCEPT; + + struct ImplicitProducerHash + { + size_t capacity; + ImplicitProducerKVP* entries; + ImplicitProducerHash* prev; + }; + + inline void populate_initial_implicit_producer_hash() + { + if (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return; + + implicitProducerHashCount.store(0, std::memory_order_relaxed); + auto hash = &initialImplicitProducerHash; + hash->capacity = INITIAL_IMPLICIT_PRODUCER_HASH_SIZE; + hash->entries = &initialImplicitProducerHashEntries[0]; + for (size_t i = 0; i != INITIAL_IMPLICIT_PRODUCER_HASH_SIZE; ++i) { + initialImplicitProducerHashEntries[i].key.store(details::invalid_thread_id, std::memory_order_relaxed); + } + hash->prev = nullptr; + implicitProducerHash.store(hash, std::memory_order_relaxed); + } + + void swap_implicit_producer_hashes(ConcurrentQueue& other) + { + if (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return; + + // Swap (assumes our implicit producer hash is initialized) + 
initialImplicitProducerHashEntries.swap(other.initialImplicitProducerHashEntries); + initialImplicitProducerHash.entries = &initialImplicitProducerHashEntries[0]; + other.initialImplicitProducerHash.entries = &other.initialImplicitProducerHashEntries[0]; + + details::swap_relaxed(implicitProducerHashCount, other.implicitProducerHashCount); + + details::swap_relaxed(implicitProducerHash, other.implicitProducerHash); + if (implicitProducerHash.load(std::memory_order_relaxed) == &other.initialImplicitProducerHash) { + implicitProducerHash.store(&initialImplicitProducerHash, std::memory_order_relaxed); + } + else { + ImplicitProducerHash* hash; + for (hash = implicitProducerHash.load(std::memory_order_relaxed); hash->prev != &other.initialImplicitProducerHash; hash = hash->prev) { + continue; + } + hash->prev = &initialImplicitProducerHash; + } + if (other.implicitProducerHash.load(std::memory_order_relaxed) == &initialImplicitProducerHash) { + other.implicitProducerHash.store(&other.initialImplicitProducerHash, std::memory_order_relaxed); + } + else { + ImplicitProducerHash* hash; + for (hash = other.implicitProducerHash.load(std::memory_order_relaxed); hash->prev != &initialImplicitProducerHash; hash = hash->prev) { + continue; + } + hash->prev = &other.initialImplicitProducerHash; + } + } + + // Only fails (returns nullptr) if memory allocation fails + ImplicitProducer* get_or_add_implicit_producer() + { + // Note that since the data is essentially thread-local (key is thread ID), + // there's a reduced need for fences (memory ordering is already consistent + // for any individual thread), except for the current table itself. + + // Start by looking for the thread ID in the current and all previous hash tables. + // If it's not found, it must not be in there yet, since this same thread would + // have added it previously to one of the tables that we traversed. + + // Code and algorithm adapted from http://preshing.com/20130605/the-worlds-simplest-lock-free-hash-table + +#if MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH + debug::DebugLock lock(implicitProdMutex); +#endif + + auto id = details::thread_id(); + auto hashedId = details::hash_thread_id(id); + + auto mainHash = implicitProducerHash.load(std::memory_order_acquire); + for (auto hash = mainHash; hash != nullptr; hash = hash->prev) { + // Look for the id in this hash + auto index = hashedId; + while (true) { // Not an infinite loop because at least one slot is free in the hash table + index &= hash->capacity - 1; + + auto probedKey = hash->entries[index].key.load(std::memory_order_relaxed); + if (probedKey == id) { + // Found it! If we had to search several hashes deep, though, we should lazily add it + // to the current main hash table to avoid the extended search next time. + // Note there's guaranteed to be room in the current hash table since every subsequent + // table implicitly reserves space for all previous tables (there's only one + // implicitProducerHashCount). 
+ auto value = hash->entries[index].value; + if (hash != mainHash) { + index = hashedId; + while (true) { + index &= mainHash->capacity - 1; + probedKey = mainHash->entries[index].key.load(std::memory_order_relaxed); + auto empty = details::invalid_thread_id; +#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED + auto reusable = details::invalid_thread_id2; + if ((probedKey == empty && mainHash->entries[index].key.compare_exchange_strong(empty, id, std::memory_order_relaxed, std::memory_order_relaxed)) || + (probedKey == reusable && mainHash->entries[index].key.compare_exchange_strong(reusable, id, std::memory_order_acquire, std::memory_order_acquire))) { +#else + if ((probedKey == empty && mainHash->entries[index].key.compare_exchange_strong(empty, id, std::memory_order_relaxed, std::memory_order_relaxed))) { +#endif + mainHash->entries[index].value = value; + break; + } + ++index; + } + } + + return value; + } + if (probedKey == details::invalid_thread_id) { + break; // Not in this hash table + } + ++index; + } + } + + // Insert! + auto newCount = 1 + implicitProducerHashCount.fetch_add(1, std::memory_order_relaxed); + while (true) { + if (newCount >= (mainHash->capacity >> 1) && !implicitProducerHashResizeInProgress.test_and_set(std::memory_order_acquire)) { + // We've acquired the resize lock, try to allocate a bigger hash table. + // Note the acquire fence synchronizes with the release fence at the end of this block, and hence when + // we reload implicitProducerHash it must be the most recent version (it only gets changed within this + // locked block). + mainHash = implicitProducerHash.load(std::memory_order_acquire); + if (newCount >= (mainHash->capacity >> 1)) { + auto newCapacity = mainHash->capacity << 1; + while (newCount >= (newCapacity >> 1)) { + newCapacity <<= 1; + } + auto raw = static_cast((Traits::malloc)(sizeof(ImplicitProducerHash) + std::alignment_of::value - 1 + sizeof(ImplicitProducerKVP) * newCapacity)); + if (raw == nullptr) { + // Allocation failed + implicitProducerHashCount.fetch_sub(1, std::memory_order_relaxed); + implicitProducerHashResizeInProgress.clear(std::memory_order_relaxed); + return nullptr; + } + + auto newHash = new (raw) ImplicitProducerHash; + newHash->capacity = newCapacity; + newHash->entries = reinterpret_cast(details::align_for(raw + sizeof(ImplicitProducerHash))); + for (size_t i = 0; i != newCapacity; ++i) { + new (newHash->entries + i) ImplicitProducerKVP; + newHash->entries[i].key.store(details::invalid_thread_id, std::memory_order_relaxed); + } + newHash->prev = mainHash; + implicitProducerHash.store(newHash, std::memory_order_release); + implicitProducerHashResizeInProgress.clear(std::memory_order_release); + mainHash = newHash; + } + else { + implicitProducerHashResizeInProgress.clear(std::memory_order_release); + } + } + + // If it's < three-quarters full, add to the old one anyway so that we don't have to wait for the next table + // to finish being allocated by another thread (and if we just finished allocating above, the condition will + // always be true) + if (newCount < (mainHash->capacity >> 1) + (mainHash->capacity >> 2)) { + bool recycled; + auto producer = static_cast(recycle_or_create_producer(false, recycled)); + if (producer == nullptr) { + implicitProducerHashCount.fetch_sub(1, std::memory_order_relaxed); + return nullptr; + } + if (recycled) { + implicitProducerHashCount.fetch_sub(1, std::memory_order_relaxed); + } + +#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED + producer->threadExitListener.callback = 
&ConcurrentQueue::implicit_producer_thread_exited_callback; + producer->threadExitListener.userData = producer; + details::ThreadExitNotifier::subscribe(&producer->threadExitListener); +#endif + + auto index = hashedId; + while (true) { + index &= mainHash->capacity - 1; + auto probedKey = mainHash->entries[index].key.load(std::memory_order_relaxed); + + auto empty = details::invalid_thread_id; +#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED + auto reusable = details::invalid_thread_id2; + if ((probedKey == empty && mainHash->entries[index].key.compare_exchange_strong(empty, id, std::memory_order_relaxed, std::memory_order_relaxed)) || + (probedKey == reusable && mainHash->entries[index].key.compare_exchange_strong(reusable, id, std::memory_order_acquire, std::memory_order_acquire))) { +#else + if ((probedKey == empty && mainHash->entries[index].key.compare_exchange_strong(empty, id, std::memory_order_relaxed, std::memory_order_relaxed))) { +#endif + mainHash->entries[index].value = producer; + break; + } + ++index; + } + return producer; + } + + // Hmm, the old hash is quite full and somebody else is busy allocating a new one. + // We need to wait for the allocating thread to finish (if it succeeds, we add, if not, + // we try to allocate ourselves). + mainHash = implicitProducerHash.load(std::memory_order_acquire); + } + } + +#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED + void implicit_producer_thread_exited(ImplicitProducer* producer) + { + // Remove from thread exit listeners + details::ThreadExitNotifier::unsubscribe(&producer->threadExitListener); + + // Remove from hash +#if MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH + debug::DebugLock lock(implicitProdMutex); +#endif + auto hash = implicitProducerHash.load(std::memory_order_acquire); + assert(hash != nullptr); // The thread exit listener is only registered if we were added to a hash in the first place + auto id = details::thread_id(); + auto hashedId = details::hash_thread_id(id); + details::thread_id_t probedKey; + + // We need to traverse all the hashes just in case other threads aren't on the current one yet and are + // trying to add an entry thinking there's a free slot (because they reused a producer) + for (; hash != nullptr; hash = hash->prev) { + auto index = hashedId; + do { + index &= hash->capacity - 1; + probedKey = hash->entries[index].key.load(std::memory_order_relaxed); + if (probedKey == id) { + hash->entries[index].key.store(details::invalid_thread_id2, std::memory_order_release); + break; + } + ++index; + } while (probedKey != details::invalid_thread_id); // Can happen if the hash has changed but we weren't put back in it yet, or if we weren't added to this hash in the first place + } + + // Mark the queue as being recyclable + producer->inactive.store(true, std::memory_order_release); + } + + static void implicit_producer_thread_exited_callback(void* userData) + { + auto producer = static_cast(userData); + auto queue = producer->parent; + queue->implicit_producer_thread_exited(producer); + } +#endif + + ////////////////////////////////// + // Utility functions + ////////////////////////////////// + + template + static inline U* create_array(size_t count) + { + assert(count > 0); + auto p = static_cast((Traits::malloc)(sizeof(U) * count)); + if (p == nullptr) { + return nullptr; + } + + for (size_t i = 0; i != count; ++i) { + new (p + i) U(); + } + return p; + } + + template + static inline void destroy_array(U* p, size_t count) + { + if (p != nullptr) { + assert(count > 0); + for (size_t i = count; i != 0; ) { + (p + 
--i)->~U(); + } + (Traits::free)(p); + } + } + + template + static inline U* create() + { + auto p = (Traits::malloc)(sizeof(U)); + return p != nullptr ? new (p) U : nullptr; + } + + template + static inline U* create(A1&& a1) + { + auto p = (Traits::malloc)(sizeof(U)); + return p != nullptr ? new (p) U(std::forward(a1)) : nullptr; + } + + template + static inline void destroy(U* p) + { + if (p != nullptr) { + p->~U(); + } + (Traits::free)(p); + } + +private: + std::atomic producerListTail; + std::atomic producerCount; + + std::atomic initialBlockPoolIndex; + Block* initialBlockPool; + size_t initialBlockPoolSize; + +#if !MCDBGQ_USEDEBUGFREELIST + FreeList freeList; +#else + debug::DebugFreeList freeList; +#endif + + std::atomic implicitProducerHash; + std::atomic implicitProducerHashCount; // Number of slots logically used + ImplicitProducerHash initialImplicitProducerHash; + std::array initialImplicitProducerHashEntries; + std::atomic_flag implicitProducerHashResizeInProgress; + + std::atomic nextExplicitConsumerId; + std::atomic globalExplicitConsumerOffset; + +#if MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH + debug::DebugMutex implicitProdMutex; +#endif + +#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG + std::atomic explicitProducers; + std::atomic implicitProducers; +#endif +}; + + +template +ProducerToken::ProducerToken(ConcurrentQueue& queue) + : producer(queue.recycle_or_create_producer(true)) +{ + if (producer != nullptr) { + producer->token = this; + } +} + +template +ProducerToken::ProducerToken(BlockingConcurrentQueue& queue) + : producer(reinterpret_cast*>(&queue)->recycle_or_create_producer(true)) +{ + if (producer != nullptr) { + producer->token = this; + } +} + +template +ConsumerToken::ConsumerToken(ConcurrentQueue& queue) + : itemsConsumedFromCurrent(0), currentProducer(nullptr), desiredProducer(nullptr) +{ + initialOffset = queue.nextExplicitConsumerId.fetch_add(1, std::memory_order_release); + lastKnownGlobalOffset = -1; +} + +template +ConsumerToken::ConsumerToken(BlockingConcurrentQueue& queue) + : itemsConsumedFromCurrent(0), currentProducer(nullptr), desiredProducer(nullptr) +{ + initialOffset = reinterpret_cast*>(&queue)->nextExplicitConsumerId.fetch_add(1, std::memory_order_release); + lastKnownGlobalOffset = -1; +} + +template +inline void swap(ConcurrentQueue& a, ConcurrentQueue& b) MOODYCAMEL_NOEXCEPT +{ + a.swap(b); +} + +inline void swap(ProducerToken& a, ProducerToken& b) MOODYCAMEL_NOEXCEPT +{ + a.swap(b); +} + +inline void swap(ConsumerToken& a, ConsumerToken& b) MOODYCAMEL_NOEXCEPT +{ + a.swap(b); +} + +template +inline void swap(typename ConcurrentQueue::ImplicitProducerKVP& a, typename ConcurrentQueue::ImplicitProducerKVP& b) MOODYCAMEL_NOEXCEPT +{ + a.swap(b); +} + +} + +#if defined(__GNUC__) +#pragma GCC diagnostic pop +#endif diff --git a/third-party/concurrentqueue/internal/concurrentqueue_internal_debug.h b/third-party/concurrentqueue/internal/concurrentqueue_internal_debug.h new file mode 100644 index 0000000..6db4e22 --- /dev/null +++ b/third-party/concurrentqueue/internal/concurrentqueue_internal_debug.h @@ -0,0 +1,87 @@ +#pragma once + +//#define MCDBGQ_TRACKMEM 1 +//#define MCDBGQ_NOLOCKFREE_FREELIST 1 +//#define MCDBGQ_USEDEBUGFREELIST 1 +//#define MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX 1 +//#define MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH 1 + +#if defined(_WIN32) || defined(__WINDOWS__) || defined(__WIN32__) +#define WIN32_LEAN_AND_MEAN +#include +namespace moodycamel { namespace debug { + struct DebugMutex { + DebugMutex() { 
InitializeCriticalSectionAndSpinCount(&cs, 0x400); } + ~DebugMutex() { DeleteCriticalSection(&cs); } + + void lock() { EnterCriticalSection(&cs); } + void unlock() { LeaveCriticalSection(&cs); } + + private: + CRITICAL_SECTION cs; + }; +} } +#else +#include +namespace moodycamel { namespace debug { + struct DebugMutex { + void lock() { m.lock(); } + void unlock() { m.unlock(); } + + private: + std::mutex m; + }; +} } +#define +#endif + +namespace moodycamel { namespace debug { + struct DebugLock { + explicit DebugLock(DebugMutex& mutex) + : mutex(mutex) + { + mutex.lock(); + } + + ~DebugLock() + { + mutex.unlock(); + } + + private: + DebugMutex& mutex; + }; + + + template + struct DebugFreeList { + DebugFreeList() : head(nullptr) { } + DebugFreeList(DebugFreeList&& other) : head(other.head) { other.head = nullptr; } + void swap(DebugFreeList& other) { std::swap(head, other.head); } + + inline void add(N* node) + { + DebugLock lock(mutex); + node->freeListNext = head; + head = node; + } + + inline N* try_get() + { + DebugLock lock(mutex); + if (head == nullptr) { + return nullptr; + } + + auto prevHead = head; + head = head->freeListNext; + return prevHead; + } + + N* head_unsafe() const { return head; } + + private: + N* head; + DebugMutex mutex; + }; +} } diff --git a/third-party/concurrentqueue/samples.md b/third-party/concurrentqueue/samples.md new file mode 100644 index 0000000..00905ab --- /dev/null +++ b/third-party/concurrentqueue/samples.md @@ -0,0 +1,375 @@ +# Samples for moodycamel::ConcurrentQueue + +Here are some example usage scenarios with sample code. Note that most +use the simplest version of each available method for demonstration purposes, +but they can all be adapted to use tokens and/or the corresponding bulk methods for +extra speed. + + +## Hello queue + + ConcurrentQueue q; + + for (int i = 0; i != 123; ++i) + q.enqueue(i); + + int item; + for (int i = 0; i != 123; ++i) { + q.try_dequeue(item); + assert(item == i); + } + + +## Hello concurrency + +Basic example of how to use the queue from multiple threads, with no +particular goal (i.e. it does nothing, but in an instructive way). + + ConcurrentQueue q; + int dequeued[100] = { 0 }; + std::thread threads[20]; + + // Producers + for (int i = 0; i != 10; ++i) { + threads[i] = std::thread([&](int i) { + for (int j = 0; j != 10; ++j) { + q.enqueue(i * 10 + j); + } + }, i); + } + + // Consumers + for (int i = 10; i != 20; ++i) { + threads[i] = std::thread([&]() { + int item; + for (int j = 0; j != 20; ++j) { + if (q.try_dequeue(item)) { + ++dequeued[item]; + } + } + }); + } + + // Wait for all threads + for (int i = 0; i != 20; ++i) { + threads[i].join(); + } + + // Collect any leftovers (could be some if e.g. consumers finish before producers) + int item; + while (q.try_dequeue(item)) { + ++dequeued[item]; + } + + // Make sure everything went in and came back out! + for (int i = 0; i != 100; ++i) { + assert(dequeued[i] == 1); + } + + +## Bulk up + +Same as previous example, but runs faster. 
+ + ConcurrentQueue q; + int dequeued[100] = { 0 }; + std::thread threads[20]; + + // Producers + for (int i = 0; i != 10; ++i) { + threads[i] = std::thread([&](int i) { + int items[10]; + for (int j = 0; j != 10; ++j) { + items[j] = i * 10 + j; + } + q.enqueue_bulk(items, 10); + }, i); + } + + // Consumers + for (int i = 10; i != 20; ++i) { + threads[i] = std::thread([&]() { + int items[20]; + for (std::size_t count = q.try_dequeue_bulk(items, 20); count != 0; --count) { + ++dequeued[items[count - 1]]; + } + }); + } + + // Wait for all threads + for (int i = 0; i != 20; ++i) { + threads[i].join(); + } + + // Collect any leftovers (could be some if e.g. consumers finish before producers) + int items[10]; + std::size_t count; + while ((count = q.try_dequeue_bulk(items, 10)) != 0) { + for (std::size_t i = 0; i != count; ++i) { + ++dequeued[items[i]]; + } + } + + // Make sure everything went in and came back out! + for (int i = 0; i != 100; ++i) { + assert(dequeued[i] == 1); + } + + +## Producer/consumer model (simultaneous) + +In this model, one set of threads is producing items, +and the other is consuming them concurrently until all of +them have been consumed. The counters are required to +ensure that all items eventually get consumed. + + ConcurrentQueue q; + const int ProducerCount = 8; + const int ConsumerCount = 8; + std::thread producers[ProducerCount]; + std::thread consumers[ConsumerCount]; + std::atomic doneProducers(0); + std::atomic doneConsumers(0); + for (int i = 0; i != ProducerCount; ++i) { + producers[i] = std::thread([&]() { + while (produce) { + q.enqueue(produceItem()); + } + doneProducers.fetch_add(1, std::memory_order_release); + }); + } + for (int i = 0; i != ConsumerCount; ++i) { + consumers[i] = std::thread([&]() { + Item item; + bool itemsLeft; + do { + // It's important to fence (if the producers have finished) *before* dequeueing + itemsLeft = doneProducers.load(std::memory_order_acquire) != ProducerCount; + while (q.try_dequeue(item)) { + itemsLeft = true; + consumeItem(item); + } + } while (itemsLeft || doneConsumers.fetch_add(1, std::memory_order_acq_rel) + 1 == ConsumerCount); + // The condition above is a bit tricky, but it's necessary to ensure that the + // last consumer sees the memory effects of all the other consumers before it + // calls try_dequeue for the last time + }); + } + for (int i = 0; i != ProducerCount; ++i) { + producers[i].join(); + } + for (int i = 0; i != ConsumerCount; ++i) { + consumers[i].join(); + } + +## Producer/consumer model (simultaneous, blocking) + +The blocking version is different, since either the number of elements being produced needs +to be known ahead of time, or some other coordination is required to tell the consumers when +to stop calling wait_dequeue (not shown here). This is necessary because otherwise a consumer +could end up blocking forever -- and destroying a queue while a consumer is blocking on it leads +to undefined behaviour. 
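+
+For illustration, one form that coordination can take (it is not used in the
+sample below) is a producers-done counter combined with a timed wait, so that
+a consumer never blocks indefinitely. This is only a rough sketch; it assumes
+the bundled blockingconcurrentqueue.h provides `wait_dequeue_timed`, which
+recent releases do:
+
+    BlockingConcurrentQueue<Item> q;
+    const int ProducerCount = 8;
+    const int ConsumerCount = 8;
+    std::thread producers[ProducerCount];
+    std::thread consumers[ConsumerCount];
+    std::atomic<int> doneProducers(0);
+    for (int i = 0; i != ProducerCount; ++i) {
+        producers[i] = std::thread([&]() {
+            for (int j = 0; j != 1000; ++j) {
+                q.enqueue(produceItem());
+            }
+            doneProducers.fetch_add(1, std::memory_order_release);
+        });
+    }
+    for (int i = 0; i != ConsumerCount; ++i) {
+        consumers[i] = std::thread([&]() {
+            Item item;
+            while (true) {
+                if (q.wait_dequeue_timed(item, std::chrono::milliseconds(10))) {
+                    consumeItem(item);
+                }
+                else if (doneProducers.load(std::memory_order_acquire) == ProducerCount) {
+                    // All producers have finished and the queue stayed empty
+                    // for a whole timeout; drain any stragglers and stop
+                    while (q.try_dequeue(item)) {
+                        consumeItem(item);
+                    }
+                    break;
+                }
+            }
+        });
+    }
+    for (int i = 0; i != ProducerCount; ++i) {
+        producers[i].join();
+    }
+    for (int i = 0; i != ConsumerCount; ++i) {
+        consumers[i].join();
+    }
+
+The sample below takes the other route instead, fixing the total number of
+elements ahead of time: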
+ + BlockingConcurrentQueue q; + const int ProducerCount = 8; + const int ConsumerCount = 8; + std::thread producers[ProducerCount]; + std::thread consumers[ConsumerCount]; + std::atomic promisedElementsRemaining(ProducerCount * 1000); + for (int i = 0; i != ProducerCount; ++i) { + producers[i] = std::thread([&]() { + for (int j = 0; j != 1000; ++j) { + q.enqueue(produceItem()); + } + }); + } + for (int i = 0; i != ConsumerCount; ++i) { + consumers[i] = std::thread([&]() { + Item item; + while (promisedElementsRemaining.fetch_sub(1, std::memory_order_relaxed)) { + q.wait_dequeue(item); + consumeItem(item); + } + }); + } + for (int i = 0; i != ProducerCount; ++i) { + producers[i].join(); + } + for (int i = 0; i != ConsumerCount; ++i) { + consumers[i].join(); + } + + +## Producer/consumer model (separate stages) + + ConcurrentQueue q; + + // Production stage + std::thread threads[8]; + for (int i = 0; i != 8; ++i) { + threads[i] = std::thread([&]() { + while (produce) { + q.enqueue(produceItem()); + } + }); + } + for (int i = 0; i != 8; ++i) { + threads[i].join(); + } + + // Consumption stage + std::atomic doneConsumers(0); + for (int i = 0; i != 8; ++i) { + threads[i] = std::thread([&]() { + Item item; + do { + while (q.try_dequeue(item)) { + consumeItem(item); + } + // Loop again one last time if we're the last producer (with the acquired + // memory effects of the other producers): + } while (doneConsumers.fetch_add(1, std::memory_order_acq_rel) + 1 == 8); + }); + } + for (int i = 0; i != 8; ++i) { + threads[i].join(); + } + +Note that there's no point trying to use the blocking queue with this model, since +there's no need to use the `wait` methods (all the elements are produced before any +are consumed), and hence the complexity would be the same but with additional overhead. + + +## Object pool + +If you don't know what threads will be using the queue in advance, +you can't really declare any long-term tokens. The obvious solution +is to use the implicit methods (that don't take any tokens): + + // A pool of 'Something' objects that can be safely accessed + // from any thread + class SomethingPool + { + public: + Something getSomething() + { + Something obj; + queue.try_dequeue(obj); + + // If the dequeue succeeded, obj will be an object from the + // thread pool, otherwise it will be the default-constructed + // object as declared above + return obj; + } + + void recycleSomething(Something&& obj) + { + queue.enqueue(std::move(obj)); + } + }; + + +## Threadpool task queue + + BlockingConcurrentQueue q; + + // To create a task from any thread: + q.enqueue(...); + + // On threadpool threads: + Task task; + while (true) { + q.wait_dequeue(task); + + // Process task... + } + + +## Multithreaded game loop + + BlockingConcurrentQueue q; + std::atomic pendingTasks(0); + + // On threadpool threads: + Task task; + while (true) { + q.wait_dequeue(task); + + // Process task... + + pendingTasks.fetch_add(-1, std::memory_order_release); + } + + // Whenever a new task needs to be processed for the frame: + pendingTasks.fetch_add(1, std::memory_order_release); + q.enqueue(...); + + // To wait for all the frame's tasks to complete before rendering: + while (pendingTasks.load(std::memory_order_acquire) != 0) + continue; + + // Alternatively you could help out the thread pool while waiting: + while (pendingTasks.load(std::memory_order_acquire) != 0) { + if (!q.try_dequeue(task)) { + continue; + } + + // Process task... 
+ + pendingTasks.fetch_add(-1, std::memory_order_release); + } + + +## Pump until empty + +This might be useful if, for example, you want to process any remaining items +in the queue before it's destroyed. Note that it is your responsibility +to ensure that the memory effects of any enqueue operations you wish to see on +the dequeue thread are visible (i.e. if you're waiting for a certain set of elements, +you need to use memory fences to ensure that those elements are visible to the dequeue +thread after they've been enqueued). + + ConcurrentQueue q; + + // Single-threaded pumping: + Item item; + while (q.try_dequeue(item)) { + // Process item... + } + // q is guaranteed to be empty here, unless there is another thread enqueueing still or + // there was another thread dequeueing at one point and its memory effects have not + // yet been propagated to this thread. + + // Multi-threaded pumping: + std::thread threads[8]; + std::atomic doneConsumers(0); + for (int i = 0; i != 8; ++i) { + threads[i] = std::thread([&]() { + Item item; + do { + while (q.try_dequeue(item)) { + // Process item... + } + } while (doneConsumers.fetch_add(1, std::memory_order_acq_rel) + 1 == 8); + // If there are still enqueue operations happening on other threads, + // then the queue may not be empty at this point. However, if all enqueue + // operations completed before we finished pumping (and the propagation of + // their memory effects too), and all dequeue operations apart from those + // our threads did above completed before we finished pumping (and the + // propagation of their memory effects too), then the queue is guaranteed + // to be empty at this point. + }); + } + for (int i = 0; i != 8; ++i) { + threads[i].join(); + } + + +## Wait for a queue to become empty (without dequeueing) + +You can't (robustly) :-) However, you can set up your own atomic counter and +poll that instead (see the game loop example). If you're satisfied with merely an estimate, you can use +`size_approx()`. Note that `size_approx()` may return 0 even if the queue is +not completely empty, unless the queue has already stabilized first (no threads +are enqueueing or dequeueing, and all memory effects of any previous operations +have been propagated to the thread before it calls `size_approx()`). diff --git a/third-party/libftdi/.gitignore b/third-party/libftdi/.gitignore new file mode 100644 index 0000000..d05881d --- /dev/null +++ b/third-party/libftdi/.gitignore @@ -0,0 +1,53 @@ +# Normal stuff +*.o +*.a +*.so +*.lo +*.la +*.pc +.deps/ +.libs/ +.kdev4/ +build/ + +# kdevelop +*.kdevelop.pcs +*.kdevses + +# Doxygen documentation +Doxyfile +Doxyfile.xml +doc/Doxyfile +doc/html +doc/man +doc/xml + +# examples +examples/baud_test +examples/bitbang +examples/bitbang2 +examples/bitbang_cbus +examples/bitbang_ft2232 +examples/find_all +examples/find_all_pp +examples/serial_test +examples/simple + +# Backup files and stuff from patches +*.orig +*.rej +*~ +.*.swp + +# libftdi specific +libftdi1-config +libftdi1.spec + +# CMake +CMakeCache.txt +cmake_install.cmake +CMakeFiles + +# Misc. 
binaries +*.dylib +opt diff --git a/third-party/libftdi/AUTHORS b/third-party/libftdi/AUTHORS new file mode 100644 index 0000000..43e5218 --- /dev/null +++ b/third-party/libftdi/AUTHORS @@ -0,0 +1,79 @@ +Main developers: + + Intra2net AG + +Contributors in alphabetical order, +see Changelog for full details: + + Adam Malinowski + Alain Abbas + Alexander Lehmann + Alex Harford + Anders Larsen + Andrei Errapart + Andrew John Rogers + Arnim Läuger + Aurelien Jarno + Benjamin Vanheuverzwijn + Chris Morgan + Chris Zeh + Clifford Wolf + Daniel Kirkham + David Challis + Davide Michelizza + Denis Sirotkin + Emil + Eric Schott + Eugene Hutorny + Evan Nemerson + Evgeny Sinelnikov + Fahrzin Hemmati + Flynn Marquardt + Forest Crossman + Ian Abbott + Jared Boone + Jarkko Sonninen + Jean-Daniel Merkli + Jochen Sprickerhof + Joe Zbiciak + Jon Beniston + Juergen Beisert + Lorenz Moesenlechner + Marek Vavruša + Marius Kintel + Mark Hämmerling + Matthias Janke + Matthias Kranz + Matthias Richter + Matthijs ten Berge + Max + Maxwell Dreytser + Michel Zou + Mike Frysinger + Nathael Pajani + Nathan Fraser + Oleg Seiljus + Paul Fertser + Peter Holik + Raphael Assenat + Robert Cox + Robin Haberkorn + Rodney Sinclair + Rogier Wolff + Rolf Fiedler + Salvador Eduardo Tropea + Stephan Linz + Steven Turner + Tarek Heiland + Thilo Schulz + Thimo Eichstaedt + Thomas Fischl + Thomas Klose + Tim Ansell + Tom Saunders + Uwe Bonnes + Vladimir Yakovlev + Wilfried Holzke + Xiaofan Chen + Yegor Yefremov + Yi-Shin Li diff --git a/third-party/libftdi/CMakeLists.txt b/third-party/libftdi/CMakeLists.txt new file mode 100644 index 0000000..b6ad430 --- /dev/null +++ b/third-party/libftdi/CMakeLists.txt @@ -0,0 +1,244 @@ +# Project +project(libftdi1) +set(MAJOR_VERSION 1) +set(MINOR_VERSION 4) +set(PACKAGE libftdi1) +set(VERSION_STRING ${MAJOR_VERSION}.${MINOR_VERSION}) +set(VERSION ${VERSION_STRING}) +set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake) + +# CMake +if("${CMAKE_BUILD_TYPE}" STREQUAL "") + set(CMAKE_BUILD_TYPE RelWithDebInfo) +endif("${CMAKE_BUILD_TYPE}" STREQUAL "") +set(CMAKE_COLOR_MAKEFILE ON) +cmake_minimum_required(VERSION 2.6 FATAL_ERROR) + +add_definitions(-Wall) + +# Debug build +message("-- Build type: ${CMAKE_BUILD_TYPE}") +if(${CMAKE_BUILD_TYPE} STREQUAL Debug) + add_definitions(-DDEBUG) +endif(${CMAKE_BUILD_TYPE} STREQUAL Debug) + +# find libusb +find_package ( USB1 REQUIRED ) +include_directories ( ${LIBUSB_INCLUDE_DIR} ) + +# Find Boost (optional package) +find_package(Boost) + +# Set components +set(CPACK_COMPONENTS_ALL sharedlibs staticlibs headers) +set(CPACK_COMPONENT_SHAREDLIBS_DISPLAY_NAME "Shared libraries") +set(CPACK_COMPONENT_STATICLIBS_DISPLAY_NAME "Static libraries") +set(CPACK_COMPONENT_HEADERS_DISPLAY_NAME "C++ Headers") + +set(CPACK_COMPONENT_SHAREDLIBS_DESCRIPTION +"Shared library for general use.") +set(CPACK_COMPONENT_STATICLIBS_DESCRIPTION +"Static library, good if you want to embed libftdi1 in your application.") +set(CPACK_COMPONENT_HEADERS_DESCRIPTION +"C/C++ header files.") + +set(CPACK_COMPONENT_SHAREDLIBS_GROUP "Development") +set(CPACK_COMPONENT_STATICLIBS_GROUP "Development") +set(CPACK_COMPONENT_HEADERS_GROUP "Development") + +option ( STATICLIBS "Build static libraries" ON ) + +# guess LIB_SUFFIX, don't take debian multiarch into account +if ( NOT DEFINED LIB_SUFFIX ) + if( CMAKE_SYSTEM_NAME MATCHES "Linux" + AND NOT CMAKE_CROSSCOMPILING + AND NOT EXISTS "/etc/debian_version" + AND NOT EXISTS "/etc/arch-release" ) + if ( "${CMAKE_SIZEOF_VOID_P}" EQUAL "8" ) + set ( LIB_SUFFIX 
64 ) + endif () + endif () +endif () + +if(NOT APPLE) + if(CMAKE_SIZEOF_VOID_P EQUAL 4) + SET(PACK_ARCH "") + else(CMAKE_SIZEOF_VOID_P EQUAL 8) + SET(PACK_ARCH .x86_64) + endif(CMAKE_SIZEOF_VOID_P EQUAL 4) +else(NOT APPLE) + SET(PACK_ARCH "") +endif(NOT APPLE) + +# Package information +set(CPACK_PACKAGE_VERSION ${VERSION_STRING}) +set(CPACK_PACKAGE_CONTACT "Intra2net AG ") +set(CPACK_PACKAGE_DESCRIPTION "libftdi1 library.") +set(CPACK_PACKAGE_DESCRIPTION_SUMMARY ${CPACK_PACKAGE_DESCRIPTION} + ) +# Package settings +if ( UNIX ) + set(CPACK_GENERATOR "DEB;RPM") + set(CPACK_CMAKE_GENERATOR "Unix Makefiles") + set(CPACK_PACKAGE_NAME ${PROJECT_NAME}) + set(CPACK_PACKAGE_FILE_NAME ${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}${PACK_ARCH}) +endif () + +if ( WIN32 ) + set ( CPACK_GENERATOR "NSIS" ) + set ( CPACK_CMAKE_GENERATOR "MinGW Makefiles" ) + set ( CPACK_PACKAGE_NAME "${PROJECT_NAME}" ) + set ( CPACK_PACKAGE_VENDOR "" ) + set ( CPACK_PACKAGE_INSTALL_DIRECTORY "libftdi1" ) + set ( CPACK_PACKAGE_FILE_NAME "${PROJECT_NAME}-${VERSION_STRING}-win32") + set ( CPACK_NSIS_DISPLAY_NAME "libftdi1" ) + set ( CPACK_NSIS_MODIFY_PATH ON ) +endif () + +set(CPACK_RESOURCE_FILE_LICENSE ${PROJECT_SOURCE_DIR}/LICENSE) + +set(CPACK_SOURCE_GENERATOR TGZ) +set(CPACK_SOURCE_IGNORE_FILES "\\\\.git;~$;build/") +set(CPACK_SOURCE_PACKAGE_FILE_NAME ${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}) + +# Subdirectories +if ( UNIX ) + set ( CPACK_SET_DESTDIR ON ) +endif () + +# "make dist" target +set(ARCHIVE_NAME ${CMAKE_PROJECT_NAME}-${VERSION_STRING}) +add_custom_target(dist + COMMAND git archive --prefix=${ARCHIVE_NAME}/ HEAD + | bzip2 > ${CMAKE_BINARY_DIR}/${ARCHIVE_NAME}.tar.bz2 + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) + +# Tests +option ( BUILD_TESTS "Build unit tests with Boost Unit Test framework" ON ) + +# Documentation +option ( DOCUMENTATION "Generate API documentation with Doxygen" ON ) + + +find_package ( Doxygen ) +if ( DOCUMENTATION AND DOXYGEN_FOUND ) + + # Find doxy config + message(STATUS "Doxygen found.") + + # Copy doxy.config.in + set(top_srcdir ${PROJECT_SOURCE_DIR}) + configure_file(${PROJECT_SOURCE_DIR}/doc/Doxyfile.in ${CMAKE_BINARY_DIR}/Doxyfile ) + configure_file(${PROJECT_SOURCE_DIR}/doc/Doxyfile.xml.in ${CMAKE_BINARY_DIR}/Doxyfile.xml ) + + # Run doxygen + add_custom_command( + OUTPUT ${CMAKE_BINARY_DIR}/doc/html/index.html + COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_BINARY_DIR}/doc + COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_BINARY_DIR}/Doxyfile + DEPENDS ${c_headers};${c_sources};${cpp_sources};${cpp_headers} + ) + + add_custom_target(docs ALL DEPENDS ${CMAKE_BINARY_DIR}/doc/html/index.html) + + message(STATUS "Generating API documentation with Doxygen") +else(DOCUMENTATION AND DOXYGEN_FOUND) + message(STATUS "Not generating API documentation") +endif(DOCUMENTATION AND DOXYGEN_FOUND) + +add_subdirectory(src) +add_subdirectory(ftdipp) +add_subdirectory(python) +add_subdirectory(ftdi_eeprom) +add_subdirectory(examples) +add_subdirectory(packages) +add_subdirectory(test) + +# PkgConfig +set(prefix ${CMAKE_INSTALL_PREFIX}) +set(exec_prefix ${CMAKE_INSTALL_PREFIX}/bin) +set(includedir ${CMAKE_INSTALL_PREFIX}/include/${PROJECT_NAME}) + +if(${UNIX}) + set(libdir ${CMAKE_INSTALL_PREFIX}/lib${LIB_SUFFIX}) +endif(${UNIX}) +if(${WIN32}) + set(libdir ${CMAKE_INSTALL_PREFIX}/bin) +endif(${WIN32}) + +configure_file(${PROJECT_SOURCE_DIR}/libftdi1.spec.in ${CMAKE_BINARY_DIR}/libftdi1.spec @ONLY) +configure_file(${PROJECT_SOURCE_DIR}/libftdi1.pc.in ${CMAKE_BINARY_DIR}/libftdi1.pc @ONLY) 
+configure_file(${PROJECT_SOURCE_DIR}/libftdipp1.pc.in ${CMAKE_BINARY_DIR}/libftdipp1.pc @ONLY) +install(FILES ${CMAKE_BINARY_DIR}/libftdi1.pc ${CMAKE_BINARY_DIR}/libftdipp1.pc + DESTINATION lib${LIB_SUFFIX}/pkgconfig) + +if (UNIX OR MINGW) + configure_file ( libftdi1-config.in ${CMAKE_CURRENT_BINARY_DIR}/libftdi1-config @ONLY ) + install ( PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/libftdi1-config + DESTINATION bin ) +endif () + +# config script install path +if ( NOT DEFINED LIBFTDI_CMAKE_CONFIG_DIR ) + set ( LIBFTDI_CMAKE_CONFIG_DIR lib${LIB_SUFFIX}/cmake/libftdi1 ) +endif () + +set ( LIBFTDI_INCLUDE_DIR ${includedir} ) +set ( LIBFTDI_INCLUDE_DIRS ${LIBFTDI_INCLUDE_DIR} ) +set ( LIBFTDI_LIBRARY ftdi1 ) +set ( LIBFTDI_LIBRARIES ${LIBFTDI_LIBRARY} ) +list ( APPEND LIBFTDI_LIBRARIES ${LIBUSB_LIBRARIES} ) +set ( LIBFTDI_STATIC_LIBRARY ftdi1.a ) +set ( LIBFTDI_STATIC_LIBRARIES ${LIBFTDI_STATIC_LIBRARY} ) +list ( APPEND LIBFTDI_STATIC_LIBRARIES ${LIBUSB_LIBRARIES} ) +if (FTDI_BUILD_CPP) + set ( LIBFTDIPP_LIBRARY ftdipp1 ) + set ( LIBFTDIPP_LIBRARIES ${LIBFTDIPP_LIBRARY} ) + list ( APPEND LIBFTDIPP_LIBRARIES ${LIBUSB_LIBRARIES} ) +endif () +set ( LIBFTDI_LIBRARY_DIRS ${libdir} ) +set ( LIBFTDI_ROOT_DIR ${prefix} ) +set ( LIBFTDI_VERSION_STRING ${VERSION_STRING} ) +set ( LIBFTDI_VERSION_MAJOR ${MAJOR_VERSION} ) +set ( LIBFTDI_VERSION_MINOR ${MINOR_VERSION} ) + +set ( LIBFTDI_USE_FILE ${CMAKE_INSTALL_PREFIX}/${LIBFTDI_CMAKE_CONFIG_DIR}/UseLibFTDI1.cmake ) + +if(CMAKE_VERSION VERSION_LESS 2.8.8) + configure_file ( cmake/LibFTDI1Config.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/LibFTDI1Config.cmake @ONLY ) + configure_file ( cmake/LibFTDI1ConfigVersion.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/LibFTDI1ConfigVersion.cmake @ONLY ) +else () + include(CMakePackageConfigHelpers) + + configure_package_config_file ( + cmake/LibFTDI1Config.cmake.in + ${CMAKE_CURRENT_BINARY_DIR}/LibFTDI1Config.cmake + INSTALL_DESTINATION ${LIBFTDI_CMAKE_CONFIG_DIR} + PATH_VARS + LIBFTDI_USE_FILE + LIBFTDI_ROOT_DIR + LIBFTDI_INCLUDE_DIR + LIBFTDI_INCLUDE_DIRS + LIBFTDI_LIBRARY_DIRS + NO_CHECK_REQUIRED_COMPONENTS_MACRO + ) + write_basic_package_version_file ( + LibFTDI1ConfigVersion.cmake + VERSION ${LIBFTDI_VERSION_STRING} + COMPATIBILITY AnyNewerVersion + ) +endif () + + +install ( FILES + ${CMAKE_CURRENT_BINARY_DIR}/LibFTDI1Config.cmake + ${CMAKE_CURRENT_BINARY_DIR}/LibFTDI1ConfigVersion.cmake + cmake/UseLibFTDI1.cmake + + DESTINATION ${LIBFTDI_CMAKE_CONFIG_DIR} + ) + + + + +include(CPack) diff --git a/third-party/libftdi/COPYING-CMAKE-SCRIPTS b/third-party/libftdi/COPYING-CMAKE-SCRIPTS new file mode 100644 index 0000000..4b41776 --- /dev/null +++ b/third-party/libftdi/COPYING-CMAKE-SCRIPTS @@ -0,0 +1,22 @@ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/third-party/libftdi/COPYING.GPL b/third-party/libftdi/COPYING.GPL new file mode 100644 index 0000000..d511905 --- /dev/null +++ b/third-party/libftdi/COPYING.GPL @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. 
But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. 
These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. 
If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. 
+ +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. diff --git a/third-party/libftdi/COPYING.LIB b/third-party/libftdi/COPYING.LIB new file mode 100644 index 0000000..5bc8fb2 --- /dev/null +++ b/third-party/libftdi/COPYING.LIB @@ -0,0 +1,481 @@ + GNU LIBRARY GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1991 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the library GPL. It is + numbered 2 because it goes with version 2 of the ordinary GPL.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Library General Public License, applies to some +specially designated Free Software Foundation software, and to any +other libraries whose authors decide to use it. You can use it for +your libraries, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if +you distribute copies of the library, or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. 
If you link a program with the library, you must provide +complete object files to the recipients so that they can relink them +with the library, after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + Our method of protecting your rights has two steps: (1) copyright +the library, and (2) offer you this license which gives you legal +permission to copy, distribute and/or modify the library. + + Also, for each distributor's protection, we want to make certain +that everyone understands that there is no warranty for this free +library. If the library is modified by someone else and passed on, we +want its recipients to know that what they have is not the original +version, so that any problems introduced by others will not reflect on +the original authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that companies distributing free +software will individually obtain patent licenses, thus in effect +transforming the program into proprietary software. To prevent this, +we have made it clear that any patent must be licensed for everyone's +free use or not licensed at all. + + Most GNU software, including some libraries, is covered by the ordinary +GNU General Public License, which was designed for utility programs. This +license, the GNU Library General Public License, applies to certain +designated libraries. This license is quite different from the ordinary +one; be sure to read it in full, and don't assume that anything in it is +the same as in the ordinary license. + + The reason we have a separate public license for some libraries is that +they blur the distinction we usually make between modifying or adding to a +program and simply using it. Linking a program with a library, without +changing the library, is in some sense simply using the library, and is +analogous to running a utility program or application program. However, in +a textual and legal sense, the linked executable is a combined work, a +derivative of the original library, and the ordinary General Public License +treats it as such. + + Because of this blurred distinction, using the ordinary General +Public License for libraries did not effectively promote software +sharing, because most developers did not use the libraries. We +concluded that weaker conditions might promote sharing better. + + However, unrestricted linking of non-free programs would deprive the +users of those programs of all benefit from the free status of the +libraries themselves. This Library General Public License is intended to +permit developers of non-free programs to use free libraries, while +preserving your freedom as a user of such programs to change the free +libraries that are incorporated in them. (We have not seen how to achieve +this as regards changes in header files, but we have achieved it as regards +changes in the actual functions of the Library.) The hope is that this +will lead to faster development of free libraries. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, while the latter only +works together with the library. + + Note that it is possible for a library to be covered by the ordinary +General Public License rather than by this special one. 
+ + GNU LIBRARY GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library which +contains a notice placed by the copyright holder or other authorized +party saying it may be distributed under the terms of this Library +General Public License (also called "this License"). Each licensee is +addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. 
+ + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. 
+ + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also compile or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + c) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + d) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. 
+ + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the source code distributed need not include anything that is normally +distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. 
For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Library General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. 
SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Library General Public + License as published by the Free Software Foundation; either + version 2 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Library General Public License for more details. + + You should have received a copy of the GNU Library General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! 
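As a concrete illustration of the notice placement described above, the top of a library source file might look like the sketch below. The library name, file name, and copyright holder are hypothetical; the notice text itself is quoted verbatim from the terms above.

    /* frob.c -- part of a hypothetical library "frob"
     *
     * Copyright (C) 2018 Jane Example
     *
     * This library is free software; you can redistribute it and/or
     * modify it under the terms of the GNU Library General Public
     * License as published by the Free Software Foundation; either
     * version 2 of the License, or (at your option) any later version.
     *
     * This library is distributed in the hope that it will be useful,
     * but WITHOUT ANY WARRANTY; without even the implied warranty of
     * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     * Library General Public License for more details.
     *
     * You should have received a copy of the GNU Library General Public
     * License along with this library; if not, write to the Free Software
     * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
     */

Each source file carries the same short header, while the full license text lives once in COPYING.LIB at the top of the tree, which is exactly how the libftdi sources added by this patch are laid out.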
diff --git a/third-party/libftdi/ChangeLog b/third-party/libftdi/ChangeLog new file mode 100644 index 0000000..04acd31 --- /dev/null +++ b/third-party/libftdi/ChangeLog @@ -0,0 +1,251 @@ +New in 1.4 - 2017-08-07 +----------------------- +* New ftdi_usb_open_bus_addr() open function +* Use BM/R series baud rate computation for FT230X +* ftdi_get_error_string() now returns const char* +* C++ API: Ability to open devices with empty descriptor strings +* C++ API: Fix enumerations for buffer purge and modem controls +* small build fixes and improvements in the python examples +* ftdi_eeprom / eeprom handling: + * New API function: ftdi_eeprom_get_strings() + * Fix USE_SERIAL handling for 230X type chips + * Make ftdi_read_eeprom_location() endianness independent + * Fix flashing of FT245R + +New in 1.3 - 2016-05-20 +----------------------- +* Added ftdi_usb_get_strings2() to prevent automatic device close (Fahrzin Hemmati) +* Added ftdi_transfer_data_cancel() for cancellation of a submitted transfer, + avoided resubmittion of a canceled transfer in the callbacks, + replaced calls to libusb_handle_events with + libusb_handle_events_timeout_completed (Eugene Hutorny) +* ftdi_eeprom / eeprom handling: + * Add support for arbitrary user data (Salvador Eduardo Tropea) + * Add --build-eeprom support (Salvador Eduardo Tropea) + * Fix use_usb_version config file option (Thilo Schulz) + * Ability to include other config files in EEPROM config file (Thilo Schulz) + * Add external oscillator enable bit (Raphael Assenat) + * Support channel configuration (Stephan Linz) + * Added --device option to ftdi_eeprom to specify FTDI device (Robin Haberkorn) + * Fixed EEPROM user-area space checks for FT232R and FT245R chips (Robin Haberkorn) +* Various improvements to CBUS handling, including the EEPROM (Robin Haberkorn) +* swig wrapper: Fix handling of binary strings in ftdi_write_data() + for python 3 (xantares09) +* cbus python example code (Rodney Sinclair) +* ftdi_stream: fix timeout setting (Ларионов Даниил) +* Fixed typo in CBUS defines: CBUSG_DRIVE1 -> CBUSH_DRIVE1 + +New in 1.2 - 2014-11-21 +----------------------- +* Support for FT230X devices (Uwe Bonnes) +* ftdi_usb_get_strings(): Don't try to open an already open device (Denis Sirotkin) +* Support for finding devices bricked by the Windows driver (Forest Crossman) +* cmake build system: New LibFTDI1ConfigVersion.cmake file (xantares09) +* Fix a typo in the MPSSE command CLK_BYTES_OR_LOW (Benjamin Vanheuverzwijn) +* Minor fixes for MSVC++ (Andrei Errapart) +* Various small code improvements (Florian Preinstorfer, Jochen Sprickerhof, xantares09) + +New in 1.1 - 2014-02-05 +----------------------- +* Fix FT232H eeprom suspend pulldown setting (Davide Michelizza) +* Fix FT232H eeprom user area size (Davide Michelizza) +* Improved mingw build (Paul Fertser and Michel Zou) +* C++ wrapper: Get/set functions for USB timeouts (Jochen Sprickerhof) +* Partial support for FT230X (Nathael Pajani) +* New API function: ftdi_eeprom_set_strings() (Nathael Pajani) +* Prevent possible segfault in ftdi_eeprom_decode() (Nathael Pajani) +* Save device release number in eeprom (Jarkko Sonninen) +* Fix "self powered" eeprom flag (Jarkko Sonninen) +* Improved python wrapper (Michel Zou) +* Many buildsystem improvements (Michel Zou and Mike Frysinger) +* See the git history for more changes and fixes + +New in 1.0 - 2013-01-29 +----------------------- +* Ported to libusb 1.x (initial work by Jie Zhang) +* Many eeprom handling improvements (Uwe Bonnes, Anders Larsen) +* Renamed 
pkconfig, library .so etc. files to "libftdi1" (Intra2net) +* ftdi_eeprom is part of libftdi now (Intra2net) + +* New baudrate calculation code + unit tests (Uwe Bonnes and Intra2net) +* Improved python bindings including python3 support (Michel Zou) +* Switched completely to cmake build system (Intra2net) +* cmake: Easy libftdi discovery via find_package() (Michel Zou) +* eeprom handling now done via get()/set() functions (Uwe Bonnes) +* C++ wrapper: Fixed use-after-free in List::find_all() (Intra2net) +* Documentation updates (Xiaofan Chen) +* See the git history for more changes and fixes + +New in 0.20 - 2012-03-19 +------------------------ +* Support for FT232H (Uwe Bonnes) +* Fixed install location of header files (Uwe Bonnes and Intra2net) +* Backported serial_test tool from libftdi 1.x (Uwe Bonnes) + +New in 0.19 - 2011-05-23 +------------------------ +* Make kernel driver detach configurable (Thomas Klose) +* Correct ftdi_poll_modem_status() result code (Tom Saunders) +* cmake build system improvements (Evgeny Sinelnikov) +* Fix uninitialized memory access in async mode (Intra2net) +* Support for FT232R eeprom features (Hermann Kraus) +* Fix size returned by ftdi_read_data (Hermann Kraus) +* C++ wrapper: Fix infinite recursion in set_bitmode (Intra2net) +* Improvements to the python wrapper (Flynn Marquardt and Chris Zeh) + +New in 0.18 - 2010-06-25 +------------------------ +* Add ftdi_eeprom_free() to free allocated memory in eeprom (Wilfried Holzke) +* More generic error message for the FTDI kernel driver (Intra2net) +* Honor CPPFLAGS in python wrapper build (Alexander Lehmann) +* cmake: Fix package creation on 32-bit machines (Uwe Bonnes) +* Fix swig argument constraints (Intra2net) +* Don't segfault if device is closed or ftdi context is invalid (Intra2net) +* Ability to disable build of examples / documentation (Mike Frysinger and Intra2net) +* Fix typo in python wrapper build (Mike Frysinger) +* Autoconf build system improvements (Mike Frysinger) + +New in 0.17 - 2009-12-19 +------------------------ +* C++ wrapper: Reduced code duplication and small other changes (Intra2net) +* Deprecated old ftdi_enable_bitbang() function (Intra2net) +* New ftdi_usb_open_desc_index() function (Intra2net) +* Added baud rate test example code (Intra2net) +* New serial input example code (Jim Paris) +* Fix modem status byte filtering for USB high speed chips (Intra2net and Jim Paris) +* Add bitmode for synchronous fifo in FT2232H (Uwe Bonnes) +* Fix usb_set_configuration() call on Windows 64 (NIL) +* Fix usb index in ftdi_convert_baudrate() for FT2232H/FT4232H chips (Thimo Eichstaedt) +* Set initial baudrate on correct interface instead of always the first one (Thimo Eichstaedt) +* Call usb_set_configuration() on Windows only (Uwe Bonnes) +* 64 bit and other buildsystem fixes (Uwe Bonnes) +* Don't build --with-async-mode w/ libusb-compat-0.1 (Clifford Wolf) +* Functions for read/write of a single eeprom location (Oleg Seiljus) +* Protect against double close of usb device (Nathan Fraser) +* Fix out-of-tree-build in python wrapper (Aurelien Jarno) +* Autoconf and doxygen cleanup (Jim Paris) + +New in 0.16 - 2009-05-08 +------------------------ +* C++ wrapper: Reopen the device after calling get_strings() in Context::open() (Marek Vavruša and Intra2net) +* C++ wrapper: Fixed an inheritance problem (Marek Vavruša and Intra2net) +* C++ wrapper: Relicensed under GPLv2 + linking exception (Marek Vavruša and Intra2net) +* Support for FT2232H and FT4232H (David Challis, Alex Harford and Intra2net) +* 
Support for mingw cross compile (Uwe Bonnes) +* Python bindings and minor autoconf cleanup (Tarek Heiland) +* Code cleanup in various places (Intra2net) +* Fixed ftdi_read_chipid in some cases (Matthias Richter) +* eeprom decode function and small cleanups (Marius Kintel) +* cmake system improvements (Marius Kintel and Intra2net) +* Fix compilation in -ansi -pedantic mode (Matthias Janke) + +New in 0.15 - 2008-12-19 +------------------------ +* Full C++ wrapper. Needs boost (Marek Vavruša and Intra2net) +* cmake rules (Marek Vavruša) + +New in 0.14 - 2008-09-09 +------------------------ +* Fixed flow control code for second FT2232 interface (Marek Vavruša) +* Ability to set flow control via one USB call (Marek Vavruša) +* 64 bit build support in the RPM spec file (Uwe Bonnes) +* Small fix to the RPM spec file (Uwe Bonnes) +* Ability to set RS232 break type (Intra2net) +* Grouped flow control and modem status code together (Intra2net) + +New in 0.13 - 2008-06-13 +------------------------ +* Build .spec file via configure.in (Intra2net) +* Fixed "libusb-config --cflags" call (Mike Frysinger and Intra2net) +* Always set usb configuration (Mike Frysinger and Intra2net) +* Improved libusb-win32 support (Mike Frysinger) + +New in 0.12 - 2008-04-16 +------------------------ +* Fix build of documentation for "out of tree" builds +* Fix USB config descriptor in the eeprom (Juergen Beisert) +* Ability to purge RX/TX buffers separately (Arnim Läuger) +* Setting of event and error character (Arnim Läuger) +* Poll modem status function (Arnim Läuger and Intra2net) +* Updated documentation and created AUTHORS file + +New in 0.11 - 2008-03-01 +------------------------ +* Vala bindings helper functions (ftdi_new, ftdi_free, ftdi_list_free2) (Even Nermerson) +* Support for different EEPROM sizes (Andrew John Rogers, andrew@rogerstech.co.uk) +* Async write support. Linux only and no error handling. + You have to enable it via --with-async-mode. +* Detection of R-type chips +* FTDIChip-ID read support (Peter Holik) + +New in 0.10 - 2007-05-08 +------------------------ +* Examples for libftdi_usb_find_all and CBUS mode +* Fixed ftdi_list_free +* Small cosmetic changes + +New in 0.9 - 2007-02-09 +----------------------- +* Fixed build without doxygen +* Correct .so file library version + +New in 0.8 - 2007-02-08 +----------------------- +* Complete doxygen documentation and examples +* Extended FT2232C bitbang mode example code (Max) +* ftdi_usb_get_strings function to get device ID strings (Matthijs ten Berge) +* Fix ftdi_read_pins on PowerPC systems (Thomas Fischl) +* Automatically detach ftdi_sio kernel driver (Uwe Bonnes and Intra2net) +* Configurable flow control (Lorenz Moesenlechner and Matthias Kranz) + +New in 0.7 - 2005-10-11 +----------------------- +* Baudrate calculation fix for FT2232C (Steven Turner/FTDI) +* Find all devices by vendor/product id (Tim Ansell and Intra2net) +* Documentation updates (Tim Ansell) + +New in 0.6 - 2005-04-24 +----------------------- +* Set library version on .so file again +* Configurable serial line parameters (Alain Abbas) +* Improved filtering of status bytes (Evgeny Sinelnikov) +* Extended FT2232C support (Uwe Bonnes) +* Small improvement to the baudrate calculation code (Emil) +* Error handling cleanup (Rogier Wolff and Intra2net) + +New in 0.5 - 2004-09-24 +----------------------- +* New autoconf suite +* pkgconfig support +* Status byte filtering now works for "big" readbuffer sizes (Thanks Evgeny!) 
+* Open device by description and/or serial (Evgeny Sinelnikov) +* Improved error handling (Evgeny Sinelnikov) + +New in 0.4 - 2004-06-15 +----------------------- +* Fixed filtering of status bytes (Readbuffer size is now 64 bytes) +* FT2232C support (Steven Turner/FTDI) +* New baudrate calculation code (Ian Abbott) +* Automatic detection of chip type +* Important: ftdi_write_data now returns the bytes written +* Fixed defaults values in ftdi_eeprom_initdefaults (Jean-Daniel Merkli) +* Reset internal readbuffer offsets for reset()/purge_buffers() +* Small typo fixes (Mark Haemmerling) + +New in 0.3 - 2004-03-25 +----------------------- +* Improved read function which takes arbitrary input buffer sizes + Attention: Call ftdi_deinit() on exit to free used memory +* Vastly increased read/write performance (configurable chunksize, default is 4096) +* Set/get latency timer function working (Thanks Steven Turner/FTDI) +* Increased library version because the changes require recompilation + +New in 0.2 - 2004-01-03 +----------------------- +* EEPROM build fix by Daniel Kirkham (Melbourne, Australia) +* Implemented basic ftdi_read_data() function +* EEPROM write fixes + +New in 0.1 - 2003-06-10 +----------------------- +* First public release diff --git a/third-party/libftdi/FindUSB1.cmake b/third-party/libftdi/FindUSB1.cmake new file mode 100644 index 0000000..ebcac99 --- /dev/null +++ b/third-party/libftdi/FindUSB1.cmake @@ -0,0 +1,38 @@ +# - Try to find the freetype library +# Once done this defines +# +# LIBUSB_FOUND - system has libusb +# LIBUSB_INCLUDE_DIR - the libusb include directory +# LIBUSB_LIBRARIES - Link these to use libusb + +# Copyright (c) 2006, 2008 Laurent Montel, +# +# Redistribution and use is allowed according to the terms of the BSD license. +# For details see the accompanying COPYING-CMAKE-SCRIPTS file. + + +if (LIBUSB_INCLUDE_DIR AND LIBUSB_LIBRARIES) + + # in cache already + set(LIBUSB_FOUND TRUE) + +else (LIBUSB_INCLUDE_DIR AND LIBUSB_LIBRARIES) + IF (NOT WIN32) + # use pkg-config to get the directories and then use these values + # in the FIND_PATH() and FIND_LIBRARY() calls + find_package(PkgConfig) + pkg_check_modules(PC_LIBUSB libusb-1.0) + ENDIF(NOT WIN32) + + FIND_PATH(LIBUSB_INCLUDE_DIR libusb.h + PATHS ${PC_LIBUSB_INCLUDEDIR} ${PC_LIBUSB_INCLUDE_DIRS}) + + FIND_LIBRARY(LIBUSB_LIBRARIES NAMES usb-1.0 + PATHS ${PC_LIBUSB_LIBDIR} ${PC_LIBUSB_LIBRARY_DIRS}) + + include(FindPackageHandleStandardArgs) + FIND_PACKAGE_HANDLE_STANDARD_ARGS(LIBUSB DEFAULT_MSG LIBUSB_LIBRARIES LIBUSB_INCLUDE_DIR) + + MARK_AS_ADVANCED(LIBUSB_INCLUDE_DIR LIBUSB_LIBRARIES) + +endif (LIBUSB_INCLUDE_DIR AND LIBUSB_LIBRARIES) diff --git a/third-party/libftdi/LICENSE b/third-party/libftdi/LICENSE new file mode 100644 index 0000000..f1831c0 --- /dev/null +++ b/third-party/libftdi/LICENSE @@ -0,0 +1,25 @@ +The C library "libftdi1" is distributed under the +GNU Library General Public License version 2. + +A copy of the GNU Library General Public License (LGPL) is included +in this distribution, in the file COPYING.LIB. + +---------------------------------------------------------------------- + +The C++ wrapper "ftdipp1" is distributed under the GNU General +Public License version 2 (with a special exception described below). + +A copy of the GNU General Public License (GPL) is included +in this distribution, in the file COPYING.GPL. 
+ +As a special exception, if other files instantiate templates or use macros +or inline functions from this file, or you compile this file and link it +with other works to produce a work based on this file, this file +does not by itself cause the resulting work to be covered +by the GNU General Public License. + +However the source code for this file must still be made available +in accordance with section (3) of the GNU General Public License. + +This exception does not invalidate any other reasons why a work based +on this file might be covered by the GNU General Public License. diff --git a/third-party/libftdi/README b/third-party/libftdi/README new file mode 100644 index 0000000..072b671 --- /dev/null +++ b/third-party/libftdi/README @@ -0,0 +1,52 @@ +-------------------------------------------------------------------- +libftdi version 1.4 +-------------------------------------------------------------------- + +libftdi - A library (using libusb) to talk to FTDI's UART/FIFO chips +including the popular bitbang mode. + +The following chips are supported: +* FT230X +- FT4232H / FT2232H +- FT232R / FT245R +- FT2232L / FT2232D / FT2232C +- FT232BM / FT245BM (and the BL/BQ variants) +- FT8U232AM / FT8U245AM + +libftdi requires libusb 1.x. + +The AUTHORS file contains a list of all the people +that made libftdi possible what it is today. + +Changes +------- +* New ftdi_usb_open_bus_addr() open function +* Use BM/R series baud rate computation for FT230X +* ftdi_get_error_string() now returns const char* +* C++ API: Ability to open devices with empty descriptor strings +* C++ API: Fix enumerations for buffer purge and modem controls +* small build fixes and improvements in the python examples +* ftdi_eeprom / eeprom handling: + * New API function: ftdi_eeprom_get_strings() + * Fix USE_SERIAL handling for 230X type chips + * Make ftdi_read_eeprom_location() endianness independent + * Fix flashing of FT245R + +You'll find the newest version of libftdi at: +https://www.intra2net.com/en/developer/libftdi + + +Quick start +----------- +mkdir build +cd build + +cmake -DCMAKE_INSTALL_PREFIX="/usr" ../ +make +make install + +More verbose build instructions are in "README.build" + +-------------------------------------------------------------------- +www.intra2net.com 2003-2017 Intra2net AG +-------------------------------------------------------------------- diff --git a/third-party/libftdi/README.build b/third-party/libftdi/README.build new file mode 100644 index 0000000..e130ca0 --- /dev/null +++ b/third-party/libftdi/README.build @@ -0,0 +1,96 @@ +Here is a short tutorial on how to build libftdi git under +Ubuntu 12.10, But it is similar on other Linux distros. + +1) Install the build tools +sudo apt-get install build-essential (yum install make automake gcc gcc-c++ kernel-devel) +sudo apt-get install git-core (yum install git) +sudo apt-get install cmake (yum install cmake) +sudo apt-get install doxygen (for building documentations) (yum install doxygen) + +2) Install dependencies +sudo apt-get install libusb-1.0-devel (yum install libusb-devel) +(if the system comes with older version like 1.0.8 or +earlier, it is recommended you build libusbx-1.0.14 or later). 
+ +sudo apt-get install libconfuse-dev (for ftdi-eeprom) (yum install libconfuse-devel) +sudo apt-get install swig python-dev (for python bindings) (yum install swig python-devel) +sudo apt-get install libboost-all-dev (for C++ binding and unit test) (yum install boost-devel) + +3) Clone the git repository +mkdir libftdi +cd libftdi +git clone git://developer.intra2net.com/libftdi + +If you are building the release tar ball, just extract the source +tar ball. + +4) Build the git source and install +cd libftdi +mkdir build +cd build +cmake -DCMAKE_INSTALL_PREFIX="/usr" ../ +make +sudo make install + +5) carry out some tests +cd examples + +mcuee@Ubuntu1210VM:~/Desktop/build/libftdi/libftdi/build/examples$ +./find_all_pp -v 0x0403 -p 0x6001 +Found devices ( VID: 0x403, PID: 0x6001 ) +------------------------------------------------ +FTDI (0x8730800): ftdi, usb serial converter, ftDEH51S (Open OK) +FTDI (0x8730918): FTDI, FT232R USB UART, A8007Ub5 (Open OK) + +mcuee@Ubuntu1210VM:~/Desktop/build/libftdi/libftdi/build/examples$ ./eeprom +2 FTDI devices found: Only Readout on EEPROM done. Use +VID/PID/desc/serial to select device +Decoded values of device 1: +Chip type 1 ftdi_eeprom_size: 128 +0x000: 00 00 03 04 01 60 00 04 a0 16 08 00 10 01 94 0a .....`.. ........ +0x010: 9e 2a c8 12 0a 03 66 00 74 00 64 00 69 00 2a 03 .*....f. t.d.i.*. +0x020: 75 00 73 00 62 00 20 00 73 00 65 00 72 00 69 00 u.s.b. . s.e.r.i. +0x030: 61 00 6c 00 20 00 63 00 6f 00 6e 00 76 00 65 00 a.l. .c. o.n.v.e. +0x040: 72 00 74 00 65 00 72 00 12 03 66 00 74 00 44 00 r.t.e.r. ..f.t.D. +0x050: 45 00 48 00 35 00 31 00 53 00 02 03 00 00 00 00 E.H.5.1. S....... +0x060: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ........ ........ +0x070: 00 00 00 00 00 00 00 00 00 00 00 00 01 00 16 02 ........ ........ +VID: 0x0403 +PID: 0x6001 +Release: 0x0400 +Bus Powered: 44 mA USB Remote Wake Up +Manufacturer: ftdi +Product: usb serial converter +Serial: ftDEH51S +Checksum : 0216 +Enable Remote Wake Up +PNP: 1 +Decoded values of device 2: +Chip type 3 ftdi_eeprom_size: 128 +0x000: 00 40 03 04 01 60 00 00 a0 2d 08 00 00 00 98 0a .@...`.. .-...... +0x010: a2 20 c2 12 23 10 05 00 0a 03 46 00 54 00 44 00 . ..#... ..F.T.D. +0x020: 49 00 20 03 46 00 54 00 32 00 33 00 32 00 52 00 I. .F.T. 2.3.2.R. +0x030: 20 00 55 00 53 00 42 00 20 00 55 00 41 00 52 00 .U.S.B. .U.A.R. +0x040: 54 00 12 03 41 00 38 00 30 00 30 00 37 00 55 00 T...A.8. 0.0.7.U. +0x050: 62 00 35 00 c9 bf 1c 80 00 00 00 00 00 00 00 00 b.5..... ........ +0x060: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ........ ........ +0x070: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 0f 23 ........ .......# +0x080: 2c 04 d3 fb 00 00 c9 bf 1c 80 42 00 00 00 00 00 ,....... ..B..... +0x090: 00 00 00 00 00 00 00 00 38 41 32 52 4a 33 47 4f ........ 8A2RJ3GO +VID: 0x0403 +PID: 0x6001 +Release: 0x0000 +Bus Powered: 90 mA USB Remote Wake Up +Manufacturer: FTDI +Product: FT232R USB UART +Serial: A8007Ub5 +Checksum : 230f +Internal EEPROM +Enable Remote Wake Up +PNP: 1 +Channel A has Mode UART VCP +C0 Function: TXLED +C1 Function: RXLED +C2 Function: TXDEN +C3 Function: PWREN +C4 Function: SLEEP diff --git a/third-party/libftdi/README.mingw b/third-party/libftdi/README.mingw new file mode 100644 index 0000000..771204c --- /dev/null +++ b/third-party/libftdi/README.mingw @@ -0,0 +1,38 @@ +* How to cross compile libftdi-1.x for Windows? 
* + 1 - Prepare a pkg-config wrapper according to + https://www.flameeyes.eu/autotools-mythbuster/pkgconfig/cross-compiling.html , + additionally export PKG_CONFIG_ALLOW_SYSTEM_CFLAGS and + PKG_CONFIG_ALLOW_SYSTEM_LIBS. + 2 - Write a CMake toolchain file according to + http://www.vtk.org/Wiki/CmakeMingw . Change the path to your future sysroot. + 3 - Get libusb sources (either by cloning the git repo or by downloading a + tarball). Unpack, autogen.sh (when building from git), and configure like this: + ./configure --build=`./config.guess` --host=i686-w64-mingw32 \ + --prefix=/usr --with-sysroot=$HOME/i686-w64-mingw32-root/ + 4 - run + make install DESTDIR=$HOME/i686-w64-mingw32-root/ + 5 - go to libftdi-1.x source directory and run + cmake -DCMAKE_TOOLCHAIN_FILE=~/Toolchain-mingw.cmake \ + -DCMAKE_INSTALL_PREFIX="/usr" \ + -DPKG_CONFIG_EXECUTABLE=`which i686-w64-mingw32-pkg-config` + 6 - run + make install DESTDIR=$HOME/i686-w64-mingw32-root/ + +* How to run libftdi 1.x under Windows * + +On 26-Jan-2014, libusbx and libusb project were merged with the release +of libusb-1.0.18 and now the project is called libusb. + +libusb Windows backend will need to rely on a proper driver to run. +Please refer to the following wiki page for proper driver installation. +https://github.com/libusb/libusb/wiki/Windows#wiki-How_to_use_libusb_on_Windows + +As of 26-Jan-2014, libusb Windows backend supports WinUSB, +libusb0.sys and libusbk.sys driver. However, libusb's support of +libusb0.sys and libusbk.sys is considered to be less mature than +WinUSB. Therefore, WinUSB driver installation using Zadig +is recommended. + +Take note once you replace the original FTDI driver with WinUSB driver, +you can no longer use the functionality the original FTDI driver provides +(eg. Virtual Serial Port or D2XX). diff --git a/third-party/libftdi/TODO b/third-party/libftdi/TODO new file mode 100644 index 0000000..517178c --- /dev/null +++ b/third-party/libftdi/TODO @@ -0,0 +1,3 @@ +*** TODO for 1.0 release *** +Documentation: +- Document the new EEPROM function diff --git a/third-party/libftdi/cmake/FindConfuse.cmake b/third-party/libftdi/cmake/FindConfuse.cmake new file mode 100644 index 0000000..ab25eef --- /dev/null +++ b/third-party/libftdi/cmake/FindConfuse.cmake @@ -0,0 +1,74 @@ +# libConfuse is a configuration file parser library +# available at http://www.nongnu.org/confuse/ +# +# The module defines the following variables: +# CONFUSE_FOUND - the system has Confuse +# CONFUSE_INCLUDE_DIR - where to find confuse.h +# CONFUSE_INCLUDE_DIRS - confuse includes +# CONFUSE_LIBRARY - where to find the Confuse library +# CONFUSE_LIBRARIES - aditional libraries +# CONFUSE_ROOT_DIR - root dir (ex. /usr/local) + +#============================================================================= +# Copyright 2010-2013, Julien Schueller +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# The views and conclusions contained in the software and documentation are those +# of the authors and should not be interpreted as representing official policies, +# either expressed or implied, of the FreeBSD Project. +#============================================================================= + + +find_path ( CONFUSE_INCLUDE_DIR + NAMES confuse.h +) + +set ( CONFUSE_INCLUDE_DIRS ${CONFUSE_INCLUDE_DIR} ) + +find_library ( CONFUSE_LIBRARY + NAMES confuse +) + +set ( CONFUSE_LIBRARIES ${CONFUSE_LIBRARY} ) + + +# try to guess root dir from include dir +if ( CONFUSE_INCLUDE_DIR ) + string ( REGEX REPLACE "(.*)/include.*" "\\1" CONFUSE_ROOT_DIR ${CONFUSE_INCLUDE_DIR} ) +# try to guess root dir from library dir +elseif ( CONFUSE_LIBRARY ) + string ( REGEX REPLACE "(.*)/lib[/|32|64].*" "\\1" CONFUSE_ROOT_DIR ${CONFUSE_LIBRARY} ) +endif () + + +# handle the QUIETLY and REQUIRED arguments +include ( FindPackageHandleStandardArgs ) +find_package_handle_standard_args( Confuse DEFAULT_MSG CONFUSE_LIBRARY CONFUSE_INCLUDE_DIR ) + +mark_as_advanced ( + CONFUSE_LIBRARY + CONFUSE_LIBRARIES + CONFUSE_INCLUDE_DIR + CONFUSE_INCLUDE_DIRS + CONFUSE_ROOT_DIR +) diff --git a/third-party/libftdi/cmake/FindLibintl.cmake b/third-party/libftdi/cmake/FindLibintl.cmake new file mode 100644 index 0000000..65ed91c --- /dev/null +++ b/third-party/libftdi/cmake/FindLibintl.cmake @@ -0,0 +1,47 @@ +# Try to find Libintl functionality +# Once done this will define +# +# LIBINTL_FOUND - system has Libintl +# LIBINTL_INCLUDE_DIR - Libintl include directory +# LIBINTL_LIBRARIES - Libraries needed to use Libintl +# +# TODO: This will enable translations only if Gettext functionality is +# present in libc. Must have more robust system for release, where Gettext +# functionality can also reside in standalone Gettext library, or the one +# embedded within kdelibs (cf. gettext.m4 from Gettext source). + +# Copyright (c) 2006, Chusslove Illich, +# Copyright (c) 2007, Alexander Neundorf, +# +# Redistribution and use is allowed according to the terms of the BSD license. +# For details see the accompanying COPYING-CMAKE-SCRIPTS file. 
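FindConfuse.cmake above only locates libConfuse and fills in the CONFUSE_* variables; wiring them into a target is left to the caller. A minimal consumer sketch, assuming the cmake/ directory containing this module has been appended to CMAKE_MODULE_PATH (the eeprom_tool target and main.c source are purely illustrative):

    # Make the bundled find modules visible, then locate libConfuse
    list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake")
    find_package(Confuse REQUIRED)

    include_directories(${CONFUSE_INCLUDE_DIRS})
    add_executable(eeprom_tool main.c)                  # placeholder target and source
    target_link_libraries(eeprom_tool ${CONFUSE_LIBRARIES})
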
+ +if(LIBINTL_INCLUDE_DIR AND LIBINTL_LIB_FOUND) + set(Libintl_FIND_QUIETLY TRUE) +endif(LIBINTL_INCLUDE_DIR AND LIBINTL_LIB_FOUND) + +find_path(LIBINTL_INCLUDE_DIR libintl.h) + +set(LIBINTL_LIB_FOUND FALSE) + +if(LIBINTL_INCLUDE_DIR) + include(CheckFunctionExists) + check_function_exists(dgettext LIBINTL_LIBC_HAS_DGETTEXT) + + if (LIBINTL_LIBC_HAS_DGETTEXT) + set(LIBINTL_LIBRARIES) + set(LIBINTL_LIB_FOUND TRUE) + else (LIBINTL_LIBC_HAS_DGETTEXT) + find_library(LIBINTL_LIBRARIES NAMES intl libintl ) + if(LIBINTL_LIBRARIES) + set(LIBINTL_LIB_FOUND TRUE) + endif(LIBINTL_LIBRARIES) + endif (LIBINTL_LIBC_HAS_DGETTEXT) + +endif(LIBINTL_INCLUDE_DIR) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(Libintl DEFAULT_MSG LIBINTL_INCLUDE_DIR LIBINTL_LIB_FOUND) + +mark_as_advanced(LIBINTL_INCLUDE_DIR LIBINTL_LIBRARIES LIBINTL_LIBC_HAS_DGETTEXT LIBINTL_LIB_FOUND) + diff --git a/third-party/libftdi/cmake/FindUSB1.cmake b/third-party/libftdi/cmake/FindUSB1.cmake new file mode 100644 index 0000000..b90e297 --- /dev/null +++ b/third-party/libftdi/cmake/FindUSB1.cmake @@ -0,0 +1,37 @@ +# - Try to find the freetype library +# Once done this defines +# +# LIBUSB_FOUND - system has libusb +# LIBUSB_INCLUDE_DIR - the libusb include directory +# LIBUSB_LIBRARIES - Link these to use libusb + +# Copyright (c) 2006, 2008 Laurent Montel, +# +# Redistribution and use is allowed according to the terms of the BSD license. +# For details see the accompanying COPYING-CMAKE-SCRIPTS file. + + +if (LIBUSB_INCLUDE_DIR AND LIBUSB_LIBRARIES) + + # in cache already + set(LIBUSB_FOUND TRUE) + +else (LIBUSB_INCLUDE_DIR AND LIBUSB_LIBRARIES) + # use pkg-config to get the directories and then use these values + # in the FIND_PATH() and FIND_LIBRARY() calls + find_package(PkgConfig) + pkg_check_modules(PC_LIBUSB libusb-1.0) + + FIND_PATH(LIBUSB_INCLUDE_DIR libusb.h + PATH_SUFFIXES libusb-1.0 + PATHS ${PC_LIBUSB_INCLUDEDIR} ${PC_LIBUSB_INCLUDE_DIRS}) + + FIND_LIBRARY(LIBUSB_LIBRARIES NAMES usb-1.0 + PATHS ${PC_LIBUSB_LIBDIR} ${PC_LIBUSB_LIBRARY_DIRS}) + + include(FindPackageHandleStandardArgs) + FIND_PACKAGE_HANDLE_STANDARD_ARGS(LIBUSB DEFAULT_MSG LIBUSB_LIBRARIES LIBUSB_INCLUDE_DIR) + + MARK_AS_ADVANCED(LIBUSB_INCLUDE_DIR LIBUSB_LIBRARIES) + +endif (LIBUSB_INCLUDE_DIR AND LIBUSB_LIBRARIES) diff --git a/third-party/libftdi/cmake/LibFTDI1Config.cmake.in b/third-party/libftdi/cmake/LibFTDI1Config.cmake.in new file mode 100644 index 0000000..ecc615e --- /dev/null +++ b/third-party/libftdi/cmake/LibFTDI1Config.cmake.in @@ -0,0 +1,53 @@ +# -*- cmake -*- +# +# LibFTDI1Config.cmake(.in) +# +# Copyright (C) 2013 Intra2net AG and the libftdi developers +# +# This file is part of LibFTDI. 
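FindUSB1.cmake above first asks pkg-config for hints and then falls back to find_path()/find_library(), caching LIBUSB_INCLUDE_DIR and LIBUSB_LIBRARIES for later runs. A sketch of how a target might consume those variables (the ftdi_core target and its source are assumptions, not taken from src/CMakeLists.txt):

    list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake")
    find_package(USB1)                     # runs FindUSB1.cmake and sets the LIBUSB_* variables
    if(NOT LIBUSB_FOUND)
        message(FATAL_ERROR "libusb-1.0 not found; install libusb-1.0-0-dev or equivalent")
    endif()

    include_directories(${LIBUSB_INCLUDE_DIR})
    add_library(ftdi_core ftdi.c)          # illustrative target and source
    target_link_libraries(ftdi_core ${LIBUSB_LIBRARIES})
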
+# +# LibFTDI is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License +# version 2.1 as published by the Free Software Foundation; +# + +# Use the following variables to compile and link against LibFTDI: +# LIBFTDI_FOUND - True if LibFTDI was found on your system +# LIBFTDI_USE_FILE - The file making LibFTDI usable +# LIBFTDI_DEFINITIONS - Definitions needed to build with LibFTDI +# LIBFTDI_INCLUDE_DIRS - Directory where ftdi.h can be found +# LIBFTDI_INCLUDE_DIRS - List of directories of LibFTDI and it's dependencies +# LIBFTDI_LIBRARY - LibFTDI library location +# LIBFTDI_LIBRARIES - List of libraries to link against LibFTDI library +# LIBFTDIPP_LIBRARY - LibFTDI C++ wrapper library location +# LIBFTDIPP_LIBRARIES - List of libraries to link against LibFTDI C++ wrapper +# LIBFTDI_LIBRARY_DIRS - List of directories containing LibFTDI' libraries +# LIBFTDI_ROOT_DIR - The base directory of LibFTDI +# LIBFTDI_VERSION_STRING - A human-readable string containing the version +# LIBFTDI_VERSION_MAJOR - The major version of LibFTDI +# LIBFTDI_VERSION_MINOR - The minor version of LibFTDI +# LIBFTDI_VERSION_PATCH - The patch version of LibFTDI +# LIBFTDI_PYTHON_MODULE_PATH - Path to the python module + +set ( LIBFTDI_FOUND 1 ) +set ( LIBFTDI_USE_FILE "@LIBFTDI_USE_FILE@" ) + +set ( LIBFTDI_DEFINITIONS "@LIBFTDI_DEFINITIONS@" ) +set ( LIBFTDI_INCLUDE_DIR "@LIBFTDI_INCLUDE_DIR@" ) +set ( LIBFTDI_INCLUDE_DIRS "@LIBFTDI_INCLUDE_DIRS@" ) +set ( LIBFTDI_LIBRARY "@LIBFTDI_LIBRARY@" ) +set ( LIBFTDI_LIBRARIES "@LIBFTDI_LIBRARIES@" ) +set ( LIBFTDI_STATIC_LIBRARY "@LIBFTDI_STATIC_LIBRARY@" ) +set ( LIBFTDI_STATIC_LIBRARIES "@LIBFTDI_STATIC_LIBRARIES@" ) +set ( LIBFTDIPP_LIBRARY "@LIBFTDIPP_LIBRARY@" ) +set ( LIBFTDIPP_LIBRARIES "@LIBFTDIPP_LIBRARIES@" ) +set ( LIBFTDI_LIBRARY_DIRS "@LIBFTDI_LIBRARY_DIRS@" ) +set ( LIBFTDI_ROOT_DIR "@LIBFTDI_ROOT_DIR@" ) + +set ( LIBFTDI_VERSION_STRING "@LIBFTDI_VERSION_STRING@" ) +set ( LIBFTDI_VERSION_MAJOR "@LIBFTDI_VERSION_MAJOR@" ) +set ( LIBFTDI_VERSION_MINOR "@LIBFTDI_VERSION_MINOR@" ) +set ( LIBFTDI_VERSION_PATCH "@LIBFTDI_VERSION_PATCH@" ) + +set ( LIBFTDI_PYTHON_MODULE_PATH "@LIBFTDI_PYTHON_MODULE_PATH@" ) + diff --git a/third-party/libftdi/cmake/LibFTDI1ConfigVersion.cmake.in b/third-party/libftdi/cmake/LibFTDI1ConfigVersion.cmake.in new file mode 100644 index 0000000..ed0d463 --- /dev/null +++ b/third-party/libftdi/cmake/LibFTDI1ConfigVersion.cmake.in @@ -0,0 +1,31 @@ +# This is a basic version file for the Config-mode of find_package(). +# It is used by write_basic_package_version_file() as input file for configure_file() +# to create a version-file which can be installed along a config.cmake file. +# +# The created file sets PACKAGE_VERSION_EXACT if the current version string and +# the requested version string are exactly the same and it sets +# PACKAGE_VERSION_COMPATIBLE if the current version is >= requested version. +# The variable CVF_VERSION must be set before calling configure_file(). 
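The variables exported by LibFTDI1Config.cmake.in are meant to be consumed through find_package() in config mode, with the version file that follows enforcing any requested version. A hedged sketch of an external application build (the 1.0 version constraint, my_app target and main.cpp source are arbitrary examples); the LIBFTDI_USE_FILE it includes is cmake/UseLibFTDI1.cmake, shown further below:

    # In a project that links against an installed libftdi-1.x
    find_package(LibFTDI1 1.0 REQUIRED)      # version accepted/rejected by LibFTDI1ConfigVersion.cmake
    include(${LIBFTDI_USE_FILE})             # adds definitions, include and link directories

    add_executable(my_app main.cpp)          # placeholder target and source
    target_link_libraries(my_app ${LIBFTDI_LIBRARIES})
    message(STATUS "Building against libftdi ${LIBFTDI_VERSION_STRING}")
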
+ +set(PACKAGE_VERSION "@LIBFTDI_VERSION_STRING@") + +if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}" ) + set(PACKAGE_VERSION_COMPATIBLE FALSE) +else() + set(PACKAGE_VERSION_COMPATIBLE TRUE) + if( "${PACKAGE_FIND_VERSION}" STREQUAL "${PACKAGE_VERSION}") + set(PACKAGE_VERSION_EXACT TRUE) + endif() +endif() + +# if the installed or the using project don't have CMAKE_SIZEOF_VOID_P set, ignore it: +if("${CMAKE_SIZEOF_VOID_P}" STREQUAL "" OR "8" STREQUAL "") + return() +endif() + +# check that the installed version has the same 32/64bit-ness as the one which is currently searching: +if(NOT "${CMAKE_SIZEOF_VOID_P}" STREQUAL "8") + math(EXPR installedBits "8 * 8") + set(PACKAGE_VERSION "${PACKAGE_VERSION} (${installedBits}bit)") + set(PACKAGE_VERSION_UNSUITABLE TRUE) +endif() diff --git a/third-party/libftdi/cmake/Toolchain-Crossbuild32.cmake b/third-party/libftdi/cmake/Toolchain-Crossbuild32.cmake new file mode 100644 index 0000000..3c80dd8 --- /dev/null +++ b/third-party/libftdi/cmake/Toolchain-Crossbuild32.cmake @@ -0,0 +1,4 @@ +set(CMAKE_SYSTEM_NAME Linux) +set(CMAKE_C_COMPILER gcc -m32) +set(CMAKE_CXX_COMPILER g++ -m32) +set(CMAKE_FIND_ROOT_PATH /usr/lib) diff --git a/third-party/libftdi/cmake/Toolchain-i686-w64-mingw32.cmake b/third-party/libftdi/cmake/Toolchain-i686-w64-mingw32.cmake new file mode 100644 index 0000000..1eed0cf --- /dev/null +++ b/third-party/libftdi/cmake/Toolchain-i686-w64-mingw32.cmake @@ -0,0 +1,17 @@ +# the name of the target operating system +SET(CMAKE_SYSTEM_NAME Windows) + +# which compilers to use for C and C++ +SET(CMAKE_C_COMPILER i686-w64-mingw32-gcc) +SET(CMAKE_CXX_COMPILER i686-w64-mingw32-g++) + +# here is the target environment located +SET(CMAKE_FIND_ROOT_PATH /usr/i686-w64-mingw32 ) + +# adjust the default behaviour of the FIND_XXX() commands: +# search headers and libraries in the target environment, search +# programs in the host environment +set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) +set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) +set (CMAKE_RC_COMPILER i686-w64-mingw32-windres) diff --git a/third-party/libftdi/cmake/Toolchain-mingw32.cmake b/third-party/libftdi/cmake/Toolchain-mingw32.cmake new file mode 100644 index 0000000..bff3cba --- /dev/null +++ b/third-party/libftdi/cmake/Toolchain-mingw32.cmake @@ -0,0 +1,16 @@ +# the name of the target operating system +SET(CMAKE_SYSTEM_NAME Windows) + +# which compilers to use for C and C++ +SET(CMAKE_C_COMPILER i386-mingw32msvc-gcc) +SET(CMAKE_CXX_COMPILER i386-mingw32msvc-g++) + +# here is the target environment located +SET(CMAKE_FIND_ROOT_PATH /opt/cross/i386-mingw32msvc ) + +# adjust the default behaviour of the FIND_XXX() commands: +# search headers and libraries in the target environment, search +# programs in the host environment +set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) +set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) diff --git a/third-party/libftdi/cmake/Toolchain-x86_64-w64-mingw32.cmake b/third-party/libftdi/cmake/Toolchain-x86_64-w64-mingw32.cmake new file mode 100644 index 0000000..20a5b40 --- /dev/null +++ b/third-party/libftdi/cmake/Toolchain-x86_64-w64-mingw32.cmake @@ -0,0 +1,17 @@ +# the name of the target operating system +SET(CMAKE_SYSTEM_NAME Windows) + +# which compilers to use for C and C++ +SET(CMAKE_C_COMPILER x86_64-w64-mingw32-gcc) +SET(CMAKE_CXX_COMPILER x86_64-w64-mingw32-g++) + +# here is the target environment located +SET(CMAKE_FIND_ROOT_PATH /usr/x86_64-w64-mingw32 ) + +# 
adjust the default behaviour of the FIND_XXX() commands: +# search headers and libraries in the target environment, search +# programs in the host environment +set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) +set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) +set (CMAKE_RC_COMPILER x86_64-w64-mingw32-windres) diff --git a/third-party/libftdi/cmake/UseLibFTDI1.cmake b/third-party/libftdi/cmake/UseLibFTDI1.cmake new file mode 100644 index 0000000..e7e43b0 --- /dev/null +++ b/third-party/libftdi/cmake/UseLibFTDI1.cmake @@ -0,0 +1,18 @@ +# -*- cmake -*- +# +# UseLibFTDI.cmake +# +# Copyright (C) 2013 Intra2net AG and the libftdi developers +# +# This file is part of LibFTDI. +# +# LibFTDI is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License +# version 2.1 as published by the Free Software Foundation; +# + + +add_definitions ( ${LIBFTDI_DEFINITIONS} ) +include_directories ( ${LIBFTDI_INCLUDE_DIRS} ) +link_directories ( ${LIBFTDI_LIBRARY_DIRS} ) + diff --git a/third-party/libftdi/doc/Doxyfile.in b/third-party/libftdi/doc/Doxyfile.in new file mode 100644 index 0000000..84006c8 --- /dev/null +++ b/third-party/libftdi/doc/Doxyfile.in @@ -0,0 +1,2393 @@ +# Doxyfile 1.8.10 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all text +# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv +# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv +# for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = @PACKAGE@ + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = @VERSION@ + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. 
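This Doxyfile is a template: @PACKAGE@, @VERSION@ and the @top_srcdir@ references further down are substituted at configure time. The fragment below is a hypothetical sketch of how a doc/CMakeLists.txt might instantiate it and add a docs target; it is not taken from the actual file, and PACKAGE/VERSION are assumed to be defined by the top-level build.

    # Hypothetical doc/CMakeLists.txt fragment
    # PACKAGE and VERSION are assumed to be set by the top-level CMakeLists.txt
    set(top_srcdir ${CMAKE_SOURCE_DIR})
    configure_file(${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in
                   ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile @ONLY)   # fills @PACKAGE@, @VERSION@, @top_srcdir@

    find_package(Doxygen)
    if(DOXYGEN_FOUND)
        add_custom_target(docs
            COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
            WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
            COMMENT "Generating libftdi API documentation with Doxygen")
    endif()
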
+ +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = doc + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. 
+ +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = NO + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. 
If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines. + +ALIASES = + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding "class=itcl::class" +# will allow you to use the command class in the itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, Javascript, +# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: +# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: +# Fortran. In the later case the parser tries to guess whether the code is fixed +# or free formatted code, this is the default for Fortran type files), VHDL. For +# instance to make doxygen treat .inc files as Fortran files (default is PHP), +# and .f files as C (default is Fortran), use: inc=Fortran f=C. +# +# Note: For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. + +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. 
+# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. + +MARKDOWN_SUPPORT = YES + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# If one adds a struct or class to a group and this option is enabled, then also +# any nested class or struct is added to the same group. By default this option +# is disabled and one has to add nested compounds explicitly via \ingroup. +# The default value is: NO. + +GROUP_NESTED_COMPOUNDS = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. 
+ +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. 
If set to YES, local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO, only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = YES + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO, these classes will be included in the various overviews. This option +# has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = YES + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# (class|struct|union) declarations. If set to NO, these declarations will be +# included in the documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO, these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file +# names in lower-case letters. If set to YES, upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. +# The default value is: system dependent. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES, the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. + +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. 
+# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = NO + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. This list is created by putting \test commands in the documentation. +# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. 
+ +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES, the +# list will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command command input-file, where command is the value of the +# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. 
See also \cite for info how to create references. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = YES + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. + +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO, doxygen will only warn about wrong or incomplete +# parameter documentation, but not about the absence of documentation. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING +# Note: If this tag is empty the current directory is searched. + +INPUT = @top_srcdir@/src \ + @top_srcdir@/ftdipp + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: http://www.gnu.org/software/libiconv) for the list of +# possible encodings. +# The default value is: UTF-8. 
+ +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# read by doxygen. +# +# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, +# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, +# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, +# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, +# *.vhdl, *.ucf, *.qsf, *.as and *.js. + +FILE_PATTERNS = + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = NO + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). + +EXAMPLE_PATH = @top_srcdir@/examples + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude commands +# irrespective of the value of the RECURSIVE tag. +# The default value is: NO. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or directories +# that contain images that are to be included in the documentation (see the +# \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. 
Doxygen will invoke the filter program +# by executing (via popen()) the command: +# +# +# +# where is the value of the INPUT_FILTER tag, and is the +# name of an input file. Doxygen will then use the output that the filter +# program writes to standard output. If FILTER_PATTERNS is specified, this tag +# will be ignored. +# +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: pattern=filter +# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how +# filters are used. If the FILTER_PATTERNS tag is empty or if none of the +# patterns match the file name, INPUT_FILTER is applied. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = YES + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# function all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = YES + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. 
+ +REFERENCES_RELATION = YES + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. + +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see http://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the config file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = NO + +# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in +# which the alphabetical index list will be split. +# Minimum value: 1, maximum value: 20, default value: 5. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output +# The default value is: YES. 
+ +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML you need a header file that includes any scripts and style +# sheets that doxygen needs, which is dependent on the configuration options +# used (e.g. the setting GENERATE_TREEVIEW). It is highly recommended to start +# with a default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. +# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). For an example see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES.
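+# A minimal sketch of how extra sheets would be listed (the file names below +# are hypothetical and are not shipped with libftdi): +# HTML_EXTRA_STYLESHEET = custom.css dark.css +# Because dark.css is listed last, its rules override those in custom.css, +# which is the ordering behaviour described above.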
+ +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the style sheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# http://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to YES can help to show when doxygen was last run and thus if the +# documentation is up to date. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. 
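+# Worked examples of the special values described above (shown here only as +# commented-out alternatives, not changes to this configuration): +# HTML_INDEX_NUM_ENTRIES = 1 would start every index tree fully collapsed, +# while HTML_INDEX_NUM_ENTRIES = 0 would start it fully expanded.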
+ +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: http://developer.apple.com/tools/xcode/), introduced with +# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a +# Makefile in the HTML output directory. Running make will produce the docset in +# that directory and running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler (hhc.exe). 
If non-empty, +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated +# (YES) or that it should be included in the master .chm file (NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- +# folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. 
Qt Help Project / Filter Attributes (see: +# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location of Qt's +# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the +# generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = NO + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. +# This tag requires that the tag GENERATE_HTML is set to YES. 
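+# For instance (alternative values, shown only as commented-out examples): +# ENUM_VALUES_PER_LINE = 1 would place each enum value on its own row, and +# ENUM_VALUES_PER_LINE = 0 would hide the values from the overview entirely, as +# noted above.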
+ +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 250 + +# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANSPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are not +# supported properly for IE 6.0, but are supported on all modern browsers. +# +# Note that when changing this option you need to delete any form_*.png files in +# the HTML output directory before the changes have effect. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_TRANSPARENT = YES + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see +# http://www.mathjax.org) which uses client side Javascript for the rendering +# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX +# installed or if you want formulas to look prettier in the HTML output. When +# enabled you may also need to install MathJax separately and configure the path +# to it using the MATHJAX_RELPATH option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +USE_MATHJAX = NO + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. See the MathJax site (see: +# http://docs.mathjax.org/en/latest/output.html) for more details. +# Possible values are: HTML-CSS (which is slower, but has the best +# compatibility), NativeMML (i.e. MathML) and SVG. +# The default value is: HTML-CSS. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the HTML +# output directory using the MATHJAX_RELPATH option. The destination directory +# should contain the MathJax.js script. For instance, if the mathjax directory +# is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax +# Content Delivery Network so you can quickly see the result without installing +# MathJax. However, it is strongly recommended to install a local copy of +# MathJax from http://www.mathjax.org before deployment. +# The default value is: http://cdn.mathjax.org/mathjax/latest. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_RELPATH = http://www.mathjax.org/mathjax + +# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax +# extension names that should be enabled during MathJax rendering.
For example +# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces +# of code that will be used on startup of the MathJax code. See the MathJax site +# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an +# example see the documentation. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box for +# the HTML output. The underlying search engine uses javascript and DHTML and +# should work on any modern browser. Note that when using HTML help +# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) +# there is already a search function so this one should typically be disabled. +# For large projects the javascript based search engine can be slow, then +# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to +# search using the keyboard; to jump to the search box use <access key> + S +# (what the <access key> is depends on the OS and browser, but it is typically +# <CTRL>, <ALT>/