Driver: Block between read attempts

Driver:
* Refactored to limit accessibility of member fields
Communication:
* readTask() now calls for a blocking wait

parent f25a0a4a81
commit 75af3220b0
@@ -276,7 +276,7 @@ void Communication::readTask() {
 			std::unique_lock<std::mutex> lk(pauseReadTaskMutex);
 			pauseReadTaskCv.wait(lk, [this]() { return !pauseReadTask; });
 		}
-		if(driver->readAvailable()) {
+		if(driver->waitForRx(readTaskWakeLimit, readTaskWakeTimeout)) {
 			if(pauseReadTask) {
 				/**
 				 * Reads could have paused while the driver was not available
@@ -11,13 +11,21 @@ using namespace icsneo;
 bool Driver::pushRx(const uint8_t* buf, size_t numReceived) {
 	bool ret = readBuffer.write(buf, numReceived);

-	if(hasRxWaitRequest) {
-		rxWaitRequestCv.notify_one();
-	}
+	rxWaitCv.notify_all();

 	return ret;
 }

+void Driver::clearBuffers()
+{
+	WriteOperation flushop;
+
+	readBuffer.clear();
+	rxWaitCv.notify_all();
+
+	while (writeQueue.try_dequeue(flushop)) {}
+}
+
 bool Driver::waitForRx(size_t limit, std::chrono::milliseconds timeout) {
 	return waitForRx([limit, this]() {
 		return readBuffer.size() >= limit;

@@ -26,13 +34,7 @@ bool Driver::waitForRx(size_t limit, std::chrono::milliseconds timeout) {

 bool Driver::waitForRx(std::function<bool()> predicate, std::chrono::milliseconds timeout) {
 	std::unique_lock<std::mutex> lk(rxWaitMutex);
-	hasRxWaitRequest = true;
-
-	auto ret = rxWaitRequestCv.wait_for(lk, timeout, predicate);
-
-	hasRxWaitRequest = false;
-
-	return ret;
+	return rxWaitCv.wait_for(lk, timeout, predicate);
 }

 bool Driver::readWait(std::vector<uint8_t>& bytes, std::chrono::milliseconds timeout, size_t limit) {

@@ -92,4 +94,4 @@ bool Driver::write(const std::vector<uint8_t>& bytes) {
 		report(APIEvent::Type::Unknown, APIEvent::Severity::Error);

 	return ret;
 }
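Note: the rewritten pushRx()/waitForRx() pair above is a plain condition-variable handshake: the producer writes into the ring buffer and calls notify_all(), while the consumer blocks in wait_for() with a predicate over the buffered size and a timeout (Communication::readTask passes readTaskWakeLimit and readTaskWakeTimeout for these). A minimal self-contained sketch of the same pattern, with a std::vector standing in for the project's RingBuffer and the buffer mutation done under the lock for simplicity:

#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

// Stand-in for the driver's rx path: pushRx() appends bytes and wakes waiters,
// waitForRx() blocks until `limit` bytes are buffered or `timeout` expires.
class RxBuffer {
public:
	void pushRx(const uint8_t* buf, size_t numReceived) {
		{
			std::lock_guard<std::mutex> lk(mutex);
			data.insert(data.end(), buf, buf + numReceived);
		}
		cv.notify_all(); // wake every thread parked in waitForRx()
	}

	bool waitForRx(size_t limit, std::chrono::milliseconds timeout) {
		std::unique_lock<std::mutex> lk(mutex);
		// Returns false if the timeout expires with the predicate still false;
		// spurious wakeups are handled by re-evaluating the predicate.
		return cv.wait_for(lk, timeout, [&] { return data.size() >= limit; });
	}

private:
	std::mutex mutex;
	std::condition_variable cv;
	std::vector<uint8_t> data;
};

int main() {
	RxBuffer rx;
	std::thread producer([&] {
		std::this_thread::sleep_for(std::chrono::milliseconds(50));
		const uint8_t bytes[4] = {1, 2, 3, 4};
		rx.pushRx(bytes, sizeof(bytes));
	});
	// Blocks until 4 bytes arrive or 1000 ms pass, whichever comes first.
	std::cout << (rx.waitForRx(4, std::chrono::milliseconds(1000)) ? "data ready" : "timed out") << "\n";
	producer.join();
	return 0;
}

One difference from the real Driver: there, pushRx() writes to the lock-free RingBuffer without holding rxWaitMutex, so a notification can race ahead of a waiter that is about to block and the waiter then relies on the timeout; the sketch sidesteps that by mutating under the lock.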
@@ -87,6 +87,9 @@ public:
 	std::unique_ptr<Driver> driver;
 	device_eventhandler_t report;

+	size_t readTaskWakeLimit = 1;
+	std::chrono::milliseconds readTaskWakeTimeout = std::chrono::milliseconds(1000);
+
 protected:
 	static int messageCallbackIDCounter;
 	std::mutex messageCallbacksLock;
@@ -24,9 +24,11 @@ public:
 	virtual bool isOpen() = 0;
 	virtual void modeChangeIncoming() {}
 	virtual void awaitModeChangeComplete() {}
-	virtual bool isDisconnected() { return disconnected; };
 	virtual bool close() = 0;

+	inline bool isDisconnected() const { return disconnected; };
+	inline bool isClosing() const { return closing; }
+
 	bool waitForRx(size_t limit, std::chrono::milliseconds timeout);
 	bool waitForRx(std::function<bool()> predicate, std::chrono::milliseconds timeout);
 	bool readWait(std::vector<uint8_t>& bytes, std::chrono::milliseconds timeout = std::chrono::milliseconds(100), size_t limit = 0);

@@ -52,8 +54,8 @@ protected:
 		WAIT
 	};

-	virtual void readTask() = 0;
-	virtual void writeTask() = 0;
+	inline void setIsClosing(bool isClosing) { closing = isClosing; }
+	inline void setIsDisconnected(bool isDisconnected) { disconnected = isDisconnected; }

 	// Overridable in case the driver doesn't want to use writeTask and writeQueue
 	virtual bool writeQueueFull() { return writeQueue.size_approx() > writeQueueSize; }

@@ -61,13 +63,15 @@ protected:
 	virtual bool writeInternal(const std::vector<uint8_t>& b) { return writeQueue.enqueue(WriteOperation(b)); }

 	bool pushRx(const uint8_t* buf, size_t numReceived);
-	RingBuffer readBuffer = RingBuffer(ICSNEO_DRIVER_RINGBUFFER_SIZE);
-	std::atomic<bool> hasRxWaitRequest = false;
-	std::condition_variable rxWaitRequestCv;
-	std::mutex rxWaitMutex;
+	void clearBuffers();

 	moodycamel::BlockingConcurrentQueue<WriteOperation> writeQueue;
-	std::thread readThread, writeThread;

+private:
+	RingBuffer readBuffer = RingBuffer(ICSNEO_DRIVER_RINGBUFFER_SIZE);
+	std::condition_variable rxWaitCv;
+	std::mutex rxWaitMutex;
+
 	std::atomic<bool> closing{false};
 	std::atomic<bool> disconnected{false};
 };
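Note: with readBuffer, rxWaitCv, and rxWaitMutex moved to a private section and readTask()/writeTask() no longer declared on the base class, a concrete driver touches shared state only through the protected helpers shown above (setIsClosing(), setIsDisconnected(), pushRx(), clearBuffers()) and owns its own threads. A rough sketch of the shape a derived driver takes under this interface; the class name and readFromHardware() are hypothetical placeholders, open() is assumed virtual as it is in the concrete drivers, and the snippet only compiles against the project's headers:

// Hypothetical driver (sketch only) built against the refactored Driver base.
class ExampleDriver : public Driver {
public:
	using Driver::Driver; // inherit the base-class constructor

	bool open() /* override, assuming Driver declares open() virtual */ {
		setIsClosing(false);
		readThread = std::thread(&ExampleDriver::readTask, this);
		return true;
	}

	bool close() override {
		setIsClosing(true); // readTask() observes isClosing() and exits
		if(readThread.joinable())
			readThread.join();
		clearBuffers(); // drop buffered rx bytes and any queued writes
		setIsClosing(false);
		setIsDisconnected(false);
		return true;
	}

	bool isOpen() override { return readThread.joinable(); }

private:
	void readTask() {
		uint8_t buf[64];
		while(!isClosing() && !isDisconnected()) {
			const int received = readFromHardware(buf, sizeof(buf)); // placeholder I/O
			if(received > 0)
				pushRx(buf, static_cast<size_t>(received)); // wakes anyone blocked in waitForRx()
		}
	}

	int readFromHardware(uint8_t*, size_t) { return 0; } // stub for the sketch

	std::thread readThread;
};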
@@ -22,8 +22,10 @@ public:
 private:
 	neodevice_t& device;
 	std::optional<void*> handle;
-	void readTask() override;
-	void writeTask() override;
+	std::thread readThread, writeThread;
+
+	void readTask();
+	void writeTask();
 };

 }
@@ -47,8 +47,10 @@ private:

 	static std::string HandleToTTY(neodevice_handle_t handle);

-	void readTask() override;
-	void writeTask() override;
+	std::thread readThread, writeThread;
+
+	void readTask();
+	void writeTask();

 	bool fdIsValid();
 };
@@ -25,8 +25,11 @@ public:
 	bool isOpen() override;
 	bool close() override;
 private:
-	void readTask() override;
-	void writeTask() override;
+	std::thread readThread, writeThread;
+
+	void readTask();
+	void writeTask();
+
 	bool writeQueueFull() override;
 	bool writeQueueAlmostFull() override;
 	bool writeInternal(const std::vector<uint8_t>& bytes) override;
@@ -57,8 +57,11 @@ private:
 	static std::vector<std::string> handles;

 	static bool ErrorIsDisconnection(int errorCode);
+	std::thread readThread, writeThread;
+
 	void readTask();
 	void writeTask();

 	bool openable; // Set to false in the constructor if the object has not been found in searchResultDevices

 	neodevice_t& device;
@@ -30,8 +30,10 @@ private:
 	uint8_t deviceMAC[6];
 	bool openable = true;
 	EthernetPacketizer ethPacketizer;
-	void readTask() override;
-	void writeTask() override;
+	std::thread readThread, writeThread;
+
+	void readTask();
+	void writeTask();

 	class NetworkInterface {
 	public:
@@ -47,8 +47,10 @@ private:
 	uint32_t dstIP;
 	uint16_t dstPort;
 	std::unique_ptr<Socket> socket;
-	void readTask() override;
-	void writeTask() override;
+	std::thread readThread, writeThread;
+
+	void readTask();
+	void writeTask();
 };

 }
@@ -32,6 +32,7 @@ private:
 	bool openable = true;
 	EthernetPacketizer ethPacketizer;

+	std::thread readThread, writeThread;
 	std::thread transmitThread;
 	pcap_send_queue* transmitQueue = nullptr;
 	std::condition_variable transmitQueueCV;
@@ -37,6 +37,7 @@ private:
 	std::shared_ptr<Detail> detail;

 	std::vector<std::shared_ptr<std::thread>> threads;
+	std::thread readThread, writeThread;
 	void readTask();
 	void writeTask();
 };
@@ -64,7 +64,7 @@ bool FTD3XX::open() {
 	}
 	handle.emplace(tmpHandle);

-	closing = false;
+	setIsClosing(false);
 	readThread = std::thread(&FTD3XX::readTask, this);
 	writeThread = std::thread(&FTD3XX::writeTask, this);
@@ -81,23 +81,21 @@ bool FTD3XX::close() {
 		return false;
 	}

-	closing = true;
-	disconnected = false;
+	setIsClosing(true);
+	setIsDisconnected(false);

 	if(readThread.joinable())
 		readThread.join();
 	if(writeThread.joinable())
 		writeThread.join();

-	WriteOperation flushop;
-	readBuffer.pop(readBuffer.size());
-	while(writeQueue.try_dequeue(flushop)) {}
+	clearBuffers();

 	if(const auto ret = FT_Close(*handle); ret != FT_OK) {
 		addEvent(ret, APIEvent::Severity::EventWarning);
 	}

-	closing = false;
+	setIsClosing(false);

 	return true;
 }
@@ -110,7 +108,7 @@ void FTD3XX::readTask() {

 	FT_SetStreamPipe(*handle, false, false, READ_PIPE_ID, bufferSize);
 	FT_SetPipeTimeout(*handle, READ_PIPE_ID, 1);
-	while(!closing && !isDisconnected()) {
+	while(!isClosing() && !isDisconnected()) {
 		ULONG received = 0;
 		OVERLAPPED overlap = {};
 		FT_InitializeOverlapped(*handle, &overlap);

@@ -119,13 +117,13 @@ void FTD3XX::readTask() {
 		#else
 		FT_ReadPipeAsync(*handle, 0, buffer, bufferSize, &received, &overlap);
 		#endif
-		while(!closing) {
+		while(!isClosing()) {
 			const auto ret = FT_GetOverlappedResult(*handle, &overlap, &received, true);
 			if(ret == FT_IO_PENDING)
 				continue;
 			if(ret != FT_OK) {
 				if(ret == FT_IO_ERROR) {
-					disconnected = true;
+					setIsDisconnected(true);
 					report(APIEvent::Type::DeviceDisconnected, APIEvent::Severity::Error);
 				} else {
 					addEvent(ret, APIEvent::Severity::Error);
@@ -146,7 +144,7 @@ void FTD3XX::writeTask() {

 	FT_SetPipeTimeout(*handle, WRITE_PIPE_ID, 100);
 	WriteOperation writeOp;
-	while(!closing && !isDisconnected()) {
+	while(!isClosing() && !isDisconnected()) {
 		if(!writeQueue.wait_dequeue_timed(writeOp, std::chrono::milliseconds(100)))
 			continue;

@@ -160,13 +158,13 @@ void FTD3XX::writeTask() {
 		#else
 		FT_WritePipeAsync(*handle, 0, writeOp.bytes.data(), size, &sent, &overlap);
 		#endif
-		while(!closing) {
+		while(!isClosing()) {
 			const auto ret = FT_GetOverlappedResult(*handle, &overlap, &sent, true);
 			if(ret == FT_IO_PENDING)
 				continue;
 			if(ret != FT_OK) {
 				if(ret == FT_IO_ERROR) {
-					disconnected = true;
+					setIsDisconnected(true);
 					report(APIEvent::Type::DeviceDisconnected, APIEvent::Severity::Error);
 				} else {
 					addEvent(ret, APIEvent::Severity::Error);
@@ -117,7 +117,7 @@ bool CDCACM::close() {
 		return false;
 	}

-	closing = true;
+	setIsClosing(true);

 	if(readThread.joinable())
 		readThread.join();

@@ -125,8 +125,8 @@ bool CDCACM::close() {
 	if(writeThread.joinable())
 		writeThread.join();

-	closing = false;
-	disconnected = false;
+	setIsClosing(false);
+	setIsDisconnected(false);

 	if(modeChanging) {
 		// We're expecting this inode to go away after we close the device

@@ -140,9 +140,7 @@ bool CDCACM::close() {
 	int ret = ::close(fd);
 	fd = -1;

-	WriteOperation flushop;
-	readBuffer.clear();
-	while (writeQueue.try_dequeue(flushop)) {}
+	clearBuffers();

 	if(modeChanging) {
 		modeChanging = false;
@@ -173,7 +171,7 @@ void CDCACM::readTask() {
 	constexpr size_t READ_BUFFER_SIZE = 2048;
 	uint8_t readbuf[READ_BUFFER_SIZE];
 	EventManager::GetInstance().downgradeErrorsOnCurrentThread();
-	while(!closing && !isDisconnected()) {
+	while(!isClosing() && !isDisconnected()) {
 		fd_set rfds = {0};
 		struct timeval tv = {0};
 		FD_SET(fd, &rfds);

@@ -199,8 +197,8 @@ void CDCACM::readTask() {
 				// Requesting thread is responsible for calling close. This allows for more flexibility
 			});
 			break;
-		} else if(!closing && !fdIsValid() && !isDisconnected()) {
-			disconnected = true;
+		} else if(!isClosing() && !fdIsValid() && !isDisconnected()) {
+			setIsDisconnected(true);
 			report(APIEvent::Type::DeviceDisconnected, APIEvent::Severity::Error);
 		}
 	}
@@ -210,7 +208,7 @@ void CDCACM::readTask() {
 void CDCACM::writeTask() {
 	WriteOperation writeOp;
 	EventManager::GetInstance().downgradeErrorsOnCurrentThread();
-	while(!closing && !isDisconnected()) {
+	while(!isClosing() && !isDisconnected()) {
 		if(!writeQueue.wait_dequeue_timed(writeOp, std::chrono::milliseconds(100)))
 			continue;

@@ -233,7 +231,7 @@ void CDCACM::writeTask() {
 		} else if (actualWritten < 0) {
 			if(!fdIsValid()) {
 				if(!isDisconnected()) {
-					disconnected = true;
+					setIsDisconnected(true);
 					report(APIEvent::Type::DeviceDisconnected, APIEvent::Severity::Error);
 				}
 			} else
@@ -166,13 +166,13 @@ bool FirmIO::close() {
 		return false;
 	}

-	closing = true;
+	setIsClosing(true);

 	if(readThread.joinable())
 		readThread.join();

-	closing = false;
-	disconnected = false;
+	setIsClosing(false);
+	setIsDisconnected(false);

 	int ret = 0;
 	if(vbase != nullptr) {
@@ -202,7 +202,7 @@ void FirmIO::readTask() {
 		std::cerr << "FirmIO::readTask setpriority failed : " << strerror(errno) << std::endl;
 	}

-	while(!closing && !isDisconnected()) {
+	while(!isClosing() && !isDisconnected()) {
 		fd_set rfds = {0};
 		struct timeval tv = {0};
 		FD_SET(fd, &rfds);

@@ -244,7 +244,7 @@ void FirmIO::readTask() {
 				uint8_t* addr = reinterpret_cast<uint8_t*>(msg.payload.data.addr - PHY_ADDR_BASE + vbase);
 				while (!pushRx(addr, msg.payload.data.len)) {
 					std::this_thread::sleep_for(std::chrono::milliseconds(1)); // back-off so reading thread can empty the buffer
-					if (closing || isDisconnected()) {
+					if (isClosing() || isDisconnected()) {
 						break;
 					}
 				}
@@ -81,7 +81,7 @@ bool FTDI::open() {
 	ftdi.flush();

 	// Create threads
-	closing = false;
+	setIsClosing(false);
 	readThread = std::thread(&FTDI::readTask, this);
 	writeThread = std::thread(&FTDI::writeTask, this);
@@ -94,7 +94,7 @@ bool FTDI::close() {
 		return false;
 	}

-	closing = true;
+	setIsClosing(true);

 	if(readThread.joinable())
 		readThread.join();

@@ -109,12 +109,10 @@ bool FTDI::close() {
 		report(APIEvent::Type::DriverFailedToClose, APIEvent::Severity::Error);
 	}

-	WriteOperation flushop;
-	readBuffer.clear();
-	while(writeQueue.try_dequeue(flushop)) {}
+	clearBuffers();

-	closing = false;
-	disconnected = false;
+	setIsClosing(false);
+	setIsDisconnected(false);
 	return ret;
 }
@@ -202,12 +200,12 @@ void FTDI::readTask() {
 	constexpr size_t READ_BUFFER_SIZE = 8;
 	uint8_t readbuf[READ_BUFFER_SIZE];
 	EventManager::GetInstance().downgradeErrorsOnCurrentThread();
-	while(!closing && !isDisconnected()) {
+	while(!isClosing() && !isDisconnected()) {
 		auto readBytes = ftdi.read(readbuf, READ_BUFFER_SIZE);
 		if(readBytes < 0) {
 			if(ErrorIsDisconnection(readBytes)) {
 				if(!isDisconnected()) {
-					disconnected = true;
+					setIsDisconnected(true);
 					report(APIEvent::Type::DeviceDisconnected, APIEvent::Severity::Error);
 				}
 			} else
@@ -220,7 +218,7 @@ void FTDI::readTask() {
 void FTDI::writeTask() {
 	WriteOperation writeOp;
 	EventManager::GetInstance().downgradeErrorsOnCurrentThread();
-	while(!closing && !isDisconnected()) {
+	while(!isClosing() && !isDisconnected()) {
 		if(!writeQueue.wait_dequeue_timed(writeOp, std::chrono::milliseconds(100)))
 			continue;

@@ -230,7 +228,7 @@ void FTDI::writeTask() {
 		if(writeBytes < 0) {
 			if(ErrorIsDisconnection(writeBytes)) {
 				if(!isDisconnected()) {
-					disconnected = true;
+					setIsDisconnected(true);
 					report(APIEvent::Type::DeviceDisconnected, APIEvent::Severity::Error);
 				}
 				break;
@@ -258,28 +258,26 @@ bool PCAP::close() {
 	if(!isOpen())
 		return false;

-	closing = true; // Signal the threads that we are closing
+	setIsClosing(true); // Signal the threads that we are closing
 	pcap_breakloop(iface.fp);
 #ifndef __linux__
 	pthread_cancel(readThread.native_handle());
 #endif
 	readThread.join();
 	writeThread.join();
-	closing = false;
+	setIsClosing(false);

 	pcap_close(iface.fp);
 	iface.fp = nullptr;

-	WriteOperation flushop;
-	readBuffer.clear();
-	while(writeQueue.try_dequeue(flushop)) {}
+	clearBuffers();

 	return true;
 }

 void PCAP::readTask() {
 	EventManager::GetInstance().downgradeErrorsOnCurrentThread();
-	while (!closing) {
+	while (!isClosing()) {
 		pcap_dispatch(iface.fp, -1, [](uint8_t* obj, const struct pcap_pkthdr* header, const uint8_t* data) {
 			PCAP* driver = reinterpret_cast<PCAP*>(obj);
 			if(driver->ethPacketizer.inputUp({data, data + header->caplen})) {
@@ -294,7 +292,7 @@ void PCAP::writeTask() {
 	WriteOperation writeOp;
 	EventManager::GetInstance().downgradeErrorsOnCurrentThread();

-	while(!closing) {
+	while(!isClosing()) {
 		if(!writeQueue.wait_dequeue_timed(writeOp, std::chrono::milliseconds(100)))
 			continue;
@@ -511,20 +511,18 @@ bool TCP::close() {
 		return false;
 	}

-	closing = true;
-	disconnected = false;
+	setIsClosing(true);
+	setIsDisconnected(false);

 	if(readThread.joinable())
 		readThread.join();
 	if(writeThread.joinable())
 		writeThread.join();

-	WriteOperation flushop;
-	readBuffer.pop(readBuffer.size());
-	while(writeQueue.try_dequeue(flushop)) {}
+	clearBuffers();

 	socket.reset();
-	closing = false;
+	setIsClosing(false);

 	return true;
 }
@@ -534,7 +532,7 @@ void TCP::readTask() {

 	constexpr size_t READ_BUFFER_SIZE = 2048;
 	uint8_t readbuf[READ_BUFFER_SIZE];
-	while(!closing) {
+	while(!isClosing()) {
 		if(const auto received = ::recv(*socket, (char*)readbuf, READ_BUFFER_SIZE, 0); received > 0) {
 			pushRx(readbuf, received);
 		} else {
@@ -547,11 +545,11 @@ void TCP::writeTask() {
 	EventManager::GetInstance().downgradeErrorsOnCurrentThread();

 	WriteOperation writeOp;
-	while(!closing) {
+	while(!isClosing()) {
 		if(!writeQueue.wait_dequeue_timed(writeOp, std::chrono::milliseconds(100)))
 			continue;

-		while(!closing) {
+		while(!isClosing()) {
 			if(::send(*socket, (char*)writeOp.bytes.data(), WIN_INT(writeOp.bytes.size()), 0) > 0)
 				break;
 			socket->poll(POLLOUT, 100);
@@ -246,18 +246,17 @@ bool PCAP::close() {
 		return false;
 	}

-	closing = true; // Signal the threads that we are closing
+	setIsClosing(true); // Signal the threads that we are closing
 	readThread.join();
 	writeThread.join();
 	transmitThread.join();
-	closing = false;
+	setIsClosing(false);

 	pcap.close(iface.fp);
 	iface.fp = nullptr;

-	WriteOperation flushop;
-	readBuffer.clear();
-	while(writeQueue.try_dequeue(flushop)) {}
+	clearBuffers();
 	transmitQueue = nullptr;

 	return true;
@@ -267,7 +266,7 @@ void PCAP::readTask() {
 	struct pcap_pkthdr* header;
 	const uint8_t* data;
 	EventManager::GetInstance().downgradeErrorsOnCurrentThread();
-	while(!closing) {
+	while(!isClosing()) {
 		auto readBytes = pcap.next_ex(iface.fp, &header, &data);
 		if(readBytes < 0) {
 			report(APIEvent::Type::FailedToRead, APIEvent::Severity::Error);
@@ -291,7 +290,7 @@ void PCAP::writeTask() {
 	pcap_send_queue* queue2 = pcap.sendqueue_alloc(128000);
 	pcap_send_queue* queue = queue1;

-	while(!closing) {
+	while(!isClosing()) {
 		// Potentially, we added frames to a second queue faster than the other thread was able to hand the first
 		// off to the kernel. In that case, wait for a minimal amount of time before checking whether we can
 		// transmit it again.

@@ -342,9 +341,9 @@ void PCAP::writeTask() {
 }

 void PCAP::transmitTask() {
-	while(!closing) {
+	while(!isClosing()) {
 		std::unique_lock<std::mutex> lk(transmitQueueMutex);
-		if(transmitQueueCV.wait_for(lk, std::chrono::milliseconds(100), [this] { return !!transmitQueue; }) && !closing && transmitQueue) {
+		if(transmitQueueCV.wait_for(lk, std::chrono::milliseconds(100), [this] { return !!transmitQueue; }) && !isClosing() && transmitQueue) {
 			pcap_send_queue* current = transmitQueue;
 			lk.unlock();
 			pcap.sendqueue_transmit(iface.fp, current, 0);
@@ -326,12 +326,12 @@ bool VCP::close() {
 		return false;
 	}

-	closing = true; // Signal the threads that we are closing
+	setIsClosing(true); // Signal the threads that we are closing
 	for(auto& t : threads)
 		t->join(); // Wait for the threads to close
 	readThread.join();
 	writeThread.join();
-	closing = false;
+	setIsClosing(false);

 	if(!CloseHandle(detail->handle)) {
 		report(APIEvent::Type::DriverFailedToClose, APIEvent::Severity::Error);

@@ -357,9 +357,7 @@ bool VCP::close() {
 		detail->overlappedWait.hEvent = INVALID_HANDLE_VALUE;
 	}

-	WriteOperation flushop;
-	readBuffer.clear();
-	while(writeQueue.try_dequeue(flushop)) {}
+	clearBuffers();

 	if(!ret)
 		report(APIEvent::Type::DriverFailedToClose, APIEvent::Severity::Error);
@@ -379,7 +377,7 @@ void VCP::readTask() {
 	IOTaskState state = LAUNCH;
 	DWORD bytesRead = 0;
 	EventManager::GetInstance().downgradeErrorsOnCurrentThread();
-	while(!closing && !isDisconnected()) {
+	while(!isClosing() && !isDisconnected()) {
 		switch(state) {
 			case LAUNCH: {
 				COMSTAT comStatus;

@@ -401,7 +399,7 @@ void VCP::readTask() {
 				else if(lastError != ERROR_SUCCESS) {
 					if(lastError == ERROR_ACCESS_DENIED) {
 						if(!isDisconnected()) {
-							disconnected = true;
+							setIsDisconnected(true);
 							report(APIEvent::Type::DeviceDisconnected, APIEvent::Severity::Error);
 						}
 					} else
@@ -432,7 +430,7 @@ void VCP::writeTask() {
 	VCP::WriteOperation writeOp;
 	DWORD bytesWritten = 0;
 	EventManager::GetInstance().downgradeErrorsOnCurrentThread();
-	while(!closing && !isDisconnected()) {
+	while(!isClosing() && !isDisconnected()) {
 		switch(state) {
 			case LAUNCH: {
 				if(!writeQueue.wait_dequeue_timed(writeOp, std::chrono::milliseconds(100)))

@@ -448,7 +446,7 @@ void VCP::writeTask() {
 				}
 				else if(winerr == ERROR_ACCESS_DENIED) {
 					if(!isDisconnected()) {
-						disconnected = true;
+						setIsDisconnected(true);
 						report(APIEvent::Type::DeviceDisconnected, APIEvent::Severity::Error);
 					}
 				} else