diff --git a/api/icsneocpp/eventmanager.cpp b/api/icsneocpp/eventmanager.cpp index 164c966..2871c44 100644 --- a/api/icsneocpp/eventmanager.cpp +++ b/api/icsneocpp/eventmanager.cpp @@ -8,6 +8,99 @@ EventManager& EventManager::GetInstance() { return inst; } +void EventManager::downgradeErrorsOnCurrentThread() { + if(destructing) + return; + std::lock_guard<std::mutex> lk(downgradedThreadsMutex); + auto i = downgradedThreads.find(std::this_thread::get_id()); + if(i != downgradedThreads.end()) { + i->second = true; + } else { + downgradedThreads.insert({std::this_thread::get_id(), true}); + } +} + +void EventManager::cancelErrorDowngradingOnCurrentThread() { + if(destructing) + return; + std::lock_guard<std::mutex> lk(downgradedThreadsMutex); + auto i = downgradedThreads.find(std::this_thread::get_id()); + if(i != downgradedThreads.end()) { + i->second = false; + } +} + +void EventManager::add(APIEvent event) { + if(destructing) + return; + if(event.getSeverity() == APIEvent::Severity::Error) { + // if the error was added on a thread that downgrades errors (non-user thread) + std::lock_guard<std::mutex> lk(downgradedThreadsMutex); + auto i = downgradedThreads.find(std::this_thread::get_id()); + if(i != downgradedThreads.end() && i->second) { + event.downgradeFromError(); + { + std::lock_guard<std::mutex> eventsLock(eventsMutex); + addEventInternal(event); + } // free the lock so that callbacks may modify events + runCallbacks(event); + } else { + std::lock_guard<std::mutex> errorsLock(errorsMutex); + lastUserErrors[std::this_thread::get_id()] = event; + } + } else { + { + std::lock_guard<std::mutex> eventsLock(eventsMutex); + addEventInternal(event); + } // free the lock so that callbacks may modify events + runCallbacks(event); + } +} + +void EventManager::addEventInternal(APIEvent event) { + // Ensure the event list is at most exactly full (size of eventLimit - 1, leaving room for a potential APIEvent::TooManyEvents) + // Removes any events of type TooManyEvents from the end before checking to avoid duplicates. 
+ enforceLimit(); + + // We are exactly full, either because the list was truncated or because we were simply full before + if(events.size() == eventLimit - 1) { + // If the event is worth adding + if(event.getType() != APIEvent::Type::TooManyEvents) { + discardOldest(1); + events.push_back(event); + } + + events.push_back(APIEvent(APIEvent::Type::TooManyEvents, APIEvent::Severity::EventWarning)); + } else { + if (event.getType() != APIEvent::Type::TooManyEvents) + events.push_back(event); + } +} + +void EventManager::runCallbacks(APIEvent event) { + std::lock_guard<std::mutex> lk(callbacksMutex); + for(auto& i : callbacks) + i.second.callIfMatch(std::make_shared<APIEvent>(event)); +} + +void EventManager::setEventLimit(size_t newLimit) { + std::lock_guard<std::mutex> eventLimitLock(eventLimitMutex); + + if(newLimit == eventLimit) + return; + + if(newLimit < 10) { + add(APIEvent::Type::ParameterOutOfRange, APIEvent::Severity::Error); + return; + } + + eventLimit = newLimit; + + std::lock_guard<std::mutex> eventsLock(eventsMutex); + if(enforceLimit()) + addEventInternal(APIEvent(APIEvent::Type::TooManyEvents, APIEvent::Severity::EventWarning)); +} + void EventManager::ResetInstance() { std::lock_guard<std::mutex> eventsLock(eventsMutex); std::lock_guard<std::mutex> errorsLock(errorsMutex); @@ -21,7 +114,7 @@ void EventManager::ResetInstance() { lastUserErrors.clear(); downgradedThreads.clear(); callbacks.clear(); - + callbackID = 0; eventLimit = 10000; } @@ -55,7 +148,7 @@ bool EventManager::isDowngradingErrorsOnCurrentThread() const { void EventManager::get(std::vector<APIEvent>& eventOutput, size_t max, EventFilter filter) { std::lock_guard<std::mutex> lk(eventsMutex); - + if(max == 0) // A limit of 0 indicates no limit max = (size_t)-1; @@ -98,7 +191,7 @@ void EventManager::discard(EventFilter filter) { }); } -size_t EventManager::count_internal(EventFilter filter) const { +size_t EventManager::countInternal(EventFilter filter) const { size_t ret = 0; for(auto& event : events) if(filter.match(event)) @@ -117,7 +210,7 @@ bool EventManager::enforceLimit() 
{ while(it != events.rend() && filter.match(*it)) { it = decltype(it){events.erase( std::next(it).base() )}; } - + // We are not overflowing if(events.size() < eventLimit) return false; @@ -137,6 +230,6 @@ void EventManager::discardOldest(size_t count) { while(it != events.end()) { it = events.erase(it); if(--count == 0) - break; + break; } } \ No newline at end of file diff --git a/include/icsneo/api/eventmanager.h b/include/icsneo/api/eventmanager.h index 1dafe56..5d5725d 100644 --- a/include/icsneo/api/eventmanager.h +++ b/include/icsneo/api/eventmanager.h @@ -29,28 +29,10 @@ public: // If this thread is not in the map, add it to be ignored // If it is, set it to be ignored - void downgradeErrorsOnCurrentThread() { - if(destructing) - return; - std::lock_guard<std::mutex> lk(downgradedThreadsMutex); - auto i = downgradedThreads.find(std::this_thread::get_id()); - if(i != downgradedThreads.end()) { - i->second = true; - } else { - downgradedThreads.insert({std::this_thread::get_id(), true}); - } - } + void downgradeErrorsOnCurrentThread(); // If this thread exists in the map, turn off downgrading - void cancelErrorDowngradingOnCurrentThread() { - if(destructing) - return; - std::lock_guard<std::mutex> lk(downgradedThreadsMutex); - auto i = downgradedThreads.find(std::this_thread::get_id()); - if(i != downgradedThreads.end()) { - i->second = false; - } - } + void cancelErrorDowngradingOnCurrentThread(); bool isDowngradingErrorsOnCurrentThread() const; @@ -59,7 +41,7 @@ public: size_t eventCount(EventFilter filter = EventFilter()) const { std::lock_guard<std::mutex> lk(eventsMutex); - return count_internal(filter); + return countInternal(filter); }; std::vector<APIEvent> get(EventFilter filter, size_t max = 0) { return get(max, filter); } @@ -73,65 +55,23 @@ public: APIEvent getLastError(); - void add(APIEvent event) { - if(destructing) - return; - if(event.getSeverity() == APIEvent::Severity::Error) { - // if the error was added on a thread that downgrades errors (non-user thread) - std::lock_guard<std::mutex> 
lk(downgradedThreadsMutex); - auto i = downgradedThreads.find(std::this_thread::get_id()); - if(i != downgradedThreads.end() && i->second) { - event.downgradeFromError(); - std::unique_lock<std::mutex> eventsLock(eventsMutex); - add_internal_event(event); - // free the lock so that callbacks may modify events - eventsLock.unlock(); - runCallbacks(event); - } else { - std::lock_guard<std::mutex> errorsLock(errorsMutex); - add_internal_error(event); - } - } else { - std::unique_lock<std::mutex> eventsLock(eventsMutex); - add_internal_event(event); - // free the lock so that callbacks may modify events - eventsLock.unlock(); - runCallbacks(event); - } - } + void add(APIEvent event); void add(APIEvent::Type type, APIEvent::Severity severity, const Device* forDevice = nullptr) { add(APIEvent(type, severity, forDevice)); } void discard(EventFilter filter = EventFilter()); - void setEventLimit(size_t newLimit) { - std::lock_guard<std::mutex> eventLimitLock(eventLimitMutex); - - if(newLimit == eventLimit) - return; - - if(newLimit < 10) { - add(APIEvent::Type::ParameterOutOfRange, APIEvent::Severity::Error); - return; - } - - eventLimit = newLimit; - - std::lock_guard<std::mutex> eventsLock(eventsMutex); - if(enforceLimit()) - add_internal_event(APIEvent(APIEvent::Type::TooManyEvents, APIEvent::Severity::EventWarning)); - } - size_t getEventLimit() const { std::lock_guard<std::mutex> lk(eventLimitMutex); return eventLimit; } + void setEventLimit(size_t newLimit); private: - EventManager() : eventsMutex(), errorsMutex(), downgradedThreadsMutex(), callbacksMutex(), callbackIDMutex(), eventLimitMutex(), downgradedThreads(), callbacks(), events(), lastUserErrors(), eventLimit(10000) {} - EventManager(const EventManager &other); - EventManager& operator=(const EventManager &other); + EventManager() : eventLimit(10000) {} + EventManager(const EventManager& other); // = delete (not supported everywhere) + EventManager& operator=(const EventManager& other); // = delete (not supported everywhere) // Used by functions for threadsafety mutable std::mutex 
eventsMutex; @@ -155,51 +95,15 @@ private: std::map<std::thread::id, APIEvent> lastUserErrors; size_t eventLimit = 10000; - size_t count_internal(EventFilter filter = EventFilter()) const; + size_t countInternal(EventFilter filter = EventFilter()) const; - void runCallbacks(APIEvent event) { - std::lock_guard<std::mutex> lk(callbacksMutex); - for(auto &i : callbacks) { - i.second.callIfMatch(std::make_shared<APIEvent>(event)); - } - } - - /** - * Places a {id, event} pair into the lastUserErrors - * If the key id already exists in the map, replace the event of that pair with the new one - */ - void add_internal_error(APIEvent event) { - std::thread::id id = std::this_thread::get_id(); - auto it = lastUserErrors.find(id); - if(it == lastUserErrors.end()) - lastUserErrors.insert({id, event}); - else - it->second = event; - } + void runCallbacks(APIEvent event); /** * If events is not full, add the event at the end * Otherwise, remove the oldest event, push the event to the back and push a APIEvent::TooManyEvents to the back (in that order) */ - void add_internal_event(APIEvent event) { - // Ensure the event list is at most exactly full (size of eventLimit - 1, leaving room for a potential APIEvent::TooManyEvents) - // Removes any events of type TooManyEvents from the end before checking to avoid duplicates. - enforceLimit(); - - // We are exactly full, either because the list was truncated or because we were simply full before - if(events.size() == eventLimit - 1) { - // If the event is worth adding - if(event.getType() != APIEvent::Type::TooManyEvents) { - discardOldest(1); - events.push_back(event); - } - - events.push_back(APIEvent(APIEvent::Type::TooManyEvents, APIEvent::Severity::EventWarning)); - } else { - if (event.getType() != APIEvent::Type::TooManyEvents) - events.push_back(event); - } - } + void addEventInternal(APIEvent event); bool enforceLimit(); // Returns whether the limit enforcement resulted in an overflow