Support async_overflow_policy::discard_new (#2876)

Reason for the discard_new policy: when there is an overflow, there
is usually some unexpected issue (a bug, or some other unexpected condition).
In such cases, the first log messages to arrive are usually more important
than subsequent ones. For example, some applications keep logging error
messages after a functionality failure; when using
async_overflow_policy::overrun_oldest, those later messages overrun the
first-arrived messages that may contain the real reason for the failure.
This commit is contained in:
Yubin 2023-09-10 04:05:08 +08:00 committed by GitHub
parent d109e1dcd0
commit b5b5043d42
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 94 additions and 4 deletions

1
.gitignore vendored
View File

@ -72,6 +72,7 @@ install_manifest.txt
/tests/logs/* /tests/logs/*
spdlogConfig.cmake spdlogConfig.cmake
spdlogConfigVersion.cmake spdlogConfigVersion.cmake
compile_commands.json
# idea # idea
.idea/ .idea/

View File

@ -21,9 +21,10 @@ namespace spdlog {
// Async overflow policy - block by default. // Async overflow policy - block by default.
enum class async_overflow_policy enum class async_overflow_policy
{ {
block, // Block until message can be enqueued block, // Block until message can be enqueued
overrun_oldest // Discard oldest message in the queue if full when trying to overrun_oldest, // Discard oldest message in the queue if full when trying to
// add new item. // add new item.
discard_new // Discard new message if the queue is full when trying to add new item.
}; };
namespace details { namespace details {

View File

@ -12,6 +12,7 @@
#include <spdlog/details/circular_q.h> #include <spdlog/details/circular_q.h>
#include <atomic>
#include <condition_variable> #include <condition_variable>
#include <mutex> #include <mutex>
@ -49,6 +50,28 @@ public:
push_cv_.notify_one(); push_cv_.notify_one();
} }
// Non-blocking enqueue: push the item only if the queue has room.
// If the queue is full the item is dropped and discard_counter_ is bumped.
// The waiting consumer is notified only after the mutex is released.
void enqueue_if_have_room(T &&item)
{
    bool room_found = false;
    {
        std::unique_lock<std::mutex> guard(queue_mutex_);
        room_found = !q_.full();
        if (room_found)
        {
            q_.push_back(std::move(item));
        }
    }

    if (!room_found)
    {
        // Queue was full: count the dropped message (atomic, relaxed is enough).
        ++discard_counter_;
        return;
    }
    push_cv_.notify_one();
}
// dequeue with a timeout. // dequeue with a timeout.
// Return true, if succeeded dequeue item, false otherwise // Return true, if succeeded dequeue item, false otherwise
bool dequeue_for(T &popped_item, std::chrono::milliseconds wait_duration) bool dequeue_for(T &popped_item, std::chrono::milliseconds wait_duration)
@ -99,6 +122,26 @@ public:
push_cv_.notify_one(); push_cv_.notify_one();
} }
// Non-blocking enqueue: push the item only if the queue has room,
// otherwise drop it and bump discard_counter_. Notification happens
// while the lock is still held, matching this implementation variant.
void enqueue_if_have_room(T &&item)
{
    std::unique_lock<std::mutex> guard(queue_mutex_);
    if (q_.full())
    {
        // Queue full: record the dropped message and bail out.
        ++discard_counter_;
        return;
    }
    q_.push_back(std::move(item));
    push_cv_.notify_one();
}
// dequeue with a timeout. // dequeue with a timeout.
// Return true, if succeeded dequeue item, false otherwise // Return true, if succeeded dequeue item, false otherwise
bool dequeue_for(T &popped_item, std::chrono::milliseconds wait_duration) bool dequeue_for(T &popped_item, std::chrono::milliseconds wait_duration)
@ -132,6 +175,11 @@ public:
return q_.overrun_counter(); return q_.overrun_counter();
} }
// Number of messages dropped by enqueue_if_have_room() because the queue
// was full. Relaxed load: the value is a best-effort snapshot under
// concurrent producers, which is sufficient for statistics/tests.
size_t discard_counter()
{
return discard_counter_.load(std::memory_order_relaxed);
}
size_t size() size_t size()
{ {
std::unique_lock<std::mutex> lock(queue_mutex_); std::unique_lock<std::mutex> lock(queue_mutex_);
@ -144,11 +192,17 @@ public:
q_.reset_overrun_counter(); q_.reset_overrun_counter();
} }
// Reset the dropped-message counter to zero (relaxed store; no ordering
// guarantees with respect to in-flight enqueue attempts).
void reset_discard_counter()
{
discard_counter_.store(0, std::memory_order_relaxed);
}
private: private:
std::mutex queue_mutex_; std::mutex queue_mutex_;
std::condition_variable push_cv_; std::condition_variable push_cv_;
std::condition_variable pop_cv_; std::condition_variable pop_cv_;
spdlog::details::circular_q<T> q_; spdlog::details::circular_q<T> q_;
std::atomic<size_t> discard_counter_{0};
}; };
} // namespace details } // namespace details
} // namespace spdlog } // namespace spdlog

View File

@ -80,6 +80,16 @@ void SPDLOG_INLINE thread_pool::reset_overrun_counter()
q_.reset_overrun_counter(); q_.reset_overrun_counter();
} }
// Forwards to the underlying queue: number of messages dropped under the
// discard_new overflow policy.
size_t SPDLOG_INLINE thread_pool::discard_counter()
{
return q_.discard_counter();
}
// Forwards to the underlying queue: reset the discard_new drop counter.
void SPDLOG_INLINE thread_pool::reset_discard_counter()
{
q_.reset_discard_counter();
}
size_t SPDLOG_INLINE thread_pool::queue_size() size_t SPDLOG_INLINE thread_pool::queue_size()
{ {
return q_.size(); return q_.size();
@ -91,10 +101,15 @@ void SPDLOG_INLINE thread_pool::post_async_msg_(async_msg &&new_msg, async_overf
{ {
q_.enqueue(std::move(new_msg)); q_.enqueue(std::move(new_msg));
} }
else else if (overflow_policy == async_overflow_policy::overrun_oldest)
{ {
q_.enqueue_nowait(std::move(new_msg)); q_.enqueue_nowait(std::move(new_msg));
} }
else
{
assert(overflow_policy == async_overflow_policy::discard_new);
q_.enqueue_if_have_room(std::move(new_msg));
}
} }
void SPDLOG_INLINE thread_pool::worker_loop_() void SPDLOG_INLINE thread_pool::worker_loop_()

View File

@ -98,6 +98,8 @@ public:
void post_flush(async_logger_ptr &&worker_ptr, async_overflow_policy overflow_policy); void post_flush(async_logger_ptr &&worker_ptr, async_overflow_policy overflow_policy);
size_t overrun_counter(); size_t overrun_counter();
void reset_overrun_counter(); void reset_overrun_counter();
size_t discard_counter();
void reset_discard_counter();
size_t queue_size(); size_t queue_size();
private: private:

View File

@ -43,6 +43,23 @@ TEST_CASE("discard policy ", "[async]")
REQUIRE(tp->overrun_counter() > 0); REQUIRE(tp->overrun_counter() > 0);
} }
// With a tiny queue (4 slots) and a sink that takes 1ms per message,
// flooding 1024 messages under discard_new must drop some of them and
// the thread pool's discard counter must reflect that.
TEST_CASE("discard policy discard_new ", "[async]")
{
    constexpr size_t max_queue_items = 4;
    constexpr size_t total_messages = 1024;

    auto slow_sink = std::make_shared<spdlog::sinks::test_sink_mt>();
    slow_sink->set_delay(std::chrono::milliseconds(1));

    auto pool = std::make_shared<spdlog::details::thread_pool>(max_queue_items, 1);
    auto logger = std::make_shared<spdlog::async_logger>("as", slow_sink, pool, spdlog::async_overflow_policy::discard_new);

    size_t sent = 0;
    while (sent++ < total_messages)
    {
        logger->info("Hello message");
    }

    REQUIRE(slow_sink->msg_counter() < total_messages);
    REQUIRE(pool->discard_counter() > 0);
}
TEST_CASE("discard policy using factory ", "[async]") TEST_CASE("discard policy using factory ", "[async]")
{ {
size_t queue_size = 4; size_t queue_size = 4;