Mirror of https://github.com/gabime/spdlog.git
context in async factory
parent 7f3f958f63
commit fddf4c99d7
@@ -21,6 +21,7 @@
 #include "./async_logger.h"
 #include "./details/context.h"
 #include "./details/thread_pool.h"
+#include "spdlog.h"
 
 namespace spdlog {
 
@@ -35,15 +36,14 @@ template <async_overflow_policy OverflowPolicy = async_overflow_policy::block>
 struct async_factory_impl {
     template <typename Sink, typename... SinkArgs>
     static std::shared_ptr<async_logger> create(std::string logger_name, SinkArgs &&...args) {
-        auto &registry_inst = details::context::instance();
-
+        auto context = spdlog::context();
         // create global thread pool if not already exists
-        auto &mutex = registry_inst.tp_mutex();
+        auto &mutex = context->tp_mutex();
         std::lock_guard<std::recursive_mutex> tp_lock(mutex);
-        auto tp = registry_inst.get_tp();
+        auto tp = context->get_tp();
         if (tp == nullptr) {
             tp = std::make_shared<details::thread_pool>(details::default_async_q_size, 1U);
-            registry_inst.set_tp(tp);
+            context->set_tp(tp);
         }
 
         auto sink = std::make_shared<Sink>(std::forward<SinkArgs>(args)...);
@@ -71,7 +71,7 @@ inline void init_thread_pool(size_t q_size,
                              std::function<void()> on_thread_start,
                              std::function<void()> on_thread_stop) {
     auto tp = std::make_shared<details::thread_pool>(q_size, thread_count, on_thread_start, on_thread_stop);
-    details::context::instance().set_tp(std::move(tp));
+    spdlog::context()->set_tp(std::move(tp));
 }
 
 inline void init_thread_pool(size_t q_size, size_t thread_count, std::function<void()> on_thread_start) {
@@ -83,5 +83,5 @@ inline void init_thread_pool(size_t q_size, size_t thread_count) {
 }
 
 // get the global thread pool.
-inline std::shared_ptr<spdlog::details::thread_pool> thread_pool() { return details::context::instance().get_tp(); }
+inline std::shared_ptr<spdlog::details::thread_pool> thread_pool() { return spdlog::context()->get_tp(); }
 } // namespace spdlog
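Call-site view of the change: a minimal usage sketch, assuming spdlog's usual public include paths and the stdout_color_sink_mt sink (neither appears in this diff).

// Usage sketch, not part of this commit. Include paths and the sink type
// are assumptions based on spdlog's usual layout.
#include <spdlog/async.h>
#include <spdlog/sinks/stdout_color_sinks.h>

int main() {
    // Explicitly size the global pool: queue of 8192 items, one worker.
    // Internally this now routes through spdlog::context()->set_tp(...).
    spdlog::init_thread_pool(8192, 1);

    // The async factory fetches (or lazily creates) the pool via the context.
    auto logger = spdlog::create_async<spdlog::sinks::stdout_color_sink_mt>("async");
    logger->info("hello from the thread pool");

    // The same context hands the pool back out for inspection.
    return spdlog::thread_pool() != nullptr ? 0 : 1;
}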
@@ -23,7 +23,6 @@ class thread_pool;
 
 class SPDLOG_API context {
 public:
-    static context &instance();
     context();
     ~context();
     context(const context &) = delete;
@@ -69,10 +69,5 @@ void context::shutdown() {
 
 std::recursive_mutex &context::tp_mutex() { return tp_mutex_; }
 
-context &context::instance() {
-    static context s_instance;
-    return s_instance;
-}
-
 } // namespace details
 } // namespace spdlog
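For orientation: the removed Meyers singleton is replaced at the call sites by a free spdlog::context() accessor whose definition is not part of this commit. The arrow syntax above (context->get_tp()) only implies it returns something pointer-like; a hypothetical sketch, assuming shared_ptr ownership:

// Hypothetical reconstruction, NOT shown in this commit: one plausible
// shape for the accessor the call sites rely on. shared_ptr ownership is
// an assumption; only the name spdlog::context() and its pointer-like
// return are visible in the diff.
namespace spdlog {
inline std::shared_ptr<details::context> context() {
    static auto s_context = std::make_shared<details::context>();
    return s_context;
}
}  // namespace spdlog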