clang format

gabime 2018-07-10 23:53:00 +03:00
parent d409e5367b
commit 16ee72da7c
8 changed files with 200 additions and 204 deletions

View File

@ -32,7 +32,6 @@ void bench_mt(int howmany, std::shared_ptr<spdlog::logger> log, int thread_count
int main(int argc, char *argv[])
{
int howmany = 1000000;
int queue_size = howmany + 2;
int threads = 10;
@ -76,7 +75,7 @@ int main(int argc, char *argv[])
auto daily_mt = spdlog::daily_logger_mt("daily_mt", "logs/daily_mt.log");
bench_mt(howmany, daily_mt, threads);
bench_mt(howmany, spdlog::create<null_sink_mt>("null_mt"), threads);
cout << "\n*******************************************************************************\n";
cout << "async logging.. " << threads << " threads sharing same logger, " << format(howmany) << " iterations " << endl;
@ -112,7 +111,7 @@ void bench(int howmany, std::shared_ptr<spdlog::logger> log)
auto delta = high_resolution_clock::now() - start;
auto delta_d = duration_cast<duration<double>>(delta).count();
cout << "Elapsed: " << delta_d << "\t" << format(int(howmany / delta_d)) << "/sec" << endl;
spdlog::drop(log->name());
}
void bench_mt(int howmany, std::shared_ptr<spdlog::logger> log, int thread_count)
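
The hunks above time a tight logging loop and report throughput as messages per second, dropping the logger afterwards so the next run starts clean. A minimal sketch of that measurement pattern, independent of spdlog (the callable stands in for log->info and all names here are illustrative):

#include <chrono>

// Returns messages/second for `howmany` invocations of log_one (a stand-in for log->info).
template<typename Fn>
double messages_per_second(int howmany, Fn &&log_one)
{
    using std::chrono::duration;
    using std::chrono::duration_cast;
    using std::chrono::high_resolution_clock;

    auto start = high_resolution_clock::now();
    for (int i = 0; i < howmany; ++i)
    {
        log_one(i);
    }
    auto delta_d = duration_cast<duration<double>>(high_resolution_clock::now() - start).count();
    return howmany / delta_d;
}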

View File

@ -29,9 +29,9 @@ using namespace utils;
void bench(int howmany, std::shared_ptr<spdlog::logger> log);
void bench_mt(int howmany, std::shared_ptr<spdlog::logger> log, int thread_count);
int main(int , char *[])
int main(int, char *[])
{
std::srand(static_cast<unsigned>(std::time(nullptr))); // use current time as seed for random generator
int howmany = 1000000;
int queue_size = howmany + 2;
int threads = 10;
@ -91,7 +91,6 @@ int main(int , char *[])
return EXIT_SUCCESS;
}
void bench(int howmany, std::shared_ptr<spdlog::logger> log)
{
using namespace std::chrono;
@ -106,10 +105,10 @@ void bench(int howmany, std::shared_ptr<spdlog::logger> log)
auto start = high_resolution_clock::now();
log->info("Hello logger: msg number {}", i);
auto delta_nanos = chrono::duration_cast<nanoseconds>(high_resolution_clock::now() - start);
total_nanos+= delta_nanos;
total_nanos += delta_nanos;
}
auto avg = total_nanos.count()/howmany;
auto avg = total_nanos.count() / howmany;
cout << format(avg) << " ns/call" << endl;
}
@ -131,7 +130,7 @@ void bench_mt(int howmany, std::shared_ptr<spdlog::logger> log, int thread_count
auto start = high_resolution_clock::now();
log->info("Hello logger: msg number {}", j);
auto delta_nanos = chrono::duration_cast<nanoseconds>(high_resolution_clock::now() - start);
total_nanos+= delta_nanos.count();
total_nanos += delta_nanos.count();
}
}));
}
@ -141,9 +140,6 @@ void bench_mt(int howmany, std::shared_ptr<spdlog::logger> log, int thread_count
t.join();
};
auto avg = total_nanos/howmany;
auto avg = total_nanos / howmany;
cout << format(avg) << " ns/call" << endl;
}
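
The latency benchmark above times each call individually, accumulates the nanosecond deltas, and divides by the iteration count. A small self-contained sketch of that pattern (names are illustrative, not part of spdlog):

#include <chrono>

// Average per-call latency in nanoseconds over `iterations` invocations of fn.
template<typename Fn>
long long average_call_nanos(int iterations, Fn &&fn)
{
    using std::chrono::duration_cast;
    using std::chrono::high_resolution_clock;
    using std::chrono::nanoseconds;

    nanoseconds total{0};
    for (int i = 0; i < iterations; ++i)
    {
        auto start = high_resolution_clock::now();
        fn(i);
        total += duration_cast<nanoseconds>(high_resolution_clock::now() - start);
    }
    return total.count() / iterations;
}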

View File

@ -9,14 +9,14 @@ namespace spdlog {
namespace details {
namespace fmt_helper {
template <size_t Buffer_Size>
template<size_t Buffer_Size>
inline void append_str(const std::string &str, fmt::basic_memory_buffer<char, Buffer_Size> &dest)
{
auto *str_ptr = str.data();
dest.append(str_ptr, str_ptr + str.size());
}
template <size_t Buffer_Size>
template<size_t Buffer_Size>
inline void append_c_str(const char *c_str, fmt::basic_memory_buffer<char, Buffer_Size> &dest)
{
char ch;
@ -41,7 +41,7 @@ inline void append_int(T n, fmt::basic_memory_buffer<char, Buffer_Size> &dest)
dest.append(i.data(), i.data() + i.size());
}
template <size_t Buffer_Size>
template<size_t Buffer_Size>
inline void pad2(int n, fmt::basic_memory_buffer<char, Buffer_Size> &dest)
{
if (n > 99)
@ -65,7 +65,7 @@ inline void pad2(int n, fmt::basic_memory_buffer<char, Buffer_Size> &dest)
fmt::format_to(dest, "{:02}", n);
}
template <size_t Buffer_Size>
template<size_t Buffer_Size>
inline void pad3(int n, fmt::basic_memory_buffer<char, Buffer_Size> &dest)
{
if (n > 99)
@ -91,7 +91,7 @@ inline void pad3(int n, fmt::basic_memory_buffer<char, Buffer_Size> &dest)
fmt::format_to(dest, "{:03}", n);
}
template <size_t Buffer_Size>
template<size_t Buffer_Size>
inline void pad6(size_t n, fmt::basic_memory_buffer<char, Buffer_Size> &dest)
{
// todo: maybe replace this implementation with
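
The pad helpers above zero-pad small integers into a fmt memory buffer, taking a fast path for the common in-range case and falling back to fmt::format_to otherwise. A minimal sketch of the pad2 idea, assuming the fmt 5-era fmt::format_to(memory_buffer &, ...) overload already used in this file (pad2_sketch is an illustrative name):

#include <fmt/format.h>

// Fast path for 0..99: append the two digit characters directly;
// anything else falls back to the formatter.
inline void pad2_sketch(int n, fmt::memory_buffer &dest)
{
    if (n >= 0 && n < 100)
    {
        dest.push_back(static_cast<char>('0' + n / 10));
        dest.push_back(static_cast<char>('0' + n % 10));
    }
    else
    {
        fmt::format_to(dest, "{:02}", n);
    }
}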

View File

@ -175,23 +175,23 @@ inline void spdlog::logger::critical(const T &msg)
#ifdef SPDLOG_WCHAR_TO_UTF8_SUPPORT
template<typename... Args>
inline void spdlog::logger::log(level::level_enum lvl, const wchar_t *fmt, const Args &... args)
{
    if (!should_log(lvl))
    {
        return;
    }

    decltype(wstring_converter_)::byte_string utf8_string;
    try
    {
        {
            std::lock_guard<std::mutex> lock(wstring_converter_mutex_);
            utf8_string = wstring_converter_.to_bytes(fmt);
        }
        log(lvl, utf8_string.c_str(), args...);
    }
    SPDLOG_CATCH_AND_HANDLE
}
template<typename... Args>
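
The wchar_t overload above converts the wide format string to UTF-8 with std::wstring_convert and then delegates to the narrow overload; the converter is a shared member, so the conversion happens under a mutex. A stripped-down sketch of just that conversion step (converter and converter_mutex are illustrative names, not spdlog members):

#include <codecvt>
#include <locale>
#include <mutex>
#include <string>

// std::wstring_convert is not thread safe, so a shared instance needs a lock.
static std::wstring_convert<std::codecvt_utf8<wchar_t>> converter;
static std::mutex converter_mutex;

std::string to_utf8(const wchar_t *wide)
{
    std::lock_guard<std::mutex> lock(converter_mutex);
    return converter.to_bytes(wide);
}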

View File

@ -120,7 +120,7 @@ class c_formatter SPDLOG_FINAL : public flag_formatter
{
void format(const details::log_msg &, const std::tm &tm_time, fmt::memory_buffer &dest) override
{
//fmt::format_to(dest, "{} {} {} ", days[tm_time.tm_wday], months[tm_time.tm_mon], tm_time.tm_mday);
// fmt::format_to(dest, "{} {} {} ", days[tm_time.tm_wday], months[tm_time.tm_mon], tm_time.tm_mday);
// date
fmt_helper::append_str(days[tm_time.tm_wday], dest);
dest.push_back(' ');
@ -496,7 +496,7 @@ class full_formatter SPDLOG_FINAL : public flag_formatter
// cache the millis part for the next milli.
auto millis = std::chrono::duration_cast<std::chrono::milliseconds>(duration).count() % 1000;
if(millis != millis_cache_timestamp_ || cached_millis_.size() == 0)
if (millis != millis_cache_timestamp_ || cached_millis_.size() == 0)
{
cached_millis_.resize(0);
fmt_helper::pad3(static_cast<int>(millis), cached_millis_);
@ -528,8 +528,8 @@ class full_formatter SPDLOG_FINAL : public flag_formatter
}
private:
std::chrono::seconds cache_timestamp_ {0};
std::chrono::milliseconds::rep millis_cache_timestamp_ {0};
std::chrono::seconds cache_timestamp_{0};
std::chrono::milliseconds::rep millis_cache_timestamp_{0};
fmt::basic_memory_buffer<char, 128> cached_datetime_;
fmt::basic_memory_buffer<char, 8> cached_millis_;
};
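
The full_formatter change above caches the zero-padded millisecond string and only re-renders it when the millisecond value differs from the previous call, on the assumption that many consecutive messages share the same timestamp. A small sketch of that caching idea in plain C++ (a hypothetical type, not the spdlog implementation):

#include <chrono>
#include <cstdio>

// Re-render the zero-padded millisecond text only when the value changes.
// Expects values in 0..999, as produced by the % 1000 above.
struct millis_cache
{
    std::chrono::milliseconds::rep last_millis{-1};
    char text[4]{};

    const char *format(std::chrono::milliseconds::rep millis)
    {
        if (millis != last_millis)
        {
            std::snprintf(text, sizeof(text), "%03lld", static_cast<long long>(millis));
            last_millis = millis;
        }
        return text;
    }
};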

View File

@ -10,37 +10,39 @@
#include <vector>
namespace spdlog {
namespace details {

using async_logger_ptr = std::shared_ptr<spdlog::async_logger>;

enum class async_msg_type
{
    log,
    flush,
    terminate
};

// Async msg to move to/from the queue
// Movable only. should never be copied
struct async_msg
{
    async_msg_type msg_type;
    level::level_enum level;
    log_clock::time_point time;
    size_t thread_id;
    fmt::basic_memory_buffer<char, 176> raw;

    size_t msg_id;
    async_logger_ptr worker_ptr;

    async_msg() = default;
    ~async_msg() = default;

    // should only be moved in or out of the queue..
    async_msg(const async_msg &) = delete;

    // support for vs2013 move
#if defined(_MSC_VER) && _MSC_VER <= 1800
    async_msg(async_msg &&other) SPDLOG_NOEXCEPT : msg_type(other.msg_type),
                                                    level(other.level),
                                                    time(other.time),
                                                    thread_id(other.thread_id),
@ -59,161 +61,159 @@ namespace spdlog {
        raw = std::move(other.raw);
        msg_id = other.msg_id;
        worker_ptr = std::move(other.worker_ptr);
        return *this;
    }
#else // (_MSC_VER) && _MSC_VER <= 1800
    async_msg(async_msg &&other) = default;
    async_msg &operator=(async_msg &&other) = default;
#endif

    // construct from log_msg with given type
    async_msg(async_logger_ptr &&worker, async_msg_type the_type, details::log_msg &&m)
        : msg_type(the_type)
        , level(m.level)
        , time(m.time)
        , thread_id(m.thread_id)
        , msg_id(m.msg_id)
        , worker_ptr(std::forward<async_logger_ptr>(worker))
    {
        fmt_helper::append_buf(m.raw, raw);
    }

    async_msg(async_logger_ptr &&worker, async_msg_type the_type)
        : async_msg(std::forward<async_logger_ptr>(worker), the_type, details::log_msg())
    {
    }

    async_msg(async_msg_type the_type)
        : async_msg(nullptr, the_type, details::log_msg())
    {
    }

    // copy into log_msg
    void to_log_msg(log_msg &msg)
    {
        msg.logger_name = &worker_ptr->name();
        msg.level = level;
        msg.time = time;
        msg.thread_id = thread_id;
        fmt_helper::append_buf(raw, msg.raw);
        msg.msg_id = msg_id;
        msg.color_range_start = 0;
        msg.color_range_end = 0;
    }
};

class thread_pool
{
public:
    using item_type = async_msg;
    using q_type = details::mpmc_blocking_queue<item_type>;
    using clock_type = std::chrono::steady_clock;

    thread_pool(size_t q_max_items, size_t threads_n)
        : q_(q_max_items)
    {
        // std::cout << "thread_pool() q_size_bytes: " << q_size_bytes << "\tthreads_n: " << threads_n << std::endl;
        if (threads_n == 0 || threads_n > 1000)
        {
            throw spdlog_ex("spdlog::thread_pool(): invalid threads_n param (valid range is 1-1000)");
        }
        for (size_t i = 0; i < threads_n; i++)
        {
            threads_.emplace_back(std::bind(&thread_pool::worker_loop_, this));
        }
    }

    // message all threads to terminate gracefully join them
    ~thread_pool()
    {
        try
        {
            for (size_t i = 0; i < threads_.size(); i++)
            {
                post_async_msg_(async_msg(async_msg_type::terminate), async_overflow_policy::block);
            }

            for (auto &t : threads_)
            {
                t.join();
            }
        }
        catch (...)
        {
        }
    }

    void post_log(async_logger_ptr &&worker_ptr, details::log_msg &&msg, async_overflow_policy overflow_policy)
    {
        async_msg async_m(std::forward<async_logger_ptr>(worker_ptr), async_msg_type::log, std::forward<log_msg>(msg));
        post_async_msg_(std::move(async_m), overflow_policy);
    }

    void post_flush(async_logger_ptr &&worker_ptr, async_overflow_policy overflow_policy)
    {
        post_async_msg_(async_msg(std::move(worker_ptr), async_msg_type::flush), overflow_policy);
    }

private:
    q_type q_;

    std::vector<std::thread> threads_;

    void post_async_msg_(async_msg &&new_msg, async_overflow_policy overflow_policy)
    {
        if (overflow_policy == async_overflow_policy::block)
        {
            q_.enqueue(std::move(new_msg));
        }
        else
        {
            q_.enqueue_nowait(std::move(new_msg));
        }
    }

    void worker_loop_()
    {
        while (process_next_msg_()) {};
    }

    // process next message in the queue
    // return true if this thread should still be active (while no terminate msg was received)
    bool process_next_msg_()
    {
        async_msg incoming_async_msg;
        bool dequeued = q_.dequeue_for(incoming_async_msg, std::chrono::seconds(10));
        if (!dequeued)
        {
            return true;
        }

        switch (incoming_async_msg.msg_type)
        {
        case async_msg_type::flush:
        {
            incoming_async_msg.worker_ptr->backend_flush_();
            return true;
        }

        case async_msg_type::terminate:
        {
            return false;
        }

        default:
        {
            log_msg msg;
            incoming_async_msg.to_log_msg(msg);
            incoming_async_msg.worker_ptr->backend_log_(msg);
            return true;
        }
        }
        assert(false);
        return true; // should not be reached
    }
};
} // namespace details
} // namespace spdlog
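
The worker threads above block on the queue with a 10-second timeout: a timeout keeps the thread alive and waiting, a terminate message stops exactly one worker (which is why the destructor posts one per thread before joining), and anything else is forwarded to the logger backend. A hedged sketch of that loop contract against a hypothetical blocking queue (msg_type and dequeue_for here are assumptions, not spdlog's mpmc_blocking_queue API):

#include <chrono>

enum class msg_type
{
    log,
    flush,
    terminate
};

// Queue is any type providing bool dequeue_for(msg_type &, std::chrono::duration).
template<typename Queue>
void worker_loop(Queue &q)
{
    for (;;)
    {
        msg_type m;
        if (!q.dequeue_for(m, std::chrono::seconds(10)))
        {
            continue; // timed out while idle: stay alive and keep waiting
        }
        if (m == msg_type::terminate)
        {
            return; // one terminate message stops exactly one worker
        }
        // log and flush messages would be handled here
    }
}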

View File

@ -63,7 +63,7 @@ public:
template<typename... Args>
void critical(const char *fmt, const Args &... args);
#ifdef SPDLOG_WCHAR_TO_UTF8_SUPPORT
template<typename... Args>
void log(level::level_enum lvl, const wchar_t *fmt, const Args &... args);
@ -151,8 +151,8 @@ protected:
std::atomic<size_t> msg_counter_;
#ifdef SPDLOG_WCHAR_TO_UTF8_SUPPORT
std::wstring_convert<std::codecvt_utf8<wchar_t>> wstring_converter_;
std::mutex wstring_converter_mutex_;
#endif
};
} // namespace spdlog

View File

@ -95,14 +95,15 @@ private:
}
if (details::file_helper::file_exists(src) && details::os::rename(src, target) != 0)
{
    // if failed try again after small delay.
    // this is a workaround to a windows issue, where very high rotation rates sometimes fail (because of antivirus?).
    details::os::sleep_for_millis(20);
    details::os::remove(target);
    if (details::os::rename(src, target) != 0)
    {
        throw spdlog_ex(
            "rotating_file_sink: failed renaming " + filename_to_str(src) + " to " + filename_to_str(target), errno);
    }
}
}
file_helper_.reopen(true);
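
The rotation code above retries a failed rename once after a short sleep, removing the stale target first; the comment attributes the transient failures to Windows (for instance, antivirus briefly holding the file). A portable sketch of the same retry idea using only standard library calls (the function name and the use of std::runtime_error are illustrative; spdlog throws spdlog_ex):

#include <chrono>
#include <cstdio>
#include <stdexcept>
#include <string>
#include <thread>

// Retry a failed rename once after a short delay, removing the stale target first.
void rename_with_retry(const std::string &src, const std::string &target)
{
    if (std::rename(src.c_str(), target.c_str()) == 0)
    {
        return;
    }
    std::this_thread::sleep_for(std::chrono::milliseconds(20));
    std::remove(target.c_str());
    if (std::rename(src.c_str(), target.c_str()) != 0)
    {
        throw std::runtime_error("failed renaming " + src + " to " + target);
    }
}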