diff --git a/test/test-addrinfo-wp.cc b/test/test-addrinfo-wp.cc
new file mode 100644
index 0000000..6dcd854
--- /dev/null
+++ b/test/test-addrinfo-wp.cc
@@ -0,0 +1,104 @@
+#include "../include/nsuv-inl.h"
+#include "./catch.hpp"
+#include "./helpers.h"
+
+using nsuv::ns_addrinfo;
+
+static const char* invalid_name = "xyzzy.xyzzy.xyzzy.";
+static const char* valid_name = "localhost";
+
+static std::string* my_data_ptr = nullptr;
+
+static void gettaddrinfo_failure_cb(ns_addrinfo* info,
+                                    int status,
+                                    std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+  ASSERT_GT(0, status);
+  ASSERT_NULL(info->info());
+}
+
+TEST_CASE("invalid_get_async_wp", "[addrinfo]") {
+  std::shared_ptr<int> sp = std::make_shared<int>(42);
+  std::weak_ptr<int> data = sp;
+  ns_addrinfo info;
+
+  ASSERT_EQ(0, info.get(uv_default_loop(),
+                        gettaddrinfo_failure_cb,
+                        invalid_name,
+                        nullptr,
+                        nullptr,
+                        data));
+
+  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));
+
+  make_valgrind_happy();
+}
+
+static void gettaddrinfo_success_cb(ns_addrinfo* info,
+                                    int status,
+                                    std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+  ASSERT_EQ(0, status);
+  ASSERT(info->info());
+}
+
+TEST_CASE("valid_get_async_wp", "[addrinfo]") {
+  std::shared_ptr<int> sp = std::make_shared<int>(42);
+  std::weak_ptr<int> data = sp;
+  ns_addrinfo info;
+
+  ASSERT_EQ(0, info.get(uv_default_loop(),
+                        gettaddrinfo_success_cb,
+                        valid_name,
+                        nullptr,
+                        nullptr,
+                        data));
+
+  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));
+
+  make_valgrind_happy();
+}
+
+static void gettaddrinfo_void_data_cb(ns_addrinfo* info,
+                                      int status,
+                                      std::weak_ptr<std::string> d) {
+  auto data = d.lock();
+  ASSERT_EQ(0, status);
+  ASSERT(info->info());
+  ASSERT(data);
+  ASSERT_EQ(data.get(), my_data_ptr);
+}
+
+TEST_CASE("valid_get_async_void_data_wp", "[addrinfo]") {
+  auto my_data = std::make_shared<std::string>("my_data");
+  std::weak_ptr<std::string> wp = my_data;
+  ns_addrinfo info;
+  struct addrinfo hints;
+
+  my_data_ptr = my_data.get();
+
+  memset(&hints, 0, sizeof(struct addrinfo));
+  hints.ai_family = AF_UNSPEC;     /* Allow IPv4 or IPv6 */
+  hints.ai_socktype = SOCK_DGRAM;  /* Datagram socket */
+  hints.ai_flags = AI_PASSIVE;     /* For wildcard IP address */
+  hints.ai_protocol = 0;           /* Any protocol */
+  hints.ai_canonname = nullptr;
+  hints.ai_addr = nullptr;
+  hints.ai_next = nullptr;
+
+  ASSERT_EQ(0, info.get(uv_default_loop(),
+                        gettaddrinfo_void_data_cb,
+                        valid_name,
+                        nullptr,
+                        &hints,
+                        wp));
+
+  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));
+
+  my_data_ptr = nullptr;
+  make_valgrind_happy();
+}
diff --git a/test/test-async-wp.cc b/test/test-async-wp.cc
new file mode 100644
index 0000000..c01670a
--- /dev/null
+++ b/test/test-async-wp.cc
@@ -0,0 +1,93 @@
+#include "../include/nsuv-inl.h"
+#include "./catch.hpp"
+#include "./helpers.h"
+
+#include <atomic>
+
+using nsuv::ns_thread;
+using nsuv::ns_async;
+using nsuv::ns_mutex;
+using nsuv::ns_prepare;
+
+struct resources {
+  ns_thread* thread;
+  ns_async* async;
+  ns_mutex* mutex;
+  ns_prepare* prepare;
+};
+
+static std::atomic<int> async_cb_called;
+static int prepare_cb_called;
+static int close_cb_called;
+
+
+static void thread_cb(ns_thread*, std::weak_ptr<resources> wp) {
+  auto res = wp.lock();
+  ASSERT(res);
+  for (;;) {
+    res->mutex->lock();
+    res->mutex->unlock();
+    if (async_cb_called == 3) {
+      break;
+    }
+    CHECK(0 == res->async->send());
+    uv_sleep(0);
+  }
+}
+
+
+template <typename H_T>
+static void close_cb(H_T* handle) {
+  CHECK(handle != nullptr);
+  close_cb_called++;
+}
+
+
+static void async_cb(ns_async* handle, std::weak_ptr<resources> wp) {
+  auto res = wp.lock();
+  ASSERT(res);
+  CHECK(handle == res->async);
+  res->mutex->lock();
+  res->mutex->unlock();
+  if (++async_cb_called == 3) {
+    res->async->close(close_cb);
+    res->prepare->close(close_cb);
+  }
+}
+
+
+static void prepare_cb(ns_prepare* handle, std::weak_ptr<resources> wp) {
+  auto res = wp.lock();
+  ASSERT(res);
+  if (prepare_cb_called++)
+    return;
+  CHECK(handle == res->prepare);
+  CHECK(0 == res->thread->create(thread_cb, wp));
+  res->mutex->unlock();
+}
+
+
+TEST_CASE("async_operations_wp", "[async]") {
+  ns_thread thread;
+  ns_async async;
+  ns_mutex mutex;
+  ns_prepare prepare;
+  std::shared_ptr<resources> sp(
+      new resources{ &thread, &async, &mutex, &prepare });
+  std::weak_ptr<resources> res = sp;
+
+  ASSERT_EQ(0, prepare.init(uv_default_loop()));
+  ASSERT_EQ(0, prepare.start(prepare_cb, res));
+  ASSERT_EQ(0, async.init(uv_default_loop(), async_cb, res));
+  ASSERT_EQ(0, mutex.init());
+
+  mutex.lock();
+
+  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));
+  ASSERT_LT(0, prepare_cb_called);
+  ASSERT_EQ(3, async_cb_called);
+  ASSERT_EQ(2, close_cb_called);
+  ASSERT_EQ(0, thread.join());
+
+  make_valgrind_happy();
+}
diff --git a/test/test-tcp-close-reset-wp.cc b/test/test-tcp-close-reset-wp.cc
new file mode 100644
index 0000000..9957b8b
--- /dev/null
+++ b/test/test-tcp-close-reset-wp.cc
@@ -0,0 +1,354 @@
+#include "../include/nsuv-inl.h"
+#include "./helpers.h"
+
+using nsuv::ns_connect;
+using nsuv::ns_tcp;
+using nsuv::ns_write;
+
+static uv_loop_t* loop;
+static ns_tcp tcp_server;
+static ns_tcp tcp_client;
+static ns_tcp tcp_accepted;
+static ns_connect<ns_tcp> connect_req;
+static uv_shutdown_t shutdown_req;
+static ns_write<ns_tcp> write_reqs[4];
+
+static int client_close;
+static int shutdown_before_close;
+
+static int write_cb_called;
+static int close_cb_called;
+static int shutdown_cb_called;
+
+static void connect_cb(ns_connect<ns_tcp>*, int, std::weak_ptr<int>);
+static void write_cb(ns_write<ns_tcp>*, int, std::weak_ptr<int>);
+static void close_cb(ns_tcp*, std::weak_ptr<int>);
+static void shutdown_cb(uv_shutdown_t*, int);
+
+static int read_size;
+
+static char ping_cstr[] = "PING";
+
+static void zero_global_values() {
+  client_close = 0;
+  shutdown_before_close = 0;
+  write_cb_called = 0;
+  close_cb_called = 0;
+  shutdown_cb_called = 0;
+  read_size = 0;
+}
+
+
+static void do_write(ns_tcp* handle, std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  uv_buf_t buf;
+  unsigned i;
+  int r;
+
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+
+  buf = uv_buf_init(ping_cstr, 4);
+  for (i = 0; i < ARRAY_SIZE(write_reqs); i++) {
+    r = handle->write(&write_reqs[i], &buf, 1, write_cb, d);
+    ASSERT(r == 0);
+  }
+}
+
+
+static void do_close(ns_tcp* handle, std::weak_ptr<int> d) {
+  auto sp = d.lock();
+
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+
+  if (shutdown_before_close == 1) {
+    ASSERT(0 == uv_shutdown(
+        &shutdown_req, handle->base_stream(), shutdown_cb));
+    ASSERT(UV_EINVAL == handle->close_reset(close_cb, d));
+  } else {
+    ASSERT(0 == handle->close_reset(close_cb, d));
+    ASSERT(UV_ENOTCONN ==
+        uv_shutdown(&shutdown_req, handle->base_stream(), shutdown_cb));
+  }
+
+  tcp_server.close();
+}
+
+static void alloc_cb(ns_tcp*, size_t, uv_buf_t* buf, std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  static char slab[1024];
+
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+
+  buf->base = slab;
+  buf->len = sizeof(slab);
+}
+
+static void read_cb2(ns_tcp* stream,
+                     ssize_t nread,
+                     const uv_buf_t*,
+                     std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+
+  ASSERT(stream == &tcp_client);
+  if (nread == UV_EOF)
+    stream->close();
+}
+
+
+static void connect_cb(ns_connect<ns_tcp>* conn_req,
+                       int,
+                       std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+
+  ASSERT(conn_req == &connect_req);
+  ASSERT(0 == tcp_client.read_start(alloc_cb, read_cb2, d));
+  do_write(&tcp_client, d);
+  if (client_close)
+    do_close(&tcp_client, d);
+}
+
+
+static void write_cb(ns_write<ns_tcp>* req, int, std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+
+  /* write callbacks should run before the close callback */
+  ASSERT(close_cb_called == 0);
+  ASSERT(req->handle() == &tcp_client);
+  write_cb_called++;
+}
+
+
+static void close_cb(ns_tcp* handle, std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+
+  if (client_close)
+    ASSERT(handle == &tcp_client);
+  else
+    ASSERT(handle == &tcp_accepted);
+
+  close_cb_called++;
+}
+
+
+static void shutdown_cb(uv_shutdown_t* req, int) {
+  if (client_close)
+    ASSERT(req->handle == tcp_client.base_stream());
+  else
+    ASSERT(req->handle == tcp_accepted.base_stream());
+
+  shutdown_cb_called++;
+}
+
+
+static void read_cb(ns_tcp* stream,
+                    ssize_t nread,
+                    const uv_buf_t*,
+                    std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+
+  ASSERT(stream == &tcp_accepted);
+  if (nread < 0) {
+    stream->close();
+  } else {
+    read_size += nread;
+    if (read_size == 16 && client_close == 0)
+      do_close(&tcp_accepted, d);
+  }
+}
+
+
+static void connection_cb(ns_tcp* server, int status, std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+
+  ASSERT(status == 0);
+
+  ASSERT(0 == tcp_accepted.init(loop));
+  ASSERT(0 == server->accept(&tcp_accepted));
+
+  ASSERT(0 == tcp_accepted.read_start(alloc_cb, read_cb, d));
+}
+
+
+static void start_server(uv_loop_t* loop,
+                         ns_tcp* handle,
+                         std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  struct sockaddr_in addr;
+  int r;
+
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+
+  ASSERT(0 == uv_ip4_addr("127.0.0.1", kTestPort, &addr));
+
+  r = handle->init(loop);
+  ASSERT(r == 0);
+
+  r = handle->bind(SOCKADDR_CONST_CAST(&addr), 0);
+  ASSERT(r == 0);
+
+  r = handle->listen(128, connection_cb, d);
+  ASSERT(r == 0);
+}
+
+
+static void do_connect(uv_loop_t* loop,
+                       ns_tcp* tcp_client,
+                       std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  struct sockaddr_in addr;
+  int r;
+
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+
+  ASSERT(0 == uv_ip4_addr("127.0.0.1", kTestPort, &addr));
+
+  r = tcp_client->init(loop);
+  ASSERT(r == 0);
+
+  r = tcp_client->connect(&connect_req,
+                          SOCKADDR_CONST_CAST(&addr),
+                          connect_cb,
+                          d);
+  ASSERT(r == 0);
+}
+
+
+/* Check that pending write requests have their callbacks
+ * invoked when the handle is closed.
+ */
+TEST_CASE("tcp_close_reset_client_wp", "[tcp]") {
+  std::shared_ptr<int> sp = std::make_shared<int>(42);
+  std::weak_ptr<int> data = sp;
+  int r;
+
+  zero_global_values();
+
+  loop = uv_default_loop();
+
+  start_server(loop, &tcp_server, data);
+
+  client_close = 1;
+  shutdown_before_close = 0;
+
+  do_connect(loop, &tcp_client, data);
+
+  ASSERT(write_cb_called == 0);
+  ASSERT(close_cb_called == 0);
+  ASSERT(shutdown_cb_called == 0);
+
+  r = uv_run(loop, UV_RUN_DEFAULT);
+  ASSERT(r == 0);
+
+  ASSERT(write_cb_called == 4);
+  ASSERT(close_cb_called == 1);
+  ASSERT(shutdown_cb_called == 0);
+
+  make_valgrind_happy();
+}
+
+TEST_CASE("tcp_close_reset_client_after_shutdown_wp", "[tcp]") {
+  std::shared_ptr<int> sp = std::make_shared<int>(42);
+  std::weak_ptr<int> data = sp;
+  int r;
+
+  zero_global_values();
+
+  loop = uv_default_loop();
+
+  start_server(loop, &tcp_server, data);
+
+  client_close = 1;
+  shutdown_before_close = 1;
+
+  do_connect(loop, &tcp_client, data);
+
+  ASSERT(write_cb_called == 0);
+  ASSERT(close_cb_called == 0);
+  ASSERT(shutdown_cb_called == 0);
+
+  r = uv_run(loop, UV_RUN_DEFAULT);
+  ASSERT(r == 0);
+
+  ASSERT(write_cb_called == 4);
+  ASSERT(close_cb_called == 0);
+  ASSERT(shutdown_cb_called == 1);
+
+  make_valgrind_happy();
+}
+
+TEST_CASE("tcp_close_reset_accepted_wp", "[tcp]") {
+  std::shared_ptr<int> sp = std::make_shared<int>(42);
+  std::weak_ptr<int> data = sp;
+  int r;
+
+  zero_global_values();
+
+  loop = uv_default_loop();
+
+  start_server(loop, &tcp_server, data);
+
+  client_close = 0;
+  shutdown_before_close = 0;
+
+  do_connect(loop, &tcp_client, data);
+
+  ASSERT(write_cb_called == 0);
+  ASSERT(close_cb_called == 0);
+  ASSERT(shutdown_cb_called == 0);
+
+  r = uv_run(loop, UV_RUN_DEFAULT);
+  ASSERT(r == 0);
+
+  ASSERT(write_cb_called == 4);
+  ASSERT(close_cb_called == 1);
+  ASSERT(shutdown_cb_called == 0);
+
+  make_valgrind_happy();
+}
+
+TEST_CASE("tcp_close_reset_accepted_after_shutdown_wp", "[tcp]") {
+  std::shared_ptr<int> sp = std::make_shared<int>(42);
+  std::weak_ptr<int> data = sp;
+  int r;
+
+  zero_global_values();
+
+  loop = uv_default_loop();
+
+  start_server(loop, &tcp_server, data);
+
+  client_close = 0;
+  shutdown_before_close = 1;
+
+  do_connect(loop, &tcp_client, data);
+
+  ASSERT(write_cb_called == 0);
+  ASSERT(close_cb_called == 0);
+  ASSERT(shutdown_cb_called == 0);
+
+  r = uv_run(loop, UV_RUN_DEFAULT);
+  ASSERT(r == 0);
+
+  ASSERT(write_cb_called == 4);
+  ASSERT(close_cb_called == 0);
+  ASSERT(shutdown_cb_called == 1);
+
+  make_valgrind_happy();
+}
diff --git a/test/test-tcp-close-while-connecting-wp.cc b/test/test-tcp-close-while-connecting-wp.cc
new file mode 100644
index 0000000..d41a200
--- /dev/null
+++ b/test/test-tcp-close-while-connecting-wp.cc
@@ -0,0 +1,99 @@
+#include "../include/nsuv-inl.h"
+#include "./helpers.h"
+
+using nsuv::ns_connect;
+using nsuv::ns_tcp;
+using nsuv::ns_timer;
+
+static ns_timer timer1_handle;
+static ns_timer timer2_handle;
+static ns_tcp tcp_handle;
+
+static int connect_cb_called;
+static int timer1_cb_called;
+static int close_cb_called;
+static int netunreach_errors;
+
+
+static void close_cb(ns_timer*, std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+  close_cb_called++;
+}
+
+
+static void close_cb(ns_tcp*, std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+  close_cb_called++;
+}
+
+
+static void connect_cb(ns_connect<ns_tcp>*,
+                       int status,
+                       std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+  /* The expected error is UV_ECANCELED but the test tries to connect to what
+   * is basically an arbitrary address in the expectation that no network path
+   * exists, so UV_ENETUNREACH is an equally plausible outcome.
+   */
+  ASSERT((status == UV_ECANCELED || status == UV_ENETUNREACH));
+  uv_timer_stop(&timer2_handle);
+  connect_cb_called++;
+  if (status == UV_ENETUNREACH)
+    netunreach_errors++;
+}
+
+
+static void timer1_cb(ns_timer* handle, std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+  handle->close(close_cb, d);
+  tcp_handle.close(close_cb, d);
+  timer1_cb_called++;
+}
+
+
+static void timer2_cb(ns_timer*, std::weak_ptr<int>) {
+  FAIL("should not be called");
+}
+
+
+TEST_CASE("tcp_close_while_connecting_wp", "[tcp]") {
+  std::shared_ptr<int> sp = std::make_shared<int>(42);
+  std::weak_ptr<int> data = sp;
+  ns_connect<ns_tcp> connect_req;
+  struct sockaddr_in addr;
+  uv_loop_t* loop;
+  int r;
+
+  loop = uv_default_loop();
+  ASSERT(0 == uv_ip4_addr("1.2.3.4", kTestPort, &addr));
+  ASSERT(0 == tcp_handle.init(loop));
+  r = tcp_handle.connect(&connect_req,
+                         SOCKADDR_CONST_CAST(&addr),
+                         connect_cb,
+                         data);
+  if (r == UV_ENETUNREACH)
+    RETURN_SKIP("Network unreachable.");
+  ASSERT(r == 0);
+  ASSERT(0 == timer1_handle.init(loop));
+  ASSERT(0 == timer1_handle.start(timer1_cb, 1, 0, data));
+  ASSERT(0 == timer2_handle.init(loop));
+  ASSERT(0 == timer2_handle.start(timer2_cb, 86400 * 1000, 0, data));
+  ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));
+
+  ASSERT(connect_cb_called == 1);
+  ASSERT(timer1_cb_called == 1);
+  ASSERT(close_cb_called == 2);
+
+  make_valgrind_happy();
+
+  if (netunreach_errors > 0)
+    RETURN_SKIP("Network unreachable.");
+}
diff --git a/test/test-tcp-close-wp.cc b/test/test-tcp-close-wp.cc
new file mode 100644
index 0000000..fc900d3
--- /dev/null
+++ b/test/test-tcp-close-wp.cc
@@ -0,0 +1,129 @@
+#include "../include/nsuv-inl.h"
+#include "./helpers.h"
+
+using nsuv::ns_connect;
+using nsuv::ns_tcp;
+using nsuv::ns_write;
+
+constexpr size_t num_write_reqs = 32;
+
+static ns_tcp tcp_handle;
+static ns_connect<ns_tcp> connect_req;
+
+static int write_cb_called;
+static int close_cb_called;
+
+static void connect_cb(ns_connect<ns_tcp>*, int, std::weak_ptr<int>);
+static void write_cb(ns_write<ns_tcp>*, int, std::weak_ptr<int>);
+static void close_cb(ns_tcp*, std::weak_ptr<int>);
+
+
+static void connect_cb(ns_connect<ns_tcp>*,
+                       int status,
+                       std::weak_ptr<int> d) {
+  ns_write<ns_tcp>* req;
+  auto sp = d.lock();
+
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+
+  ASSERT_EQ(0, status);
+  for (size_t i = 0; i < num_write_reqs; i++) {
+    char* ping = new char[4]();  // "PING"
+    uv_buf_t buf = uv_buf_init(ping, 4);
+    req = new ns_write<ns_tcp>();
+    ASSERT_NOT_NULL(req);
+    ASSERT_EQ(0, tcp_handle.write(req, &buf, 1, write_cb, d));
+  }
+
+  tcp_handle.close(close_cb, d);
+}
+
+
+static void write_cb(ns_write<ns_tcp>* req,
+                     int status,
+                     std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  const uv_buf_t* bufs;
+  size_t nbufs;
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+  /* write callbacks should run before the close callback */
+  ASSERT_EQ(0, status);
+  ASSERT_EQ(0, close_cb_called);
+  ASSERT_EQ(req->handle(), &tcp_handle);
+  write_cb_called++;
+  bufs = req->bufs();
+  nbufs = req->size();
+  for (size_t i = 0; i < nbufs; i++) {
+    delete[] bufs[i].base;
+  }
+  delete req;
+}
+
+
+static void close_cb(ns_tcp* handle, std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+  ASSERT_EQ(handle, &tcp_handle);
+  close_cb_called++;
+}
+
+
+static void connection_cb(ns_tcp*, int status, std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+  ASSERT_EQ(0, status);
+}
+
+
+static void start_server(uv_loop_t* loop,
+                         ns_tcp* handle,
+                         std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  struct sockaddr_in addr;
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+  ASSERT_EQ(0, uv_ip4_addr("127.0.0.1", kTestPort, &addr));
+  ASSERT_EQ(0, handle->init(loop));
+  ASSERT_EQ(0,
+            handle->bind(reinterpret_cast<struct sockaddr*>(&addr), 0));
+  ASSERT_EQ(0, handle->listen(128, connection_cb, d));
+  handle->unref();
+}
+
+
+/* Check that pending write requests have their callbacks
+ * invoked when the handle is closed.
+ */
+TEST_CASE("tcp_close_wp", "[tcp]") {
+  std::shared_ptr<int> sp = std::make_shared<int>(42);
+  std::weak_ptr<int> data = sp;
+  struct sockaddr_in addr;
+  ns_tcp tcp_server;
+
+  ASSERT_EQ(0, uv_ip4_addr("127.0.0.1", kTestPort, &addr));
+
+  /* We can't use the echo server, it doesn't handle ECONNRESET. */
+  start_server(uv_default_loop(), &tcp_server, data);
+
+  ASSERT_EQ(0, tcp_handle.init(uv_default_loop()));
+  ASSERT_EQ(0, tcp_handle.connect(
+      &connect_req,
+      reinterpret_cast<struct sockaddr*>(&addr),
+      connect_cb,
+      data));
+  ASSERT_EQ(0, write_cb_called);
+  ASSERT_EQ(0, close_cb_called);
+
+  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));
+
+  INFO("" << write_cb_called << " of " << num_write_reqs << " write reqs seen");
+
+  ASSERT_EQ(write_cb_called, num_write_reqs);
+  ASSERT_EQ(1, close_cb_called);
+
+  make_valgrind_happy();
+}
diff --git a/test/test-tcp-close.cc b/test/test-tcp-close.cc
index aaae119..8291da3 100644
--- a/test/test-tcp-close.cc
+++ b/test/test-tcp-close.cc
@@ -8,6 +8,7 @@ using nsuv::ns_write;
 constexpr size_t num_write_reqs = 32;
 
 static ns_tcp tcp_handle;
+static ns_connect<ns_tcp> connect_req;
 
 static int write_cb_called;
 static int close_cb_called;
@@ -15,11 +16,6 @@
 static void connect_cb(ns_connect<ns_tcp>* req, int status);
 static void write_cb(ns_write<ns_tcp>* req, int status);
 static void close_cb(ns_tcp* handle);
-static void connect_cb_wp(ns_connect<ns_tcp>* req,
-                          int status,
-                          std::weak_ptr<int>);
-static void write_cb_wp(ns_write<ns_tcp>* req, int status, std::weak_ptr<int>);
-static void close_cb_wp(ns_tcp* handle, std::weak_ptr<int>);
 
 
 static void connect_cb(ns_connect<ns_tcp>*, int status) {
@@ -81,13 +77,9 @@ static void start_server(uv_loop_t* loop, ns_tcp* handle) {
  * invoked when the handle is closed.
  */
 TEST_CASE("tcp_close", "[tcp]") {
-  ns_connect<ns_tcp> connect_req;
   struct sockaddr_in addr;
   ns_tcp tcp_server;
 
-  write_cb_called = 0;
-  close_cb_called = 0;
-
   ASSERT_EQ(0, uv_ip4_addr("127.0.0.1", kTestPort, &addr));
 
   /* We can't use the echo server, it doesn't handle ECONNRESET. */
@@ -110,115 +102,3 @@ TEST_CASE("tcp_close", "[tcp]") {
 
   make_valgrind_happy();
 }
-
-
-
-static void connect_cb_wp(ns_connect<ns_tcp>*,
-                          int status,
-                          std::weak_ptr<int> data) {
-  auto sp = data.lock();
-  ns_write<ns_tcp>* req;
-
-  ASSERT(sp);
-  ASSERT_EQ(42, *sp);
-  ASSERT_EQ(0, status);
-  for (size_t i = 0; i < num_write_reqs; i++) {
-    char* ping = new char[4]();  // "PING"
-    uv_buf_t buf = uv_buf_init(ping, 4);
-    req = new ns_write<ns_tcp>();
-    ASSERT_NOT_NULL(req);
-    ASSERT_EQ(0, tcp_handle.write(req, &buf, 1, write_cb_wp, data));
-  }
-
-  tcp_handle.close(close_cb_wp, data);
-}
-
-
-static void write_cb_wp(ns_write<ns_tcp>* req,
-                        int status,
-                        std::weak_ptr<int> data) {
-  auto sp = data.lock();
-  const uv_buf_t* bufs;
-  size_t nbufs;
-  ASSERT(sp);
-  ASSERT_EQ(42, *sp);
-  /* write callbacks should run before the close callback */
-  ASSERT_EQ(0, status);
-  ASSERT_EQ(0, close_cb_called);
-  ASSERT_EQ(req->handle(), &tcp_handle);
-  write_cb_called++;
-  bufs = req->bufs();
-  nbufs = req->size();
-  for (size_t i = 0; i < nbufs; i++) {
-    delete[] bufs[i].base;
-  }
-  delete req;
-}
-
-
-static void close_cb_wp(ns_tcp* handle, std::weak_ptr<int> data) {
-  auto sp = data.lock();
-  ASSERT(sp);
-  ASSERT_EQ(42, *sp);
-  ASSERT_EQ(handle, &tcp_handle);
-  close_cb_called++;
-}
-
-
-static void connection_cb_wp(ns_tcp*, int status, std::weak_ptr<int> data) {
-  auto sp = data.lock();
-  ASSERT(sp);
-  ASSERT_EQ(42, *sp);
-  ASSERT_EQ(0, status);
-}
-
-
-static void start_server_wp(uv_loop_t* loop,
-                            ns_tcp* handle,
-                            std::weak_ptr<int> data) {
-  struct sockaddr_in addr;
-  ASSERT_EQ(0, uv_ip4_addr("127.0.0.1", kTestPort, &addr));
-  ASSERT_EQ(0, handle->init(loop));
-  ASSERT_EQ(0,
-            handle->bind(reinterpret_cast<struct sockaddr*>(&addr), 0));
-  ASSERT_EQ(0, handle->listen(128, connection_cb_wp, data));
-  handle->unref();
-}
-
-
-/* Check that pending write requests have their callbacks
- * invoked when the handle is closed.
- */
-TEST_CASE("tcp_close_wp", "[tcp]") {
-  std::shared_ptr<int> sp = std::make_shared<int>(42);
-  std::weak_ptr<int> data = sp;
-  ns_connect<ns_tcp> connect_req;
-  struct sockaddr_in addr;
-  ns_tcp tcp_server;
-
-  write_cb_called = 0;
-  close_cb_called = 0;
-
-  ASSERT_EQ(0, uv_ip4_addr("127.0.0.1", kTestPort, &addr));
-
-  /* We can't use the echo server, it doesn't handle ECONNRESET. */
-  start_server_wp(uv_default_loop(), &tcp_server, data);
-
-  ASSERT_EQ(0, tcp_handle.init(uv_default_loop()));
-  ASSERT_EQ(0, tcp_handle.connect(
-      &connect_req,
-      reinterpret_cast<struct sockaddr*>(&addr),
-      connect_cb_wp,
-      data));
-  ASSERT_EQ(0, write_cb_called);
-  ASSERT_EQ(0, close_cb_called);
-
-  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));
-
-  INFO("" << write_cb_called << " of " << num_write_reqs << "write reqs seen");
-
-  ASSERT_EQ(write_cb_called, num_write_reqs);
-  ASSERT_EQ(1, close_cb_called);
-
-  make_valgrind_happy();
-}
diff --git a/test/test-thread-wp.cc b/test/test-thread-wp.cc
new file mode 100644
index 0000000..1bc59d6
--- /dev/null
+++ b/test/test-thread-wp.cc
@@ -0,0 +1,131 @@
+#include "../include/nsuv-inl.h"
+#include "./helpers.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>  /* memset */
+
+#ifdef __POSIX__
+#include <pthread.h>
+#endif
+
+using nsuv::ns_thread;
+
+static int thread_called;
+static uv_key_t tls_key;
+
+
+static void thread_entry(ns_thread* thread, std::weak_ptr<size_t> d) {
+  auto sp = d.lock();
+  ASSERT(sp);
+  CHECK(!thread->equal(uv_thread_self()));
+  CHECK(*sp == 42);
+  thread_called++;
+}
+
+
+TEST_CASE("thread_create_wp", "[thread]") {
+  std::shared_ptr<size_t> sp = std::make_shared<size_t>(42);
+  std::weak_ptr<size_t> data = sp;
+  ns_thread thread;
+  ASSERT_EQ(0, thread.create(thread_entry, data));
+  ASSERT_EQ(0, thread.join());
+  ASSERT_EQ(1, thread_called);
+  ASSERT(thread.equal(uv_thread_self()));
+}
+
+
+static void tls_thread(ns_thread* arg, std::weak_ptr<size_t> d) {
+  auto sp = d.lock();
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+  CHECK(nullptr == uv_key_get(&tls_key));
+  uv_key_set(&tls_key, arg);
+  CHECK(arg == uv_key_get(&tls_key));
+  uv_key_set(&tls_key, nullptr);
+  CHECK(nullptr == uv_key_get(&tls_key));
+}
+
+
+TEST_CASE("thread_local_storage_wp", "[thread]") {
+  std::shared_ptr<size_t> sp = std::make_shared<size_t>(42);
+  std::weak_ptr<size_t> data = sp;
+  char name[] = "main";
+  ns_thread threads[2];
+  ASSERT_EQ(0, uv_key_create(&tls_key));
+  ASSERT_NULL(uv_key_get(&tls_key));
+  uv_key_set(&tls_key, name);
+  ASSERT_EQ(name, uv_key_get(&tls_key));
+  ASSERT_EQ(0, threads[0].create(tls_thread, data));
+  ASSERT_EQ(0, threads[1].create(tls_thread, data));
+  ASSERT_EQ(0, threads[0].join());
+  ASSERT_EQ(0, threads[1].join());
+  uv_key_delete(&tls_key);
+}
+
+
+static void thread_check_stack(ns_thread*,
+                               std::weak_ptr<uv_thread_options_t> d) {
+  auto arg = d.lock();
+  ASSERT(arg);
+#if defined(__APPLE__)
+  size_t expected;
+  expected = arg == nullptr ? 0 : arg->stack_size;
+  /* 512 kB is the default stack size of threads other than the main thread
+   * on MacOS. */
+  if (expected == 0)
+    expected = 512 * 1024;
+  CHECK(pthread_get_stacksize_np(pthread_self()) >= expected);
+#elif defined(__linux__) && defined(__GLIBC__)
+  size_t expected;
+  struct rlimit lim;
+  size_t stack_size;
+  pthread_attr_t attr;
+  CHECK(0 == getrlimit(RLIMIT_STACK, &lim));
+  if (lim.rlim_cur == RLIM_INFINITY)
+    lim.rlim_cur = 2 << 20;  /* glibc default. */
+  CHECK(0 == pthread_getattr_np(pthread_self(), &attr));
+  CHECK(0 == pthread_attr_getstacksize(&attr, &stack_size));
+  expected = arg == nullptr ? 0 : arg->stack_size;
+  if (expected == 0)
+    expected = (size_t)lim.rlim_cur;
+  CHECK(stack_size >= expected);
+  CHECK(0 == pthread_attr_destroy(&attr));
+#endif
+}
+
+
+TEST_CASE("thread_stack_size_explicit_wp", "[thread]") {
+  std::shared_ptr<uv_thread_options_t> options =
+      std::make_shared<uv_thread_options_t>();
+  std::weak_ptr<uv_thread_options_t> wp = options;
+  ns_thread thread;
+
+  options->flags = UV_THREAD_HAS_STACK_SIZE;
+  options->stack_size = 1024 * 1024;
+  ASSERT_EQ(0, thread.create_ex(options.get(), thread_check_stack, wp));
+  ASSERT_EQ(0, thread.join());
+
+  options->stack_size = 8 * 1024 * 1024;  // larger than most default os sizes
+  ASSERT_EQ(0, thread.create_ex(options.get(), thread_check_stack, wp));
+  ASSERT_EQ(0, thread.join());
+
+  options->stack_size = 0;
+  ASSERT_EQ(0, thread.create_ex(options.get(), thread_check_stack, wp));
+  ASSERT_EQ(0, thread.join());
+
+#ifdef PTHREAD_STACK_MIN
+  options->stack_size = PTHREAD_STACK_MIN - 42;  // unaligned size
+  ASSERT_EQ(0, thread.create_ex(options.get(), thread_check_stack, wp));
+  ASSERT_EQ(0, thread.join());
+
+  options->stack_size = PTHREAD_STACK_MIN / 2 - 42;  // unaligned size
+  ASSERT_EQ(0, thread.create_ex(options.get(), thread_check_stack, wp));
+  ASSERT_EQ(0, thread.join());
+#endif
+
+  // unaligned size, should be larger than PTHREAD_STACK_MIN
+  options->stack_size = 1234567;
+  ASSERT_EQ(0, thread.create_ex(options.get(), thread_check_stack, wp));
+  ASSERT_EQ(0, thread.join());
+}
diff --git a/test/test-thread.cc b/test/test-thread.cc
index cedd336..41d119d 100644
--- a/test/test-thread.cc
+++ b/test/test-thread.cc
@@ -139,7 +139,6 @@ static void thread_entry(ns_thread* thread, size_t* arg) {
 TEST_CASE("thread_create", "[thread]") {
   ns_thread thread;
   size_t arg[] = { 42 };
-  thread_called = 0;
   ASSERT_EQ(0, thread.create(thread_entry, arg));
   ASSERT_EQ(0, thread.join());
   ASSERT_EQ(1, thread_called);
@@ -147,27 +146,6 @@
 }
 
 
-static void thread_entry_wp(ns_thread* thread, std::weak_ptr<size_t> arg) {
-  CHECK(!thread->equal(uv_thread_self()));
-  auto shared_arg = arg.lock();
-  CHECK(shared_arg);
-  CHECK(*shared_arg == 42);
-  thread_called++;
-}
-
-
-TEST_CASE("thread_create_wp", "[thread]") {
-  ns_thread thread;
-  std::shared_ptr<size_t> arg = std::make_shared<size_t>(42);
-  std::weak_ptr<size_t> weak_arg = arg;
-  thread_called = 0;
-  ASSERT_EQ(0, thread.create(thread_entry_wp, weak_arg));
-  ASSERT_EQ(0, thread.join());
-  ASSERT_EQ(1, thread_called);
-  ASSERT(thread.equal(uv_thread_self()));
-}
-
-
 static void tls_thread(ns_thread* arg) {
   CHECK(nullptr == uv_key_get(&tls_key));
   uv_key_set(&tls_key, arg);
@@ -222,39 +200,6 @@ static void thread_check_stack(ns_thread*, uv_thread_options_t* arg) {
 }
 
 
-static void thread_check_stack_wp(ns_thread*,
-                                  std::weak_ptr<void> arg) {
-#if defined(__APPLE__)
-  size_t expected;
-  std::shared_ptr<void> shared_arg = arg.lock();
-  expected = shared_arg == nullptr ? 0 :
-      (reinterpret_cast<uv_thread_options_t*>(arg.get()))->stack_size;
-  /* 512 kB is the default stack size of threads other than the main thread
-   * on MacOS. */
-  if (expected == 0)
-    expected = 512 * 1024;
-  CHECK(pthread_get_stacksize_np(pthread_self()) >= expected);
-#elif defined(__linux__) && defined(__GLIBC__)
-  size_t expected;
-  struct rlimit lim;
-  size_t stack_size;
-  pthread_attr_t attr;
-  CHECK(0 == getrlimit(RLIMIT_STACK, &lim));
-  if (lim.rlim_cur == RLIM_INFINITY)
-    lim.rlim_cur = 2 << 20;  /* glibc default. */
-  CHECK(0 == pthread_getattr_np(pthread_self(), &attr));
-  CHECK(0 == pthread_attr_getstacksize(&attr, &stack_size));
-  std::shared_ptr<void> shared_arg = arg.lock();
-  expected = shared_arg == nullptr ? 0 :
-      (reinterpret_cast<uv_thread_options_t*>(shared_arg.get()))->stack_size;
-  if (expected == 0)
-    expected = (size_t)lim.rlim_cur;
-  CHECK(stack_size >= expected);
-  CHECK(0 == pthread_attr_destroy(&attr));
-#endif
-}
-
-
 TEST_CASE("thread_stack_size", "[thread]") {
   ns_thread thread;
   uv_thread_options_t* arg = nullptr;
@@ -263,15 +208,6 @@
 }
 
 
-TEST_CASE("thread_stack_size_sp", "[thread]") {
-  ns_thread thread;
-  std::shared_ptr<void> arg = { nullptr };
-  std::weak_ptr<void> weak_arg = arg;
-  ASSERT_EQ(0, thread.create(thread_check_stack_wp, weak_arg));
-  ASSERT_EQ(0, thread.join());
-}
-
-
 TEST_CASE("thread_stack_size_explicit", "[thread]") {
   ns_thread thread;
   uv_thread_options_t options;
diff --git a/test/test-timer-wp.cc b/test/test-timer-wp.cc
new file mode 100644
index 0000000..9fe6cc6
--- /dev/null
+++ b/test/test-timer-wp.cc
@@ -0,0 +1,303 @@
+#include "../include/nsuv-inl.h"
+#include "./catch.hpp"
+#include "./helpers.h"
+
+using nsuv::ns_timer;
+
+static size_t once_cb_called = 0;
+static size_t once_close_cb_called = 0;
+static size_t repeat_cb_called = 0;
+static size_t repeat_close_cb_called = 0;
+static size_t order_cb_called = 0;
+static uint64_t start_time;
+static uint64_t timer_early_check_expected_time;
+static ns_timer tiny_timer;
+static ns_timer huge_timer1;
+static ns_timer huge_timer2;
+
+
+static void once_close_cb(ns_timer* handle, std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+
+  INFO("ONCE_CLOSE_CB");
+
+  ASSERT_NOT_NULL(handle);
+  ASSERT_EQ(0, handle->is_active());
+
+  once_close_cb_called++;
+}
+
+
+static void once_cb(ns_timer* handle, std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+
+  INFO("ONCE_CB " << once_cb_called);
+
+  ASSERT_NOT_NULL(handle);
+  ASSERT_EQ(0, handle->is_active());
+
+  once_cb_called++;
+
+  handle->close(once_close_cb, d);
+
+  /* Just call this randomly for the code coverage. */
+  uv_update_time(uv_default_loop());
+}
+
+
+static void repeat_close_cb(ns_timer* handle, std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+
+  INFO("REPEAT_CLOSE_CB");
+  ASSERT_NOT_NULL(handle);
+  repeat_close_cb_called++;
+}
+
+
+static void repeat_cb(ns_timer* handle, std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+
+  INFO("REPEAT_CB");
+  ASSERT_NOT_NULL(handle);
+  ASSERT_EQ(1, handle->is_active());
+
+  repeat_cb_called++;
+
+  if (repeat_cb_called == 5) {
+    handle->close(repeat_close_cb, d);
+  }
+}
+
+
+static void never_cb(ns_timer*, std::weak_ptr<int>) {
+  FAIL("never_cb should never be called");
+}
+
+
+TEST_CASE("timer_wp", "[timer]") {
+  constexpr size_t kTimersSize = 10;
+  std::shared_ptr<int> sp = std::make_shared<int>(42);
+  std::weak_ptr<int> data = sp;
+  ns_timer once_timers[kTimersSize];
+  ns_timer* once;
+  ns_timer repeat;
+  ns_timer never;
+
+  once_cb_called = 0;
+  start_time = uv_now(uv_default_loop());
+  ASSERT_LT(0, start_time);
+
+  /* Let 10 timers time out in 500 ms total. */
+  for (size_t i = 0; i < kTimersSize; i++) {
+    once = &once_timers[i];
+    ASSERT_EQ(0, once->init(uv_default_loop()));
+    ASSERT_EQ(0, once->start(once_cb, i * 50, 0, data));
+  }
+
+  /* The 11th timer is a repeating timer that runs 5 times */
+  ASSERT_EQ(0, repeat.init(uv_default_loop()));
+  ASSERT_EQ(0, repeat.start(repeat_cb, 100, 100, data));
+
+  /* The 12th timer should not do anything. */
+  ASSERT_EQ(0, never.init(uv_default_loop()));
+  ASSERT_EQ(0, never.start(never_cb, 100, 100, data));
+  ASSERT_EQ(0, never.stop());
+  never.unref();
+
+  uv_run(uv_default_loop(), UV_RUN_DEFAULT);
+
+  ASSERT_EQ(once_cb_called, 10);
+  ASSERT_EQ(once_close_cb_called, 10);
+  INFO("repeat_cb_called " << repeat_cb_called);
+  ASSERT_EQ(repeat_cb_called, 5);
+  ASSERT_EQ(repeat_close_cb_called, 1);
+
+  ASSERT_LE(500, uv_now(uv_default_loop()) - start_time);
+
+  make_valgrind_happy();
+}
+
+
+TEST_CASE("timer_start_twice_wp", "[timer]") {
+  std::shared_ptr<int> sp = std::make_shared<int>(42);
+  std::weak_ptr<int> data = sp;
+  ns_timer once;
+  once_cb_called = 0;
+  ASSERT_EQ(0, once.init(uv_default_loop()));
+  ASSERT_EQ(0, once.start(never_cb, 86400 * 1000, 0, data));
+  ASSERT_EQ(0, once.start(once_cb, 10, 0, data));
+  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));
+  ASSERT_EQ(1, once_cb_called);
+
+  make_valgrind_happy();
+}
+
+
+static void order_cb_a(ns_timer*, std::weak_ptr<size_t> d) {
+  auto check = d.lock();
+  ASSERT(check);
+  ASSERT_EQ(order_cb_called++, *check);
+}
+
+
+static void order_cb_b(ns_timer*, std::weak_ptr<size_t> d) {
+  auto check = d.lock();
+  ASSERT(check);
+  ASSERT_EQ(order_cb_called++, *check);
+}
+
+
+TEST_CASE("timer_order_wp", "[timer]") {
+  std::shared_ptr<size_t> sp1 = std::make_shared<size_t>(0);
+  std::shared_ptr<size_t> sp2 = std::make_shared<size_t>(1);
+  std::weak_ptr<size_t> first = sp1;
+  std::weak_ptr<size_t> second = sp2;
+  ns_timer handle_a;
+  ns_timer handle_b;
+
+  ASSERT_EQ(0, handle_a.init(uv_default_loop()));
+  ASSERT_EQ(0, handle_b.init(uv_default_loop()));
+
+  /* Test for starting handle_a then handle_b */
+  ASSERT_EQ(0, handle_a.start(order_cb_a, 0, 0, first));
+  ASSERT_EQ(0, handle_b.start(order_cb_b, 0, 0, second));
+  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));
+
+  ASSERT_EQ(order_cb_called, 2);
+
+  ASSERT_EQ(0, handle_a.stop());
+  ASSERT_EQ(0, handle_b.stop());
+
+  /* Test for starting handle_b then handle_a */
+  order_cb_called = 0;
+  ASSERT_EQ(0, handle_b.start(order_cb_b, 0, 0, first));
+
+  ASSERT_EQ(0, handle_a.start(order_cb_a, 0, 0, second));
+  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));
+
+  ASSERT_EQ(order_cb_called, 2);
+
+  make_valgrind_happy();
+}
+
+
+static void tiny_timer_cb(ns_timer* handle, std::weak_ptr<int> d) {
+  auto sp = d.lock();
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+  ASSERT_EQ(handle, &tiny_timer);
+  tiny_timer.close();
+  huge_timer1.close();
+  huge_timer2.close();
+}
+
+
+TEST_CASE("timer_huge_timeout_wp", "[timer]") {
+  std::shared_ptr<int> sp = std::make_shared<int>(42);
+  std::weak_ptr<int> data = sp;
+  ASSERT_EQ(0, tiny_timer.init(uv_default_loop()));
+  ASSERT_EQ(0, huge_timer1.init(uv_default_loop()));
+  ASSERT_EQ(0, huge_timer2.init(uv_default_loop()));
+  ASSERT_EQ(0, tiny_timer.start(tiny_timer_cb, 1, 0, data));
+  ASSERT_EQ(0, huge_timer1.start(tiny_timer_cb, 0xffffffffffffLL, 0, data));
+  ASSERT_EQ(0, huge_timer2.start(tiny_timer_cb, (uint64_t) -1, 0, data));
+  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));
+
+  make_valgrind_happy();
+}
+
+
+static void huge_repeat_cb(ns_timer* handle, std::weak_ptr<int> d) {
+  static size_t ncalls = 0;
+  auto sp = d.lock();
+
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+
+  if (ncalls == 0)
+    ASSERT_EQ(handle, &huge_timer1);
+  else
+    ASSERT_EQ(handle, &tiny_timer);
+
+  if (++ncalls == 10) {
+    tiny_timer.close();
+    huge_timer1.close();
+  }
+}
+
+
+TEST_CASE("timer_huge_repeat_wp", "[timer]") {
+  std::shared_ptr<int> sp = std::make_shared<int>(42);
+  std::weak_ptr<int> data = sp;
+  ASSERT_EQ(0, tiny_timer.init(uv_default_loop()));
+  ASSERT_EQ(0, huge_timer1.init(uv_default_loop()));
+  ASSERT_EQ(0, tiny_timer.start(huge_repeat_cb, 2, 2, data));
+  ASSERT_EQ(0, huge_timer1.start(huge_repeat_cb, 1, (uint64_t) -1, data));
+  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));
+
+  make_valgrind_happy();
+}
+
+
+static void timer_run_once_timer_cb(ns_timer*, std::weak_ptr<int> d) {
+  auto cb_called = d.lock();
+  ASSERT(cb_called);
+  *cb_called += 1;
+}
+
+
+TEST_CASE("timer_run_once_wp", "[timer]") {
+  std::shared_ptr<int> sp = std::make_shared<int>(0);
+  std::weak_ptr<int> cb_called = sp;
+  ns_timer timer_handle;
+
+  ASSERT_EQ(0, timer_handle.init(uv_default_loop()));
+  ASSERT_EQ(0, timer_handle.start(timer_run_once_timer_cb, 0, 0, cb_called));
+  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_ONCE));
+  ASSERT_EQ(1, *sp);
+
+  ASSERT_EQ(0, timer_handle.start(timer_run_once_timer_cb, 1, 0, cb_called));
+  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_ONCE));
+  ASSERT_EQ(2, *sp);
+
+  timer_handle.close();
+  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_ONCE));
+
+  make_valgrind_happy();
+}
+
+
+static void timer_early_check_cb(ns_timer*, std::weak_ptr<int> d) {
+  uint64_t hrtime = uv_hrtime() / 1000000;
+  auto sp = d.lock();
+  ASSERT(sp);
+  ASSERT_EQ(42, *sp);
+  ASSERT_GE(hrtime, timer_early_check_expected_time);
+}
+
+
+TEST_CASE("timer_early_check_wp", "[timer]") {
+  const uint64_t timeout_ms = 10;
+  std::shared_ptr<int> sp = std::make_shared<int>(42);
+  std::weak_ptr<int> data = sp;
+  ns_timer timer_handle;
+
+  timer_early_check_expected_time = uv_now(uv_default_loop()) + timeout_ms;
+
+  ASSERT_EQ(0, timer_handle.init(uv_default_loop()));
+  ASSERT_EQ(0, timer_handle.start(timer_early_check_cb, timeout_ms, 0, data));
+  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));
+
+  timer_handle.close();
+  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));
+
+  make_valgrind_happy();
+}
diff --git a/test/test-timer.cc b/test/test-timer.cc
index 6b31f4f..8019b71 100644
--- a/test/test-timer.cc
+++ b/test/test-timer.cc
@@ -147,8 +147,6 @@ TEST_CASE("timer_order", "[timer]") {
   ns_timer handle_a;
   ns_timer handle_b;
 
-  order_cb_called = 0;
-
   ASSERT_EQ(0, handle_a.init(uv_default_loop()));
   ASSERT_EQ(0, handle_b.init(uv_default_loop()));
 
@@ -175,56 +173,6 @@
 }
 
 
-static void order_cb_a_wp(ns_timer*, std::weak_ptr<size_t> check) {
-  auto val = check.lock();
-  ASSERT(val);
-  ASSERT_EQ(order_cb_called++, *val);
-}
-
-
-static void order_cb_b_wp(ns_timer*, std::weak_ptr<size_t> check) {
-  auto val = check.lock();
-  ASSERT(val);
-  ASSERT_EQ(order_cb_called++, *val);
-}
-
-
-TEST_CASE("timer_order_wp", "[timer]") {
-  std::shared_ptr<size_t> first = std::make_shared<size_t>(0);
-  std::shared_ptr<size_t> second = std::make_shared<size_t>(1);
-  std::weak_ptr<size_t> first_wp = first;
-  std::weak_ptr<size_t> second_wp = second;
-  ns_timer handle_a;
-  ns_timer handle_b;
-
-  order_cb_called = 0;
-
-  ASSERT_EQ(0, handle_a.init(uv_default_loop()));
-  ASSERT_EQ(0, handle_b.init(uv_default_loop()));
-
-  /* Test for starting handle_a then handle_b */
-  ASSERT_EQ(0, handle_a.start(order_cb_a_wp, 0, 0, first_wp));
-  ASSERT_EQ(0, handle_b.start(order_cb_b_wp, 0, 0, second_wp));
-  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));
-
-  ASSERT_EQ(order_cb_called, 2);
-
-  ASSERT_EQ(0, handle_a.stop());
-  ASSERT_EQ(0, handle_b.stop());
-
-  /* Test for starting handle_b then handle_a */
-  order_cb_called = 0;
-  ASSERT_EQ(0, handle_b.start(order_cb_b_wp, 0, 0, first_wp));
-
-  ASSERT_EQ(0, handle_a.start(order_cb_a_wp, 0, 0, second_wp));
-  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));
-
-  ASSERT_EQ(order_cb_called, 2);
-
-  make_valgrind_happy();
-}
-
-
 static void tiny_timer_cb(ns_timer* handle) {
   ASSERT_EQ(handle, &tiny_timer);
   tiny_timer.close();